file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
domain_randomization.py | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from random import randint
from typing import List, Mapping, Optional, Tuple, Union
import numpy as np
from init_args_serializer import Serializable
import pyrado
from pyrado.domain_randomization.domain_randomizer import DomainRandomizer
from pyrado.environment_wrappers.base import EnvWrapper
from pyrado.environment_wrappers.utils import all_envs, inner_env, remove_env
from pyrado.environments.base import Env
from pyrado.environments.sim_base import SimEnv
from pyrado.utils.input_output import completion_context, print_cbt
class DomainRandWrapper(EnvWrapper, Serializable):
"""Base class for environment wrappers which call a `DomainRandomizer` to randomize the domain parameters"""
def __init__(self, wrapped_env: Union[SimEnv, EnvWrapper], randomizer: Optional[DomainRandomizer]):
"""
Constructor
:param wrapped_env: environment to wrap
:param randomizer: `DomainRandomizer` object holding the probability distribution of all randomizable
domain parameters, pass `None` if you want to subclass wrapping another `DomainRandWrapper`
and use its randomizer
"""
if not isinstance(inner_env(wrapped_env), SimEnv):
raise pyrado.TypeErr(given=wrapped_env, expected_type=SimEnv)
if not isinstance(randomizer, DomainRandomizer) and randomizer is not None:
raise pyrado.TypeErr(given=randomizer, expected_type=DomainRandomizer)
Serializable._init(self, locals())
# Invoke EnvWrapper's constructor
super().__init__(wrapped_env)
self._randomizer = randomizer
@property
def randomizer(self) -> DomainRandomizer:
return self._randomizer
@randomizer.setter
def randomizer(self, randomizer: DomainRandomizer):
if not isinstance(randomizer, DomainRandomizer):
raise pyrado.TypeErr(given=randomizer, expected_type=DomainRandomizer)
self._randomizer = randomizer
class MetaDomainRandWrapper(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which wraps another `DomainRandWrapper` to adapt its parameters,
called domain distribution parameters.
"""
def __init__(self, wrapped_rand_env: DomainRandWrapper, dp_mapping: Mapping[int, Tuple[str, str]]):
"""
Constructor
:param wrapped_rand_env: randomized environment to wrap
:param dp_mapping: mapping from index of the numpy array (coming from the algorithm) to domain parameter name
(e.g. mass, length) and the domain distribution parameter (e.g. mean, std)
.. code-block:: python
# For the mapping arg use the this dict constructor
```
m = {0: ('name1', 'parameter_type1'), 1: ('name2', 'parameter_type2')}
```
"""
if not isinstance(wrapped_rand_env, DomainRandWrapper):
raise pyrado.TypeErr(given=wrapped_rand_env, expected_type=DomainRandWrapper)
Serializable._init(self, locals())
# Invoke the DomainRandWrapper's constructor
super().__init__(wrapped_rand_env, None)
self.dp_mapping = dp_mapping
@property
def randomizer(self) -> DomainRandomizer:
# Forward to the wrapped DomainRandWrapper
return self._wrapped_env.randomizer
@randomizer.setter
def randomizer(self, dr: DomainRandomizer):
# Forward to the wrapped DomainRandWrapper
self._wrapped_env.randomizer = dr
def adapt_randomizer(self, domain_distr_param_values: np.ndarray):
# Check the input dimension and reshape if necessary
if domain_distr_param_values.ndim == 1:
pass
elif domain_distr_param_values.ndim == 2:
domain_distr_param_values = domain_distr_param_values.ravel()
else:
raise pyrado.ShapeErr(given=domain_distr_param_values, expected_match=(1,))
# Reconfigure the wrapped environment's DomainRandomizer
for i, value in enumerate(domain_distr_param_values):
dp_name, ddp_name = self.dp_mapping.get(i)
self._wrapped_env.randomizer.adapt_one_distr_param(dp_name, ddp_name, value)
class DomainRandWrapperLive(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which randomized the wrapped env at every reset.
Thus every rollout is done with different domain parameters.
"""
def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if domain_param is None:
# No explicit specification of domain parameters, so randomizer is called to draw a parameter dict
self._randomizer.randomize(num_samples=1)
domain_param = self._randomizer.get_params(fmt="dict", dtype="numpy")
# Forward to EnvWrapper, which delegates to self._wrapped_env
return super().reset(init_state=init_state, domain_param=domain_param)
class DomainRandWrapperBuffer(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which randomized the wrapped env using a buffer of domain parameter sets.
At every call of the reset method this wrapper cycles through that buffer.
"""
def __init__(self, wrapped_env, randomizer: Optional[DomainRandomizer], selection: Optional[str] = "cyclic"):
"""
Constructor
:param wrapped_env: environment to wrap around
:param randomizer: `DomainRandomizer` object that manages the randomization. If `None`, the user has to set the
buffer manually, the circular reset however works the same way
:param selection: method to draw samples from the buffer, either cyclic or random
"""
if selection not in ["cyclic", "random"]:
raise pyrado.ValueErr(given=selection, eq_constraint="cyclic or random")
Serializable._init(self, locals())
# Invoke the DomainRandWrapper's constructor
super().__init__(wrapped_env, randomizer)
self._ring_idx = None
self._buffer = None
self.selection = selection
@property
def ring_idx(self) -> int:
"""Get the buffer's index."""
return self._ring_idx
@ring_idx.setter
def ring_idx(self, idx: int):
"""Set the buffer's index."""
if not (isinstance(idx, int) or not 0 <= idx < len(self._buffer)):
raise pyrado.ValueErr(given=idx, ge_constraint="0 (int)", l_constraint=len(self._buffer))
self._ring_idx = idx
@property
def selection(self) -> str:
"""Get the selection method."""
return self._selection
@selection.setter
def selection(self, selection: str):
"""Set the selection method."""
if selection not in ["cyclic", "random"]:
raise pyrado.ValueErr(given=selection, eq_constraint="cyclic or random")
self._selection = selection
def fill_buffer(self, num_domains: int):
"""
Fill the internal buffer with domains.
:param num_domains: number of randomized domain parameter sets to store in the buffer
"""
if self._randomizer is None:
raise pyrado.TypeErr(msg="The randomizer must not be None to call fill_buffer()!")
if not isinstance(num_domains, int) or num_domains < 0:
raise pyrado.ValueErr(given=num_domains, g_constraint="0 (int)")
self._randomizer.randomize(num_domains)
self._buffer = self._randomizer.get_params(-1, fmt="list", dtype="numpy")
self._ring_idx = 0
@property
def buffer(self):
"""Get the domain parameter buffer."""
return self._buffer
@buffer.setter
def buffer(self, buffer: Union[List[dict], dict]):
"""
Set the domain parameter buffer.
Depends on the way the buffer has been saved, see the `DomainRandomizer.get_params()` arguments.
:param buffer: list of dicts, each describing a domain ,or just one dict for one domain
"""
if not (isinstance(buffer, list) or isinstance(buffer, dict)):
raise pyrado.TypeErr(given=buffer, expected_type=[list, dict])
self._buffer = buffer
def | (self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if domain_param is None:
# No explicit specification of domain parameters, so randomizer is requested
if isinstance(self._buffer, dict):
# The buffer consists of one domain parameter set
domain_param = self._buffer
elif isinstance(self._buffer, list):
# The buffer consists of a list of domain parameter sets
domain_param = self._buffer[self._ring_idx] # first selection will be index 0
if self._selection == "cyclic":
self._ring_idx = (self._ring_idx + 1) % len(self._buffer)
elif self._selection == "random":
self._ring_idx = randint(0, len(self._buffer) - 1)
else:
raise pyrado.TypeErr(given=self._buffer, expected_type=[dict, list])
# Forward to EnvWrapper, which delegates to self._wrapped_env
return super().reset(init_state=init_state, domain_param=domain_param)
def _get_state(self, state_dict: dict):
super()._get_state(state_dict)
state_dict["buffer"] = self._buffer
state_dict["ring_idx"] = self._ring_idx
def _set_state(self, state_dict: dict, copying: bool = False):
super()._set_state(state_dict, copying)
self._buffer = state_dict["buffer"]
self._ring_idx = state_dict["ring_idx"]
def remove_all_dr_wrappers(env: Env, verbose: bool = False):
"""
Go through the environment chain and remove all wrappers of type `DomainRandWrapper` (and subclasses).
:param env: env chain with domain randomization wrappers
:param verbose: choose if status messages should be printed
:return: env chain without domain randomization wrappers
"""
while any(isinstance(subenv, DomainRandWrapper) for subenv in all_envs(env)):
if verbose:
with completion_context(
f"Found domain randomization wrapper of type {type(env).__name__}. Removing it now",
color="y",
bright=True,
):
env = remove_env(env, DomainRandWrapper)
else:
env = remove_env(env, DomainRandWrapper)
return env
| reset | identifier_name |
domain_randomization.py | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from random import randint
from typing import List, Mapping, Optional, Tuple, Union
import numpy as np
from init_args_serializer import Serializable
import pyrado
from pyrado.domain_randomization.domain_randomizer import DomainRandomizer
from pyrado.environment_wrappers.base import EnvWrapper
from pyrado.environment_wrappers.utils import all_envs, inner_env, remove_env
from pyrado.environments.base import Env
from pyrado.environments.sim_base import SimEnv
from pyrado.utils.input_output import completion_context, print_cbt
class DomainRandWrapper(EnvWrapper, Serializable):
"""Base class for environment wrappers which call a `DomainRandomizer` to randomize the domain parameters"""
def __init__(self, wrapped_env: Union[SimEnv, EnvWrapper], randomizer: Optional[DomainRandomizer]):
"""
Constructor
:param wrapped_env: environment to wrap
:param randomizer: `DomainRandomizer` object holding the probability distribution of all randomizable
domain parameters, pass `None` if you want to subclass wrapping another `DomainRandWrapper`
and use its randomizer
"""
if not isinstance(inner_env(wrapped_env), SimEnv):
raise pyrado.TypeErr(given=wrapped_env, expected_type=SimEnv)
if not isinstance(randomizer, DomainRandomizer) and randomizer is not None:
raise pyrado.TypeErr(given=randomizer, expected_type=DomainRandomizer)
Serializable._init(self, locals())
# Invoke EnvWrapper's constructor
super().__init__(wrapped_env)
self._randomizer = randomizer
@property
def randomizer(self) -> DomainRandomizer:
return self._randomizer
@randomizer.setter
def randomizer(self, randomizer: DomainRandomizer):
if not isinstance(randomizer, DomainRandomizer):
raise pyrado.TypeErr(given=randomizer, expected_type=DomainRandomizer)
self._randomizer = randomizer
class MetaDomainRandWrapper(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which wraps another `DomainRandWrapper` to adapt its parameters,
called domain distribution parameters.
"""
def __init__(self, wrapped_rand_env: DomainRandWrapper, dp_mapping: Mapping[int, Tuple[str, str]]):
"""
Constructor
:param wrapped_rand_env: randomized environment to wrap
:param dp_mapping: mapping from index of the numpy array (coming from the algorithm) to domain parameter name
(e.g. mass, length) and the domain distribution parameter (e.g. mean, std)
.. code-block:: python
# For the mapping arg use the this dict constructor
```
m = {0: ('name1', 'parameter_type1'), 1: ('name2', 'parameter_type2')}
```
"""
if not isinstance(wrapped_rand_env, DomainRandWrapper):
raise pyrado.TypeErr(given=wrapped_rand_env, expected_type=DomainRandWrapper)
Serializable._init(self, locals())
# Invoke the DomainRandWrapper's constructor
super().__init__(wrapped_rand_env, None)
self.dp_mapping = dp_mapping
@property
def randomizer(self) -> DomainRandomizer:
# Forward to the wrapped DomainRandWrapper
return self._wrapped_env.randomizer
@randomizer.setter
def randomizer(self, dr: DomainRandomizer):
# Forward to the wrapped DomainRandWrapper
self._wrapped_env.randomizer = dr
def adapt_randomizer(self, domain_distr_param_values: np.ndarray):
# Check the input dimension and reshape if necessary
if domain_distr_param_values.ndim == 1:
pass
elif domain_distr_param_values.ndim == 2:
domain_distr_param_values = domain_distr_param_values.ravel()
else:
raise pyrado.ShapeErr(given=domain_distr_param_values, expected_match=(1,))
# Reconfigure the wrapped environment's DomainRandomizer
for i, value in enumerate(domain_distr_param_values):
dp_name, ddp_name = self.dp_mapping.get(i)
self._wrapped_env.randomizer.adapt_one_distr_param(dp_name, ddp_name, value)
class DomainRandWrapperLive(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which randomized the wrapped env at every reset.
Thus every rollout is done with different domain parameters.
"""
def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if domain_param is None:
# No explicit specification of domain parameters, so randomizer is called to draw a parameter dict
self._randomizer.randomize(num_samples=1)
domain_param = self._randomizer.get_params(fmt="dict", dtype="numpy")
# Forward to EnvWrapper, which delegates to self._wrapped_env
return super().reset(init_state=init_state, domain_param=domain_param)
class DomainRandWrapperBuffer(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which randomized the wrapped env using a buffer of domain parameter sets.
At every call of the reset method this wrapper cycles through that buffer.
"""
def __init__(self, wrapped_env, randomizer: Optional[DomainRandomizer], selection: Optional[str] = "cyclic"):
"""
Constructor
:param wrapped_env: environment to wrap around
:param randomizer: `DomainRandomizer` object that manages the randomization. If `None`, the user has to set the
buffer manually, the circular reset however works the same way
:param selection: method to draw samples from the buffer, either cyclic or random
"""
if selection not in ["cyclic", "random"]:
raise pyrado.ValueErr(given=selection, eq_constraint="cyclic or random")
Serializable._init(self, locals())
# Invoke the DomainRandWrapper's constructor
super().__init__(wrapped_env, randomizer)
self._ring_idx = None
self._buffer = None
self.selection = selection
@property
def ring_idx(self) -> int:
"""Get the buffer's index."""
return self._ring_idx
@ring_idx.setter
def ring_idx(self, idx: int):
"""Set the buffer's index."""
if not (isinstance(idx, int) or not 0 <= idx < len(self._buffer)):
raise pyrado.ValueErr(given=idx, ge_constraint="0 (int)", l_constraint=len(self._buffer))
self._ring_idx = idx
@property
def selection(self) -> str:
"""Get the selection method."""
return self._selection
@selection.setter
def selection(self, selection: str):
|
def fill_buffer(self, num_domains: int):
"""
Fill the internal buffer with domains.
:param num_domains: number of randomized domain parameter sets to store in the buffer
"""
if self._randomizer is None:
raise pyrado.TypeErr(msg="The randomizer must not be None to call fill_buffer()!")
if not isinstance(num_domains, int) or num_domains < 0:
raise pyrado.ValueErr(given=num_domains, g_constraint="0 (int)")
self._randomizer.randomize(num_domains)
self._buffer = self._randomizer.get_params(-1, fmt="list", dtype="numpy")
self._ring_idx = 0
@property
def buffer(self):
"""Get the domain parameter buffer."""
return self._buffer
@buffer.setter
def buffer(self, buffer: Union[List[dict], dict]):
"""
Set the domain parameter buffer.
Depends on the way the buffer has been saved, see the `DomainRandomizer.get_params()` arguments.
:param buffer: list of dicts, each describing a domain ,or just one dict for one domain
"""
if not (isinstance(buffer, list) or isinstance(buffer, dict)):
raise pyrado.TypeErr(given=buffer, expected_type=[list, dict])
self._buffer = buffer
def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if domain_param is None:
# No explicit specification of domain parameters, so randomizer is requested
if isinstance(self._buffer, dict):
# The buffer consists of one domain parameter set
domain_param = self._buffer
elif isinstance(self._buffer, list):
# The buffer consists of a list of domain parameter sets
domain_param = self._buffer[self._ring_idx] # first selection will be index 0
if self._selection == "cyclic":
self._ring_idx = (self._ring_idx + 1) % len(self._buffer)
elif self._selection == "random":
self._ring_idx = randint(0, len(self._buffer) - 1)
else:
raise pyrado.TypeErr(given=self._buffer, expected_type=[dict, list])
# Forward to EnvWrapper, which delegates to self._wrapped_env
return super().reset(init_state=init_state, domain_param=domain_param)
def _get_state(self, state_dict: dict):
super()._get_state(state_dict)
state_dict["buffer"] = self._buffer
state_dict["ring_idx"] = self._ring_idx
def _set_state(self, state_dict: dict, copying: bool = False):
super()._set_state(state_dict, copying)
self._buffer = state_dict["buffer"]
self._ring_idx = state_dict["ring_idx"]
def remove_all_dr_wrappers(env: Env, verbose: bool = False):
"""
Go through the environment chain and remove all wrappers of type `DomainRandWrapper` (and subclasses).
:param env: env chain with domain randomization wrappers
:param verbose: choose if status messages should be printed
:return: env chain without domain randomization wrappers
"""
while any(isinstance(subenv, DomainRandWrapper) for subenv in all_envs(env)):
if verbose:
with completion_context(
f"Found domain randomization wrapper of type {type(env).__name__}. Removing it now",
color="y",
bright=True,
):
env = remove_env(env, DomainRandWrapper)
else:
env = remove_env(env, DomainRandWrapper)
return env
| """Set the selection method."""
if selection not in ["cyclic", "random"]:
raise pyrado.ValueErr(given=selection, eq_constraint="cyclic or random")
self._selection = selection | identifier_body |
borrow_set.rs | use crate::borrow_check::place_ext::PlaceExt;
use crate::borrow_check::nll::ToRegionVid;
use crate::borrow_check::path_utils::allow_two_phase_borrow;
use crate::dataflow::indexes::BorrowIndex;
use crate::dataflow::move_paths::MoveData;
use rustc::mir::traversal;
use rustc::mir::visit::{PlaceContext, Visitor, NonUseContext, MutatingUseContext};
use rustc::mir::{self, Location, Body, Local};
use rustc::ty::{RegionVid, TyCtxt};
use rustc::util::nodemap::{FxHashMap, FxHashSet};
use rustc_index::vec::IndexVec;
use rustc_index::bit_set::BitSet;
use std::fmt;
use std::ops::Index;
crate struct BorrowSet<'tcx> {
/// The fundamental map relating bitvector indexes to the borrows
/// in the MIR.
crate borrows: IndexVec<BorrowIndex, BorrowData<'tcx>>,
/// Each borrow is also uniquely identified in the MIR by the
/// `Location` of the assignment statement in which it appears on
/// the right hand side; we map each such location to the
/// corresponding `BorrowIndex`.
crate location_map: FxHashMap<Location, BorrowIndex>,
/// Locations which activate borrows.
/// NOTE: a given location may activate more than one borrow in the future
/// when more general two-phase borrow support is introduced, but for now we
/// only need to store one borrow index.
crate activation_map: FxHashMap<Location, Vec<BorrowIndex>>,
/// Map from local to all the borrows on that local.
crate local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>,
crate locals_state_at_exit: LocalsStateAtExit,
}
impl<'tcx> Index<BorrowIndex> for BorrowSet<'tcx> {
type Output = BorrowData<'tcx>;
fn index(&self, index: BorrowIndex) -> &BorrowData<'tcx> {
&self.borrows[index]
}
}
/// Location where a two-phase borrow is activated, if a borrow
/// is in fact a two-phase borrow.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
crate enum TwoPhaseActivation {
NotTwoPhase,
NotActivated,
ActivatedAt(Location),
}
#[derive(Debug, Clone)]
crate struct BorrowData<'tcx> {
/// Location where the borrow reservation starts.
/// In many cases, this will be equal to the activation location but not always.
crate reserve_location: Location,
/// Location where the borrow is activated.
crate activation_location: TwoPhaseActivation,
/// What kind of borrow this is
crate kind: mir::BorrowKind,
/// The region for which this borrow is live
crate region: RegionVid,
/// Place from which we are borrowing
crate borrowed_place: mir::Place<'tcx>,
/// Place to which the borrow was stored
crate assigned_place: mir::Place<'tcx>,
}
impl<'tcx> fmt::Display for BorrowData<'tcx> {
fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
let kind = match self.kind {
mir::BorrowKind::Shared => "",
mir::BorrowKind::Shallow => "shallow ",
mir::BorrowKind::Unique => "uniq ",
mir::BorrowKind::Mut { .. } => "mut ",
};
write!(w, "&{:?} {}{:?}", self.region, kind, self.borrowed_place)
}
}
crate enum LocalsStateAtExit {
AllAreInvalidated,
SomeAreInvalidated { has_storage_dead_or_moved: BitSet<Local> }
}
impl LocalsStateAtExit {
fn | (
locals_are_invalidated_at_exit: bool,
body: &Body<'tcx>,
move_data: &MoveData<'tcx>
) -> Self {
struct HasStorageDead(BitSet<Local>);
impl<'tcx> Visitor<'tcx> for HasStorageDead {
fn visit_local(&mut self, local: &Local, ctx: PlaceContext, _: Location) {
if ctx == PlaceContext::NonUse(NonUseContext::StorageDead) {
self.0.insert(*local);
}
}
}
if locals_are_invalidated_at_exit {
LocalsStateAtExit::AllAreInvalidated
} else {
let mut has_storage_dead = HasStorageDead(BitSet::new_empty(body.local_decls.len()));
has_storage_dead.visit_body(body);
let mut has_storage_dead_or_moved = has_storage_dead.0;
for move_out in &move_data.moves {
if let Some(index) = move_data.base_local(move_out.path) {
has_storage_dead_or_moved.insert(index);
}
}
LocalsStateAtExit::SomeAreInvalidated{ has_storage_dead_or_moved }
}
}
}
impl<'tcx> BorrowSet<'tcx> {
pub fn build(
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
locals_are_invalidated_at_exit: bool,
move_data: &MoveData<'tcx>,
) -> Self {
let mut visitor = GatherBorrows {
tcx,
body,
idx_vec: IndexVec::new(),
location_map: Default::default(),
activation_map: Default::default(),
local_map: Default::default(),
pending_activations: Default::default(),
locals_state_at_exit:
LocalsStateAtExit::build(locals_are_invalidated_at_exit, body, move_data),
};
for (block, block_data) in traversal::preorder(body) {
visitor.visit_basic_block_data(block, block_data);
}
BorrowSet {
borrows: visitor.idx_vec,
location_map: visitor.location_map,
activation_map: visitor.activation_map,
local_map: visitor.local_map,
locals_state_at_exit: visitor.locals_state_at_exit,
}
}
crate fn activations_at_location(&self, location: Location) -> &[BorrowIndex] {
self.activation_map
.get(&location)
.map(|activations| &activations[..])
.unwrap_or(&[])
}
}
struct GatherBorrows<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
idx_vec: IndexVec<BorrowIndex, BorrowData<'tcx>>,
location_map: FxHashMap<Location, BorrowIndex>,
activation_map: FxHashMap<Location, Vec<BorrowIndex>>,
local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>,
/// When we encounter a 2-phase borrow statement, it will always
/// be assigning into a temporary TEMP:
///
/// TEMP = &foo
///
/// We add TEMP into this map with `b`, where `b` is the index of
/// the borrow. When we find a later use of this activation, we
/// remove from the map (and add to the "tombstone" set below).
pending_activations: FxHashMap<mir::Local, BorrowIndex>,
locals_state_at_exit: LocalsStateAtExit,
}
impl<'a, 'tcx> Visitor<'tcx> for GatherBorrows<'a, 'tcx> {
fn visit_assign(
&mut self,
assigned_place: &mir::Place<'tcx>,
rvalue: &mir::Rvalue<'tcx>,
location: mir::Location,
) {
if let mir::Rvalue::Ref(region, kind, ref borrowed_place) = *rvalue {
if borrowed_place.ignore_borrow(
self.tcx, self.body, &self.locals_state_at_exit) {
return;
}
let region = region.to_region_vid();
let borrow = BorrowData {
kind,
region,
reserve_location: location,
activation_location: TwoPhaseActivation::NotTwoPhase,
borrowed_place: borrowed_place.clone(),
assigned_place: assigned_place.clone(),
};
let idx = self.idx_vec.push(borrow);
self.location_map.insert(location, idx);
self.insert_as_pending_if_two_phase(location, &assigned_place, kind, idx);
if let mir::PlaceBase::Local(local) = borrowed_place.base {
self.local_map.entry(local).or_default().insert(idx);
}
}
self.super_assign(assigned_place, rvalue, location)
}
fn visit_local(
&mut self,
temp: &Local,
context: PlaceContext,
location: Location,
) {
if !context.is_use() {
return;
}
// We found a use of some temporary TMP
// check whether we (earlier) saw a 2-phase borrow like
//
// TMP = &mut place
if let Some(&borrow_index) = self.pending_activations.get(temp) {
let borrow_data = &mut self.idx_vec[borrow_index];
// Watch out: the use of TMP in the borrow itself
// doesn't count as an activation. =)
if borrow_data.reserve_location == location &&
context == PlaceContext::MutatingUse(MutatingUseContext::Store)
{
return;
}
if let TwoPhaseActivation::ActivatedAt(other_location) =
borrow_data.activation_location {
span_bug!(
self.body.source_info(location).span,
"found two uses for 2-phase borrow temporary {:?}: \
{:?} and {:?}",
temp,
location,
other_location,
);
}
// Otherwise, this is the unique later use that we expect.
// Double check: This borrow is indeed a two-phase borrow (that is,
// we are 'transitioning' from `NotActivated` to `ActivatedAt`) and
// we've not found any other activations (checked above).
assert_eq!(
borrow_data.activation_location,
TwoPhaseActivation::NotActivated,
"never found an activation for this borrow!",
);
self.activation_map
.entry(location)
.or_default()
.push(borrow_index);
borrow_data.activation_location = TwoPhaseActivation::ActivatedAt(location);
}
}
fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: mir::Location) {
if let mir::Rvalue::Ref(region, kind, ref place) = *rvalue {
// double-check that we already registered a BorrowData for this
let borrow_index = self.location_map[&location];
let borrow_data = &self.idx_vec[borrow_index];
assert_eq!(borrow_data.reserve_location, location);
assert_eq!(borrow_data.kind, kind);
assert_eq!(borrow_data.region, region.to_region_vid());
assert_eq!(borrow_data.borrowed_place, *place);
}
return self.super_rvalue(rvalue, location);
}
}
impl<'a, 'tcx> GatherBorrows<'a, 'tcx> {
/// If this is a two-phase borrow, then we will record it
/// as "pending" until we find the activating use.
fn insert_as_pending_if_two_phase(
&mut self,
start_location: Location,
assigned_place: &mir::Place<'tcx>,
kind: mir::BorrowKind,
borrow_index: BorrowIndex,
) {
debug!(
"Borrows::insert_as_pending_if_two_phase({:?}, {:?}, {:?})",
start_location, assigned_place, borrow_index,
);
if !allow_two_phase_borrow(kind) {
debug!(" -> {:?}", start_location);
return;
}
// When we encounter a 2-phase borrow statement, it will always
// be assigning into a temporary TEMP:
//
// TEMP = &foo
//
// so extract `temp`.
let temp = if let &mir::Place {
base: mir::PlaceBase::Local(temp),
projection: box [],
} = assigned_place {
temp
} else {
span_bug!(
self.body.source_info(start_location).span,
"expected 2-phase borrow to assign to a local, not `{:?}`",
assigned_place,
);
};
// Consider the borrow not activated to start. When we find an activation, we'll update
// this field.
{
let borrow_data = &mut self.idx_vec[borrow_index];
borrow_data.activation_location = TwoPhaseActivation::NotActivated;
}
// Insert `temp` into the list of pending activations. From
// now on, we'll be on the lookout for a use of it. Note that
// we are guaranteed that this use will come after the
// assignment.
let old_value = self.pending_activations.insert(temp, borrow_index);
if let Some(old_index) = old_value {
span_bug!(self.body.source_info(start_location).span,
"found already pending activation for temp: {:?} \
at borrow_index: {:?} with associated data {:?}",
temp, old_index, self.idx_vec[old_index]);
}
}
}
| build | identifier_name |
borrow_set.rs | use crate::borrow_check::place_ext::PlaceExt;
use crate::borrow_check::nll::ToRegionVid;
use crate::borrow_check::path_utils::allow_two_phase_borrow;
use crate::dataflow::indexes::BorrowIndex;
use crate::dataflow::move_paths::MoveData;
use rustc::mir::traversal;
use rustc::mir::visit::{PlaceContext, Visitor, NonUseContext, MutatingUseContext};
use rustc::mir::{self, Location, Body, Local};
use rustc::ty::{RegionVid, TyCtxt};
use rustc::util::nodemap::{FxHashMap, FxHashSet};
use rustc_index::vec::IndexVec;
use rustc_index::bit_set::BitSet;
use std::fmt;
use std::ops::Index;
crate struct BorrowSet<'tcx> {
/// The fundamental map relating bitvector indexes to the borrows
/// in the MIR.
crate borrows: IndexVec<BorrowIndex, BorrowData<'tcx>>,
/// Each borrow is also uniquely identified in the MIR by the
/// `Location` of the assignment statement in which it appears on
/// the right hand side; we map each such location to the
/// corresponding `BorrowIndex`.
crate location_map: FxHashMap<Location, BorrowIndex>,
/// Locations which activate borrows.
/// NOTE: a given location may activate more than one borrow in the future
/// when more general two-phase borrow support is introduced, but for now we
/// only need to store one borrow index.
crate activation_map: FxHashMap<Location, Vec<BorrowIndex>>,
/// Map from local to all the borrows on that local.
crate local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>,
crate locals_state_at_exit: LocalsStateAtExit,
}
impl<'tcx> Index<BorrowIndex> for BorrowSet<'tcx> {
type Output = BorrowData<'tcx>;
fn index(&self, index: BorrowIndex) -> &BorrowData<'tcx> {
&self.borrows[index]
}
}
/// Location where a two-phase borrow is activated, if a borrow
/// is in fact a two-phase borrow.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
crate enum TwoPhaseActivation {
NotTwoPhase,
NotActivated,
ActivatedAt(Location),
}
#[derive(Debug, Clone)]
crate struct BorrowData<'tcx> {
/// Location where the borrow reservation starts.
/// In many cases, this will be equal to the activation location but not always.
crate reserve_location: Location,
/// Location where the borrow is activated.
crate activation_location: TwoPhaseActivation,
/// What kind of borrow this is
crate kind: mir::BorrowKind,
/// The region for which this borrow is live
crate region: RegionVid,
/// Place from which we are borrowing
crate borrowed_place: mir::Place<'tcx>,
/// Place to which the borrow was stored
crate assigned_place: mir::Place<'tcx>,
}
impl<'tcx> fmt::Display for BorrowData<'tcx> {
fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
let kind = match self.kind {
mir::BorrowKind::Shared => "",
mir::BorrowKind::Shallow => "shallow ",
mir::BorrowKind::Unique => "uniq ",
mir::BorrowKind::Mut { .. } => "mut ",
};
write!(w, "&{:?} {}{:?}", self.region, kind, self.borrowed_place)
}
}
crate enum LocalsStateAtExit {
AllAreInvalidated,
SomeAreInvalidated { has_storage_dead_or_moved: BitSet<Local> }
}
impl LocalsStateAtExit {
fn build(
locals_are_invalidated_at_exit: bool,
body: &Body<'tcx>,
move_data: &MoveData<'tcx>
) -> Self {
struct HasStorageDead(BitSet<Local>);
impl<'tcx> Visitor<'tcx> for HasStorageDead {
fn visit_local(&mut self, local: &Local, ctx: PlaceContext, _: Location) {
if ctx == PlaceContext::NonUse(NonUseContext::StorageDead) |
}
}
if locals_are_invalidated_at_exit {
LocalsStateAtExit::AllAreInvalidated
} else {
let mut has_storage_dead = HasStorageDead(BitSet::new_empty(body.local_decls.len()));
has_storage_dead.visit_body(body);
let mut has_storage_dead_or_moved = has_storage_dead.0;
for move_out in &move_data.moves {
if let Some(index) = move_data.base_local(move_out.path) {
has_storage_dead_or_moved.insert(index);
}
}
LocalsStateAtExit::SomeAreInvalidated{ has_storage_dead_or_moved }
}
}
}
impl<'tcx> BorrowSet<'tcx> {
pub fn build(
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
locals_are_invalidated_at_exit: bool,
move_data: &MoveData<'tcx>,
) -> Self {
let mut visitor = GatherBorrows {
tcx,
body,
idx_vec: IndexVec::new(),
location_map: Default::default(),
activation_map: Default::default(),
local_map: Default::default(),
pending_activations: Default::default(),
locals_state_at_exit:
LocalsStateAtExit::build(locals_are_invalidated_at_exit, body, move_data),
};
for (block, block_data) in traversal::preorder(body) {
visitor.visit_basic_block_data(block, block_data);
}
BorrowSet {
borrows: visitor.idx_vec,
location_map: visitor.location_map,
activation_map: visitor.activation_map,
local_map: visitor.local_map,
locals_state_at_exit: visitor.locals_state_at_exit,
}
}
crate fn activations_at_location(&self, location: Location) -> &[BorrowIndex] {
self.activation_map
.get(&location)
.map(|activations| &activations[..])
.unwrap_or(&[])
}
}
struct GatherBorrows<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
idx_vec: IndexVec<BorrowIndex, BorrowData<'tcx>>,
location_map: FxHashMap<Location, BorrowIndex>,
activation_map: FxHashMap<Location, Vec<BorrowIndex>>,
local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>,
/// When we encounter a 2-phase borrow statement, it will always
/// be assigning into a temporary TEMP:
///
/// TEMP = &foo
///
/// We add TEMP into this map with `b`, where `b` is the index of
/// the borrow. When we find a later use of this activation, we
/// remove from the map (and add to the "tombstone" set below).
pending_activations: FxHashMap<mir::Local, BorrowIndex>,
locals_state_at_exit: LocalsStateAtExit,
}
impl<'a, 'tcx> Visitor<'tcx> for GatherBorrows<'a, 'tcx> {
fn visit_assign(
&mut self,
assigned_place: &mir::Place<'tcx>,
rvalue: &mir::Rvalue<'tcx>,
location: mir::Location,
) {
if let mir::Rvalue::Ref(region, kind, ref borrowed_place) = *rvalue {
if borrowed_place.ignore_borrow(
self.tcx, self.body, &self.locals_state_at_exit) {
return;
}
let region = region.to_region_vid();
let borrow = BorrowData {
kind,
region,
reserve_location: location,
activation_location: TwoPhaseActivation::NotTwoPhase,
borrowed_place: borrowed_place.clone(),
assigned_place: assigned_place.clone(),
};
let idx = self.idx_vec.push(borrow);
self.location_map.insert(location, idx);
self.insert_as_pending_if_two_phase(location, &assigned_place, kind, idx);
if let mir::PlaceBase::Local(local) = borrowed_place.base {
self.local_map.entry(local).or_default().insert(idx);
}
}
self.super_assign(assigned_place, rvalue, location)
}
fn visit_local(
&mut self,
temp: &Local,
context: PlaceContext,
location: Location,
) {
if !context.is_use() {
return;
}
// We found a use of some temporary TMP
// check whether we (earlier) saw a 2-phase borrow like
//
// TMP = &mut place
if let Some(&borrow_index) = self.pending_activations.get(temp) {
let borrow_data = &mut self.idx_vec[borrow_index];
// Watch out: the use of TMP in the borrow itself
// doesn't count as an activation. =)
if borrow_data.reserve_location == location &&
context == PlaceContext::MutatingUse(MutatingUseContext::Store)
{
return;
}
if let TwoPhaseActivation::ActivatedAt(other_location) =
borrow_data.activation_location {
span_bug!(
self.body.source_info(location).span,
"found two uses for 2-phase borrow temporary {:?}: \
{:?} and {:?}",
temp,
location,
other_location,
);
}
// Otherwise, this is the unique later use that we expect.
// Double check: This borrow is indeed a two-phase borrow (that is,
// we are 'transitioning' from `NotActivated` to `ActivatedAt`) and
// we've not found any other activations (checked above).
assert_eq!(
borrow_data.activation_location,
TwoPhaseActivation::NotActivated,
"never found an activation for this borrow!",
);
self.activation_map
.entry(location)
.or_default()
.push(borrow_index);
borrow_data.activation_location = TwoPhaseActivation::ActivatedAt(location);
}
}
fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: mir::Location) {
if let mir::Rvalue::Ref(region, kind, ref place) = *rvalue {
// double-check that we already registered a BorrowData for this
let borrow_index = self.location_map[&location];
let borrow_data = &self.idx_vec[borrow_index];
assert_eq!(borrow_data.reserve_location, location);
assert_eq!(borrow_data.kind, kind);
assert_eq!(borrow_data.region, region.to_region_vid());
assert_eq!(borrow_data.borrowed_place, *place);
}
return self.super_rvalue(rvalue, location);
}
}
impl<'a, 'tcx> GatherBorrows<'a, 'tcx> {
/// If this is a two-phase borrow, then we will record it
/// as "pending" until we find the activating use.
fn insert_as_pending_if_two_phase(
&mut self,
start_location: Location,
assigned_place: &mir::Place<'tcx>,
kind: mir::BorrowKind,
borrow_index: BorrowIndex,
) {
debug!(
"Borrows::insert_as_pending_if_two_phase({:?}, {:?}, {:?})",
start_location, assigned_place, borrow_index,
);
if !allow_two_phase_borrow(kind) {
debug!(" -> {:?}", start_location);
return;
}
// When we encounter a 2-phase borrow statement, it will always
// be assigning into a temporary TEMP:
//
// TEMP = &foo
//
// so extract `temp`.
let temp = if let &mir::Place {
base: mir::PlaceBase::Local(temp),
projection: box [],
} = assigned_place {
temp
} else {
span_bug!(
self.body.source_info(start_location).span,
"expected 2-phase borrow to assign to a local, not `{:?}`",
assigned_place,
);
};
// Consider the borrow not activated to start. When we find an activation, we'll update
// this field.
{
let borrow_data = &mut self.idx_vec[borrow_index];
borrow_data.activation_location = TwoPhaseActivation::NotActivated;
}
// Insert `temp` into the list of pending activations. From
// now on, we'll be on the lookout for a use of it. Note that
// we are guaranteed that this use will come after the
// assignment.
let old_value = self.pending_activations.insert(temp, borrow_index);
if let Some(old_index) = old_value {
span_bug!(self.body.source_info(start_location).span,
"found already pending activation for temp: {:?} \
at borrow_index: {:?} with associated data {:?}",
temp, old_index, self.idx_vec[old_index]);
}
}
}
| {
self.0.insert(*local);
} | conditional_block |
borrow_set.rs | use crate::borrow_check::place_ext::PlaceExt;
use crate::borrow_check::nll::ToRegionVid;
use crate::borrow_check::path_utils::allow_two_phase_borrow;
use crate::dataflow::indexes::BorrowIndex;
use crate::dataflow::move_paths::MoveData;
use rustc::mir::traversal;
use rustc::mir::visit::{PlaceContext, Visitor, NonUseContext, MutatingUseContext};
use rustc::mir::{self, Location, Body, Local};
use rustc::ty::{RegionVid, TyCtxt};
use rustc::util::nodemap::{FxHashMap, FxHashSet};
use rustc_index::vec::IndexVec;
use rustc_index::bit_set::BitSet;
use std::fmt;
use std::ops::Index;
crate struct BorrowSet<'tcx> {
/// The fundamental map relating bitvector indexes to the borrows
/// in the MIR.
crate borrows: IndexVec<BorrowIndex, BorrowData<'tcx>>,
/// Each borrow is also uniquely identified in the MIR by the
/// `Location` of the assignment statement in which it appears on
/// the right hand side; we map each such location to the
/// corresponding `BorrowIndex`.
crate location_map: FxHashMap<Location, BorrowIndex>,
/// Locations which activate borrows.
/// NOTE: a given location may activate more than one borrow in the future
/// when more general two-phase borrow support is introduced, but for now we
/// only need to store one borrow index.
crate activation_map: FxHashMap<Location, Vec<BorrowIndex>>,
/// Map from local to all the borrows on that local.
crate local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>,
crate locals_state_at_exit: LocalsStateAtExit,
}
impl<'tcx> Index<BorrowIndex> for BorrowSet<'tcx> {
type Output = BorrowData<'tcx>;
fn index(&self, index: BorrowIndex) -> &BorrowData<'tcx> {
&self.borrows[index]
}
}
/// Location where a two-phase borrow is activated, if a borrow
/// is in fact a two-phase borrow.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
crate enum TwoPhaseActivation {
NotTwoPhase,
NotActivated,
ActivatedAt(Location),
}
#[derive(Debug, Clone)]
crate struct BorrowData<'tcx> {
/// Location where the borrow reservation starts.
/// In many cases, this will be equal to the activation location but not always.
crate reserve_location: Location,
/// Location where the borrow is activated.
crate activation_location: TwoPhaseActivation,
/// What kind of borrow this is
crate kind: mir::BorrowKind,
/// The region for which this borrow is live
crate region: RegionVid,
/// Place from which we are borrowing
crate borrowed_place: mir::Place<'tcx>,
/// Place to which the borrow was stored
crate assigned_place: mir::Place<'tcx>,
}
impl<'tcx> fmt::Display for BorrowData<'tcx> {
fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
let kind = match self.kind {
mir::BorrowKind::Shared => "",
mir::BorrowKind::Shallow => "shallow ",
mir::BorrowKind::Unique => "uniq ",
mir::BorrowKind::Mut { .. } => "mut ",
};
write!(w, "&{:?} {}{:?}", self.region, kind, self.borrowed_place)
}
}
crate enum LocalsStateAtExit {
AllAreInvalidated,
SomeAreInvalidated { has_storage_dead_or_moved: BitSet<Local> }
}
impl LocalsStateAtExit {
fn build(
locals_are_invalidated_at_exit: bool,
body: &Body<'tcx>,
move_data: &MoveData<'tcx>
) -> Self {
struct HasStorageDead(BitSet<Local>);
impl<'tcx> Visitor<'tcx> for HasStorageDead {
fn visit_local(&mut self, local: &Local, ctx: PlaceContext, _: Location) {
if ctx == PlaceContext::NonUse(NonUseContext::StorageDead) {
self.0.insert(*local);
}
}
}
if locals_are_invalidated_at_exit {
LocalsStateAtExit::AllAreInvalidated
} else {
let mut has_storage_dead = HasStorageDead(BitSet::new_empty(body.local_decls.len()));
has_storage_dead.visit_body(body);
let mut has_storage_dead_or_moved = has_storage_dead.0;
for move_out in &move_data.moves {
if let Some(index) = move_data.base_local(move_out.path) {
has_storage_dead_or_moved.insert(index);
}
} | }
}
impl<'tcx> BorrowSet<'tcx> {
pub fn build(
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
locals_are_invalidated_at_exit: bool,
move_data: &MoveData<'tcx>,
) -> Self {
let mut visitor = GatherBorrows {
tcx,
body,
idx_vec: IndexVec::new(),
location_map: Default::default(),
activation_map: Default::default(),
local_map: Default::default(),
pending_activations: Default::default(),
locals_state_at_exit:
LocalsStateAtExit::build(locals_are_invalidated_at_exit, body, move_data),
};
for (block, block_data) in traversal::preorder(body) {
visitor.visit_basic_block_data(block, block_data);
}
BorrowSet {
borrows: visitor.idx_vec,
location_map: visitor.location_map,
activation_map: visitor.activation_map,
local_map: visitor.local_map,
locals_state_at_exit: visitor.locals_state_at_exit,
}
}
crate fn activations_at_location(&self, location: Location) -> &[BorrowIndex] {
self.activation_map
.get(&location)
.map(|activations| &activations[..])
.unwrap_or(&[])
}
}
struct GatherBorrows<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
idx_vec: IndexVec<BorrowIndex, BorrowData<'tcx>>,
location_map: FxHashMap<Location, BorrowIndex>,
activation_map: FxHashMap<Location, Vec<BorrowIndex>>,
local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>,
/// When we encounter a 2-phase borrow statement, it will always
/// be assigning into a temporary TEMP:
///
/// TEMP = &foo
///
/// We add TEMP into this map with `b`, where `b` is the index of
/// the borrow. When we find a later use of this activation, we
/// remove from the map (and add to the "tombstone" set below).
pending_activations: FxHashMap<mir::Local, BorrowIndex>,
locals_state_at_exit: LocalsStateAtExit,
}
impl<'a, 'tcx> Visitor<'tcx> for GatherBorrows<'a, 'tcx> {
fn visit_assign(
&mut self,
assigned_place: &mir::Place<'tcx>,
rvalue: &mir::Rvalue<'tcx>,
location: mir::Location,
) {
if let mir::Rvalue::Ref(region, kind, ref borrowed_place) = *rvalue {
if borrowed_place.ignore_borrow(
self.tcx, self.body, &self.locals_state_at_exit) {
return;
}
let region = region.to_region_vid();
let borrow = BorrowData {
kind,
region,
reserve_location: location,
activation_location: TwoPhaseActivation::NotTwoPhase,
borrowed_place: borrowed_place.clone(),
assigned_place: assigned_place.clone(),
};
let idx = self.idx_vec.push(borrow);
self.location_map.insert(location, idx);
self.insert_as_pending_if_two_phase(location, &assigned_place, kind, idx);
if let mir::PlaceBase::Local(local) = borrowed_place.base {
self.local_map.entry(local).or_default().insert(idx);
}
}
self.super_assign(assigned_place, rvalue, location)
}
fn visit_local(
&mut self,
temp: &Local,
context: PlaceContext,
location: Location,
) {
if !context.is_use() {
return;
}
// We found a use of some temporary TMP
// check whether we (earlier) saw a 2-phase borrow like
//
// TMP = &mut place
if let Some(&borrow_index) = self.pending_activations.get(temp) {
let borrow_data = &mut self.idx_vec[borrow_index];
// Watch out: the use of TMP in the borrow itself
// doesn't count as an activation. =)
if borrow_data.reserve_location == location &&
context == PlaceContext::MutatingUse(MutatingUseContext::Store)
{
return;
}
if let TwoPhaseActivation::ActivatedAt(other_location) =
borrow_data.activation_location {
span_bug!(
self.body.source_info(location).span,
"found two uses for 2-phase borrow temporary {:?}: \
{:?} and {:?}",
temp,
location,
other_location,
);
}
// Otherwise, this is the unique later use that we expect.
// Double check: This borrow is indeed a two-phase borrow (that is,
// we are 'transitioning' from `NotActivated` to `ActivatedAt`) and
// we've not found any other activations (checked above).
assert_eq!(
borrow_data.activation_location,
TwoPhaseActivation::NotActivated,
"never found an activation for this borrow!",
);
self.activation_map
.entry(location)
.or_default()
.push(borrow_index);
borrow_data.activation_location = TwoPhaseActivation::ActivatedAt(location);
}
}
fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: mir::Location) {
if let mir::Rvalue::Ref(region, kind, ref place) = *rvalue {
// double-check that we already registered a BorrowData for this
let borrow_index = self.location_map[&location];
let borrow_data = &self.idx_vec[borrow_index];
assert_eq!(borrow_data.reserve_location, location);
assert_eq!(borrow_data.kind, kind);
assert_eq!(borrow_data.region, region.to_region_vid());
assert_eq!(borrow_data.borrowed_place, *place);
}
return self.super_rvalue(rvalue, location);
}
}
impl<'a, 'tcx> GatherBorrows<'a, 'tcx> {
/// If this is a two-phase borrow, then we will record it
/// as "pending" until we find the activating use.
fn insert_as_pending_if_two_phase(
&mut self,
start_location: Location,
assigned_place: &mir::Place<'tcx>,
kind: mir::BorrowKind,
borrow_index: BorrowIndex,
) {
debug!(
"Borrows::insert_as_pending_if_two_phase({:?}, {:?}, {:?})",
start_location, assigned_place, borrow_index,
);
if !allow_two_phase_borrow(kind) {
debug!(" -> {:?}", start_location);
return;
}
// When we encounter a 2-phase borrow statement, it will always
// be assigning into a temporary TEMP:
//
// TEMP = &foo
//
// so extract `temp`.
let temp = if let &mir::Place {
base: mir::PlaceBase::Local(temp),
projection: box [],
} = assigned_place {
temp
} else {
span_bug!(
self.body.source_info(start_location).span,
"expected 2-phase borrow to assign to a local, not `{:?}`",
assigned_place,
);
};
// Consider the borrow not activated to start. When we find an activation, we'll update
// this field.
{
let borrow_data = &mut self.idx_vec[borrow_index];
borrow_data.activation_location = TwoPhaseActivation::NotActivated;
}
// Insert `temp` into the list of pending activations. From
// now on, we'll be on the lookout for a use of it. Note that
// we are guaranteed that this use will come after the
// assignment.
let old_value = self.pending_activations.insert(temp, borrow_index);
if let Some(old_index) = old_value {
span_bug!(self.body.source_info(start_location).span,
"found already pending activation for temp: {:?} \
at borrow_index: {:?} with associated data {:?}",
temp, old_index, self.idx_vec[old_index]);
}
}
} | LocalsStateAtExit::SomeAreInvalidated{ has_storage_dead_or_moved }
} | random_line_split |
train_denoise_clouds.py | import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.functional as tf
import torch.utils.data
import time
from tqdm import tqdm
import model_denoise_clouds as model
import argparse
try:
import nvidia_smi
NVIDIA_SMI = True
except:
NVIDIA_SMI = False
import sys
import os
import pathlib
import zarr
class Dataset(torch.utils.data.Dataset):
"""
Dataset class that will provide data during training. Modify it accordingly
for your dataset. This one shows how to do augmenting during training for a
very simple training set
"""
def __init__(self, n_training):
"""
Args:
n_training (int): number of training examples including augmenting
"""
super(Dataset, self).__init__()
self.n_training = n_training
f_matrix = zarr.open('training_matrices.zarr', 'r')
self.matrix = f_matrix['matrix'][:]
self.eigenvals = f_matrix['largest_eval'][:]
n_samples_matrix, _, _ = self.matrix.shape
f_surface = zarr.open('training_surfaces_libnoise.zarr', 'r')
self.surface = 1.0 - f_surface['surface'][:]
n_samples_surface, _ = self.surface.shape
f_clouds = zarr.open('training_clouds.zarr', 'r')
self.clouds = f_clouds['clouds'][:]
n_samples_clouds, _ = self.clouds.shape
self.index_matrix = np.random.randint(low=0, high=n_samples_matrix, size=self.n_training)
self.index_surface = np.random.randint(low=0, high=n_samples_surface, size=self.n_training)
self.index_clouds = np.random.randint(low=0, high=n_samples_clouds, size=(5, self.n_training))
def __getitem__(self, index):
Phi = self.matrix[self.index_matrix[index], :, :].astype('float32')
rho = 0.4 / self.eigenvals[self.index_matrix[index]]
Phi_split = Phi.reshape((5, 24, 3072))
surface = np.random.uniform(low=0.2, high=1.0) * self.surface[self.index_surface[index], :]
clouds = np.random.uniform(low=0.2, high=1.0, size=5)[:, None] * self.clouds[self.index_clouds[:, index], :]
d_split = np.zeros((5, 24))
for i in range(5):
d_split[i, :] = Phi_split[i, :, :] @ (clouds[i, :] + (1.0 - clouds[i, :])**2 * surface)
return Phi_split, surface.astype('float32'), clouds.astype('float32'), rho.astype('float32'), d_split.astype('float32')
def __len__(self):
return self.n_training
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, filename+'.best')
class Training(object):
def __init__(self, batch_size, validation_split=0.2, gpu=0, smooth=0.05, K=3, model_class='conv1d'):
self.cuda = torch.cuda.is_available()
self.gpu = gpu
self.smooth = smooth
self.device = torch.device(f"cuda:{self.gpu}" if self.cuda else "cpu")
# self.device = 'cpu'
self.batch_size = batch_size
self.model_class = model_class
self.K = K
if (NVIDIA_SMI):
nvidia_smi.nvmlInit()
self.handle = nvidia_smi.nvmlDeviceGetHandleByIndex(self.gpu)
print("Computing in {0} : {1}".format(self.device, nvidia_smi.nvmlDeviceGetName(self.handle)))
self.validation_split = validation_split
kwargs = {'num_workers': 4, 'pin_memory': False} if self.cuda else {}
if (model_class == 'conv1d'):
self.model = model.Network(K=self.K, L=32, device=self.device, model_class=model_class).to(self.device)
if (model_class == 'conv2d'):
self.model = model.Network(K=self.K, L=32, NSIDE=16, device=self.device, model_class=model_class).to(self.device)
print('N. total parameters : {0}'.format(sum(p.numel() for p in self.model.parameters() if p.requires_grad)))
self.train_dataset = Dataset(n_training=20000)
self.validation_dataset = Dataset(n_training=2000)
# Data loaders that will inject data during training
self.train_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True, **kwargs)
self.validation_loader = torch.utils.data.DataLoader(self.validation_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True, **kwargs)
def init_optimize(self, epochs, lr, weight_decay, scheduler):
self.lr = lr
self.weight_decay = weight_decay
print('Learning rate : {0}'.format(lr))
self.n_epochs = epochs
if (self.model_class == 'conv1d'):
root = 'trained_denoise_clouds_1d'
if (self.model_class == 'conv2d'):
root = 'trained_denoise_clouds_2d'
p = pathlib.Path(f'{root}/')
p.mkdir(parents=True, exist_ok=True)
current_time = time.strftime("%Y-%m-%d-%H:%M:%S")
self.out_name = f'{root}/{current_time}'
# Copy model
file = model.__file__.split('/')[-1]
shutil.copyfile(model.__file__, '{0}_model.py'.format(self.out_name))
shutil.copyfile('{0}/{1}'.format(os.path.dirname(os.path.abspath(__file__)), file), '{0}_trainer.py'.format(self.out_name))
self.file_mode = 'w'
f = open('{0}_call.dat'.format(self.out_name), 'w')
f.write('python ' + ' '.join(sys.argv))
f.close()
f = open('{0}_hyper.dat'.format(self.out_name), 'w')
f.write('Learning_rate Weight_decay \n')
f.write('{0} {1}'.format(self.lr, self.weight_decay)) | self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
self.loss_fn = nn.MSELoss().to(self.device)
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=scheduler, gamma=0.5)
np.random.seed(123)
self.surf0 = torch.tensor(np.random.rand(self.batch_size, 3072).astype('float32')).to(self.device)
self.surf0 = torch.zeros((self.batch_size, 3072)).to(self.device)
self.clouds0 = torch.tensor(np.random.rand(self.batch_size, 5, 3072).astype('float32')).to(self.device)
self.clouds0 = torch.zeros((self.batch_size, 5, 3072)).to(self.device)
torch.backends.cudnn.benchmark = True
def optimize(self):
self.loss = []
self.loss_val = []
best_loss = 1e10
trainF = open('{0}.loss.csv'.format(self.out_name), self.file_mode)
print('Model : {0}'.format(self.out_name))
for epoch in range(1, self.n_epochs + 1):
self.train(epoch)
self.test(epoch)
self.scheduler.step()
trainF.write('{},{},{}\n'.format(
epoch, self.loss[-1], self.loss_val[-1]))
trainF.flush()
is_best = self.loss_val[-1] < best_loss
best_loss = min(self.loss_val[-1], best_loss)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': self.model.state_dict(),
'best_loss': best_loss,
'optimizer': self.optimizer.state_dict(),
}, is_best, filename='{0}.pth'.format(self.out_name))
trainF.close()
def train(self, epoch):
self.model.train()
print("Epoch {0}/{1}".format(epoch, self.n_epochs))
t = tqdm(self.train_loader)
loss_avg = 0.0
n = 1
for param_group in self.optimizer.param_groups:
current_lr = param_group['lr']
for batch_idx, (Phi_split, surface, clouds, rho, d_split) in enumerate(t):
Phi_split, surface, clouds, rho, d_split = Phi_split.to(self.device), surface.to(self.device), clouds.to(self.device), rho.to(self.device), d_split.to(self.device)
self.optimizer.zero_grad()
surf, clouds, out_surface, out_clouds = self.model(d_split, self.surf0, self.clouds0, Phi_split, rho, n_epochs=5)
# Loss
loss = 0.0
for i in range(self.K):
loss += self.loss_fn(out_surface[i], surface)
# loss += self.loss_fn(out_clouds[i], clouds)
loss.backward()
self.optimizer.step()
if (batch_idx == 0):
loss_avg = loss.item()
else:
loss_avg = self.smooth * loss.item() + (1.0 - self.smooth) * loss_avg
if (NVIDIA_SMI):
tmp = nvidia_smi.nvmlDeviceGetUtilizationRates(self.handle)
t.set_postfix(loss=loss_avg, lr=current_lr, gpu=tmp.gpu, mem=tmp.memory)
else:
t.set_postfix(loss=loss_avg, lr=current_lr)
self.loss.append(loss_avg)
def test(self, epoch):
self.model.eval()
t = tqdm(self.validation_loader)
n = 1
loss_avg = 0.0
with torch.no_grad():
for batch_idx, (Phi_split, surface, clouds, rho, d_split) in enumerate(t):
Phi_split, surface, clouds, rho, d_split = Phi_split.to(self.device), surface.to(self.device), clouds.to(self.device), rho.to(self.device), d_split.to(self.device)
surf, clouds, out_surface, out_clouds = self.model(d_split, self.surf0, self.clouds0, Phi_split, rho, n_epochs=5)
# Loss
loss = 0.0
for i in range(self.K):
loss += self.loss_fn(out_surface[i], surface)
# loss += self.loss_fn(out_clouds[i], clouds)
if (batch_idx == 0):
loss_avg = loss.item()
else:
loss_avg = self.smooth * loss.item() + (1.0 - self.smooth) * loss_avg
t.set_postfix(loss=loss_avg)
self.loss_val.append(loss_avg)
if (__name__ == '__main__'):
parser = argparse.ArgumentParser(description='Train neural network')
parser.add_argument('--lr', '--learning-rate', default=3e-4, type=float,
metavar='LR', help='Learning rate')
parser.add_argument('--wd', '--weigth-decay', default=0.0, type=float,
metavar='WD', help='Weigth decay')
parser.add_argument('--gpu', '--gpu', default=0, type=int,
metavar='GPU', help='GPU')
parser.add_argument('--smooth', '--smoothing-factor', default=0.05, type=float,
metavar='SM', help='Smoothing factor for loss')
parser.add_argument('--epochs', '--epochs', default=100, type=int,
metavar='EPOCHS', help='Number of epochs')
parser.add_argument('--scheduler', '--scheduler', default=100, type=int,
metavar='SCHEDULER', help='Number of epochs before applying scheduler')
parser.add_argument('--batch', '--batch', default=32, type=int,
metavar='BATCH', help='Batch size')
parser.add_argument('--model', '--model', default='conv1d', type=str,
metavar='MODEL', help='Model class')
parser.add_argument('--k', '--k', default=15, type=int,
metavar='K', help='K')
parsed = vars(parser.parse_args())
deepnet = Training(batch_size=parsed['batch'], gpu=parsed['gpu'], smooth=parsed['smooth'], K=parsed['k'], model_class=parsed['model'])
deepnet.init_optimize(parsed['epochs'], lr=parsed['lr'], weight_decay=parsed['wd'], scheduler=parsed['scheduler'])
deepnet.optimize() | f.close()
| random_line_split |
train_denoise_clouds.py | import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.functional as tf
import torch.utils.data
import time
from tqdm import tqdm
import model_denoise_clouds as model
import argparse
try:
import nvidia_smi
NVIDIA_SMI = True
except:
NVIDIA_SMI = False
import sys
import os
import pathlib
import zarr
class Dataset(torch.utils.data.Dataset):
|
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, filename+'.best')
class Training(object):
def __init__(self, batch_size, validation_split=0.2, gpu=0, smooth=0.05, K=3, model_class='conv1d'):
self.cuda = torch.cuda.is_available()
self.gpu = gpu
self.smooth = smooth
self.device = torch.device(f"cuda:{self.gpu}" if self.cuda else "cpu")
# self.device = 'cpu'
self.batch_size = batch_size
self.model_class = model_class
self.K = K
if (NVIDIA_SMI):
nvidia_smi.nvmlInit()
self.handle = nvidia_smi.nvmlDeviceGetHandleByIndex(self.gpu)
print("Computing in {0} : {1}".format(self.device, nvidia_smi.nvmlDeviceGetName(self.handle)))
self.validation_split = validation_split
kwargs = {'num_workers': 4, 'pin_memory': False} if self.cuda else {}
if (model_class == 'conv1d'):
self.model = model.Network(K=self.K, L=32, device=self.device, model_class=model_class).to(self.device)
if (model_class == 'conv2d'):
self.model = model.Network(K=self.K, L=32, NSIDE=16, device=self.device, model_class=model_class).to(self.device)
print('N. total parameters : {0}'.format(sum(p.numel() for p in self.model.parameters() if p.requires_grad)))
self.train_dataset = Dataset(n_training=20000)
self.validation_dataset = Dataset(n_training=2000)
# Data loaders that will inject data during training
self.train_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True, **kwargs)
self.validation_loader = torch.utils.data.DataLoader(self.validation_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True, **kwargs)
def init_optimize(self, epochs, lr, weight_decay, scheduler):
self.lr = lr
self.weight_decay = weight_decay
print('Learning rate : {0}'.format(lr))
self.n_epochs = epochs
if (self.model_class == 'conv1d'):
root = 'trained_denoise_clouds_1d'
if (self.model_class == 'conv2d'):
root = 'trained_denoise_clouds_2d'
p = pathlib.Path(f'{root}/')
p.mkdir(parents=True, exist_ok=True)
current_time = time.strftime("%Y-%m-%d-%H:%M:%S")
self.out_name = f'{root}/{current_time}'
# Copy model
file = model.__file__.split('/')[-1]
shutil.copyfile(model.__file__, '{0}_model.py'.format(self.out_name))
shutil.copyfile('{0}/{1}'.format(os.path.dirname(os.path.abspath(__file__)), file), '{0}_trainer.py'.format(self.out_name))
self.file_mode = 'w'
f = open('{0}_call.dat'.format(self.out_name), 'w')
f.write('python ' + ' '.join(sys.argv))
f.close()
f = open('{0}_hyper.dat'.format(self.out_name), 'w')
f.write('Learning_rate Weight_decay \n')
f.write('{0} {1}'.format(self.lr, self.weight_decay))
f.close()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
self.loss_fn = nn.MSELoss().to(self.device)
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=scheduler, gamma=0.5)
np.random.seed(123)
self.surf0 = torch.tensor(np.random.rand(self.batch_size, 3072).astype('float32')).to(self.device)
self.surf0 = torch.zeros((self.batch_size, 3072)).to(self.device)
self.clouds0 = torch.tensor(np.random.rand(self.batch_size, 5, 3072).astype('float32')).to(self.device)
self.clouds0 = torch.zeros((self.batch_size, 5, 3072)).to(self.device)
torch.backends.cudnn.benchmark = True
def optimize(self):
self.loss = []
self.loss_val = []
best_loss = 1e10
trainF = open('{0}.loss.csv'.format(self.out_name), self.file_mode)
print('Model : {0}'.format(self.out_name))
for epoch in range(1, self.n_epochs + 1):
self.train(epoch)
self.test(epoch)
self.scheduler.step()
trainF.write('{},{},{}\n'.format(
epoch, self.loss[-1], self.loss_val[-1]))
trainF.flush()
is_best = self.loss_val[-1] < best_loss
best_loss = min(self.loss_val[-1], best_loss)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': self.model.state_dict(),
'best_loss': best_loss,
'optimizer': self.optimizer.state_dict(),
}, is_best, filename='{0}.pth'.format(self.out_name))
trainF.close()
def train(self, epoch):
self.model.train()
print("Epoch {0}/{1}".format(epoch, self.n_epochs))
t = tqdm(self.train_loader)
loss_avg = 0.0
n = 1
for param_group in self.optimizer.param_groups:
current_lr = param_group['lr']
for batch_idx, (Phi_split, surface, clouds, rho, d_split) in enumerate(t):
Phi_split, surface, clouds, rho, d_split = Phi_split.to(self.device), surface.to(self.device), clouds.to(self.device), rho.to(self.device), d_split.to(self.device)
self.optimizer.zero_grad()
surf, clouds, out_surface, out_clouds = self.model(d_split, self.surf0, self.clouds0, Phi_split, rho, n_epochs=5)
# Loss
loss = 0.0
for i in range(self.K):
loss += self.loss_fn(out_surface[i], surface)
# loss += self.loss_fn(out_clouds[i], clouds)
loss.backward()
self.optimizer.step()
if (batch_idx == 0):
loss_avg = loss.item()
else:
loss_avg = self.smooth * loss.item() + (1.0 - self.smooth) * loss_avg
if (NVIDIA_SMI):
tmp = nvidia_smi.nvmlDeviceGetUtilizationRates(self.handle)
t.set_postfix(loss=loss_avg, lr=current_lr, gpu=tmp.gpu, mem=tmp.memory)
else:
t.set_postfix(loss=loss_avg, lr=current_lr)
self.loss.append(loss_avg)
def test(self, epoch):
self.model.eval()
t = tqdm(self.validation_loader)
n = 1
loss_avg = 0.0
with torch.no_grad():
for batch_idx, (Phi_split, surface, clouds, rho, d_split) in enumerate(t):
Phi_split, surface, clouds, rho, d_split = Phi_split.to(self.device), surface.to(self.device), clouds.to(self.device), rho.to(self.device), d_split.to(self.device)
surf, clouds, out_surface, out_clouds = self.model(d_split, self.surf0, self.clouds0, Phi_split, rho, n_epochs=5)
# Loss
loss = 0.0
for i in range(self.K):
loss += self.loss_fn(out_surface[i], surface)
# loss += self.loss_fn(out_clouds[i], clouds)
if (batch_idx == 0):
loss_avg = loss.item()
else:
loss_avg = self.smooth * loss.item() + (1.0 - self.smooth) * loss_avg
t.set_postfix(loss=loss_avg)
self.loss_val.append(loss_avg)
if (__name__ == '__main__'):
parser = argparse.ArgumentParser(description='Train neural network')
parser.add_argument('--lr', '--learning-rate', default=3e-4, type=float,
metavar='LR', help='Learning rate')
parser.add_argument('--wd', '--weigth-decay', default=0.0, type=float,
metavar='WD', help='Weigth decay')
parser.add_argument('--gpu', '--gpu', default=0, type=int,
metavar='GPU', help='GPU')
parser.add_argument('--smooth', '--smoothing-factor', default=0.05, type=float,
metavar='SM', help='Smoothing factor for loss')
parser.add_argument('--epochs', '--epochs', default=100, type=int,
metavar='EPOCHS', help='Number of epochs')
parser.add_argument('--scheduler', '--scheduler', default=100, type=int,
metavar='SCHEDULER', help='Number of epochs before applying scheduler')
parser.add_argument('--batch', '--batch', default=32, type=int,
metavar='BATCH', help='Batch size')
parser.add_argument('--model', '--model', default='conv1d', type=str,
metavar='MODEL', help='Model class')
parser.add_argument('--k', '--k', default=15, type=int,
metavar='K', help='K')
parsed = vars(parser.parse_args())
deepnet = Training(batch_size=parsed['batch'], gpu=parsed['gpu'], smooth=parsed['smooth'], K=parsed['k'], model_class=parsed['model'])
deepnet.init_optimize(parsed['epochs'], lr=parsed['lr'], weight_decay=parsed['wd'], scheduler=parsed['scheduler'])
deepnet.optimize() | """
Dataset class that will provide data during training. Modify it accordingly
for your dataset. This one shows how to do augmenting during training for a
very simple training set
"""
def __init__(self, n_training):
"""
Args:
n_training (int): number of training examples including augmenting
"""
super(Dataset, self).__init__()
self.n_training = n_training
f_matrix = zarr.open('training_matrices.zarr', 'r')
self.matrix = f_matrix['matrix'][:]
self.eigenvals = f_matrix['largest_eval'][:]
n_samples_matrix, _, _ = self.matrix.shape
f_surface = zarr.open('training_surfaces_libnoise.zarr', 'r')
self.surface = 1.0 - f_surface['surface'][:]
n_samples_surface, _ = self.surface.shape
f_clouds = zarr.open('training_clouds.zarr', 'r')
self.clouds = f_clouds['clouds'][:]
n_samples_clouds, _ = self.clouds.shape
self.index_matrix = np.random.randint(low=0, high=n_samples_matrix, size=self.n_training)
self.index_surface = np.random.randint(low=0, high=n_samples_surface, size=self.n_training)
self.index_clouds = np.random.randint(low=0, high=n_samples_clouds, size=(5, self.n_training))
def __getitem__(self, index):
Phi = self.matrix[self.index_matrix[index], :, :].astype('float32')
rho = 0.4 / self.eigenvals[self.index_matrix[index]]
Phi_split = Phi.reshape((5, 24, 3072))
surface = np.random.uniform(low=0.2, high=1.0) * self.surface[self.index_surface[index], :]
clouds = np.random.uniform(low=0.2, high=1.0, size=5)[:, None] * self.clouds[self.index_clouds[:, index], :]
d_split = np.zeros((5, 24))
for i in range(5):
d_split[i, :] = Phi_split[i, :, :] @ (clouds[i, :] + (1.0 - clouds[i, :])**2 * surface)
return Phi_split, surface.astype('float32'), clouds.astype('float32'), rho.astype('float32'), d_split.astype('float32')
def __len__(self):
return self.n_training | identifier_body |
train_denoise_clouds.py | import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.functional as tf
import torch.utils.data
import time
from tqdm import tqdm
import model_denoise_clouds as model
import argparse
try:
import nvidia_smi
NVIDIA_SMI = True
except:
NVIDIA_SMI = False
import sys
import os
import pathlib
import zarr
class Dataset(torch.utils.data.Dataset):
"""
Dataset class that will provide data during training. Modify it accordingly
for your dataset. This one shows how to do augmenting during training for a
very simple training set
"""
def __init__(self, n_training):
"""
Args:
n_training (int): number of training examples including augmenting
"""
super(Dataset, self).__init__()
self.n_training = n_training
f_matrix = zarr.open('training_matrices.zarr', 'r')
self.matrix = f_matrix['matrix'][:]
self.eigenvals = f_matrix['largest_eval'][:]
n_samples_matrix, _, _ = self.matrix.shape
f_surface = zarr.open('training_surfaces_libnoise.zarr', 'r')
self.surface = 1.0 - f_surface['surface'][:]
n_samples_surface, _ = self.surface.shape
f_clouds = zarr.open('training_clouds.zarr', 'r')
self.clouds = f_clouds['clouds'][:]
n_samples_clouds, _ = self.clouds.shape
self.index_matrix = np.random.randint(low=0, high=n_samples_matrix, size=self.n_training)
self.index_surface = np.random.randint(low=0, high=n_samples_surface, size=self.n_training)
self.index_clouds = np.random.randint(low=0, high=n_samples_clouds, size=(5, self.n_training))
def | (self, index):
Phi = self.matrix[self.index_matrix[index], :, :].astype('float32')
rho = 0.4 / self.eigenvals[self.index_matrix[index]]
Phi_split = Phi.reshape((5, 24, 3072))
surface = np.random.uniform(low=0.2, high=1.0) * self.surface[self.index_surface[index], :]
clouds = np.random.uniform(low=0.2, high=1.0, size=5)[:, None] * self.clouds[self.index_clouds[:, index], :]
d_split = np.zeros((5, 24))
for i in range(5):
d_split[i, :] = Phi_split[i, :, :] @ (clouds[i, :] + (1.0 - clouds[i, :])**2 * surface)
return Phi_split, surface.astype('float32'), clouds.astype('float32'), rho.astype('float32'), d_split.astype('float32')
def __len__(self):
return self.n_training
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, filename+'.best')
class Training(object):
def __init__(self, batch_size, validation_split=0.2, gpu=0, smooth=0.05, K=3, model_class='conv1d'):
self.cuda = torch.cuda.is_available()
self.gpu = gpu
self.smooth = smooth
self.device = torch.device(f"cuda:{self.gpu}" if self.cuda else "cpu")
# self.device = 'cpu'
self.batch_size = batch_size
self.model_class = model_class
self.K = K
if (NVIDIA_SMI):
nvidia_smi.nvmlInit()
self.handle = nvidia_smi.nvmlDeviceGetHandleByIndex(self.gpu)
print("Computing in {0} : {1}".format(self.device, nvidia_smi.nvmlDeviceGetName(self.handle)))
self.validation_split = validation_split
kwargs = {'num_workers': 4, 'pin_memory': False} if self.cuda else {}
if (model_class == 'conv1d'):
self.model = model.Network(K=self.K, L=32, device=self.device, model_class=model_class).to(self.device)
if (model_class == 'conv2d'):
self.model = model.Network(K=self.K, L=32, NSIDE=16, device=self.device, model_class=model_class).to(self.device)
print('N. total parameters : {0}'.format(sum(p.numel() for p in self.model.parameters() if p.requires_grad)))
self.train_dataset = Dataset(n_training=20000)
self.validation_dataset = Dataset(n_training=2000)
# Data loaders that will inject data during training
self.train_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True, **kwargs)
self.validation_loader = torch.utils.data.DataLoader(self.validation_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True, **kwargs)
def init_optimize(self, epochs, lr, weight_decay, scheduler):
self.lr = lr
self.weight_decay = weight_decay
print('Learning rate : {0}'.format(lr))
self.n_epochs = epochs
if (self.model_class == 'conv1d'):
root = 'trained_denoise_clouds_1d'
if (self.model_class == 'conv2d'):
root = 'trained_denoise_clouds_2d'
p = pathlib.Path(f'{root}/')
p.mkdir(parents=True, exist_ok=True)
current_time = time.strftime("%Y-%m-%d-%H:%M:%S")
self.out_name = f'{root}/{current_time}'
# Copy model
file = model.__file__.split('/')[-1]
shutil.copyfile(model.__file__, '{0}_model.py'.format(self.out_name))
shutil.copyfile('{0}/{1}'.format(os.path.dirname(os.path.abspath(__file__)), file), '{0}_trainer.py'.format(self.out_name))
self.file_mode = 'w'
f = open('{0}_call.dat'.format(self.out_name), 'w')
f.write('python ' + ' '.join(sys.argv))
f.close()
f = open('{0}_hyper.dat'.format(self.out_name), 'w')
f.write('Learning_rate Weight_decay \n')
f.write('{0} {1}'.format(self.lr, self.weight_decay))
f.close()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
self.loss_fn = nn.MSELoss().to(self.device)
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=scheduler, gamma=0.5)
np.random.seed(123)
self.surf0 = torch.tensor(np.random.rand(self.batch_size, 3072).astype('float32')).to(self.device)
self.surf0 = torch.zeros((self.batch_size, 3072)).to(self.device)
self.clouds0 = torch.tensor(np.random.rand(self.batch_size, 5, 3072).astype('float32')).to(self.device)
self.clouds0 = torch.zeros((self.batch_size, 5, 3072)).to(self.device)
torch.backends.cudnn.benchmark = True
def optimize(self):
self.loss = []
self.loss_val = []
best_loss = 1e10
trainF = open('{0}.loss.csv'.format(self.out_name), self.file_mode)
print('Model : {0}'.format(self.out_name))
for epoch in range(1, self.n_epochs + 1):
self.train(epoch)
self.test(epoch)
self.scheduler.step()
trainF.write('{},{},{}\n'.format(
epoch, self.loss[-1], self.loss_val[-1]))
trainF.flush()
is_best = self.loss_val[-1] < best_loss
best_loss = min(self.loss_val[-1], best_loss)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': self.model.state_dict(),
'best_loss': best_loss,
'optimizer': self.optimizer.state_dict(),
}, is_best, filename='{0}.pth'.format(self.out_name))
trainF.close()
def train(self, epoch):
self.model.train()
print("Epoch {0}/{1}".format(epoch, self.n_epochs))
t = tqdm(self.train_loader)
loss_avg = 0.0
n = 1
for param_group in self.optimizer.param_groups:
current_lr = param_group['lr']
for batch_idx, (Phi_split, surface, clouds, rho, d_split) in enumerate(t):
Phi_split, surface, clouds, rho, d_split = Phi_split.to(self.device), surface.to(self.device), clouds.to(self.device), rho.to(self.device), d_split.to(self.device)
self.optimizer.zero_grad()
surf, clouds, out_surface, out_clouds = self.model(d_split, self.surf0, self.clouds0, Phi_split, rho, n_epochs=5)
# Loss
loss = 0.0
for i in range(self.K):
loss += self.loss_fn(out_surface[i], surface)
# loss += self.loss_fn(out_clouds[i], clouds)
loss.backward()
self.optimizer.step()
if (batch_idx == 0):
loss_avg = loss.item()
else:
loss_avg = self.smooth * loss.item() + (1.0 - self.smooth) * loss_avg
if (NVIDIA_SMI):
tmp = nvidia_smi.nvmlDeviceGetUtilizationRates(self.handle)
t.set_postfix(loss=loss_avg, lr=current_lr, gpu=tmp.gpu, mem=tmp.memory)
else:
t.set_postfix(loss=loss_avg, lr=current_lr)
self.loss.append(loss_avg)
def test(self, epoch):
self.model.eval()
t = tqdm(self.validation_loader)
n = 1
loss_avg = 0.0
with torch.no_grad():
for batch_idx, (Phi_split, surface, clouds, rho, d_split) in enumerate(t):
Phi_split, surface, clouds, rho, d_split = Phi_split.to(self.device), surface.to(self.device), clouds.to(self.device), rho.to(self.device), d_split.to(self.device)
surf, clouds, out_surface, out_clouds = self.model(d_split, self.surf0, self.clouds0, Phi_split, rho, n_epochs=5)
# Loss
loss = 0.0
for i in range(self.K):
loss += self.loss_fn(out_surface[i], surface)
# loss += self.loss_fn(out_clouds[i], clouds)
if (batch_idx == 0):
loss_avg = loss.item()
else:
loss_avg = self.smooth * loss.item() + (1.0 - self.smooth) * loss_avg
t.set_postfix(loss=loss_avg)
self.loss_val.append(loss_avg)
if (__name__ == '__main__'):
parser = argparse.ArgumentParser(description='Train neural network')
parser.add_argument('--lr', '--learning-rate', default=3e-4, type=float,
metavar='LR', help='Learning rate')
parser.add_argument('--wd', '--weigth-decay', default=0.0, type=float,
metavar='WD', help='Weigth decay')
parser.add_argument('--gpu', '--gpu', default=0, type=int,
metavar='GPU', help='GPU')
parser.add_argument('--smooth', '--smoothing-factor', default=0.05, type=float,
metavar='SM', help='Smoothing factor for loss')
parser.add_argument('--epochs', '--epochs', default=100, type=int,
metavar='EPOCHS', help='Number of epochs')
parser.add_argument('--scheduler', '--scheduler', default=100, type=int,
metavar='SCHEDULER', help='Number of epochs before applying scheduler')
parser.add_argument('--batch', '--batch', default=32, type=int,
metavar='BATCH', help='Batch size')
parser.add_argument('--model', '--model', default='conv1d', type=str,
metavar='MODEL', help='Model class')
parser.add_argument('--k', '--k', default=15, type=int,
metavar='K', help='K')
parsed = vars(parser.parse_args())
deepnet = Training(batch_size=parsed['batch'], gpu=parsed['gpu'], smooth=parsed['smooth'], K=parsed['k'], model_class=parsed['model'])
deepnet.init_optimize(parsed['epochs'], lr=parsed['lr'], weight_decay=parsed['wd'], scheduler=parsed['scheduler'])
deepnet.optimize() | __getitem__ | identifier_name |
train_denoise_clouds.py | import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.functional as tf
import torch.utils.data
import time
from tqdm import tqdm
import model_denoise_clouds as model
import argparse
try:
import nvidia_smi
NVIDIA_SMI = True
except:
NVIDIA_SMI = False
import sys
import os
import pathlib
import zarr
class Dataset(torch.utils.data.Dataset):
"""
Dataset class that will provide data during training. Modify it accordingly
for your dataset. This one shows how to do augmenting during training for a
very simple training set
"""
def __init__(self, n_training):
"""
Args:
n_training (int): number of training examples including augmenting
"""
super(Dataset, self).__init__()
self.n_training = n_training
f_matrix = zarr.open('training_matrices.zarr', 'r')
self.matrix = f_matrix['matrix'][:]
self.eigenvals = f_matrix['largest_eval'][:]
n_samples_matrix, _, _ = self.matrix.shape
f_surface = zarr.open('training_surfaces_libnoise.zarr', 'r')
self.surface = 1.0 - f_surface['surface'][:]
n_samples_surface, _ = self.surface.shape
f_clouds = zarr.open('training_clouds.zarr', 'r')
self.clouds = f_clouds['clouds'][:]
n_samples_clouds, _ = self.clouds.shape
self.index_matrix = np.random.randint(low=0, high=n_samples_matrix, size=self.n_training)
self.index_surface = np.random.randint(low=0, high=n_samples_surface, size=self.n_training)
self.index_clouds = np.random.randint(low=0, high=n_samples_clouds, size=(5, self.n_training))
def __getitem__(self, index):
Phi = self.matrix[self.index_matrix[index], :, :].astype('float32')
rho = 0.4 / self.eigenvals[self.index_matrix[index]]
Phi_split = Phi.reshape((5, 24, 3072))
surface = np.random.uniform(low=0.2, high=1.0) * self.surface[self.index_surface[index], :]
clouds = np.random.uniform(low=0.2, high=1.0, size=5)[:, None] * self.clouds[self.index_clouds[:, index], :]
d_split = np.zeros((5, 24))
for i in range(5):
|
return Phi_split, surface.astype('float32'), clouds.astype('float32'), rho.astype('float32'), d_split.astype('float32')
def __len__(self):
return self.n_training
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, filename+'.best')
class Training(object):
def __init__(self, batch_size, validation_split=0.2, gpu=0, smooth=0.05, K=3, model_class='conv1d'):
self.cuda = torch.cuda.is_available()
self.gpu = gpu
self.smooth = smooth
self.device = torch.device(f"cuda:{self.gpu}" if self.cuda else "cpu")
# self.device = 'cpu'
self.batch_size = batch_size
self.model_class = model_class
self.K = K
if (NVIDIA_SMI):
nvidia_smi.nvmlInit()
self.handle = nvidia_smi.nvmlDeviceGetHandleByIndex(self.gpu)
print("Computing in {0} : {1}".format(self.device, nvidia_smi.nvmlDeviceGetName(self.handle)))
self.validation_split = validation_split
kwargs = {'num_workers': 4, 'pin_memory': False} if self.cuda else {}
if (model_class == 'conv1d'):
self.model = model.Network(K=self.K, L=32, device=self.device, model_class=model_class).to(self.device)
if (model_class == 'conv2d'):
self.model = model.Network(K=self.K, L=32, NSIDE=16, device=self.device, model_class=model_class).to(self.device)
print('N. total parameters : {0}'.format(sum(p.numel() for p in self.model.parameters() if p.requires_grad)))
self.train_dataset = Dataset(n_training=20000)
self.validation_dataset = Dataset(n_training=2000)
# Data loaders that will inject data during training
self.train_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True, **kwargs)
self.validation_loader = torch.utils.data.DataLoader(self.validation_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True, **kwargs)
def init_optimize(self, epochs, lr, weight_decay, scheduler):
self.lr = lr
self.weight_decay = weight_decay
print('Learning rate : {0}'.format(lr))
self.n_epochs = epochs
if (self.model_class == 'conv1d'):
root = 'trained_denoise_clouds_1d'
if (self.model_class == 'conv2d'):
root = 'trained_denoise_clouds_2d'
p = pathlib.Path(f'{root}/')
p.mkdir(parents=True, exist_ok=True)
current_time = time.strftime("%Y-%m-%d-%H:%M:%S")
self.out_name = f'{root}/{current_time}'
# Copy model
file = model.__file__.split('/')[-1]
shutil.copyfile(model.__file__, '{0}_model.py'.format(self.out_name))
shutil.copyfile('{0}/{1}'.format(os.path.dirname(os.path.abspath(__file__)), file), '{0}_trainer.py'.format(self.out_name))
self.file_mode = 'w'
f = open('{0}_call.dat'.format(self.out_name), 'w')
f.write('python ' + ' '.join(sys.argv))
f.close()
f = open('{0}_hyper.dat'.format(self.out_name), 'w')
f.write('Learning_rate Weight_decay \n')
f.write('{0} {1}'.format(self.lr, self.weight_decay))
f.close()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
self.loss_fn = nn.MSELoss().to(self.device)
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=scheduler, gamma=0.5)
np.random.seed(123)
self.surf0 = torch.tensor(np.random.rand(self.batch_size, 3072).astype('float32')).to(self.device)
self.surf0 = torch.zeros((self.batch_size, 3072)).to(self.device)
self.clouds0 = torch.tensor(np.random.rand(self.batch_size, 5, 3072).astype('float32')).to(self.device)
self.clouds0 = torch.zeros((self.batch_size, 5, 3072)).to(self.device)
torch.backends.cudnn.benchmark = True
def optimize(self):
self.loss = []
self.loss_val = []
best_loss = 1e10
trainF = open('{0}.loss.csv'.format(self.out_name), self.file_mode)
print('Model : {0}'.format(self.out_name))
for epoch in range(1, self.n_epochs + 1):
self.train(epoch)
self.test(epoch)
self.scheduler.step()
trainF.write('{},{},{}\n'.format(
epoch, self.loss[-1], self.loss_val[-1]))
trainF.flush()
is_best = self.loss_val[-1] < best_loss
best_loss = min(self.loss_val[-1], best_loss)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': self.model.state_dict(),
'best_loss': best_loss,
'optimizer': self.optimizer.state_dict(),
}, is_best, filename='{0}.pth'.format(self.out_name))
trainF.close()
def train(self, epoch):
self.model.train()
print("Epoch {0}/{1}".format(epoch, self.n_epochs))
t = tqdm(self.train_loader)
loss_avg = 0.0
n = 1
for param_group in self.optimizer.param_groups:
current_lr = param_group['lr']
for batch_idx, (Phi_split, surface, clouds, rho, d_split) in enumerate(t):
Phi_split, surface, clouds, rho, d_split = Phi_split.to(self.device), surface.to(self.device), clouds.to(self.device), rho.to(self.device), d_split.to(self.device)
self.optimizer.zero_grad()
surf, clouds, out_surface, out_clouds = self.model(d_split, self.surf0, self.clouds0, Phi_split, rho, n_epochs=5)
# Loss
loss = 0.0
for i in range(self.K):
loss += self.loss_fn(out_surface[i], surface)
# loss += self.loss_fn(out_clouds[i], clouds)
loss.backward()
self.optimizer.step()
if (batch_idx == 0):
loss_avg = loss.item()
else:
loss_avg = self.smooth * loss.item() + (1.0 - self.smooth) * loss_avg
if (NVIDIA_SMI):
tmp = nvidia_smi.nvmlDeviceGetUtilizationRates(self.handle)
t.set_postfix(loss=loss_avg, lr=current_lr, gpu=tmp.gpu, mem=tmp.memory)
else:
t.set_postfix(loss=loss_avg, lr=current_lr)
self.loss.append(loss_avg)
def test(self, epoch):
self.model.eval()
t = tqdm(self.validation_loader)
n = 1
loss_avg = 0.0
with torch.no_grad():
for batch_idx, (Phi_split, surface, clouds, rho, d_split) in enumerate(t):
Phi_split, surface, clouds, rho, d_split = Phi_split.to(self.device), surface.to(self.device), clouds.to(self.device), rho.to(self.device), d_split.to(self.device)
surf, clouds, out_surface, out_clouds = self.model(d_split, self.surf0, self.clouds0, Phi_split, rho, n_epochs=5)
# Loss
loss = 0.0
for i in range(self.K):
loss += self.loss_fn(out_surface[i], surface)
# loss += self.loss_fn(out_clouds[i], clouds)
if (batch_idx == 0):
loss_avg = loss.item()
else:
loss_avg = self.smooth * loss.item() + (1.0 - self.smooth) * loss_avg
t.set_postfix(loss=loss_avg)
self.loss_val.append(loss_avg)
if (__name__ == '__main__'):
parser = argparse.ArgumentParser(description='Train neural network')
parser.add_argument('--lr', '--learning-rate', default=3e-4, type=float,
metavar='LR', help='Learning rate')
parser.add_argument('--wd', '--weigth-decay', default=0.0, type=float,
metavar='WD', help='Weigth decay')
parser.add_argument('--gpu', '--gpu', default=0, type=int,
metavar='GPU', help='GPU')
parser.add_argument('--smooth', '--smoothing-factor', default=0.05, type=float,
metavar='SM', help='Smoothing factor for loss')
parser.add_argument('--epochs', '--epochs', default=100, type=int,
metavar='EPOCHS', help='Number of epochs')
parser.add_argument('--scheduler', '--scheduler', default=100, type=int,
metavar='SCHEDULER', help='Number of epochs before applying scheduler')
parser.add_argument('--batch', '--batch', default=32, type=int,
metavar='BATCH', help='Batch size')
parser.add_argument('--model', '--model', default='conv1d', type=str,
metavar='MODEL', help='Model class')
parser.add_argument('--k', '--k', default=15, type=int,
metavar='K', help='K')
parsed = vars(parser.parse_args())
deepnet = Training(batch_size=parsed['batch'], gpu=parsed['gpu'], smooth=parsed['smooth'], K=parsed['k'], model_class=parsed['model'])
deepnet.init_optimize(parsed['epochs'], lr=parsed['lr'], weight_decay=parsed['wd'], scheduler=parsed['scheduler'])
deepnet.optimize() | d_split[i, :] = Phi_split[i, :, :] @ (clouds[i, :] + (1.0 - clouds[i, :])**2 * surface) | conditional_block |
create_load_files.py | import pandas as pd
import calendar
import re
import csv
import codecs
from src.clean_text import give_clean_words_list
import pyspark.sql.types as types
from pyspark.sql import SQLContext
from pyspark import SparkConf, SparkContext
import numpy as np
import operator
from collections import defaultdict
def bags_format(line):
|
conf = SparkConf().setAppName('word count')
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
schema = types.StructType([types.StructField('Word', types.StringType(), False),
types.StructField('Count', types.IntegerType(), False),
types.StructField('Percent', types.FloatType(), False)])
genre_list = ["crime", "fantasy", "young-adult", "romance", "comedy", "dystopia",
"action", "historical", "non-fiction", "science fiction", "self-help"]
no_of_genres = len(genre_list)
for genre in genre_list:
crime_words = sc.textFile('../../data/word_counts/' + genre + '_counts')
if genre == "young-adult":
genre = 'YA'
elif genre == "non-fiction":
genre = "NF"
elif genre == "science fiction":
genre = "SF"
elif genre == "self-help":
genre = "SH"
genre_bag_words = crime_words.map(bags_format)
genre_word_df = sqlContext.createDataFrame(genre_bag_words, schema=schema).cache()
genre_word_df.createOrReplaceTempView(genre + '_words')
# Input Files
books_data = pd.read_csv("../../data/batch_1/books.csv")
img_price_data = pd.read_csv("../../data/batch_1/image_and_price.csv")
desc_data = pd.read_csv("../../data/batch_1/description_all.csv")
amazon_data = pd.read_csv("../../data/batch_1/amazon.csv")
reviews_data = pd.read_csv("../../data/batch_1/reviews_users.csv")
# Created Files
out = csv.writer(codecs.open("../../data/batch_1/final_1.csv", "w", "utf-8"), delimiter=",", quoting=csv.QUOTE_ALL)
out.writerow(["ID", "Book Title", "ISBN", "Rating", "Author", "Language", "Pages", "Publication", "Publish Date",
"Publish Month", "Publish Year", "Genres", "Image", "Google Play", "Google Play URL", "Barnes and Noble",
"Barnes and Noble URL", "Indie Bound", "Indie Bound URL",
"Amazon", "Amazon URL", "R1", "R1 URL", "R2", "R2 URL", "R3", "R3 URL", "R4", "R4 URL", "R5", "R5 URL",
"GoodReads_Description", "Wiki_Description", "Readgeek_Description", "Riffle_Description",
"Amazon_Description", "crime", "fantasy", "young-adult", "romance", "comedy", "dystopia", "action",
"historical", "non-fiction", "science fiction", "self-help"])
out_user_review = csv.writer(codecs.open("../../data/batch_1/final_2.csv", "w", "utf-8"), delimiter=",",
quoting=csv.QUOTE_ALL)
out_user_review.writerow(["ID", "Book Title", "ISBN", "User ID", "User Name", "User URL", "Rating",
"Review Data", "Review"])
book_id = 100000 - 1
reviews_index = -1
month_dict = {v: k for k, v in enumerate(calendar.month_abbr)}
for index, row in books_data.iterrows():
book_id += 1
title = row["Book Title"]
print(str(index) + " " + title)
# Books CSV data
isbn = row["ISBN"]
rating = row["Rating"]
author = row["Author"]
if type(author) is float:
author = ""
language = row["Language"]
if type(language) is float:
language = ""
pages = row["Pages"]
if pages == "[]":
pages = -1
publication = row["Publication"]
if type(publication) is float:
publication = ""
url = row["Book URL"]
if type(url) is float:
url = ""
date_str = row["Publish Date"]
if type(date_str) is float:
pub_date = -1
pub_month = -1
pub_year = -1
else:
date_str_list = date_str.split(" ")
if len(date_str_list) == 3:
pub_month = int(month_dict[date_str_list[0][0:3]])
pub_date = int(re.findall(r'\d+', date_str_list[1])[0])
pub_year = int(date_str_list[2])
elif len(date_str_list) == 2:
pub_month = int(month_dict[date_str_list[0][0:3]])
pub_year = int(date_str_list[1])
elif len(date_str_list) == 1:
pub_date = -1
pub_month = -1
pub_year = int(date_str_list[0])
genres = row["Genres"]
if type(genres) is float:
genres = ""
# Image Price data
img_url = img_price_data["Book Image"][index]
if type(img_url) is float:
img_url = ""
gp_price = img_price_data["Google Play"][index]
if type(gp_price) is float or "http" in gp_price:
gp_price = ""
gp_url = img_price_data["Google Play URL"][index]
if type(gp_url) is float:
gp_url = ""
bnb_price = img_price_data["Barnes and Noble"][index]
if type(bnb_price) is float or "http" in bnb_price:
bnb_price = ""
bnb_url = img_price_data["Barnes and Noble URL"][index]
if type(bnb_url) is float:
bnb_url = ""
indie_price = img_price_data["Indie Bound"][index]
if type(indie_price) is float or "http" in indie_price:
indie_price = ""
indie_url = img_price_data["Indie Bound URL"][index]
if type(indie_url) is float:
indie_url = ""
# Amazon Data
amazon_price = amazon_data["Amazon Price"][index]
if type(amazon_price) is float or "http" in amazon_price:
amazon_price = ""
r1 = amazon_data["R1 Title"][index]
if type(r1) is float:
r1 = ""
r1_url = amazon_data["R1 URL"][index]
if type(r1_url) is float:
r1_url = ""
r2 = amazon_data["R2 Title"][index]
if type(r2) is float:
r2 = ""
r2_url = amazon_data["R2 URL"][index]
if type(r2_url) is float:
r2_url = ""
r3 = amazon_data["R3 Title"][index]
if type(r3) is float:
r3 = ""
r3_url = amazon_data["R3 URL"][index]
if type(r3_url) is float:
r3_url = ""
r4 = amazon_data["R4 Title"][index]
if type(r4) is float:
r4 = ""
r4_url = amazon_data["R4 URL"][index]
if type(r4_url) is float:
r4_url = ""
r5 = amazon_data["R5 Title"][index]
if type(r5) is float:
r5 = ""
r5_url = amazon_data["R5 URL"][index]
if type(r5_url) is float:
r5_url = ""
amazon_url = amazon_data["Book URL"][index]
if type(amazon_url) is float:
amazon_url = ""
# Description data
good_reads_desc = desc_data["GoodReads Description"][index]
if type(good_reads_desc) is float:
good_reads_desc = ""
good_reads_desc = give_clean_words_list(good_reads_desc)
wiki_desc = desc_data["Wikipedia Description"][index]
if type(wiki_desc) is float:
wiki_desc = ""
wiki_desc = give_clean_words_list(wiki_desc)
read_geek_desc = desc_data["Readgeek Description"][index]
if type(read_geek_desc) is float:
read_geek_desc = ""
read_geek_desc = give_clean_words_list(read_geek_desc)
riffle_desc = desc_data["Riffle Description"][index]
if type(riffle_desc) is float:
riffle_desc = ""
riffle_desc = give_clean_words_list(riffle_desc)
amazon_desc = desc_data["Amazon Description"][index]
if type(amazon_desc) is float:
amazon_desc = ""
amazon_desc = give_clean_words_list(amazon_desc)
output_result = [book_id, title, isbn, rating, author, language, pages, publication, pub_date, pub_month, pub_year,
genres, img_url, gp_price, gp_url, bnb_price, bnb_url, indie_price, indie_url, amazon_price,
amazon_url, r1, r1_url, r2, r2_url, r3, r3_url, r4, r4_url, r5, r5_url, good_reads_desc, wiki_desc,
read_geek_desc, riffle_desc, amazon_desc]
# Reviews File
no_comments = 0
while True:
reviews_index += 1
try:
review_isbn = reviews_data["ISBN"][reviews_index]
review_book_title = reviews_data["Book Title"][reviews_index]
except KeyError:
break
if review_book_title == title or review_isbn == isbn:
user_id = reviews_data["User ID"][reviews_index]
user_name = reviews_data["User Name"][reviews_index]
user_url = reviews_data["User URL"][reviews_index]
review_date = reviews_data["Review Date"][reviews_index]
review_text = reviews_data["Review"][reviews_index]
user_rating = reviews_data["Rating"][reviews_index]
if type(review_text) is float:
review_text = ""
review_text = give_clean_words_list(review_text)
out_user_review.writerow([book_id, title, isbn, user_id, user_name, user_url, user_rating,
review_date, review_text])
no_comments += 1
else:
reviews_index -= 1
break
print("Comments = " + str(no_comments))
# Genre Classifier
tokens = good_reads_desc + wiki_desc + read_geek_desc + riffle_desc + amazon_desc + author.split(" ")
desc_tokens = sc.parallelize(tokens)
words = desc_tokens.map(lambda w: (w, 1))
wordcount = words.reduceByKey(operator.add)
total_word_count = wordcount.map(lambda x: x[1]).sum()
words_with_count_percents = wordcount.map(lambda line: (line[0], line[1], (line[1] / total_word_count)))
df = sqlContext.createDataFrame(words_with_count_percents, schema=schema).cache()
df.createOrReplaceTempView('description')
genre_scores = []
for genre in genre_list:
if genre == "young-adult":
genre = 'YA'
elif genre == "non-fiction":
genre = "NF"
elif genre == "science fiction":
genre = "SF"
elif genre == "self-help":
genre = "SH"
common_words = sqlContext.sql('''
select description.word as Word, description.Count as desc_count,''' +
genre + '''_words.Count as genre_count, description.Percent as desc_percent,
''' + genre + '''_words.Percent as genre_percent from description join
''' + genre + '''_words on description.Word = ''' + genre + '''_words.Word
''')
common_words.createOrReplaceTempView('common_words')
word_score = sqlContext.sql('''
select desc_percent * genre_percent as score from common_words
''')
word_score.createOrReplaceTempView('word_score')
genre_score = sqlContext.sql('''
select sum(score) as Genre_score from word_score
''')
genre_scores.append(genre_score.select('Genre_score').head(1)[0][0])
try:
genre_scores = np.array(genre_scores)
index_scores = np.argsort(genre_scores)
genre_scores = np.sort(genre_scores)
except TypeError:
for genre in genre_list:
output_result.append(0)
out.writerow(output_result)
continue
base = genre_scores[-4]
percent_changes = []
for i in range(0, 3):
curr = genre_scores[no_of_genres - 1 - i]
percent_changes.append(((curr - base) / base) * 100)
percent_changes = np.array(percent_changes)
base_percent = (100 - percent_changes.sum()) / 4
genre_dict = dict()
genre_dict = defaultdict(lambda: 0, genre_dict)
for i in range(0, 3):
genre_index = index_scores[no_of_genres - 1 - i]
genre_name = genre_list[genre_index]
genre_value = percent_changes[i] + base_percent
print(genre_name + " " + str(genre_value) + "%")
genre_dict[genre_name] = genre_value
genre_dict[genre_list[index_scores[-4]]] = base_percent
print(genre_list[index_scores[-4]] + " " + str(base_percent) + "%\n")
for genre in genre_list:
output_result.append(genre_dict[genre])
out.writerow(output_result)
| contents = line.split()
return str(contents[0]), int(contents[1]), float(contents[2]) | identifier_body |
create_load_files.py | import pandas as pd
import calendar
import re
import csv
import codecs
from src.clean_text import give_clean_words_list
import pyspark.sql.types as types
from pyspark.sql import SQLContext
from pyspark import SparkConf, SparkContext
import numpy as np
import operator
from collections import defaultdict
def bags_format(line):
contents = line.split()
return str(contents[0]), int(contents[1]), float(contents[2])
conf = SparkConf().setAppName('word count')
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
schema = types.StructType([types.StructField('Word', types.StringType(), False),
types.StructField('Count', types.IntegerType(), False),
types.StructField('Percent', types.FloatType(), False)])
genre_list = ["crime", "fantasy", "young-adult", "romance", "comedy", "dystopia",
"action", "historical", "non-fiction", "science fiction", "self-help"]
no_of_genres = len(genre_list)
for genre in genre_list:
crime_words = sc.textFile('../../data/word_counts/' + genre + '_counts')
if genre == "young-adult":
genre = 'YA'
elif genre == "non-fiction":
genre = "NF"
elif genre == "science fiction":
genre = "SF"
elif genre == "self-help":
genre = "SH"
genre_bag_words = crime_words.map(bags_format)
genre_word_df = sqlContext.createDataFrame(genre_bag_words, schema=schema).cache()
genre_word_df.createOrReplaceTempView(genre + '_words')
# Input Files
books_data = pd.read_csv("../../data/batch_1/books.csv")
img_price_data = pd.read_csv("../../data/batch_1/image_and_price.csv")
desc_data = pd.read_csv("../../data/batch_1/description_all.csv")
amazon_data = pd.read_csv("../../data/batch_1/amazon.csv")
reviews_data = pd.read_csv("../../data/batch_1/reviews_users.csv")
# Created Files
out = csv.writer(codecs.open("../../data/batch_1/final_1.csv", "w", "utf-8"), delimiter=",", quoting=csv.QUOTE_ALL)
out.writerow(["ID", "Book Title", "ISBN", "Rating", "Author", "Language", "Pages", "Publication", "Publish Date",
"Publish Month", "Publish Year", "Genres", "Image", "Google Play", "Google Play URL", "Barnes and Noble",
"Barnes and Noble URL", "Indie Bound", "Indie Bound URL",
"Amazon", "Amazon URL", "R1", "R1 URL", "R2", "R2 URL", "R3", "R3 URL", "R4", "R4 URL", "R5", "R5 URL",
"GoodReads_Description", "Wiki_Description", "Readgeek_Description", "Riffle_Description",
"Amazon_Description", "crime", "fantasy", "young-adult", "romance", "comedy", "dystopia", "action",
"historical", "non-fiction", "science fiction", "self-help"])
out_user_review = csv.writer(codecs.open("../../data/batch_1/final_2.csv", "w", "utf-8"), delimiter=",",
quoting=csv.QUOTE_ALL)
out_user_review.writerow(["ID", "Book Title", "ISBN", "User ID", "User Name", "User URL", "Rating",
"Review Data", "Review"])
book_id = 100000 - 1
reviews_index = -1
month_dict = {v: k for k, v in enumerate(calendar.month_abbr)}
for index, row in books_data.iterrows():
book_id += 1
title = row["Book Title"]
print(str(index) + " " + title)
# Books CSV data
isbn = row["ISBN"]
rating = row["Rating"]
author = row["Author"]
if type(author) is float:
author = ""
language = row["Language"]
if type(language) is float:
language = ""
pages = row["Pages"]
if pages == "[]":
pages = -1
publication = row["Publication"]
if type(publication) is float:
publication = ""
url = row["Book URL"]
if type(url) is float:
url = ""
date_str = row["Publish Date"]
if type(date_str) is float:
pub_date = -1
pub_month = -1
pub_year = -1
else:
date_str_list = date_str.split(" ")
if len(date_str_list) == 3:
pub_month = int(month_dict[date_str_list[0][0:3]])
pub_date = int(re.findall(r'\d+', date_str_list[1])[0])
pub_year = int(date_str_list[2])
elif len(date_str_list) == 2:
pub_month = int(month_dict[date_str_list[0][0:3]])
pub_year = int(date_str_list[1])
elif len(date_str_list) == 1:
pub_date = -1
pub_month = -1
pub_year = int(date_str_list[0])
genres = row["Genres"]
if type(genres) is float:
genres = ""
# Image Price data
img_url = img_price_data["Book Image"][index]
if type(img_url) is float:
img_url = ""
gp_price = img_price_data["Google Play"][index]
if type(gp_price) is float or "http" in gp_price:
gp_price = ""
gp_url = img_price_data["Google Play URL"][index]
if type(gp_url) is float:
gp_url = ""
bnb_price = img_price_data["Barnes and Noble"][index]
if type(bnb_price) is float or "http" in bnb_price:
bnb_price = ""
bnb_url = img_price_data["Barnes and Noble URL"][index]
if type(bnb_url) is float:
bnb_url = ""
indie_price = img_price_data["Indie Bound"][index]
if type(indie_price) is float or "http" in indie_price:
indie_price = ""
indie_url = img_price_data["Indie Bound URL"][index]
if type(indie_url) is float:
indie_url = ""
# Amazon Data
amazon_price = amazon_data["Amazon Price"][index]
if type(amazon_price) is float or "http" in amazon_price:
amazon_price = ""
r1 = amazon_data["R1 Title"][index]
if type(r1) is float:
r1 = ""
r1_url = amazon_data["R1 URL"][index]
if type(r1_url) is float:
r1_url = ""
r2 = amazon_data["R2 Title"][index]
if type(r2) is float:
r2 = ""
r2_url = amazon_data["R2 URL"][index]
if type(r2_url) is float:
r2_url = ""
r3 = amazon_data["R3 Title"][index]
if type(r3) is float:
r3 = ""
r3_url = amazon_data["R3 URL"][index]
if type(r3_url) is float:
r3_url = ""
r4 = amazon_data["R4 Title"][index]
if type(r4) is float:
r4 = ""
r4_url = amazon_data["R4 URL"][index]
if type(r4_url) is float:
r4_url = ""
r5 = amazon_data["R5 Title"][index]
if type(r5) is float:
|
r5_url = amazon_data["R5 URL"][index]
if type(r5_url) is float:
r5_url = ""
amazon_url = amazon_data["Book URL"][index]
if type(amazon_url) is float:
amazon_url = ""
# Description data
good_reads_desc = desc_data["GoodReads Description"][index]
if type(good_reads_desc) is float:
good_reads_desc = ""
good_reads_desc = give_clean_words_list(good_reads_desc)
wiki_desc = desc_data["Wikipedia Description"][index]
if type(wiki_desc) is float:
wiki_desc = ""
wiki_desc = give_clean_words_list(wiki_desc)
read_geek_desc = desc_data["Readgeek Description"][index]
if type(read_geek_desc) is float:
read_geek_desc = ""
read_geek_desc = give_clean_words_list(read_geek_desc)
riffle_desc = desc_data["Riffle Description"][index]
if type(riffle_desc) is float:
riffle_desc = ""
riffle_desc = give_clean_words_list(riffle_desc)
amazon_desc = desc_data["Amazon Description"][index]
if type(amazon_desc) is float:
amazon_desc = ""
amazon_desc = give_clean_words_list(amazon_desc)
output_result = [book_id, title, isbn, rating, author, language, pages, publication, pub_date, pub_month, pub_year,
genres, img_url, gp_price, gp_url, bnb_price, bnb_url, indie_price, indie_url, amazon_price,
amazon_url, r1, r1_url, r2, r2_url, r3, r3_url, r4, r4_url, r5, r5_url, good_reads_desc, wiki_desc,
read_geek_desc, riffle_desc, amazon_desc]
# Reviews File
no_comments = 0
while True:
reviews_index += 1
try:
review_isbn = reviews_data["ISBN"][reviews_index]
review_book_title = reviews_data["Book Title"][reviews_index]
except KeyError:
break
if review_book_title == title or review_isbn == isbn:
user_id = reviews_data["User ID"][reviews_index]
user_name = reviews_data["User Name"][reviews_index]
user_url = reviews_data["User URL"][reviews_index]
review_date = reviews_data["Review Date"][reviews_index]
review_text = reviews_data["Review"][reviews_index]
user_rating = reviews_data["Rating"][reviews_index]
if type(review_text) is float:
review_text = ""
review_text = give_clean_words_list(review_text)
out_user_review.writerow([book_id, title, isbn, user_id, user_name, user_url, user_rating,
review_date, review_text])
no_comments += 1
else:
reviews_index -= 1
break
print("Comments = " + str(no_comments))
# Genre Classifier
tokens = good_reads_desc + wiki_desc + read_geek_desc + riffle_desc + amazon_desc + author.split(" ")
desc_tokens = sc.parallelize(tokens)
words = desc_tokens.map(lambda w: (w, 1))
wordcount = words.reduceByKey(operator.add)
total_word_count = wordcount.map(lambda x: x[1]).sum()
words_with_count_percents = wordcount.map(lambda line: (line[0], line[1], (line[1] / total_word_count)))
df = sqlContext.createDataFrame(words_with_count_percents, schema=schema).cache()
df.createOrReplaceTempView('description')
genre_scores = []
for genre in genre_list:
if genre == "young-adult":
genre = 'YA'
elif genre == "non-fiction":
genre = "NF"
elif genre == "science fiction":
genre = "SF"
elif genre == "self-help":
genre = "SH"
common_words = sqlContext.sql('''
select description.word as Word, description.Count as desc_count,''' +
genre + '''_words.Count as genre_count, description.Percent as desc_percent,
''' + genre + '''_words.Percent as genre_percent from description join
''' + genre + '''_words on description.Word = ''' + genre + '''_words.Word
''')
common_words.createOrReplaceTempView('common_words')
word_score = sqlContext.sql('''
select desc_percent * genre_percent as score from common_words
''')
word_score.createOrReplaceTempView('word_score')
genre_score = sqlContext.sql('''
select sum(score) as Genre_score from word_score
''')
genre_scores.append(genre_score.select('Genre_score').head(1)[0][0])
try:
genre_scores = np.array(genre_scores)
index_scores = np.argsort(genre_scores)
genre_scores = np.sort(genre_scores)
except TypeError:
for genre in genre_list:
output_result.append(0)
out.writerow(output_result)
continue
base = genre_scores[-4]
percent_changes = []
for i in range(0, 3):
curr = genre_scores[no_of_genres - 1 - i]
percent_changes.append(((curr - base) / base) * 100)
percent_changes = np.array(percent_changes)
base_percent = (100 - percent_changes.sum()) / 4
genre_dict = dict()
genre_dict = defaultdict(lambda: 0, genre_dict)
for i in range(0, 3):
genre_index = index_scores[no_of_genres - 1 - i]
genre_name = genre_list[genre_index]
genre_value = percent_changes[i] + base_percent
print(genre_name + " " + str(genre_value) + "%")
genre_dict[genre_name] = genre_value
genre_dict[genre_list[index_scores[-4]]] = base_percent
print(genre_list[index_scores[-4]] + " " + str(base_percent) + "%\n")
for genre in genre_list:
output_result.append(genre_dict[genre])
out.writerow(output_result)
| r5 = "" | conditional_block |
create_load_files.py | import pandas as pd
import calendar
import re
import csv
import codecs
from src.clean_text import give_clean_words_list
import pyspark.sql.types as types
from pyspark.sql import SQLContext
from pyspark import SparkConf, SparkContext
import numpy as np
import operator
from collections import defaultdict
def bags_format(line):
contents = line.split()
return str(contents[0]), int(contents[1]), float(contents[2])
conf = SparkConf().setAppName('word count')
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
schema = types.StructType([types.StructField('Word', types.StringType(), False),
types.StructField('Count', types.IntegerType(), False),
types.StructField('Percent', types.FloatType(), False)])
genre_list = ["crime", "fantasy", "young-adult", "romance", "comedy", "dystopia",
"action", "historical", "non-fiction", "science fiction", "self-help"]
no_of_genres = len(genre_list)
for genre in genre_list:
crime_words = sc.textFile('../../data/word_counts/' + genre + '_counts')
if genre == "young-adult":
genre = 'YA' |
elif genre == "science fiction":
genre = "SF"
elif genre == "self-help":
genre = "SH"
genre_bag_words = crime_words.map(bags_format)
genre_word_df = sqlContext.createDataFrame(genre_bag_words, schema=schema).cache()
genre_word_df.createOrReplaceTempView(genre + '_words')
# Input Files
books_data = pd.read_csv("../../data/batch_1/books.csv")
img_price_data = pd.read_csv("../../data/batch_1/image_and_price.csv")
desc_data = pd.read_csv("../../data/batch_1/description_all.csv")
amazon_data = pd.read_csv("../../data/batch_1/amazon.csv")
reviews_data = pd.read_csv("../../data/batch_1/reviews_users.csv")
# Created Files
out = csv.writer(codecs.open("../../data/batch_1/final_1.csv", "w", "utf-8"), delimiter=",", quoting=csv.QUOTE_ALL)
out.writerow(["ID", "Book Title", "ISBN", "Rating", "Author", "Language", "Pages", "Publication", "Publish Date",
"Publish Month", "Publish Year", "Genres", "Image", "Google Play", "Google Play URL", "Barnes and Noble",
"Barnes and Noble URL", "Indie Bound", "Indie Bound URL",
"Amazon", "Amazon URL", "R1", "R1 URL", "R2", "R2 URL", "R3", "R3 URL", "R4", "R4 URL", "R5", "R5 URL",
"GoodReads_Description", "Wiki_Description", "Readgeek_Description", "Riffle_Description",
"Amazon_Description", "crime", "fantasy", "young-adult", "romance", "comedy", "dystopia", "action",
"historical", "non-fiction", "science fiction", "self-help"])
out_user_review = csv.writer(codecs.open("../../data/batch_1/final_2.csv", "w", "utf-8"), delimiter=",",
quoting=csv.QUOTE_ALL)
out_user_review.writerow(["ID", "Book Title", "ISBN", "User ID", "User Name", "User URL", "Rating",
"Review Data", "Review"])
book_id = 100000 - 1
reviews_index = -1
month_dict = {v: k for k, v in enumerate(calendar.month_abbr)}
for index, row in books_data.iterrows():
book_id += 1
title = row["Book Title"]
print(str(index) + " " + title)
# Books CSV data
isbn = row["ISBN"]
rating = row["Rating"]
author = row["Author"]
if type(author) is float:
author = ""
language = row["Language"]
if type(language) is float:
language = ""
pages = row["Pages"]
if pages == "[]":
pages = -1
publication = row["Publication"]
if type(publication) is float:
publication = ""
url = row["Book URL"]
if type(url) is float:
url = ""
date_str = row["Publish Date"]
if type(date_str) is float:
pub_date = -1
pub_month = -1
pub_year = -1
else:
date_str_list = date_str.split(" ")
if len(date_str_list) == 3:
pub_month = int(month_dict[date_str_list[0][0:3]])
pub_date = int(re.findall(r'\d+', date_str_list[1])[0])
pub_year = int(date_str_list[2])
elif len(date_str_list) == 2:
pub_month = int(month_dict[date_str_list[0][0:3]])
pub_year = int(date_str_list[1])
elif len(date_str_list) == 1:
pub_date = -1
pub_month = -1
pub_year = int(date_str_list[0])
genres = row["Genres"]
if type(genres) is float:
genres = ""
# Image Price data
img_url = img_price_data["Book Image"][index]
if type(img_url) is float:
img_url = ""
gp_price = img_price_data["Google Play"][index]
if type(gp_price) is float or "http" in gp_price:
gp_price = ""
gp_url = img_price_data["Google Play URL"][index]
if type(gp_url) is float:
gp_url = ""
bnb_price = img_price_data["Barnes and Noble"][index]
if type(bnb_price) is float or "http" in bnb_price:
bnb_price = ""
bnb_url = img_price_data["Barnes and Noble URL"][index]
if type(bnb_url) is float:
bnb_url = ""
indie_price = img_price_data["Indie Bound"][index]
if type(indie_price) is float or "http" in indie_price:
indie_price = ""
indie_url = img_price_data["Indie Bound URL"][index]
if type(indie_url) is float:
indie_url = ""
# Amazon Data
amazon_price = amazon_data["Amazon Price"][index]
if type(amazon_price) is float or "http" in amazon_price:
amazon_price = ""
r1 = amazon_data["R1 Title"][index]
if type(r1) is float:
r1 = ""
r1_url = amazon_data["R1 URL"][index]
if type(r1_url) is float:
r1_url = ""
r2 = amazon_data["R2 Title"][index]
if type(r2) is float:
r2 = ""
r2_url = amazon_data["R2 URL"][index]
if type(r2_url) is float:
r2_url = ""
r3 = amazon_data["R3 Title"][index]
if type(r3) is float:
r3 = ""
r3_url = amazon_data["R3 URL"][index]
if type(r3_url) is float:
r3_url = ""
r4 = amazon_data["R4 Title"][index]
if type(r4) is float:
r4 = ""
r4_url = amazon_data["R4 URL"][index]
if type(r4_url) is float:
r4_url = ""
r5 = amazon_data["R5 Title"][index]
if type(r5) is float:
r5 = ""
r5_url = amazon_data["R5 URL"][index]
if type(r5_url) is float:
r5_url = ""
amazon_url = amazon_data["Book URL"][index]
if type(amazon_url) is float:
amazon_url = ""
# Description data
good_reads_desc = desc_data["GoodReads Description"][index]
if type(good_reads_desc) is float:
good_reads_desc = ""
good_reads_desc = give_clean_words_list(good_reads_desc)
wiki_desc = desc_data["Wikipedia Description"][index]
if type(wiki_desc) is float:
wiki_desc = ""
wiki_desc = give_clean_words_list(wiki_desc)
read_geek_desc = desc_data["Readgeek Description"][index]
if type(read_geek_desc) is float:
read_geek_desc = ""
read_geek_desc = give_clean_words_list(read_geek_desc)
riffle_desc = desc_data["Riffle Description"][index]
if type(riffle_desc) is float:
riffle_desc = ""
riffle_desc = give_clean_words_list(riffle_desc)
amazon_desc = desc_data["Amazon Description"][index]
if type(amazon_desc) is float:
amazon_desc = ""
amazon_desc = give_clean_words_list(amazon_desc)
output_result = [book_id, title, isbn, rating, author, language, pages, publication, pub_date, pub_month, pub_year,
genres, img_url, gp_price, gp_url, bnb_price, bnb_url, indie_price, indie_url, amazon_price,
amazon_url, r1, r1_url, r2, r2_url, r3, r3_url, r4, r4_url, r5, r5_url, good_reads_desc, wiki_desc,
read_geek_desc, riffle_desc, amazon_desc]
# Reviews File
no_comments = 0
while True:
reviews_index += 1
try:
review_isbn = reviews_data["ISBN"][reviews_index]
review_book_title = reviews_data["Book Title"][reviews_index]
except KeyError:
break
if review_book_title == title or review_isbn == isbn:
user_id = reviews_data["User ID"][reviews_index]
user_name = reviews_data["User Name"][reviews_index]
user_url = reviews_data["User URL"][reviews_index]
review_date = reviews_data["Review Date"][reviews_index]
review_text = reviews_data["Review"][reviews_index]
user_rating = reviews_data["Rating"][reviews_index]
if type(review_text) is float:
review_text = ""
review_text = give_clean_words_list(review_text)
out_user_review.writerow([book_id, title, isbn, user_id, user_name, user_url, user_rating,
review_date, review_text])
no_comments += 1
else:
reviews_index -= 1
break
print("Comments = " + str(no_comments))
# Genre Classifier
tokens = good_reads_desc + wiki_desc + read_geek_desc + riffle_desc + amazon_desc + author.split(" ")
desc_tokens = sc.parallelize(tokens)
words = desc_tokens.map(lambda w: (w, 1))
wordcount = words.reduceByKey(operator.add)
total_word_count = wordcount.map(lambda x: x[1]).sum()
words_with_count_percents = wordcount.map(lambda line: (line[0], line[1], (line[1] / total_word_count)))
df = sqlContext.createDataFrame(words_with_count_percents, schema=schema).cache()
df.createOrReplaceTempView('description')
genre_scores = []
for genre in genre_list:
if genre == "young-adult":
genre = 'YA'
elif genre == "non-fiction":
genre = "NF"
elif genre == "science fiction":
genre = "SF"
elif genre == "self-help":
genre = "SH"
common_words = sqlContext.sql('''
select description.word as Word, description.Count as desc_count,''' +
genre + '''_words.Count as genre_count, description.Percent as desc_percent,
''' + genre + '''_words.Percent as genre_percent from description join
''' + genre + '''_words on description.Word = ''' + genre + '''_words.Word
''')
common_words.createOrReplaceTempView('common_words')
word_score = sqlContext.sql('''
select desc_percent * genre_percent as score from common_words
''')
word_score.createOrReplaceTempView('word_score')
genre_score = sqlContext.sql('''
select sum(score) as Genre_score from word_score
''')
genre_scores.append(genre_score.select('Genre_score').head(1)[0][0])
try:
genre_scores = np.array(genre_scores)
index_scores = np.argsort(genre_scores)
genre_scores = np.sort(genre_scores)
except TypeError:
for genre in genre_list:
output_result.append(0)
out.writerow(output_result)
continue
base = genre_scores[-4]
percent_changes = []
for i in range(0, 3):
curr = genre_scores[no_of_genres - 1 - i]
percent_changes.append(((curr - base) / base) * 100)
percent_changes = np.array(percent_changes)
base_percent = (100 - percent_changes.sum()) / 4
genre_dict = dict()
genre_dict = defaultdict(lambda: 0, genre_dict)
for i in range(0, 3):
genre_index = index_scores[no_of_genres - 1 - i]
genre_name = genre_list[genre_index]
genre_value = percent_changes[i] + base_percent
print(genre_name + " " + str(genre_value) + "%")
genre_dict[genre_name] = genre_value
genre_dict[genre_list[index_scores[-4]]] = base_percent
print(genre_list[index_scores[-4]] + " " + str(base_percent) + "%\n")
for genre in genre_list:
output_result.append(genre_dict[genre])
out.writerow(output_result) |
elif genre == "non-fiction":
genre = "NF" | random_line_split |
create_load_files.py | import pandas as pd
import calendar
import re
import csv
import codecs
from src.clean_text import give_clean_words_list
import pyspark.sql.types as types
from pyspark.sql import SQLContext
from pyspark import SparkConf, SparkContext
import numpy as np
import operator
from collections import defaultdict
def | (line):
contents = line.split()
return str(contents[0]), int(contents[1]), float(contents[2])
conf = SparkConf().setAppName('word count')
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
schema = types.StructType([types.StructField('Word', types.StringType(), False),
types.StructField('Count', types.IntegerType(), False),
types.StructField('Percent', types.FloatType(), False)])
genre_list = ["crime", "fantasy", "young-adult", "romance", "comedy", "dystopia",
"action", "historical", "non-fiction", "science fiction", "self-help"]
no_of_genres = len(genre_list)
for genre in genre_list:
crime_words = sc.textFile('../../data/word_counts/' + genre + '_counts')
if genre == "young-adult":
genre = 'YA'
elif genre == "non-fiction":
genre = "NF"
elif genre == "science fiction":
genre = "SF"
elif genre == "self-help":
genre = "SH"
genre_bag_words = crime_words.map(bags_format)
genre_word_df = sqlContext.createDataFrame(genre_bag_words, schema=schema).cache()
genre_word_df.createOrReplaceTempView(genre + '_words')
# Input Files
books_data = pd.read_csv("../../data/batch_1/books.csv")
img_price_data = pd.read_csv("../../data/batch_1/image_and_price.csv")
desc_data = pd.read_csv("../../data/batch_1/description_all.csv")
amazon_data = pd.read_csv("../../data/batch_1/amazon.csv")
reviews_data = pd.read_csv("../../data/batch_1/reviews_users.csv")
# Created Files
out = csv.writer(codecs.open("../../data/batch_1/final_1.csv", "w", "utf-8"), delimiter=",", quoting=csv.QUOTE_ALL)
out.writerow(["ID", "Book Title", "ISBN", "Rating", "Author", "Language", "Pages", "Publication", "Publish Date",
"Publish Month", "Publish Year", "Genres", "Image", "Google Play", "Google Play URL", "Barnes and Noble",
"Barnes and Noble URL", "Indie Bound", "Indie Bound URL",
"Amazon", "Amazon URL", "R1", "R1 URL", "R2", "R2 URL", "R3", "R3 URL", "R4", "R4 URL", "R5", "R5 URL",
"GoodReads_Description", "Wiki_Description", "Readgeek_Description", "Riffle_Description",
"Amazon_Description", "crime", "fantasy", "young-adult", "romance", "comedy", "dystopia", "action",
"historical", "non-fiction", "science fiction", "self-help"])
out_user_review = csv.writer(codecs.open("../../data/batch_1/final_2.csv", "w", "utf-8"), delimiter=",",
quoting=csv.QUOTE_ALL)
out_user_review.writerow(["ID", "Book Title", "ISBN", "User ID", "User Name", "User URL", "Rating",
"Review Data", "Review"])
book_id = 100000 - 1
reviews_index = -1
month_dict = {v: k for k, v in enumerate(calendar.month_abbr)}
for index, row in books_data.iterrows():
book_id += 1
title = row["Book Title"]
print(str(index) + " " + title)
# Books CSV data
isbn = row["ISBN"]
rating = row["Rating"]
author = row["Author"]
if type(author) is float:
author = ""
language = row["Language"]
if type(language) is float:
language = ""
pages = row["Pages"]
if pages == "[]":
pages = -1
publication = row["Publication"]
if type(publication) is float:
publication = ""
url = row["Book URL"]
if type(url) is float:
url = ""
date_str = row["Publish Date"]
if type(date_str) is float:
pub_date = -1
pub_month = -1
pub_year = -1
else:
date_str_list = date_str.split(" ")
if len(date_str_list) == 3:
pub_month = int(month_dict[date_str_list[0][0:3]])
pub_date = int(re.findall(r'\d+', date_str_list[1])[0])
pub_year = int(date_str_list[2])
elif len(date_str_list) == 2:
pub_month = int(month_dict[date_str_list[0][0:3]])
pub_year = int(date_str_list[1])
elif len(date_str_list) == 1:
pub_date = -1
pub_month = -1
pub_year = int(date_str_list[0])
genres = row["Genres"]
if type(genres) is float:
genres = ""
# Image Price data
img_url = img_price_data["Book Image"][index]
if type(img_url) is float:
img_url = ""
gp_price = img_price_data["Google Play"][index]
if type(gp_price) is float or "http" in gp_price:
gp_price = ""
gp_url = img_price_data["Google Play URL"][index]
if type(gp_url) is float:
gp_url = ""
bnb_price = img_price_data["Barnes and Noble"][index]
if type(bnb_price) is float or "http" in bnb_price:
bnb_price = ""
bnb_url = img_price_data["Barnes and Noble URL"][index]
if type(bnb_url) is float:
bnb_url = ""
indie_price = img_price_data["Indie Bound"][index]
if type(indie_price) is float or "http" in indie_price:
indie_price = ""
indie_url = img_price_data["Indie Bound URL"][index]
if type(indie_url) is float:
indie_url = ""
# Amazon Data
amazon_price = amazon_data["Amazon Price"][index]
if type(amazon_price) is float or "http" in amazon_price:
amazon_price = ""
r1 = amazon_data["R1 Title"][index]
if type(r1) is float:
r1 = ""
r1_url = amazon_data["R1 URL"][index]
if type(r1_url) is float:
r1_url = ""
r2 = amazon_data["R2 Title"][index]
if type(r2) is float:
r2 = ""
r2_url = amazon_data["R2 URL"][index]
if type(r2_url) is float:
r2_url = ""
r3 = amazon_data["R3 Title"][index]
if type(r3) is float:
r3 = ""
r3_url = amazon_data["R3 URL"][index]
if type(r3_url) is float:
r3_url = ""
r4 = amazon_data["R4 Title"][index]
if type(r4) is float:
r4 = ""
r4_url = amazon_data["R4 URL"][index]
if type(r4_url) is float:
r4_url = ""
r5 = amazon_data["R5 Title"][index]
if type(r5) is float:
r5 = ""
r5_url = amazon_data["R5 URL"][index]
if type(r5_url) is float:
r5_url = ""
amazon_url = amazon_data["Book URL"][index]
if type(amazon_url) is float:
amazon_url = ""
# Description data
good_reads_desc = desc_data["GoodReads Description"][index]
if type(good_reads_desc) is float:
good_reads_desc = ""
good_reads_desc = give_clean_words_list(good_reads_desc)
wiki_desc = desc_data["Wikipedia Description"][index]
if type(wiki_desc) is float:
wiki_desc = ""
wiki_desc = give_clean_words_list(wiki_desc)
read_geek_desc = desc_data["Readgeek Description"][index]
if type(read_geek_desc) is float:
read_geek_desc = ""
read_geek_desc = give_clean_words_list(read_geek_desc)
riffle_desc = desc_data["Riffle Description"][index]
if type(riffle_desc) is float:
riffle_desc = ""
riffle_desc = give_clean_words_list(riffle_desc)
amazon_desc = desc_data["Amazon Description"][index]
if type(amazon_desc) is float:
amazon_desc = ""
amazon_desc = give_clean_words_list(amazon_desc)
output_result = [book_id, title, isbn, rating, author, language, pages, publication, pub_date, pub_month, pub_year,
genres, img_url, gp_price, gp_url, bnb_price, bnb_url, indie_price, indie_url, amazon_price,
amazon_url, r1, r1_url, r2, r2_url, r3, r3_url, r4, r4_url, r5, r5_url, good_reads_desc, wiki_desc,
read_geek_desc, riffle_desc, amazon_desc]
# Reviews File
no_comments = 0
while True:
reviews_index += 1
try:
review_isbn = reviews_data["ISBN"][reviews_index]
review_book_title = reviews_data["Book Title"][reviews_index]
except KeyError:
break
if review_book_title == title or review_isbn == isbn:
user_id = reviews_data["User ID"][reviews_index]
user_name = reviews_data["User Name"][reviews_index]
user_url = reviews_data["User URL"][reviews_index]
review_date = reviews_data["Review Date"][reviews_index]
review_text = reviews_data["Review"][reviews_index]
user_rating = reviews_data["Rating"][reviews_index]
if type(review_text) is float:
review_text = ""
review_text = give_clean_words_list(review_text)
out_user_review.writerow([book_id, title, isbn, user_id, user_name, user_url, user_rating,
review_date, review_text])
no_comments += 1
else:
reviews_index -= 1
break
print("Comments = " + str(no_comments))
# Genre Classifier
tokens = good_reads_desc + wiki_desc + read_geek_desc + riffle_desc + amazon_desc + author.split(" ")
desc_tokens = sc.parallelize(tokens)
words = desc_tokens.map(lambda w: (w, 1))
wordcount = words.reduceByKey(operator.add)
total_word_count = wordcount.map(lambda x: x[1]).sum()
words_with_count_percents = wordcount.map(lambda line: (line[0], line[1], (line[1] / total_word_count)))
df = sqlContext.createDataFrame(words_with_count_percents, schema=schema).cache()
df.createOrReplaceTempView('description')
genre_scores = []
for genre in genre_list:
if genre == "young-adult":
genre = 'YA'
elif genre == "non-fiction":
genre = "NF"
elif genre == "science fiction":
genre = "SF"
elif genre == "self-help":
genre = "SH"
common_words = sqlContext.sql('''
select description.word as Word, description.Count as desc_count,''' +
genre + '''_words.Count as genre_count, description.Percent as desc_percent,
''' + genre + '''_words.Percent as genre_percent from description join
''' + genre + '''_words on description.Word = ''' + genre + '''_words.Word
''')
common_words.createOrReplaceTempView('common_words')
word_score = sqlContext.sql('''
select desc_percent * genre_percent as score from common_words
''')
word_score.createOrReplaceTempView('word_score')
genre_score = sqlContext.sql('''
select sum(score) as Genre_score from word_score
''')
genre_scores.append(genre_score.select('Genre_score').head(1)[0][0])
try:
genre_scores = np.array(genre_scores)
index_scores = np.argsort(genre_scores)
genre_scores = np.sort(genre_scores)
except TypeError:
for genre in genre_list:
output_result.append(0)
out.writerow(output_result)
continue
base = genre_scores[-4]
percent_changes = []
for i in range(0, 3):
curr = genre_scores[no_of_genres - 1 - i]
percent_changes.append(((curr - base) / base) * 100)
percent_changes = np.array(percent_changes)
base_percent = (100 - percent_changes.sum()) / 4
genre_dict = dict()
genre_dict = defaultdict(lambda: 0, genre_dict)
for i in range(0, 3):
genre_index = index_scores[no_of_genres - 1 - i]
genre_name = genre_list[genre_index]
genre_value = percent_changes[i] + base_percent
print(genre_name + " " + str(genre_value) + "%")
genre_dict[genre_name] = genre_value
genre_dict[genre_list[index_scores[-4]]] = base_percent
print(genre_list[index_scores[-4]] + " " + str(base_percent) + "%\n")
for genre in genre_list:
output_result.append(genre_dict[genre])
out.writerow(output_result)
| bags_format | identifier_name |
zigzag_graph.rs | use std::collections::HashMap;
use std::marker::PhantomData;
use std::sync::{Arc, RwLock};
use crate::crypto::feistel::{self, FeistelPrecomputed};
use crate::drgraph::{BucketGraph, Graph, BASE_DEGREE};
use crate::hasher::Hasher;
use crate::layered_drgporep::Layerable;
use crate::parameter_cache::ParameterSetMetadata;
use crate::settings;
/// The expansion degree used for ZigZag Graphs.
pub const EXP_DEGREE: usize = 8;
lazy_static! {
// This parents cache is currently used for the *expanded parents only*, generated
// by the expensive Feistel operations in the ZigZag, it doesn't contain the
// "base" (in the `Graph` terminology) parents, which are cheaper to compute.
// It is indexed by the `Graph.identifier`, to ensure that the right cache is used.
static ref PARENT_CACHE: Arc<RwLock<HashMap<String, ParentCache>>> = Arc::new(RwLock::new(HashMap::new()));
}
// ZigZagGraph will hold two different (but related) `ParentCache`,
// the first one for the `forward` direction and the second one for the `reversed`.
#[derive(Debug, Clone)]
pub struct ParentCache {
forward: Vec<Option<Vec<u32>>>,
reverse: Vec<Option<Vec<u32>>>,
// Keep the size of the cache outside the lock to be easily accessible.
cache_entries: u32,
}
impl ParentCache {
pub fn new(cache_entries: u32) -> Self {
ParentCache {
forward: vec![None; cache_entries as usize],
reverse: vec![None; cache_entries as usize],
cache_entries,
}
}
pub fn contains_forward(&self, node: u32) -> bool {
assert!(node < self.cache_entries);
self.forward[node as usize].is_some()
}
pub fn contains_reverse(&self, node: u32) -> bool {
assert!(node < self.cache_entries);
self.reverse[node as usize].is_some()
}
pub fn read_forward<F, T>(&self, node: u32, mut cb: F) -> T
where
F: FnMut(Option<&Vec<u32>>) -> T,
{
assert!(node < self.cache_entries);
cb(self.forward[node as usize].as_ref())
}
pub fn read_reverse<F, T>(&self, node: u32, mut cb: F) -> T
where
F: FnMut(Option<&Vec<u32>>) -> T,
{
assert!(node < self.cache_entries);
cb(self.reverse[node as usize].as_ref())
}
pub fn write_forward(&mut self, node: u32, parents: Vec<u32>) {
assert!(node < self.cache_entries);
let old_value = std::mem::replace(&mut self.forward[node as usize], Some(parents));
debug_assert_eq!(old_value, None);
// We shouldn't be rewriting entries (with most likely the same values),
// this would be a clear indication of a bug.
}
pub fn write_reverse(&mut self, node: u32, parents: Vec<u32>) {
assert!(node < self.cache_entries);
let old_value = std::mem::replace(&mut self.reverse[node as usize], Some(parents));
debug_assert_eq!(old_value, None);
// We shouldn't be rewriting entries (with most likely the same values),
// this would be a clear indication of a bug.
}
}
#[derive(Debug, Clone)]
pub struct ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + 'static,
{
expansion_degree: usize,
base_graph: G,
pub reversed: bool,
feistel_precomputed: FeistelPrecomputed,
id: String,
use_cache: bool,
_h: PhantomData<H>,
}
pub type ZigZagBucketGraph<H> = ZigZagGraph<H, BucketGraph<H>>;
impl<'a, H, G> Layerable<H> for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata + 'static,
{
}
impl<H, G> ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
pub fn new(
base_graph: Option<G>,
nodes: usize,
base_degree: usize,
expansion_degree: usize,
seed: [u32; 7],
) -> Self {
if !cfg!(feature = "unchecked-degrees") {
assert_eq!(base_degree, BASE_DEGREE);
assert_eq!(expansion_degree, EXP_DEGREE);
}
let use_cache = settings::SETTINGS.lock().unwrap().maximize_caching;
let base_graph = match base_graph {
Some(graph) => graph,
None => G::new(nodes, base_degree, 0, seed),
};
let bg_id = base_graph.identifier();
let res = ZigZagGraph {
base_graph,
id: format!(
"zigzag_graph::ZigZagGraph{{expansion_degree: {} base_graph: {} }}",
expansion_degree, bg_id,
),
expansion_degree,
use_cache,
reversed: false,
feistel_precomputed: feistel::precompute((expansion_degree * nodes) as feistel::Index),
_h: PhantomData,
};
if use_cache {
info!("using parents cache of unlimited size",);
assert!(nodes <= std::u32::MAX as usize);
if !PARENT_CACHE.read().unwrap().contains_key(&res.id) {
PARENT_CACHE
.write()
.unwrap()
.insert(res.id.clone(), ParentCache::new(nodes as u32));
}
}
res
}
}
impl<H, G> ParameterSetMetadata for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
fn identifier(&self) -> String {
self.id.clone()
}
fn sector_size(&self) -> u64 {
self.base_graph.sector_size()
}
}
pub trait ZigZag: ::std::fmt::Debug + Clone + PartialEq + Eq {
type BaseHasher: Hasher;
type BaseGraph: Graph<Self::BaseHasher>;
/// zigzag returns a new graph with expansion component inverted and a distinct
/// base DRG graph -- with the direction of drg connections reversed. (i.e. from high-to-low nodes).
/// The name is 'weird', but so is the operation -- hence the choice.
fn zigzag(&self) -> Self;
/// Constructs a new graph.
fn base_graph(&self) -> Self::BaseGraph;
fn expansion_degree(&self) -> usize;
fn reversed(&self) -> bool;
fn expanded_parents<F, T>(&self, node: usize, cb: F) -> T
where
F: FnMut(&Vec<u32>) -> T;
fn real_index(&self, i: usize) -> usize;
fn new_zigzag(
nodes: usize,
base_degree: usize,
expansion_degree: usize,
seed: [u32; 7],
) -> Self;
}
impl<Z: ZigZag> Graph<Z::BaseHasher> for Z {
fn size(&self) -> usize {
self.base_graph().size()
}
fn degree(&self) -> usize {
self.base_graph().degree() + self.expansion_degree()
}
#[inline]
fn parents(&self, raw_node: usize, parents: &mut [usize]) {
// If graph is reversed, use real_index to convert index to reversed index.
// So we convert a raw reversed node to an unreversed node, calculate its parents,
// then convert the parents to reversed.
self.base_graph()
.parents(self.real_index(raw_node), parents);
for parent in parents.iter_mut().take(self.base_graph().degree()) {
*parent = self.real_index(*parent);
}
// expanded_parents takes raw_node
self.expanded_parents(raw_node, |expanded_parents| {
for (ii, value) in expanded_parents.iter().enumerate() {
parents[ii + self.base_graph().degree()] = *value as usize
}
// Pad so all nodes have correct degree.
let current_length = self.base_graph().degree() + expanded_parents.len();
for ii in 0..(self.degree() - current_length) {
if self.reversed() {
parents[ii + current_length] = self.size() - 1
} else {
parents[ii + current_length] = 0
}
}
});
assert!(parents.len() == self.degree());
if self.forward() {
parents.sort();
} else {
// Sort in reverse order.
parents.sort_by(|a, b| a.cmp(b).reverse());
}
assert!(parents.iter().all(|p| if self.forward() {
*p <= raw_node
} else {
*p >= raw_node
}));
}
fn seed(&self) -> [u32; 7] {
self.base_graph().seed()
}
fn new(nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7]) -> Self {
Z::new_zigzag(nodes, base_degree, expansion_degree, seed)
}
fn forward(&self) -> bool {
!self.reversed()
}
}
impl<'a, H, G> ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
// Assign `expansion_degree` parents to `node` using an invertible function. That
// means we can't just generate random values between `[0, size())`, we need to
// expand the search space (domain) to accommodate every unique parent assignment
// generated here. This can be visualized more clearly as a matrix where the each
// new parent of each new node is assigned a unique `index`:
//
//
// | Parent 1 | Parent 2 | Parent 3 |
//
// | Node 1 | 0 | 1 | 2 |
//
// | Node 2 | 3 | 4 | 5 |
//
// | Node 3 | 6 | 7 | 8 |
//
// | Node 4 | 9 | A | B |
//
// This starting `index` will be shuffled to another position to generate a
// parent-child relationship, e.g., if generating the parents for the second node,
// `permute` would be called with values `[3; 4; 5]` that would be mapped to other
// indexes in the search space of `[0, B]`, say, values `[A; 0; 4]`, that would
// correspond to nodes numbered `[4; 1, 2]` which will become the parents of the
// second node. In a later pass invalid parents like 2, self-referencing, and parents
// with indexes bigger than 2 (if in the `forward` direction, smaller than 2 if the
// inverse), will be removed.
//
// Since `permute` is a bijective function which has the inverse `invert_permute`,
// it is guaranteed that when looking for the parents in the `reversed` direction
// the child `node` used earlier will now actually be the parent of the output
// parents generated before (inverting the relationship). Following the example,
// in the reverse direction, when looking for the parents of, say, node 1,
// `invert_permute` (that maps back the output of `permute` to its input) would
// receive the indexes `[0; 1; 2]`, where the index `0` is guaranteed to map back
// to the index `4` that generated it earlier, corresponding to the node 2, inverting
// in fact the child-parent relationship.
fn correspondent(&self, node: usize, i: usize) -> usize {
let a = (node * self.expansion_degree) as feistel::Index + i as feistel::Index;
let feistel_keys = &[1, 2, 3, 4];
let transformed = if self.reversed {
feistel::invert_permute(
self.size() as feistel::Index * self.expansion_degree as feistel::Index,
a,
feistel_keys,
self.feistel_precomputed,
)
} else {
feistel::permute(
self.size() as feistel::Index * self.expansion_degree as feistel::Index,
a,
feistel_keys,
self.feistel_precomputed,
)
};
transformed as usize / self.expansion_degree
// Collapse the output in the matrix search space to the row of the corresponding
// node (losing the column information, that will be regenerated later when calling
// back this function in the `reversed` direction).
}
// Read the `node` entry in the parents cache (which may not exist) for
// the current direction set in the graph and return a copy of it (or
// `None` to signal a cache miss).
fn contains_parents_cache(&self, node: usize) -> bool {
if self.use_cache {
if let Some(ref cache) = PARENT_CACHE.read().unwrap().get(&self.id) {
if self.forward() {
cache.contains_forward(node as u32)
} else {
cache.contains_reverse(node as u32)
}
} else {
false
}
} else {
false
}
}
fn generate_expanded_parents(&self, node: usize) -> Vec<u32> {
(0..self.expansion_degree)
.filter_map(|i| {
let other = self.correspondent(node, i);
if self.reversed {
if other > node {
Some(other as u32)
} else {
None
}
} else if other < node {
Some(other as u32)
} else {
None
}
})
.collect()
}
}
impl<'a, H, G> ZigZag for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
type BaseHasher = H;
type BaseGraph = G;
fn new_zigzag(
nodes: usize,
base_degree: usize,
expansion_degree: usize,
seed: [u32; 7],
) -> Self {
Self::new(None, nodes, base_degree, expansion_degree, seed)
}
/// To zigzag a graph, we just toggle its reversed field.
/// All the real work happens when we calculate node parents on-demand.
// We always share the two caches (forward/reversed) between
// ZigZag graphs even if each graph will use only one of those
// caches (depending of its direction). This allows to propagate
// the caches across different layers, where consecutive even+odd
// layers have inverse directions.
fn zigzag(&self) -> Self {
let mut zigzag = self.clone();
zigzag.reversed = !zigzag.reversed;
zigzag
}
fn base_graph(&self) -> Self::BaseGraph {
self.base_graph.clone()
}
fn expansion_degree(&self) -> usize {
self.expansion_degree
}
fn reversed(&self) -> bool {
self.reversed
}
// TODO: Optimization: Evaluate providing an `all_parents` (and hence
// `all_expanded_parents`) method that would return the entire cache
// in a single lock operation, or at least (if the cache is not big enough)
// it would allow to batch parents calculations with that single lock. Also,
// since there is a reciprocity between forward and reversed parents,
// we would only need to compute the parents in one direction and with
// that fill both caches.
#[inline]
fn expanded_parents<F, T>(&self, node: usize, mut cb: F) -> T
where
F: FnMut(&Vec<u32>) -> T,
{
if !self.use_cache {
// No cache usage, generate on demand.
return cb(&self.generate_expanded_parents(node));
}
// Check if we need to fill the cache.
if !self.contains_parents_cache(node) {
// Cache is empty so we need to generate the parents.
let parents = self.generate_expanded_parents(node);
// Store the newly generated cached value.
let mut cache_lock = PARENT_CACHE.write().unwrap();
let cache = cache_lock
.get_mut(&self.id)
.expect("Invalid cache construction");
if self.forward() {
cache.write_forward(node as u32, parents);
} else {
cache.write_reverse(node as u32, parents);
}
}
// We made sure the cache is filled above, now we can return the value.
let cache_lock = PARENT_CACHE.read().unwrap();
let cache = cache_lock
.get(&self.id)
.expect("Invalid cache construction");
if self.forward() {
cache.read_forward(node as u32, |parents| cb(parents.unwrap()))
} else |
}
#[inline]
fn real_index(&self, i: usize) -> usize {
if self.reversed {
(self.size() - 1) - i
} else {
i
}
}
}
impl<H, G> PartialEq for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H>,
{
fn eq(&self, other: &ZigZagGraph<H, G>) -> bool {
self.base_graph == other.base_graph
&& self.expansion_degree == other.expansion_degree
&& self.reversed == other.reversed
}
}
impl<H, G> Eq for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H>,
{
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::{HashMap, HashSet};
use crate::drgraph::{new_seed, BASE_DEGREE};
use crate::hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher};
fn assert_graph_ascending<H: Hasher, G: Graph<H>>(g: G) {
for i in 0..g.size() {
let mut parents = vec![0; g.degree()];
g.parents(i, &mut parents);
for p in parents {
if i == 0 {
assert!(p == i);
} else {
assert!(p < i);
}
}
}
}
fn assert_graph_descending<H: Hasher, G: Graph<H>>(g: G) {
for i in 0..g.size() {
let mut parents = vec![0; g.degree()];
g.parents(i, &mut parents);
for p in parents {
if i == g.size() - 1 {
assert!(p == i);
} else {
assert!(p > i);
}
}
}
}
#[test]
fn zigzag_graph_zigzags_pedersen() {
test_zigzag_graph_zigzags::<PedersenHasher>();
}
#[test]
fn zigzag_graph_zigzags_sha256() {
test_zigzag_graph_zigzags::<Sha256Hasher>();
}
#[test]
fn zigzag_graph_zigzags_blake2s() {
test_zigzag_graph_zigzags::<Blake2sHasher>();
}
fn test_zigzag_graph_zigzags<H: 'static + Hasher>() {
let g = ZigZagBucketGraph::<H>::new_zigzag(50, BASE_DEGREE, EXP_DEGREE, new_seed());
let gz = g.zigzag();
assert_graph_ascending(g);
assert_graph_descending(gz);
}
#[test]
fn expansion_pedersen() {
test_expansion::<PedersenHasher>();
}
#[test]
fn expansion_sha256() {
test_expansion::<Sha256Hasher>();
}
#[test]
fn expansion_blake2s() {
test_expansion::<Blake2sHasher>();
}
fn test_expansion<H: 'static + Hasher>() {
// We need a graph.
let g = ZigZagBucketGraph::<H>::new_zigzag(25, BASE_DEGREE, EXP_DEGREE, new_seed());
// We're going to fully realize the expansion-graph component, in a HashMap.
let gcache = get_all_expanded_parents(&g);
// Here's the zigzag version of the graph.
let gz = g.zigzag();
// And a HashMap to hold the expanded parents.
let gzcache = get_all_expanded_parents(&gz);
for i in 0..gz.size() {
let parents = gzcache.get(&i).unwrap();
// Check to make sure all (expanded) node-parent relationships also exist in reverse,
// in the original graph's Hashmap.
for p in parents {
assert!(gcache[&(*p as usize)].contains(&(i as u32)));
}
}
// And then do the same check to make sure all (expanded) node-parent relationships from the original
// are present in the zigzag, just reversed.
for i in 0..g.size() {
g.expanded_parents(i, |parents| {
for p in parents.iter() {
assert!(gzcache[&(*p as usize)].contains(&(i as u32)));
}
});
}
// Having checked both ways, we know the graph and its zigzag counterpart have 'expanded' components
// which are each other's inverses. It's important that this be true.
}
fn get_all_expanded_parents<H: 'static + Hasher>(
zigzag_graph: &ZigZagBucketGraph<H>,
) -> HashMap<usize, Vec<u32>> {
let mut parents_map: HashMap<usize, Vec<u32>> = HashMap::new();
for i in 0..zigzag_graph.size() {
parents_map.insert(i, zigzag_graph.expanded_parents(i, |p| p.clone()));
}
parents_map
}
// Test that 3 (or more) rounds of the Feistel cipher can be used
// as a pseudorandom permutation, that is, each input will be mapped
// to a unique output (and though not test here, since the cipher
// is symmetric, the decryption rounds also work as the inverse
// permutation), for more details see:
// https://en.wikipedia.org/wiki/Feistel_cipher#Theoretical_work.
#[test]
fn test_shuffle() {
let n = 2_u64.pow(10);
let d = EXP_DEGREE as u64;
// Use a relatively small value of `n` as Feistel is expensive (but big
// enough that `n >> d`).
let mut shuffled: HashSet<u64> = HashSet::with_capacity((n * d) as usize);
let feistel_keys = &[1, 2, 3, 4];
let feistel_precomputed = feistel::precompute((n * d) as feistel::Index);
for i in 0..n {
for k in 0..d {
let permuted =
feistel::permute(n * d, i * d + k, feistel_keys, feistel_precomputed);
// Since the permutation implies a one-to-one correspondence,
// traversing the entire input space should generate the entire
// output space (in `shuffled`) without repetitions (since a duplicate
// output would imply there is another output that wasn't generated
// and the permutation would be incomplete).
assert!(shuffled.insert(permuted));
}
}
// Actually implied by the previous `assert!` this is left in place as an
// extra safety check that indeed the permutation preserved all the output
// space (of `n * d` nodes) without repetitions (which the `HashSet` would
// have skipped as duplicates).
assert_eq!(shuffled.len(), (n * d) as usize);
}
}
| {
cache.read_reverse(node as u32, |parents| cb(parents.unwrap()))
} | conditional_block |
zigzag_graph.rs | use std::collections::HashMap;
use std::marker::PhantomData;
use std::sync::{Arc, RwLock};
use crate::crypto::feistel::{self, FeistelPrecomputed};
use crate::drgraph::{BucketGraph, Graph, BASE_DEGREE};
use crate::hasher::Hasher;
use crate::layered_drgporep::Layerable;
use crate::parameter_cache::ParameterSetMetadata;
use crate::settings;
/// The expansion degree used for ZigZag Graphs.
pub const EXP_DEGREE: usize = 8;
lazy_static! {
// This parents cache is currently used for the *expanded parents only*, generated
// by the expensive Feistel operations in the ZigZag, it doesn't contain the
// "base" (in the `Graph` terminology) parents, which are cheaper to compute.
// It is indexed by the `Graph.identifier`, to ensure that the right cache is used.
static ref PARENT_CACHE: Arc<RwLock<HashMap<String, ParentCache>>> = Arc::new(RwLock::new(HashMap::new()));
}
// ZigZagGraph will hold two different (but related) `ParentCache`,
// the first one for the `forward` direction and the second one for the `reversed`.
#[derive(Debug, Clone)]
pub struct ParentCache {
forward: Vec<Option<Vec<u32>>>,
reverse: Vec<Option<Vec<u32>>>,
// Keep the size of the cache outside the lock to be easily accessible.
cache_entries: u32,
}
impl ParentCache {
pub fn new(cache_entries: u32) -> Self {
ParentCache {
forward: vec![None; cache_entries as usize],
reverse: vec![None; cache_entries as usize],
cache_entries,
}
}
pub fn contains_forward(&self, node: u32) -> bool {
assert!(node < self.cache_entries);
self.forward[node as usize].is_some()
}
pub fn contains_reverse(&self, node: u32) -> bool {
assert!(node < self.cache_entries);
self.reverse[node as usize].is_some()
}
pub fn read_forward<F, T>(&self, node: u32, mut cb: F) -> T
where
F: FnMut(Option<&Vec<u32>>) -> T,
{
assert!(node < self.cache_entries);
cb(self.forward[node as usize].as_ref())
}
pub fn read_reverse<F, T>(&self, node: u32, mut cb: F) -> T
where
F: FnMut(Option<&Vec<u32>>) -> T,
{
assert!(node < self.cache_entries);
cb(self.reverse[node as usize].as_ref())
}
pub fn write_forward(&mut self, node: u32, parents: Vec<u32>) {
assert!(node < self.cache_entries);
let old_value = std::mem::replace(&mut self.forward[node as usize], Some(parents));
debug_assert_eq!(old_value, None);
// We shouldn't be rewriting entries (with most likely the same values),
// this would be a clear indication of a bug.
}
pub fn write_reverse(&mut self, node: u32, parents: Vec<u32>) {
assert!(node < self.cache_entries);
let old_value = std::mem::replace(&mut self.reverse[node as usize], Some(parents));
debug_assert_eq!(old_value, None);
// We shouldn't be rewriting entries (with most likely the same values),
// this would be a clear indication of a bug.
}
}
#[derive(Debug, Clone)]
pub struct ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + 'static,
{
expansion_degree: usize,
base_graph: G,
pub reversed: bool,
feistel_precomputed: FeistelPrecomputed,
id: String,
use_cache: bool,
_h: PhantomData<H>,
}
pub type ZigZagBucketGraph<H> = ZigZagGraph<H, BucketGraph<H>>;
impl<'a, H, G> Layerable<H> for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata + 'static,
{
}
impl<H, G> ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
pub fn new(
base_graph: Option<G>,
nodes: usize,
base_degree: usize,
expansion_degree: usize,
seed: [u32; 7],
) -> Self {
if !cfg!(feature = "unchecked-degrees") {
assert_eq!(base_degree, BASE_DEGREE);
assert_eq!(expansion_degree, EXP_DEGREE);
}
let use_cache = settings::SETTINGS.lock().unwrap().maximize_caching;
let base_graph = match base_graph {
Some(graph) => graph,
None => G::new(nodes, base_degree, 0, seed),
};
let bg_id = base_graph.identifier();
let res = ZigZagGraph {
base_graph,
id: format!(
"zigzag_graph::ZigZagGraph{{expansion_degree: {} base_graph: {} }}",
expansion_degree, bg_id,
),
expansion_degree,
use_cache,
reversed: false,
feistel_precomputed: feistel::precompute((expansion_degree * nodes) as feistel::Index),
_h: PhantomData,
};
if use_cache {
info!("using parents cache of unlimited size",);
assert!(nodes <= std::u32::MAX as usize);
if !PARENT_CACHE.read().unwrap().contains_key(&res.id) {
PARENT_CACHE
.write()
.unwrap()
.insert(res.id.clone(), ParentCache::new(nodes as u32));
}
}
res
}
}
impl<H, G> ParameterSetMetadata for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
fn identifier(&self) -> String {
self.id.clone()
}
fn sector_size(&self) -> u64 {
self.base_graph.sector_size()
}
}
pub trait ZigZag: ::std::fmt::Debug + Clone + PartialEq + Eq {
type BaseHasher: Hasher;
type BaseGraph: Graph<Self::BaseHasher>;
/// zigzag returns a new graph with expansion component inverted and a distinct
/// base DRG graph -- with the direction of drg connections reversed. (i.e. from high-to-low nodes).
/// The name is 'weird', but so is the operation -- hence the choice.
fn zigzag(&self) -> Self;
/// Constructs a new graph.
fn base_graph(&self) -> Self::BaseGraph;
fn expansion_degree(&self) -> usize;
fn reversed(&self) -> bool;
fn expanded_parents<F, T>(&self, node: usize, cb: F) -> T
where
F: FnMut(&Vec<u32>) -> T;
fn real_index(&self, i: usize) -> usize;
fn new_zigzag(
nodes: usize,
base_degree: usize,
expansion_degree: usize,
seed: [u32; 7],
) -> Self;
}
impl<Z: ZigZag> Graph<Z::BaseHasher> for Z {
fn size(&self) -> usize {
self.base_graph().size()
}
fn degree(&self) -> usize {
self.base_graph().degree() + self.expansion_degree()
}
#[inline]
fn parents(&self, raw_node: usize, parents: &mut [usize]) {
// If graph is reversed, use real_index to convert index to reversed index.
// So we convert a raw reversed node to an unreversed node, calculate its parents,
// then convert the parents to reversed.
self.base_graph()
.parents(self.real_index(raw_node), parents);
for parent in parents.iter_mut().take(self.base_graph().degree()) {
*parent = self.real_index(*parent);
}
// expanded_parents takes raw_node
self.expanded_parents(raw_node, |expanded_parents| {
for (ii, value) in expanded_parents.iter().enumerate() {
parents[ii + self.base_graph().degree()] = *value as usize
}
// Pad so all nodes have correct degree.
let current_length = self.base_graph().degree() + expanded_parents.len();
for ii in 0..(self.degree() - current_length) {
if self.reversed() {
parents[ii + current_length] = self.size() - 1
} else {
parents[ii + current_length] = 0
}
}
});
assert!(parents.len() == self.degree());
if self.forward() {
parents.sort();
} else {
// Sort in reverse order.
parents.sort_by(|a, b| a.cmp(b).reverse());
}
assert!(parents.iter().all(|p| if self.forward() {
*p <= raw_node
} else {
*p >= raw_node
}));
}
fn seed(&self) -> [u32; 7] {
self.base_graph().seed()
}
fn new(nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7]) -> Self {
Z::new_zigzag(nodes, base_degree, expansion_degree, seed)
}
fn forward(&self) -> bool {
!self.reversed()
}
}
impl<'a, H, G> ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
// Assign `expansion_degree` parents to `node` using an invertible function. That
// means we can't just generate random values between `[0, size())`, we need to
// expand the search space (domain) to accommodate every unique parent assignment
// generated here. This can be visualized more clearly as a matrix where the each
// new parent of each new node is assigned a unique `index`:
//
//
// | Parent 1 | Parent 2 | Parent 3 |
//
// | Node 1 | 0 | 1 | 2 |
//
// | Node 2 | 3 | 4 | 5 |
//
// | Node 3 | 6 | 7 | 8 |
//
// | Node 4 | 9 | A | B |
//
// This starting `index` will be shuffled to another position to generate a
// parent-child relationship, e.g., if generating the parents for the second node,
// `permute` would be called with values `[3; 4; 5]` that would be mapped to other
// indexes in the search space of `[0, B]`, say, values `[A; 0; 4]`, that would
// correspond to nodes numbered `[4; 1, 2]` which will become the parents of the
// second node. In a later pass invalid parents like 2, self-referencing, and parents
// with indexes bigger than 2 (if in the `forward` direction, smaller than 2 if the
// inverse), will be removed.
//
// Since `permute` is a bijective function which has the inverse `invert_permute`,
// it is guaranteed that when looking for the parents in the `reversed` direction
// the child `node` used earlier will now actually be the parent of the output
// parents generated before (inverting the relationship). Following the example,
// in the reverse direction, when looking for the parents of, say, node 1,
// `invert_permute` (that maps back the output of `permute` to its input) would
// receive the indexes `[0; 1; 2]`, where the index `0` is guaranteed to map back
// to the index `4` that generated it earlier, corresponding to the node 2, inverting
// in fact the child-parent relationship.
fn correspondent(&self, node: usize, i: usize) -> usize {
let a = (node * self.expansion_degree) as feistel::Index + i as feistel::Index;
let feistel_keys = &[1, 2, 3, 4];
let transformed = if self.reversed {
feistel::invert_permute(
self.size() as feistel::Index * self.expansion_degree as feistel::Index,
a,
feistel_keys,
self.feistel_precomputed,
)
} else {
feistel::permute(
self.size() as feistel::Index * self.expansion_degree as feistel::Index,
a,
feistel_keys,
self.feistel_precomputed,
)
};
transformed as usize / self.expansion_degree
// Collapse the output in the matrix search space to the row of the corresponding
// node (losing the column information, that will be regenerated later when calling
// back this function in the `reversed` direction).
}
// Read the `node` entry in the parents cache (which may not exist) for
// the current direction set in the graph and return a copy of it (or
// `None` to signal a cache miss).
fn contains_parents_cache(&self, node: usize) -> bool {
if self.use_cache {
if let Some(ref cache) = PARENT_CACHE.read().unwrap().get(&self.id) {
if self.forward() {
cache.contains_forward(node as u32)
} else {
cache.contains_reverse(node as u32)
}
} else {
false
}
} else {
false
}
}
fn generate_expanded_parents(&self, node: usize) -> Vec<u32> {
(0..self.expansion_degree)
.filter_map(|i| {
let other = self.correspondent(node, i);
if self.reversed {
if other > node {
Some(other as u32)
} else {
None
}
} else if other < node {
Some(other as u32)
} else {
None
}
})
.collect()
}
}
impl<'a, H, G> ZigZag for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
type BaseHasher = H;
type BaseGraph = G;
fn new_zigzag(
nodes: usize,
base_degree: usize,
expansion_degree: usize,
seed: [u32; 7],
) -> Self {
Self::new(None, nodes, base_degree, expansion_degree, seed)
}
/// To zigzag a graph, we just toggle its reversed field.
/// All the real work happens when we calculate node parents on-demand.
// We always share the two caches (forward/reversed) between
// ZigZag graphs even if each graph will use only one of those
// caches (depending of its direction). This allows to propagate
// the caches across different layers, where consecutive even+odd
// layers have inverse directions.
fn zigzag(&self) -> Self {
let mut zigzag = self.clone();
zigzag.reversed = !zigzag.reversed;
zigzag
}
fn base_graph(&self) -> Self::BaseGraph {
self.base_graph.clone()
}
fn | (&self) -> usize {
self.expansion_degree
}
fn reversed(&self) -> bool {
self.reversed
}
// TODO: Optimization: Evaluate providing an `all_parents` (and hence
// `all_expanded_parents`) method that would return the entire cache
// in a single lock operation, or at least (if the cache is not big enough)
// it would allow to batch parents calculations with that single lock. Also,
// since there is a reciprocity between forward and reversed parents,
// we would only need to compute the parents in one direction and with
// that fill both caches.
#[inline]
fn expanded_parents<F, T>(&self, node: usize, mut cb: F) -> T
where
F: FnMut(&Vec<u32>) -> T,
{
if !self.use_cache {
// No cache usage, generate on demand.
return cb(&self.generate_expanded_parents(node));
}
// Check if we need to fill the cache.
if !self.contains_parents_cache(node) {
// Cache is empty so we need to generate the parents.
let parents = self.generate_expanded_parents(node);
// Store the newly generated cached value.
let mut cache_lock = PARENT_CACHE.write().unwrap();
let cache = cache_lock
.get_mut(&self.id)
.expect("Invalid cache construction");
if self.forward() {
cache.write_forward(node as u32, parents);
} else {
cache.write_reverse(node as u32, parents);
}
}
// We made sure the cache is filled above, now we can return the value.
let cache_lock = PARENT_CACHE.read().unwrap();
let cache = cache_lock
.get(&self.id)
.expect("Invalid cache construction");
if self.forward() {
cache.read_forward(node as u32, |parents| cb(parents.unwrap()))
} else {
cache.read_reverse(node as u32, |parents| cb(parents.unwrap()))
}
}
#[inline]
fn real_index(&self, i: usize) -> usize {
if self.reversed {
(self.size() - 1) - i
} else {
i
}
}
}
impl<H, G> PartialEq for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H>,
{
fn eq(&self, other: &ZigZagGraph<H, G>) -> bool {
self.base_graph == other.base_graph
&& self.expansion_degree == other.expansion_degree
&& self.reversed == other.reversed
}
}
impl<H, G> Eq for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H>,
{
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::{HashMap, HashSet};
use crate::drgraph::{new_seed, BASE_DEGREE};
use crate::hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher};
fn assert_graph_ascending<H: Hasher, G: Graph<H>>(g: G) {
for i in 0..g.size() {
let mut parents = vec![0; g.degree()];
g.parents(i, &mut parents);
for p in parents {
if i == 0 {
assert!(p == i);
} else {
assert!(p < i);
}
}
}
}
fn assert_graph_descending<H: Hasher, G: Graph<H>>(g: G) {
for i in 0..g.size() {
let mut parents = vec![0; g.degree()];
g.parents(i, &mut parents);
for p in parents {
if i == g.size() - 1 {
assert!(p == i);
} else {
assert!(p > i);
}
}
}
}
#[test]
fn zigzag_graph_zigzags_pedersen() {
test_zigzag_graph_zigzags::<PedersenHasher>();
}
#[test]
fn zigzag_graph_zigzags_sha256() {
test_zigzag_graph_zigzags::<Sha256Hasher>();
}
#[test]
fn zigzag_graph_zigzags_blake2s() {
test_zigzag_graph_zigzags::<Blake2sHasher>();
}
fn test_zigzag_graph_zigzags<H: 'static + Hasher>() {
let g = ZigZagBucketGraph::<H>::new_zigzag(50, BASE_DEGREE, EXP_DEGREE, new_seed());
let gz = g.zigzag();
assert_graph_ascending(g);
assert_graph_descending(gz);
}
#[test]
fn expansion_pedersen() {
test_expansion::<PedersenHasher>();
}
#[test]
fn expansion_sha256() {
test_expansion::<Sha256Hasher>();
}
#[test]
fn expansion_blake2s() {
test_expansion::<Blake2sHasher>();
}
fn test_expansion<H: 'static + Hasher>() {
// We need a graph.
let g = ZigZagBucketGraph::<H>::new_zigzag(25, BASE_DEGREE, EXP_DEGREE, new_seed());
// We're going to fully realize the expansion-graph component, in a HashMap.
let gcache = get_all_expanded_parents(&g);
// Here's the zigzag version of the graph.
let gz = g.zigzag();
// And a HashMap to hold the expanded parents.
let gzcache = get_all_expanded_parents(&gz);
for i in 0..gz.size() {
let parents = gzcache.get(&i).unwrap();
// Check to make sure all (expanded) node-parent relationships also exist in reverse,
// in the original graph's Hashmap.
for p in parents {
assert!(gcache[&(*p as usize)].contains(&(i as u32)));
}
}
// And then do the same check to make sure all (expanded) node-parent relationships from the original
// are present in the zigzag, just reversed.
for i in 0..g.size() {
g.expanded_parents(i, |parents| {
for p in parents.iter() {
assert!(gzcache[&(*p as usize)].contains(&(i as u32)));
}
});
}
// Having checked both ways, we know the graph and its zigzag counterpart have 'expanded' components
// which are each other's inverses. It's important that this be true.
}
fn get_all_expanded_parents<H: 'static + Hasher>(
zigzag_graph: &ZigZagBucketGraph<H>,
) -> HashMap<usize, Vec<u32>> {
let mut parents_map: HashMap<usize, Vec<u32>> = HashMap::new();
for i in 0..zigzag_graph.size() {
parents_map.insert(i, zigzag_graph.expanded_parents(i, |p| p.clone()));
}
parents_map
}
// Test that 3 (or more) rounds of the Feistel cipher can be used
// as a pseudorandom permutation, that is, each input will be mapped
// to a unique output (and though not test here, since the cipher
// is symmetric, the decryption rounds also work as the inverse
// permutation), for more details see:
// https://en.wikipedia.org/wiki/Feistel_cipher#Theoretical_work.
#[test]
fn test_shuffle() {
let n = 2_u64.pow(10);
let d = EXP_DEGREE as u64;
// Use a relatively small value of `n` as Feistel is expensive (but big
// enough that `n >> d`).
let mut shuffled: HashSet<u64> = HashSet::with_capacity((n * d) as usize);
let feistel_keys = &[1, 2, 3, 4];
let feistel_precomputed = feistel::precompute((n * d) as feistel::Index);
for i in 0..n {
for k in 0..d {
let permuted =
feistel::permute(n * d, i * d + k, feistel_keys, feistel_precomputed);
// Since the permutation implies a one-to-one correspondence,
// traversing the entire input space should generate the entire
// output space (in `shuffled`) without repetitions (since a duplicate
// output would imply there is another output that wasn't generated
// and the permutation would be incomplete).
assert!(shuffled.insert(permuted));
}
}
// Actually implied by the previous `assert!` this is left in place as an
// extra safety check that indeed the permutation preserved all the output
// space (of `n * d` nodes) without repetitions (which the `HashSet` would
// have skipped as duplicates).
assert_eq!(shuffled.len(), (n * d) as usize);
}
}
| expansion_degree | identifier_name |
zigzag_graph.rs | use std::collections::HashMap;
use std::marker::PhantomData;
use std::sync::{Arc, RwLock};
use crate::crypto::feistel::{self, FeistelPrecomputed};
use crate::drgraph::{BucketGraph, Graph, BASE_DEGREE};
use crate::hasher::Hasher;
use crate::layered_drgporep::Layerable;
use crate::parameter_cache::ParameterSetMetadata;
use crate::settings;
/// The expansion degree used for ZigZag Graphs.
pub const EXP_DEGREE: usize = 8;
lazy_static! {
// This parents cache is currently used for the *expanded parents only*, generated
// by the expensive Feistel operations in the ZigZag, it doesn't contain the
// "base" (in the `Graph` terminology) parents, which are cheaper to compute.
// It is indexed by the `Graph.identifier`, to ensure that the right cache is used.
static ref PARENT_CACHE: Arc<RwLock<HashMap<String, ParentCache>>> = Arc::new(RwLock::new(HashMap::new()));
}
// ZigZagGraph will hold two different (but related) `ParentCache`,
// the first one for the `forward` direction and the second one for the `reversed`.
#[derive(Debug, Clone)]
pub struct ParentCache {
forward: Vec<Option<Vec<u32>>>,
reverse: Vec<Option<Vec<u32>>>,
// Keep the size of the cache outside the lock to be easily accessible.
cache_entries: u32,
}
impl ParentCache {
pub fn new(cache_entries: u32) -> Self {
ParentCache {
forward: vec![None; cache_entries as usize],
reverse: vec![None; cache_entries as usize],
cache_entries,
}
}
pub fn contains_forward(&self, node: u32) -> bool {
assert!(node < self.cache_entries);
self.forward[node as usize].is_some()
}
pub fn contains_reverse(&self, node: u32) -> bool {
assert!(node < self.cache_entries);
self.reverse[node as usize].is_some()
}
pub fn read_forward<F, T>(&self, node: u32, mut cb: F) -> T
where
F: FnMut(Option<&Vec<u32>>) -> T,
{
assert!(node < self.cache_entries);
cb(self.forward[node as usize].as_ref())
}
pub fn read_reverse<F, T>(&self, node: u32, mut cb: F) -> T
where
F: FnMut(Option<&Vec<u32>>) -> T,
{
assert!(node < self.cache_entries);
cb(self.reverse[node as usize].as_ref())
}
pub fn write_forward(&mut self, node: u32, parents: Vec<u32>) {
assert!(node < self.cache_entries);
let old_value = std::mem::replace(&mut self.forward[node as usize], Some(parents));
debug_assert_eq!(old_value, None);
// We shouldn't be rewriting entries (with most likely the same values),
// this would be a clear indication of a bug.
}
pub fn write_reverse(&mut self, node: u32, parents: Vec<u32>) {
assert!(node < self.cache_entries);
let old_value = std::mem::replace(&mut self.reverse[node as usize], Some(parents));
debug_assert_eq!(old_value, None);
// We shouldn't be rewriting entries (with most likely the same values),
// this would be a clear indication of a bug.
}
}
#[derive(Debug, Clone)]
pub struct ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + 'static,
{
expansion_degree: usize,
base_graph: G,
pub reversed: bool,
feistel_precomputed: FeistelPrecomputed,
id: String,
use_cache: bool,
_h: PhantomData<H>,
}
pub type ZigZagBucketGraph<H> = ZigZagGraph<H, BucketGraph<H>>;
impl<'a, H, G> Layerable<H> for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata + 'static,
{
}
impl<H, G> ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
pub fn new(
base_graph: Option<G>,
nodes: usize,
base_degree: usize,
expansion_degree: usize,
seed: [u32; 7],
) -> Self {
if !cfg!(feature = "unchecked-degrees") {
assert_eq!(base_degree, BASE_DEGREE);
assert_eq!(expansion_degree, EXP_DEGREE);
}
let use_cache = settings::SETTINGS.lock().unwrap().maximize_caching;
let base_graph = match base_graph {
Some(graph) => graph,
None => G::new(nodes, base_degree, 0, seed),
};
let bg_id = base_graph.identifier();
let res = ZigZagGraph {
base_graph,
id: format!(
"zigzag_graph::ZigZagGraph{{expansion_degree: {} base_graph: {} }}",
expansion_degree, bg_id,
),
expansion_degree,
use_cache,
reversed: false,
feistel_precomputed: feistel::precompute((expansion_degree * nodes) as feistel::Index),
_h: PhantomData,
};
if use_cache {
info!("using parents cache of unlimited size",);
assert!(nodes <= std::u32::MAX as usize);
if !PARENT_CACHE.read().unwrap().contains_key(&res.id) {
PARENT_CACHE
.write()
.unwrap()
.insert(res.id.clone(), ParentCache::new(nodes as u32));
}
}
res
}
}
impl<H, G> ParameterSetMetadata for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
fn identifier(&self) -> String {
self.id.clone()
}
fn sector_size(&self) -> u64 {
self.base_graph.sector_size()
}
}
pub trait ZigZag: ::std::fmt::Debug + Clone + PartialEq + Eq {
type BaseHasher: Hasher;
type BaseGraph: Graph<Self::BaseHasher>;
/// zigzag returns a new graph with expansion component inverted and a distinct
/// base DRG graph -- with the direction of drg connections reversed. (i.e. from high-to-low nodes).
/// The name is 'weird', but so is the operation -- hence the choice.
fn zigzag(&self) -> Self;
/// Constructs a new graph.
fn base_graph(&self) -> Self::BaseGraph;
fn expansion_degree(&self) -> usize;
fn reversed(&self) -> bool;
fn expanded_parents<F, T>(&self, node: usize, cb: F) -> T
where
F: FnMut(&Vec<u32>) -> T;
fn real_index(&self, i: usize) -> usize;
fn new_zigzag(
nodes: usize,
base_degree: usize,
expansion_degree: usize,
seed: [u32; 7],
) -> Self;
}
impl<Z: ZigZag> Graph<Z::BaseHasher> for Z {
fn size(&self) -> usize {
self.base_graph().size()
}
fn degree(&self) -> usize {
self.base_graph().degree() + self.expansion_degree()
}
#[inline]
fn parents(&self, raw_node: usize, parents: &mut [usize]) {
// If graph is reversed, use real_index to convert index to reversed index.
// So we convert a raw reversed node to an unreversed node, calculate its parents,
// then convert the parents to reversed.
self.base_graph()
.parents(self.real_index(raw_node), parents);
for parent in parents.iter_mut().take(self.base_graph().degree()) {
*parent = self.real_index(*parent);
}
// expanded_parents takes raw_node
self.expanded_parents(raw_node, |expanded_parents| {
for (ii, value) in expanded_parents.iter().enumerate() {
parents[ii + self.base_graph().degree()] = *value as usize
}
// Pad so all nodes have correct degree.
let current_length = self.base_graph().degree() + expanded_parents.len();
for ii in 0..(self.degree() - current_length) {
if self.reversed() {
parents[ii + current_length] = self.size() - 1
} else {
parents[ii + current_length] = 0
}
}
});
assert!(parents.len() == self.degree());
if self.forward() {
parents.sort();
} else {
// Sort in reverse order.
parents.sort_by(|a, b| a.cmp(b).reverse());
}
assert!(parents.iter().all(|p| if self.forward() {
*p <= raw_node
} else {
*p >= raw_node
}));
}
fn seed(&self) -> [u32; 7] {
self.base_graph().seed()
}
fn new(nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7]) -> Self {
Z::new_zigzag(nodes, base_degree, expansion_degree, seed)
}
fn forward(&self) -> bool {
!self.reversed()
}
}
impl<'a, H, G> ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
// Assign `expansion_degree` parents to `node` using an invertible function. That
// means we can't just generate random values between `[0, size())`, we need to
// expand the search space (domain) to accommodate every unique parent assignment
// generated here. This can be visualized more clearly as a matrix where the each
// new parent of each new node is assigned a unique `index`:
//
//
// | Parent 1 | Parent 2 | Parent 3 |
//
// | Node 1 | 0 | 1 | 2 |
//
// | Node 2 | 3 | 4 | 5 |
//
// | Node 3 | 6 | 7 | 8 |
//
// | Node 4 | 9 | A | B |
//
// This starting `index` will be shuffled to another position to generate a
// parent-child relationship, e.g., if generating the parents for the second node,
// `permute` would be called with values `[3; 4; 5]` that would be mapped to other
// indexes in the search space of `[0, B]`, say, values `[A; 0; 4]`, that would
// correspond to nodes numbered `[4; 1, 2]` which will become the parents of the
// second node. In a later pass invalid parents like 2, self-referencing, and parents
// with indexes bigger than 2 (if in the `forward` direction, smaller than 2 if the
// inverse), will be removed.
//
// Since `permute` is a bijective function which has the inverse `invert_permute`,
// it is guaranteed that when looking for the parents in the `reversed` direction
// the child `node` used earlier will now actually be the parent of the output
// parents generated before (inverting the relationship). Following the example,
// in the reverse direction, when looking for the parents of, say, node 1,
// `invert_permute` (that maps back the output of `permute` to its input) would
// receive the indexes `[0; 1; 2]`, where the index `0` is guaranteed to map back
// to the index `4` that generated it earlier, corresponding to the node 2, inverting
// in fact the child-parent relationship.
fn correspondent(&self, node: usize, i: usize) -> usize {
let a = (node * self.expansion_degree) as feistel::Index + i as feistel::Index;
let feistel_keys = &[1, 2, 3, 4];
let transformed = if self.reversed {
feistel::invert_permute(
self.size() as feistel::Index * self.expansion_degree as feistel::Index,
a,
feistel_keys,
self.feistel_precomputed,
)
} else {
feistel::permute(
self.size() as feistel::Index * self.expansion_degree as feistel::Index,
a,
feistel_keys,
self.feistel_precomputed,
)
};
transformed as usize / self.expansion_degree
// Collapse the output in the matrix search space to the row of the corresponding
// node (losing the column information, that will be regenerated later when calling
// back this function in the `reversed` direction).
}
// Read the `node` entry in the parents cache (which may not exist) for
// the current direction set in the graph and return a copy of it (or
// `None` to signal a cache miss).
fn contains_parents_cache(&self, node: usize) -> bool {
if self.use_cache {
if let Some(ref cache) = PARENT_CACHE.read().unwrap().get(&self.id) {
if self.forward() {
cache.contains_forward(node as u32)
} else {
cache.contains_reverse(node as u32)
}
} else {
false
}
} else {
false
}
}
fn generate_expanded_parents(&self, node: usize) -> Vec<u32> {
(0..self.expansion_degree)
.filter_map(|i| {
let other = self.correspondent(node, i);
if self.reversed {
if other > node {
Some(other as u32)
} else {
None
}
} else if other < node {
Some(other as u32)
} else {
None
}
})
.collect()
}
}
impl<'a, H, G> ZigZag for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
type BaseHasher = H;
type BaseGraph = G;
fn new_zigzag(
nodes: usize,
base_degree: usize,
expansion_degree: usize,
seed: [u32; 7],
) -> Self |
/// To zigzag a graph, we just toggle its reversed field.
/// All the real work happens when we calculate node parents on-demand.
// We always share the two caches (forward/reversed) between
// ZigZag graphs even if each graph will use only one of those
// caches (depending of its direction). This allows to propagate
// the caches across different layers, where consecutive even+odd
// layers have inverse directions.
fn zigzag(&self) -> Self {
let mut zigzag = self.clone();
zigzag.reversed = !zigzag.reversed;
zigzag
}
fn base_graph(&self) -> Self::BaseGraph {
self.base_graph.clone()
}
fn expansion_degree(&self) -> usize {
self.expansion_degree
}
fn reversed(&self) -> bool {
self.reversed
}
// TODO: Optimization: Evaluate providing an `all_parents` (and hence
// `all_expanded_parents`) method that would return the entire cache
// in a single lock operation, or at least (if the cache is not big enough)
// it would allow to batch parents calculations with that single lock. Also,
// since there is a reciprocity between forward and reversed parents,
// we would only need to compute the parents in one direction and with
// that fill both caches.
#[inline]
fn expanded_parents<F, T>(&self, node: usize, mut cb: F) -> T
where
F: FnMut(&Vec<u32>) -> T,
{
if !self.use_cache {
// No cache usage, generate on demand.
return cb(&self.generate_expanded_parents(node));
}
// Check if we need to fill the cache.
if !self.contains_parents_cache(node) {
// Cache is empty so we need to generate the parents.
let parents = self.generate_expanded_parents(node);
// Store the newly generated cached value.
let mut cache_lock = PARENT_CACHE.write().unwrap();
let cache = cache_lock
.get_mut(&self.id)
.expect("Invalid cache construction");
if self.forward() {
cache.write_forward(node as u32, parents);
} else {
cache.write_reverse(node as u32, parents);
}
}
// We made sure the cache is filled above, now we can return the value.
let cache_lock = PARENT_CACHE.read().unwrap();
let cache = cache_lock
.get(&self.id)
.expect("Invalid cache construction");
if self.forward() {
cache.read_forward(node as u32, |parents| cb(parents.unwrap()))
} else {
cache.read_reverse(node as u32, |parents| cb(parents.unwrap()))
}
}
#[inline]
fn real_index(&self, i: usize) -> usize {
if self.reversed {
(self.size() - 1) - i
} else {
i
}
}
}
impl<H, G> PartialEq for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H>,
{
fn eq(&self, other: &ZigZagGraph<H, G>) -> bool {
self.base_graph == other.base_graph
&& self.expansion_degree == other.expansion_degree
&& self.reversed == other.reversed
}
}
impl<H, G> Eq for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H>,
{
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::{HashMap, HashSet};
use crate::drgraph::{new_seed, BASE_DEGREE};
use crate::hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher};
fn assert_graph_ascending<H: Hasher, G: Graph<H>>(g: G) {
for i in 0..g.size() {
let mut parents = vec![0; g.degree()];
g.parents(i, &mut parents);
for p in parents {
if i == 0 {
assert!(p == i);
} else {
assert!(p < i);
}
}
}
}
fn assert_graph_descending<H: Hasher, G: Graph<H>>(g: G) {
for i in 0..g.size() {
let mut parents = vec![0; g.degree()];
g.parents(i, &mut parents);
for p in parents {
if i == g.size() - 1 {
assert!(p == i);
} else {
assert!(p > i);
}
}
}
}
#[test]
fn zigzag_graph_zigzags_pedersen() {
test_zigzag_graph_zigzags::<PedersenHasher>();
}
#[test]
fn zigzag_graph_zigzags_sha256() {
test_zigzag_graph_zigzags::<Sha256Hasher>();
}
#[test]
fn zigzag_graph_zigzags_blake2s() {
test_zigzag_graph_zigzags::<Blake2sHasher>();
}
fn test_zigzag_graph_zigzags<H: 'static + Hasher>() {
let g = ZigZagBucketGraph::<H>::new_zigzag(50, BASE_DEGREE, EXP_DEGREE, new_seed());
let gz = g.zigzag();
assert_graph_ascending(g);
assert_graph_descending(gz);
}
#[test]
fn expansion_pedersen() {
test_expansion::<PedersenHasher>();
}
#[test]
fn expansion_sha256() {
test_expansion::<Sha256Hasher>();
}
#[test]
fn expansion_blake2s() {
test_expansion::<Blake2sHasher>();
}
fn test_expansion<H: 'static + Hasher>() {
// We need a graph.
let g = ZigZagBucketGraph::<H>::new_zigzag(25, BASE_DEGREE, EXP_DEGREE, new_seed());
// We're going to fully realize the expansion-graph component, in a HashMap.
let gcache = get_all_expanded_parents(&g);
// Here's the zigzag version of the graph.
let gz = g.zigzag();
// And a HashMap to hold the expanded parents.
let gzcache = get_all_expanded_parents(&gz);
for i in 0..gz.size() {
let parents = gzcache.get(&i).unwrap();
// Check to make sure all (expanded) node-parent relationships also exist in reverse,
// in the original graph's Hashmap.
for p in parents {
assert!(gcache[&(*p as usize)].contains(&(i as u32)));
}
}
// And then do the same check to make sure all (expanded) node-parent relationships from the original
// are present in the zigzag, just reversed.
for i in 0..g.size() {
g.expanded_parents(i, |parents| {
for p in parents.iter() {
assert!(gzcache[&(*p as usize)].contains(&(i as u32)));
}
});
}
// Having checked both ways, we know the graph and its zigzag counterpart have 'expanded' components
// which are each other's inverses. It's important that this be true.
}
fn get_all_expanded_parents<H: 'static + Hasher>(
zigzag_graph: &ZigZagBucketGraph<H>,
) -> HashMap<usize, Vec<u32>> {
let mut parents_map: HashMap<usize, Vec<u32>> = HashMap::new();
for i in 0..zigzag_graph.size() {
parents_map.insert(i, zigzag_graph.expanded_parents(i, |p| p.clone()));
}
parents_map
}
// Test that 3 (or more) rounds of the Feistel cipher can be used
// as a pseudorandom permutation, that is, each input will be mapped
// to a unique output (and though not test here, since the cipher
// is symmetric, the decryption rounds also work as the inverse
// permutation), for more details see:
// https://en.wikipedia.org/wiki/Feistel_cipher#Theoretical_work.
#[test]
fn test_shuffle() {
let n = 2_u64.pow(10);
let d = EXP_DEGREE as u64;
// Use a relatively small value of `n` as Feistel is expensive (but big
// enough that `n >> d`).
let mut shuffled: HashSet<u64> = HashSet::with_capacity((n * d) as usize);
let feistel_keys = &[1, 2, 3, 4];
let feistel_precomputed = feistel::precompute((n * d) as feistel::Index);
for i in 0..n {
for k in 0..d {
let permuted =
feistel::permute(n * d, i * d + k, feistel_keys, feistel_precomputed);
// Since the permutation implies a one-to-one correspondence,
// traversing the entire input space should generate the entire
// output space (in `shuffled`) without repetitions (since a duplicate
// output would imply there is another output that wasn't generated
// and the permutation would be incomplete).
assert!(shuffled.insert(permuted));
}
}
// Actually implied by the previous `assert!` this is left in place as an
// extra safety check that indeed the permutation preserved all the output
// space (of `n * d` nodes) without repetitions (which the `HashSet` would
// have skipped as duplicates).
assert_eq!(shuffled.len(), (n * d) as usize);
}
}
| {
Self::new(None, nodes, base_degree, expansion_degree, seed)
} | identifier_body |
zigzag_graph.rs | use std::collections::HashMap;
use std::marker::PhantomData;
use std::sync::{Arc, RwLock};
use crate::crypto::feistel::{self, FeistelPrecomputed};
use crate::drgraph::{BucketGraph, Graph, BASE_DEGREE};
use crate::hasher::Hasher;
use crate::layered_drgporep::Layerable;
use crate::parameter_cache::ParameterSetMetadata;
use crate::settings;
/// The expansion degree used for ZigZag Graphs.
pub const EXP_DEGREE: usize = 8;
lazy_static! {
// This parents cache is currently used for the *expanded parents only*, generated
// by the expensive Feistel operations in the ZigZag, it doesn't contain the
// "base" (in the `Graph` terminology) parents, which are cheaper to compute.
// It is indexed by the `Graph.identifier`, to ensure that the right cache is used.
static ref PARENT_CACHE: Arc<RwLock<HashMap<String, ParentCache>>> = Arc::new(RwLock::new(HashMap::new()));
}
// ZigZagGraph will hold two different (but related) `ParentCache`,
// the first one for the `forward` direction and the second one for the `reversed`.
#[derive(Debug, Clone)]
pub struct ParentCache {
forward: Vec<Option<Vec<u32>>>,
reverse: Vec<Option<Vec<u32>>>,
// Keep the size of the cache outside the lock to be easily accessible.
cache_entries: u32,
}
impl ParentCache {
pub fn new(cache_entries: u32) -> Self {
ParentCache {
forward: vec![None; cache_entries as usize],
reverse: vec![None; cache_entries as usize],
cache_entries,
}
}
pub fn contains_forward(&self, node: u32) -> bool {
assert!(node < self.cache_entries);
self.forward[node as usize].is_some()
}
pub fn contains_reverse(&self, node: u32) -> bool {
assert!(node < self.cache_entries);
self.reverse[node as usize].is_some()
}
pub fn read_forward<F, T>(&self, node: u32, mut cb: F) -> T
where
F: FnMut(Option<&Vec<u32>>) -> T,
{
assert!(node < self.cache_entries);
cb(self.forward[node as usize].as_ref())
}
pub fn read_reverse<F, T>(&self, node: u32, mut cb: F) -> T
where
F: FnMut(Option<&Vec<u32>>) -> T,
{
assert!(node < self.cache_entries);
cb(self.reverse[node as usize].as_ref())
}
pub fn write_forward(&mut self, node: u32, parents: Vec<u32>) {
assert!(node < self.cache_entries);
let old_value = std::mem::replace(&mut self.forward[node as usize], Some(parents));
debug_assert_eq!(old_value, None);
// We shouldn't be rewriting entries (with most likely the same values),
// this would be a clear indication of a bug.
}
pub fn write_reverse(&mut self, node: u32, parents: Vec<u32>) {
assert!(node < self.cache_entries);
let old_value = std::mem::replace(&mut self.reverse[node as usize], Some(parents));
debug_assert_eq!(old_value, None);
// We shouldn't be rewriting entries (with most likely the same values),
// this would be a clear indication of a bug.
}
}
#[derive(Debug, Clone)]
pub struct ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + 'static,
{
expansion_degree: usize,
base_graph: G,
pub reversed: bool,
feistel_precomputed: FeistelPrecomputed,
id: String,
use_cache: bool,
_h: PhantomData<H>,
}
pub type ZigZagBucketGraph<H> = ZigZagGraph<H, BucketGraph<H>>;
impl<'a, H, G> Layerable<H> for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata + 'static,
{
}
impl<H, G> ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
pub fn new(
base_graph: Option<G>,
nodes: usize,
base_degree: usize,
expansion_degree: usize,
seed: [u32; 7],
) -> Self {
if !cfg!(feature = "unchecked-degrees") {
assert_eq!(base_degree, BASE_DEGREE);
assert_eq!(expansion_degree, EXP_DEGREE);
}
let use_cache = settings::SETTINGS.lock().unwrap().maximize_caching;
let base_graph = match base_graph {
Some(graph) => graph,
None => G::new(nodes, base_degree, 0, seed),
};
let bg_id = base_graph.identifier();
let res = ZigZagGraph {
base_graph,
id: format!(
"zigzag_graph::ZigZagGraph{{expansion_degree: {} base_graph: {} }}",
expansion_degree, bg_id,
),
expansion_degree,
use_cache,
reversed: false,
feistel_precomputed: feistel::precompute((expansion_degree * nodes) as feistel::Index),
_h: PhantomData,
};
if use_cache {
info!("using parents cache of unlimited size",);
assert!(nodes <= std::u32::MAX as usize);
if !PARENT_CACHE.read().unwrap().contains_key(&res.id) {
PARENT_CACHE
.write()
.unwrap()
.insert(res.id.clone(), ParentCache::new(nodes as u32));
}
}
res
}
}
impl<H, G> ParameterSetMetadata for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
fn identifier(&self) -> String {
self.id.clone()
}
fn sector_size(&self) -> u64 {
self.base_graph.sector_size()
}
}
pub trait ZigZag: ::std::fmt::Debug + Clone + PartialEq + Eq {
type BaseHasher: Hasher;
type BaseGraph: Graph<Self::BaseHasher>;
/// zigzag returns a new graph with expansion component inverted and a distinct
/// base DRG graph -- with the direction of drg connections reversed. (i.e. from high-to-low nodes).
/// The name is 'weird', but so is the operation -- hence the choice.
fn zigzag(&self) -> Self;
/// Constructs a new graph.
fn base_graph(&self) -> Self::BaseGraph;
fn expansion_degree(&self) -> usize;
fn reversed(&self) -> bool;
fn expanded_parents<F, T>(&self, node: usize, cb: F) -> T
where
F: FnMut(&Vec<u32>) -> T;
fn real_index(&self, i: usize) -> usize;
fn new_zigzag(
nodes: usize,
base_degree: usize,
expansion_degree: usize,
seed: [u32; 7],
) -> Self;
}
impl<Z: ZigZag> Graph<Z::BaseHasher> for Z {
fn size(&self) -> usize {
self.base_graph().size()
}
fn degree(&self) -> usize {
self.base_graph().degree() + self.expansion_degree()
}
#[inline]
fn parents(&self, raw_node: usize, parents: &mut [usize]) {
// If graph is reversed, use real_index to convert index to reversed index.
// So we convert a raw reversed node to an unreversed node, calculate its parents,
// then convert the parents to reversed.
self.base_graph()
.parents(self.real_index(raw_node), parents);
for parent in parents.iter_mut().take(self.base_graph().degree()) {
*parent = self.real_index(*parent);
}
// expanded_parents takes raw_node
self.expanded_parents(raw_node, |expanded_parents| {
for (ii, value) in expanded_parents.iter().enumerate() {
parents[ii + self.base_graph().degree()] = *value as usize
}
// Pad so all nodes have correct degree.
let current_length = self.base_graph().degree() + expanded_parents.len();
for ii in 0..(self.degree() - current_length) {
if self.reversed() {
parents[ii + current_length] = self.size() - 1
} else {
parents[ii + current_length] = 0
}
}
});
assert!(parents.len() == self.degree());
if self.forward() {
parents.sort();
} else {
// Sort in reverse order.
parents.sort_by(|a, b| a.cmp(b).reverse());
}
assert!(parents.iter().all(|p| if self.forward() {
*p <= raw_node
} else {
*p >= raw_node
}));
}
fn seed(&self) -> [u32; 7] {
self.base_graph().seed()
}
fn new(nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7]) -> Self {
Z::new_zigzag(nodes, base_degree, expansion_degree, seed)
}
fn forward(&self) -> bool {
!self.reversed()
}
}
impl<'a, H, G> ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
// Assign `expansion_degree` parents to `node` using an invertible function. That
// means we can't just generate random values between `[0, size())`, we need to
// expand the search space (domain) to accommodate every unique parent assignment
// generated here. This can be visualized more clearly as a matrix where the each
// new parent of each new node is assigned a unique `index`:
//
//
// | Parent 1 | Parent 2 | Parent 3 |
//
// | Node 1 | 0 | 1 | 2 |
//
// | Node 2 | 3 | 4 | 5 |
//
// | Node 3 | 6 | 7 | 8 |
//
// | Node 4 | 9 | A | B |
//
// This starting `index` will be shuffled to another position to generate a
// parent-child relationship, e.g., if generating the parents for the second node,
// `permute` would be called with values `[3; 4; 5]` that would be mapped to other
// indexes in the search space of `[0, B]`, say, values `[A; 0; 4]`, that would
// correspond to nodes numbered `[4; 1, 2]` which will become the parents of the
// second node. In a later pass invalid parents like 2, self-referencing, and parents
// with indexes bigger than 2 (if in the `forward` direction, smaller than 2 if the
// inverse), will be removed.
//
// Since `permute` is a bijective function which has the inverse `invert_permute`,
// it is guaranteed that when looking for the parents in the `reversed` direction
// the child `node` used earlier will now actually be the parent of the output
// parents generated before (inverting the relationship). Following the example,
// in the reverse direction, when looking for the parents of, say, node 1,
// `invert_permute` (that maps back the output of `permute` to its input) would
// receive the indexes `[0; 1; 2]`, where the index `0` is guaranteed to map back
// to the index `4` that generated it earlier, corresponding to the node 2, inverting
// in fact the child-parent relationship.
fn correspondent(&self, node: usize, i: usize) -> usize {
let a = (node * self.expansion_degree) as feistel::Index + i as feistel::Index;
let feistel_keys = &[1, 2, 3, 4];
let transformed = if self.reversed {
feistel::invert_permute(
self.size() as feistel::Index * self.expansion_degree as feistel::Index,
a,
feistel_keys,
self.feistel_precomputed,
)
} else {
feistel::permute(
self.size() as feistel::Index * self.expansion_degree as feistel::Index,
a,
feistel_keys,
self.feistel_precomputed,
)
};
transformed as usize / self.expansion_degree
// Collapse the output in the matrix search space to the row of the corresponding
// node (losing the column information, that will be regenerated later when calling
// back this function in the `reversed` direction).
}
// Read the `node` entry in the parents cache (which may not exist) for
// the current direction set in the graph and return a copy of it (or
// `None` to signal a cache miss).
fn contains_parents_cache(&self, node: usize) -> bool {
if self.use_cache {
if let Some(ref cache) = PARENT_CACHE.read().unwrap().get(&self.id) {
if self.forward() {
cache.contains_forward(node as u32)
} else {
cache.contains_reverse(node as u32)
}
} else {
false
}
} else {
false
}
}
fn generate_expanded_parents(&self, node: usize) -> Vec<u32> {
(0..self.expansion_degree)
.filter_map(|i| {
let other = self.correspondent(node, i);
if self.reversed {
if other > node {
Some(other as u32)
} else {
None
}
} else if other < node {
Some(other as u32)
} else {
None
}
})
.collect()
}
}
impl<'a, H, G> ZigZag for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H> + ParameterSetMetadata,
{
type BaseHasher = H;
type BaseGraph = G;
fn new_zigzag(
nodes: usize,
base_degree: usize,
expansion_degree: usize,
seed: [u32; 7],
) -> Self {
Self::new(None, nodes, base_degree, expansion_degree, seed)
}
/// To zigzag a graph, we just toggle its reversed field.
/// All the real work happens when we calculate node parents on-demand.
// We always share the two caches (forward/reversed) between
// ZigZag graphs even if each graph will use only one of those
// caches (depending of its direction). This allows to propagate
// the caches across different layers, where consecutive even+odd
// layers have inverse directions.
fn zigzag(&self) -> Self {
let mut zigzag = self.clone();
zigzag.reversed = !zigzag.reversed;
zigzag
}
fn base_graph(&self) -> Self::BaseGraph {
self.base_graph.clone()
}
fn expansion_degree(&self) -> usize {
self.expansion_degree
}
fn reversed(&self) -> bool {
self.reversed
}
// TODO: Optimization: Evaluate providing an `all_parents` (and hence
// `all_expanded_parents`) method that would return the entire cache
// in a single lock operation, or at least (if the cache is not big enough)
// it would allow to batch parents calculations with that single lock. Also,
// since there is a reciprocity between forward and reversed parents,
// we would only need to compute the parents in one direction and with
// that fill both caches.
#[inline]
fn expanded_parents<F, T>(&self, node: usize, mut cb: F) -> T
where | F: FnMut(&Vec<u32>) -> T,
{
if !self.use_cache {
// No cache usage, generate on demand.
return cb(&self.generate_expanded_parents(node));
}
// Check if we need to fill the cache.
if !self.contains_parents_cache(node) {
// Cache is empty so we need to generate the parents.
let parents = self.generate_expanded_parents(node);
// Store the newly generated cached value.
let mut cache_lock = PARENT_CACHE.write().unwrap();
let cache = cache_lock
.get_mut(&self.id)
.expect("Invalid cache construction");
if self.forward() {
cache.write_forward(node as u32, parents);
} else {
cache.write_reverse(node as u32, parents);
}
}
// We made sure the cache is filled above, now we can return the value.
let cache_lock = PARENT_CACHE.read().unwrap();
let cache = cache_lock
.get(&self.id)
.expect("Invalid cache construction");
if self.forward() {
cache.read_forward(node as u32, |parents| cb(parents.unwrap()))
} else {
cache.read_reverse(node as u32, |parents| cb(parents.unwrap()))
}
}
#[inline]
fn real_index(&self, i: usize) -> usize {
if self.reversed {
(self.size() - 1) - i
} else {
i
}
}
}
impl<H, G> PartialEq for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H>,
{
fn eq(&self, other: &ZigZagGraph<H, G>) -> bool {
self.base_graph == other.base_graph
&& self.expansion_degree == other.expansion_degree
&& self.reversed == other.reversed
}
}
impl<H, G> Eq for ZigZagGraph<H, G>
where
H: Hasher,
G: Graph<H>,
{
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::{HashMap, HashSet};
use crate::drgraph::{new_seed, BASE_DEGREE};
use crate::hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher};
fn assert_graph_ascending<H: Hasher, G: Graph<H>>(g: G) {
for i in 0..g.size() {
let mut parents = vec![0; g.degree()];
g.parents(i, &mut parents);
for p in parents {
if i == 0 {
assert!(p == i);
} else {
assert!(p < i);
}
}
}
}
fn assert_graph_descending<H: Hasher, G: Graph<H>>(g: G) {
for i in 0..g.size() {
let mut parents = vec![0; g.degree()];
g.parents(i, &mut parents);
for p in parents {
if i == g.size() - 1 {
assert!(p == i);
} else {
assert!(p > i);
}
}
}
}
#[test]
fn zigzag_graph_zigzags_pedersen() {
test_zigzag_graph_zigzags::<PedersenHasher>();
}
#[test]
fn zigzag_graph_zigzags_sha256() {
test_zigzag_graph_zigzags::<Sha256Hasher>();
}
#[test]
fn zigzag_graph_zigzags_blake2s() {
test_zigzag_graph_zigzags::<Blake2sHasher>();
}
fn test_zigzag_graph_zigzags<H: 'static + Hasher>() {
let g = ZigZagBucketGraph::<H>::new_zigzag(50, BASE_DEGREE, EXP_DEGREE, new_seed());
let gz = g.zigzag();
assert_graph_ascending(g);
assert_graph_descending(gz);
}
#[test]
fn expansion_pedersen() {
test_expansion::<PedersenHasher>();
}
#[test]
fn expansion_sha256() {
test_expansion::<Sha256Hasher>();
}
#[test]
fn expansion_blake2s() {
test_expansion::<Blake2sHasher>();
}
fn test_expansion<H: 'static + Hasher>() {
// We need a graph.
let g = ZigZagBucketGraph::<H>::new_zigzag(25, BASE_DEGREE, EXP_DEGREE, new_seed());
// We're going to fully realize the expansion-graph component, in a HashMap.
let gcache = get_all_expanded_parents(&g);
// Here's the zigzag version of the graph.
let gz = g.zigzag();
// And a HashMap to hold the expanded parents.
let gzcache = get_all_expanded_parents(&gz);
for i in 0..gz.size() {
let parents = gzcache.get(&i).unwrap();
// Check to make sure all (expanded) node-parent relationships also exist in reverse,
// in the original graph's Hashmap.
for p in parents {
assert!(gcache[&(*p as usize)].contains(&(i as u32)));
}
}
// And then do the same check to make sure all (expanded) node-parent relationships from the original
// are present in the zigzag, just reversed.
for i in 0..g.size() {
g.expanded_parents(i, |parents| {
for p in parents.iter() {
assert!(gzcache[&(*p as usize)].contains(&(i as u32)));
}
});
}
// Having checked both ways, we know the graph and its zigzag counterpart have 'expanded' components
// which are each other's inverses. It's important that this be true.
}
fn get_all_expanded_parents<H: 'static + Hasher>(
zigzag_graph: &ZigZagBucketGraph<H>,
) -> HashMap<usize, Vec<u32>> {
let mut parents_map: HashMap<usize, Vec<u32>> = HashMap::new();
for i in 0..zigzag_graph.size() {
parents_map.insert(i, zigzag_graph.expanded_parents(i, |p| p.clone()));
}
parents_map
}
// Test that 3 (or more) rounds of the Feistel cipher can be used
// as a pseudorandom permutation, that is, each input will be mapped
// to a unique output (and though not test here, since the cipher
// is symmetric, the decryption rounds also work as the inverse
// permutation), for more details see:
// https://en.wikipedia.org/wiki/Feistel_cipher#Theoretical_work.
#[test]
fn test_shuffle() {
let n = 2_u64.pow(10);
let d = EXP_DEGREE as u64;
// Use a relatively small value of `n` as Feistel is expensive (but big
// enough that `n >> d`).
let mut shuffled: HashSet<u64> = HashSet::with_capacity((n * d) as usize);
let feistel_keys = &[1, 2, 3, 4];
let feistel_precomputed = feistel::precompute((n * d) as feistel::Index);
for i in 0..n {
for k in 0..d {
let permuted =
feistel::permute(n * d, i * d + k, feistel_keys, feistel_precomputed);
// Since the permutation implies a one-to-one correspondence,
// traversing the entire input space should generate the entire
// output space (in `shuffled`) without repetitions (since a duplicate
// output would imply there is another output that wasn't generated
// and the permutation would be incomplete).
assert!(shuffled.insert(permuted));
}
}
// Actually implied by the previous `assert!` this is left in place as an
// extra safety check that indeed the permutation preserved all the output
// space (of `n * d` nodes) without repetitions (which the `HashSet` would
// have skipped as duplicates).
assert_eq!(shuffled.len(), (n * d) as usize);
}
} | random_line_split | |
converter.py |
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfdevice import PDFDevice
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, PDFTextState, PDFGraphicState
from pdfminer.pdftypes import list_value, dict_value, stream_value, PDFStream
from pdfminer.psparser import LIT, PSLiteral
from pdfminer.pdftypes import PDFObjRef, resolve1
from pdfminer.utils import mult_matrix
from pdftext import TextAnalyzer, textSpanBox
import pdffonts
import colorspace
def literal(name): return LIT(
name) if not isinstance(name, PSLiteral) else name
def render_type(ftype):
def render_function(func):
def render_arguments(self, *args, **kwargs):
if ftype in self.filtered:
return
return func(self, *args, **kwargs)
return render_arguments
return render_function
def get_default(res_type):
def binding(func):
def get_arguments(self, objid, obj=None):
res_list = getattr(self, res_type+'s', None)
if res_list is None:
return
if objid is not None:
objid = literal(objid)
if objid in res_list:
return res_list[objid]
elif obj is None:
return
func(self, objid, obj=obj)
if objid is not None:
return res_list.get(objid)
return get_arguments
return binding
class Paint:
def __init__(self, cs, value):
self.cs = cs
self.value = value
def draw(self):
return self.cs.getRGB(*self.value)
class TextState(PDFTextState):
def __init__(self):
super().__init__()
self.fill = None
self.extState = {}
def copy(self):
obj = self.__class__()
obj.font = self.font
obj.fontsize = self.fontsize
obj.charspace = self.charspace
obj.wordspace = self.wordspace
obj.scaling = self.scaling
obj.leading = self.leading
obj.render = self.render
obj.rise = self.rise
obj.matrix = self.matrix
obj.linematrix = self.linematrix
obj.fill = self.fill
obj.extState = self.extState
return obj
def __setattr__(self, key, value):
if key in ['charspace', 'wordspace']:
value *= getattr(self, 'scaling', 100) * 0.01
return object.__setattr__(self, key, value)
class GraphicState(PDFGraphicState):
def __init__(self):
super().__init__()
self.stroke = self.fill = None
self.extState = {}
def copy(self):
obj = self.__class__()
obj.linewidth = self.linewidth
obj.linecap = self.linecap
obj.linejoin = self.linejoin
obj.miterlimit = self.miterlimit
obj.dash = self.dash
obj.intent = self.intent
obj.flatness = self.flatness
obj.stroke = self.stroke
obj.fill = self.fill
obj.extState = self.extState
return obj
class Device(PDFDevice):
def __init__(self, filtered=None, laparams=None, check_visible=True):
super().__init__(None)
self.filtered = filtered or []
self.check_visible = check_visible
self.analyzer = TextAnalyzer(**(laparams or {}))
self.pageno = 1
self.reset()
self.viewBox = [0, 0, 0, 0]
def reset(self):
self.images = {}
self.text_layer = []
self.layers = {}
self.layer_stack = []
def begin_page(self, page, ctm):
self.reset()
self.layers[LIT('Page')] = (page.cropbox, ctm)
self.layer_stack = [LIT('Page')]
self.viewBox = page.cropbox
self.ymax = page.mediabox[3] - page.mediabox[1]
def is_visible(self, span, bbox):
boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))
if len(boxset) < len(span.bbox):
return False
xmin, ymin, xmax, ymax = bbox
return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)
def get_current_layer(self):
i = -1
depth = 0
while True:
layerName = self.layer_stack[i]
if layerName == 'end':
depth += 1
else:
depth -= 1
if depth < 0:
break
i -= 1
return layerName, self.layers[layerName]
def end_page(self, page):
self.text_layer = filter(lambda x: not self.check_visible
or self.is_visible(x, self.viewBox), self.text_layer)
lines = self.analyzer.group_lines(self.text_layer)
paras = self.analyzer.group_paras(lines)
self.text_layer = paras
self.pageno += 1
def begin_figure(self, name, bbox, matrix):
x, y, w, h = bbox
self.layers[name] = ([x, y, x+w, y+h], matrix)
self.layer_stack.append(name)
def end_figure(self, name):
self.layer_stack.append('end')
@render_type('path')
def paint_path(self, graphicstate, stroke, fill, evenodd, path):
# path handling suspended
return path
@render_type('image')
def render_image(self, name, stream, anchored=False, textstate=None):
bbox, matrix = self.get_current_layer()[1]
self.images.setdefault(stream.objid, (name, stream, bbox, matrix))
@render_type('text')
def render_string(self, textstate, seq, *args):
layerName = self.get_current_layer()[0]
x, y = textstate.linematrix
a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)
matrix = a, b, c, d, e, self.ymax - f
box = textSpanBox((x, y), seq, textstate, layerName=layerName, matrix=matrix)
# check if text is visible
if not textstate.extState.get('OP', False) or not textstate.extState.get('OPM', 0):
self.text_layer.append(box)
elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):
self.text_layer.append(box)
textstate.linematrix = box.originbox[2]
class ResourceManager(PDFResourceManager):
def __init__(self):
self.fonts = {}
self.colorspaces = colorspace.defaults.copy()
self.xobjects = {}
self.cache = {}
self.stream_objects = []
def clear(self):
for res in self.fonts:
stream_to_close = getattr(res, 'embedFont', None)
stream_to_close and stream_to_close.close()
self.fonts.clear()
self.colorspaces.clear()
self.xobjects.clear()
def render_resource(self, res_type, res_obj):
|
@get_default('font')
def get_font(self, objid, obj=None):
for (fontid, spec) in dict_value(obj).items():
spec = dict_value(spec)
spec, fontType, embedFont, opentype = pdffonts.getType(spec)
if fontType:
font = fontType(spec, embedFont=embedFont and self.xobjects.get(
embedFont.objid, embedFont), opentype=opentype)
if embedFont:
objid = literal(embedFont.objid)
if not objid in self.xobjects:
self.xobjects[objid] = font.embedFont
self.fonts[literal(fontid)] = font
@get_default('colorspace')
def get_colorspace(self, objid, obj=None):
for (csid, spec) in dict_value(obj).items():
cs = colorspace.parse(spec)
if cs:
self.colorspaces[literal(csid)] = cs
def get_procset(self, objid, obj=None):
# procset handling suspended
pass
@get_default('xobject')
def get_xobject(self, objid, obj=None):
for (xobjid, xobjstrm) in dict_value(obj).items():
self.xobjects[literal(xobjid)] = xobjstrm
class Interpreter(PDFPageInterpreter):
def __init__(self, device):
self.rsrcmgr = ResourceManager()
self.device = device
# custom logging here
def log(self, message):
pass
def dup(self):
return self.__class__(self.device)
def close(self):
self.rsrcmgr.clear()
def init_resources(self, resources):
self.resources = resources
if resources:
for (k, v) in dict_value(resources).items():
self.debug and self.log('Resource: %r: %r' % (k, v))
self.rsrcmgr.render_resource(k, v)
def init_state(self, ctm):
self.gstack = []
self.ctm = ctm
self.device.set_ctm(self.ctm)
self.textstate = TextState()
self.graphicstate = GraphicState()
self.curpath = []
self.argstack = []
self.scs = self.ncs = colorspace.CMYKColorSpace()
def do_CS(self, name):
self.scs = self.rsrcmgr.get_colorspace(literal(name))
def do_cs(self, name):
self.ncs = self.rsrcmgr.get_colorspace(literal(name))
def do_SCN(self):
n = len(self.scs.mode)
pattern = self.argstack[-n:]
self.graphicstate.stroke = Paint(self.scs, pattern)
self.argstack = self.argstack[:-n]
def do_scn(self):
n = len(self.ncs.mode)
pattern = self.argstack[-n:]
self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)
self.argstack = self.argstack[:-n]
def do_G(self, gray):
cs = colorspace.GrayColorSpace()
self.graphicstate.stroke = Paint(cs, gray)
def do_g(self, gray):
cs = colorspace.GrayColorSpace()
self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)
def do_RG(self, r, g, b):
cs = colorspace.RGBColorSpace()
self.graphicstate.stroke = Paint(cs, (r, g, b))
def do_rg(self, r, g, b):
cs = colorspace.RGBColorSpace()
self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))
def do_K(self, c, m, y, k):
cs = colorspace.CMYKColorSpace()
self.graphicstate.stroke = Paint(cs, (c, m, y, k))
def do_k(self, c, m, y, k):
cs = colorspace.CMYKColorSpace()
self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))
def do_Tf(self, fontid, fontsize):
self.textstate.font = self.rsrcmgr.get_font(literal(fontid))
self.textstate.fontsize = fontsize
def do_Do(self, xobjid):
xobj = self.rsrcmgr.get_xobject(literal(xobjid))
if not xobj:
return
self.debug and self.log('Processing xobj: %r' % xobj)
xobj = stream_value(xobj)
subtype = xobj.get('Subtype')
if subtype is LIT('Form') and 'BBox' in xobj:
interpreter = self.dup()
bbox = list_value(xobj['BBox'])
matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))
# According to PDF reference 1.7 section 4.9.1, XObjects in
# earlier PDFs (prior to v1.2) use the page's Resources entry
# instead of having their own Resources entry.
resources = dict_value(xobj.get('Resources')
) or self.resources.copy()
self.device.begin_figure(xobjid, bbox, matrix)
interpreter.render_contents(
resources, [xobj], ctm=mult_matrix(matrix, self.ctm))
self.device.end_figure(xobjid)
elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:
self.device.render_image(xobjid, xobj, anchored=True)
else:
# unsupported xobject type.
pass
def do_EI(self, obj):
if 'W' in obj and 'H' in obj:
self.device.render_image(
str(id(obj)), obj, anchored=False, state=self.textstate)
def do_gs(self, name):
if isinstance(name, PSLiteral):
name = name.name
gstate = self.resources['ExtGState'].get(name)
if gstate and not self.textstate.extState:
gstate = resolve1(gstate)
self.textstate.extState = gstate
def do_q(self):
self.gstack.append(self.get_current_state())
def do_Q(self):
self.gstack and self.set_current_state(self.gstack.pop())
# def do_Td(self, tx, ty):
# x, y = self.textstate.linematrix
# # print((x,y), (tx,ty))
# (a, b, c, d, e, f) = self.textstate.matrix
# print((x,y), (tx,ty), (tx*a+ty*c+e, tx*b+ty*d+f))
# self.textstate.matrix = (a, b, c, d, tx*a+ty*c+e, tx*b+ty*d+f)
# self.textstate.linematrix = (0, 0)
| get_function = getattr(self, 'get_' + res_type.lower(), None)
return get_function and get_function(None, obj=res_obj) | identifier_body |
converter.py |
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfdevice import PDFDevice
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, PDFTextState, PDFGraphicState
from pdfminer.pdftypes import list_value, dict_value, stream_value, PDFStream
from pdfminer.psparser import LIT, PSLiteral
from pdfminer.pdftypes import PDFObjRef, resolve1
from pdfminer.utils import mult_matrix
from pdftext import TextAnalyzer, textSpanBox
import pdffonts
import colorspace
def literal(name): return LIT(
name) if not isinstance(name, PSLiteral) else name
def render_type(ftype):
def render_function(func):
def render_arguments(self, *args, **kwargs):
if ftype in self.filtered:
return
return func(self, *args, **kwargs)
return render_arguments
return render_function
def get_default(res_type):
def binding(func):
def get_arguments(self, objid, obj=None):
res_list = getattr(self, res_type+'s', None)
if res_list is None:
return
if objid is not None:
objid = literal(objid)
if objid in res_list:
return res_list[objid]
elif obj is None:
return
func(self, objid, obj=obj)
if objid is not None:
return res_list.get(objid)
return get_arguments
return binding
class Paint:
def __init__(self, cs, value):
self.cs = cs
self.value = value
def draw(self):
return self.cs.getRGB(*self.value)
class TextState(PDFTextState):
def __init__(self):
super().__init__()
self.fill = None
self.extState = {}
def copy(self):
obj = self.__class__()
obj.font = self.font
obj.fontsize = self.fontsize
obj.charspace = self.charspace
obj.wordspace = self.wordspace
obj.scaling = self.scaling
obj.leading = self.leading
obj.render = self.render
obj.rise = self.rise
obj.matrix = self.matrix
obj.linematrix = self.linematrix
obj.fill = self.fill
obj.extState = self.extState
return obj
def __setattr__(self, key, value):
if key in ['charspace', 'wordspace']:
value *= getattr(self, 'scaling', 100) * 0.01
return object.__setattr__(self, key, value)
class GraphicState(PDFGraphicState):
def __init__(self):
super().__init__()
self.stroke = self.fill = None
self.extState = {}
def copy(self):
obj = self.__class__()
obj.linewidth = self.linewidth
obj.linecap = self.linecap
obj.linejoin = self.linejoin
obj.miterlimit = self.miterlimit
obj.dash = self.dash
obj.intent = self.intent
obj.flatness = self.flatness
obj.stroke = self.stroke
obj.fill = self.fill
obj.extState = self.extState
return obj
class Device(PDFDevice):
def __init__(self, filtered=None, laparams=None, check_visible=True):
super().__init__(None)
self.filtered = filtered or []
self.check_visible = check_visible
self.analyzer = TextAnalyzer(**(laparams or {}))
self.pageno = 1
self.reset()
self.viewBox = [0, 0, 0, 0]
def reset(self):
self.images = {}
self.text_layer = []
self.layers = {}
self.layer_stack = []
def begin_page(self, page, ctm):
self.reset()
self.layers[LIT('Page')] = (page.cropbox, ctm)
self.layer_stack = [LIT('Page')]
self.viewBox = page.cropbox
self.ymax = page.mediabox[3] - page.mediabox[1]
def is_visible(self, span, bbox):
boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))
if len(boxset) < len(span.bbox):
return False
xmin, ymin, xmax, ymax = bbox
return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)
def get_current_layer(self):
i = -1
depth = 0
while True:
layerName = self.layer_stack[i]
if layerName == 'end':
depth += 1
else:
depth -= 1
if depth < 0:
break
i -= 1
return layerName, self.layers[layerName]
def end_page(self, page):
self.text_layer = filter(lambda x: not self.check_visible
or self.is_visible(x, self.viewBox), self.text_layer)
lines = self.analyzer.group_lines(self.text_layer)
paras = self.analyzer.group_paras(lines)
self.text_layer = paras
self.pageno += 1
def begin_figure(self, name, bbox, matrix):
x, y, w, h = bbox
self.layers[name] = ([x, y, x+w, y+h], matrix)
self.layer_stack.append(name)
def end_figure(self, name):
self.layer_stack.append('end')
@render_type('path')
def paint_path(self, graphicstate, stroke, fill, evenodd, path):
# path handling suspended
return path
@render_type('image')
def render_image(self, name, stream, anchored=False, textstate=None):
bbox, matrix = self.get_current_layer()[1]
self.images.setdefault(stream.objid, (name, stream, bbox, matrix))
@render_type('text')
def render_string(self, textstate, seq, *args):
layerName = self.get_current_layer()[0]
x, y = textstate.linematrix
a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)
matrix = a, b, c, d, e, self.ymax - f
box = textSpanBox((x, y), seq, textstate, layerName=layerName, matrix=matrix)
# check if text is visible
if not textstate.extState.get('OP', False) or not textstate.extState.get('OPM', 0):
self.text_layer.append(box)
elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):
self.text_layer.append(box)
textstate.linematrix = box.originbox[2]
class ResourceManager(PDFResourceManager):
def __init__(self):
self.fonts = {}
self.colorspaces = colorspace.defaults.copy()
self.xobjects = {}
self.cache = {}
self.stream_objects = []
def clear(self):
for res in self.fonts:
stream_to_close = getattr(res, 'embedFont', None)
stream_to_close and stream_to_close.close()
self.fonts.clear()
self.colorspaces.clear()
self.xobjects.clear()
def render_resource(self, res_type, res_obj):
get_function = getattr(self, 'get_' + res_type.lower(), None)
return get_function and get_function(None, obj=res_obj)
@get_default('font')
def get_font(self, objid, obj=None):
for (fontid, spec) in dict_value(obj).items():
spec = dict_value(spec)
spec, fontType, embedFont, opentype = pdffonts.getType(spec)
if fontType:
font = fontType(spec, embedFont=embedFont and self.xobjects.get(
embedFont.objid, embedFont), opentype=opentype)
if embedFont:
objid = literal(embedFont.objid)
if not objid in self.xobjects:
self.xobjects[objid] = font.embedFont
self.fonts[literal(fontid)] = font
@get_default('colorspace')
def get_colorspace(self, objid, obj=None):
for (csid, spec) in dict_value(obj).items():
cs = colorspace.parse(spec)
if cs:
self.colorspaces[literal(csid)] = cs
def get_procset(self, objid, obj=None):
# procset handling suspended
pass
@get_default('xobject')
def get_xobject(self, objid, obj=None):
for (xobjid, xobjstrm) in dict_value(obj).items():
self.xobjects[literal(xobjid)] = xobjstrm
class Interpreter(PDFPageInterpreter):
def __init__(self, device):
self.rsrcmgr = ResourceManager()
self.device = device
# custom logging here
def log(self, message):
pass
def dup(self):
return self.__class__(self.device)
def close(self):
self.rsrcmgr.clear()
def init_resources(self, resources):
self.resources = resources
if resources:
for (k, v) in dict_value(resources).items():
self.debug and self.log('Resource: %r: %r' % (k, v))
self.rsrcmgr.render_resource(k, v)
def init_state(self, ctm):
self.gstack = []
self.ctm = ctm
self.device.set_ctm(self.ctm)
self.textstate = TextState()
self.graphicstate = GraphicState()
self.curpath = []
self.argstack = []
self.scs = self.ncs = colorspace.CMYKColorSpace()
def do_CS(self, name):
self.scs = self.rsrcmgr.get_colorspace(literal(name))
def do_cs(self, name):
self.ncs = self.rsrcmgr.get_colorspace(literal(name))
def do_SCN(self):
n = len(self.scs.mode)
pattern = self.argstack[-n:]
self.graphicstate.stroke = Paint(self.scs, pattern)
self.argstack = self.argstack[:-n]
def do_scn(self):
n = len(self.ncs.mode)
pattern = self.argstack[-n:]
self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)
self.argstack = self.argstack[:-n]
def do_G(self, gray):
cs = colorspace.GrayColorSpace()
self.graphicstate.stroke = Paint(cs, gray)
def do_g(self, gray):
cs = colorspace.GrayColorSpace()
self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)
def do_RG(self, r, g, b):
cs = colorspace.RGBColorSpace()
self.graphicstate.stroke = Paint(cs, (r, g, b))
def do_rg(self, r, g, b):
cs = colorspace.RGBColorSpace()
self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))
def do_K(self, c, m, y, k):
cs = colorspace.CMYKColorSpace()
self.graphicstate.stroke = Paint(cs, (c, m, y, k))
def do_k(self, c, m, y, k):
cs = colorspace.CMYKColorSpace()
self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))
def do_Tf(self, fontid, fontsize):
self.textstate.font = self.rsrcmgr.get_font(literal(fontid))
self.textstate.fontsize = fontsize
def do_Do(self, xobjid):
xobj = self.rsrcmgr.get_xobject(literal(xobjid))
if not xobj:
return
self.debug and self.log('Processing xobj: %r' % xobj)
xobj = stream_value(xobj)
subtype = xobj.get('Subtype')
if subtype is LIT('Form') and 'BBox' in xobj:
interpreter = self.dup()
bbox = list_value(xobj['BBox'])
matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))
# According to PDF reference 1.7 section 4.9.1, XObjects in
# earlier PDFs (prior to v1.2) use the page's Resources entry
# instead of having their own Resources entry.
resources = dict_value(xobj.get('Resources')
) or self.resources.copy()
self.device.begin_figure(xobjid, bbox, matrix)
interpreter.render_contents(
resources, [xobj], ctm=mult_matrix(matrix, self.ctm))
self.device.end_figure(xobjid)
elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:
self.device.render_image(xobjid, xobj, anchored=True)
else:
# unsupported xobject type.
pass
def do_EI(self, obj):
if 'W' in obj and 'H' in obj:
self.device.render_image(
str(id(obj)), obj, anchored=False, state=self.textstate)
def | (self, name):
if isinstance(name, PSLiteral):
name = name.name
gstate = self.resources['ExtGState'].get(name)
if gstate and not self.textstate.extState:
gstate = resolve1(gstate)
self.textstate.extState = gstate
def do_q(self):
self.gstack.append(self.get_current_state())
def do_Q(self):
self.gstack and self.set_current_state(self.gstack.pop())
# def do_Td(self, tx, ty):
# x, y = self.textstate.linematrix
# # print((x,y), (tx,ty))
# (a, b, c, d, e, f) = self.textstate.matrix
# print((x,y), (tx,ty), (tx*a+ty*c+e, tx*b+ty*d+f))
# self.textstate.matrix = (a, b, c, d, tx*a+ty*c+e, tx*b+ty*d+f)
# self.textstate.linematrix = (0, 0)
| do_gs | identifier_name |
converter.py |
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfdevice import PDFDevice
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, PDFTextState, PDFGraphicState
from pdfminer.pdftypes import list_value, dict_value, stream_value, PDFStream
from pdfminer.psparser import LIT, PSLiteral
from pdfminer.pdftypes import PDFObjRef, resolve1
from pdfminer.utils import mult_matrix
from pdftext import TextAnalyzer, textSpanBox
import pdffonts
import colorspace
def literal(name): return LIT(
name) if not isinstance(name, PSLiteral) else name
def render_type(ftype):
def render_function(func):
def render_arguments(self, *args, **kwargs):
if ftype in self.filtered:
return
return func(self, *args, **kwargs)
return render_arguments
return render_function
def get_default(res_type):
def binding(func):
def get_arguments(self, objid, obj=None):
res_list = getattr(self, res_type+'s', None)
if res_list is None:
return
if objid is not None:
objid = literal(objid)
if objid in res_list:
return res_list[objid]
elif obj is None:
return
func(self, objid, obj=obj)
if objid is not None:
return res_list.get(objid)
return get_arguments
return binding
class Paint:
def __init__(self, cs, value):
self.cs = cs
self.value = value
def draw(self):
return self.cs.getRGB(*self.value)
class TextState(PDFTextState):
def __init__(self):
super().__init__()
self.fill = None
self.extState = {}
def copy(self):
obj = self.__class__()
obj.font = self.font
obj.fontsize = self.fontsize
obj.charspace = self.charspace
obj.wordspace = self.wordspace
obj.scaling = self.scaling
obj.leading = self.leading
obj.render = self.render
obj.rise = self.rise
obj.matrix = self.matrix
obj.linematrix = self.linematrix
obj.fill = self.fill
obj.extState = self.extState
return obj
def __setattr__(self, key, value):
if key in ['charspace', 'wordspace']:
value *= getattr(self, 'scaling', 100) * 0.01
return object.__setattr__(self, key, value)
class GraphicState(PDFGraphicState):
def __init__(self):
super().__init__()
self.stroke = self.fill = None
self.extState = {}
def copy(self):
obj = self.__class__()
obj.linewidth = self.linewidth
obj.linecap = self.linecap
obj.linejoin = self.linejoin
obj.miterlimit = self.miterlimit
obj.dash = self.dash
obj.intent = self.intent
obj.flatness = self.flatness
obj.stroke = self.stroke
obj.fill = self.fill
obj.extState = self.extState
return obj
class Device(PDFDevice):
def __init__(self, filtered=None, laparams=None, check_visible=True):
super().__init__(None)
self.filtered = filtered or []
self.check_visible = check_visible
self.analyzer = TextAnalyzer(**(laparams or {}))
self.pageno = 1
self.reset()
self.viewBox = [0, 0, 0, 0]
def reset(self):
self.images = {}
self.text_layer = []
self.layers = {}
self.layer_stack = []
def begin_page(self, page, ctm):
self.reset()
self.layers[LIT('Page')] = (page.cropbox, ctm)
self.layer_stack = [LIT('Page')]
self.viewBox = page.cropbox
self.ymax = page.mediabox[3] - page.mediabox[1]
def is_visible(self, span, bbox):
boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))
if len(boxset) < len(span.bbox):
return False
xmin, ymin, xmax, ymax = bbox
return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)
def get_current_layer(self):
i = -1
depth = 0
while True:
layerName = self.layer_stack[i]
if layerName == 'end':
depth += 1
else:
depth -= 1
if depth < 0:
break
i -= 1
return layerName, self.layers[layerName]
def end_page(self, page):
self.text_layer = filter(lambda x: not self.check_visible
or self.is_visible(x, self.viewBox), self.text_layer)
lines = self.analyzer.group_lines(self.text_layer)
paras = self.analyzer.group_paras(lines)
self.text_layer = paras
self.pageno += 1
def begin_figure(self, name, bbox, matrix):
x, y, w, h = bbox
self.layers[name] = ([x, y, x+w, y+h], matrix)
self.layer_stack.append(name)
def end_figure(self, name):
self.layer_stack.append('end')
@render_type('path')
def paint_path(self, graphicstate, stroke, fill, evenodd, path):
# path handling suspended
return path
@render_type('image')
def render_image(self, name, stream, anchored=False, textstate=None):
bbox, matrix = self.get_current_layer()[1]
self.images.setdefault(stream.objid, (name, stream, bbox, matrix))
@render_type('text')
def render_string(self, textstate, seq, *args):
layerName = self.get_current_layer()[0]
x, y = textstate.linematrix
a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)
matrix = a, b, c, d, e, self.ymax - f
box = textSpanBox((x, y), seq, textstate, layerName=layerName, matrix=matrix)
# check if text is visible
if not textstate.extState.get('OP', False) or not textstate.extState.get('OPM', 0):
self.text_layer.append(box)
elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):
self.text_layer.append(box)
textstate.linematrix = box.originbox[2]
class ResourceManager(PDFResourceManager):
def __init__(self):
self.fonts = {}
self.colorspaces = colorspace.defaults.copy()
self.xobjects = {}
self.cache = {}
self.stream_objects = []
def clear(self):
for res in self.fonts:
stream_to_close = getattr(res, 'embedFont', None)
stream_to_close and stream_to_close.close()
self.fonts.clear()
self.colorspaces.clear()
self.xobjects.clear()
def render_resource(self, res_type, res_obj):
get_function = getattr(self, 'get_' + res_type.lower(), None)
return get_function and get_function(None, obj=res_obj)
@get_default('font')
def get_font(self, objid, obj=None):
for (fontid, spec) in dict_value(obj).items():
spec = dict_value(spec)
spec, fontType, embedFont, opentype = pdffonts.getType(spec)
if fontType:
font = fontType(spec, embedFont=embedFont and self.xobjects.get(
embedFont.objid, embedFont), opentype=opentype)
if embedFont:
objid = literal(embedFont.objid)
if not objid in self.xobjects:
|
self.fonts[literal(fontid)] = font
@get_default('colorspace')
def get_colorspace(self, objid, obj=None):
for (csid, spec) in dict_value(obj).items():
cs = colorspace.parse(spec)
if cs:
self.colorspaces[literal(csid)] = cs
def get_procset(self, objid, obj=None):
# procset handling suspended
pass
@get_default('xobject')
def get_xobject(self, objid, obj=None):
for (xobjid, xobjstrm) in dict_value(obj).items():
self.xobjects[literal(xobjid)] = xobjstrm
class Interpreter(PDFPageInterpreter):
def __init__(self, device):
self.rsrcmgr = ResourceManager()
self.device = device
# custom logging here
def log(self, message):
pass
def dup(self):
return self.__class__(self.device)
def close(self):
self.rsrcmgr.clear()
def init_resources(self, resources):
self.resources = resources
if resources:
for (k, v) in dict_value(resources).items():
self.debug and self.log('Resource: %r: %r' % (k, v))
self.rsrcmgr.render_resource(k, v)
def init_state(self, ctm):
self.gstack = []
self.ctm = ctm
self.device.set_ctm(self.ctm)
self.textstate = TextState()
self.graphicstate = GraphicState()
self.curpath = []
self.argstack = []
self.scs = self.ncs = colorspace.CMYKColorSpace()
def do_CS(self, name):
self.scs = self.rsrcmgr.get_colorspace(literal(name))
def do_cs(self, name):
self.ncs = self.rsrcmgr.get_colorspace(literal(name))
def do_SCN(self):
n = len(self.scs.mode)
pattern = self.argstack[-n:]
self.graphicstate.stroke = Paint(self.scs, pattern)
self.argstack = self.argstack[:-n]
def do_scn(self):
n = len(self.ncs.mode)
pattern = self.argstack[-n:]
self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)
self.argstack = self.argstack[:-n]
def do_G(self, gray):
cs = colorspace.GrayColorSpace()
self.graphicstate.stroke = Paint(cs, gray)
def do_g(self, gray):
cs = colorspace.GrayColorSpace()
self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)
def do_RG(self, r, g, b):
cs = colorspace.RGBColorSpace()
self.graphicstate.stroke = Paint(cs, (r, g, b))
def do_rg(self, r, g, b):
cs = colorspace.RGBColorSpace()
self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))
def do_K(self, c, m, y, k):
cs = colorspace.CMYKColorSpace()
self.graphicstate.stroke = Paint(cs, (c, m, y, k))
def do_k(self, c, m, y, k):
cs = colorspace.CMYKColorSpace()
self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))
def do_Tf(self, fontid, fontsize):
self.textstate.font = self.rsrcmgr.get_font(literal(fontid))
self.textstate.fontsize = fontsize
def do_Do(self, xobjid):
xobj = self.rsrcmgr.get_xobject(literal(xobjid))
if not xobj:
return
self.debug and self.log('Processing xobj: %r' % xobj)
xobj = stream_value(xobj)
subtype = xobj.get('Subtype')
if subtype is LIT('Form') and 'BBox' in xobj:
interpreter = self.dup()
bbox = list_value(xobj['BBox'])
matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))
# According to PDF reference 1.7 section 4.9.1, XObjects in
# earlier PDFs (prior to v1.2) use the page's Resources entry
# instead of having their own Resources entry.
resources = dict_value(xobj.get('Resources')
) or self.resources.copy()
self.device.begin_figure(xobjid, bbox, matrix)
interpreter.render_contents(
resources, [xobj], ctm=mult_matrix(matrix, self.ctm))
self.device.end_figure(xobjid)
elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:
self.device.render_image(xobjid, xobj, anchored=True)
else:
# unsupported xobject type.
pass
def do_EI(self, obj):
if 'W' in obj and 'H' in obj:
self.device.render_image(
str(id(obj)), obj, anchored=False, state=self.textstate)
def do_gs(self, name):
if isinstance(name, PSLiteral):
name = name.name
gstate = self.resources['ExtGState'].get(name)
if gstate and not self.textstate.extState:
gstate = resolve1(gstate)
self.textstate.extState = gstate
def do_q(self):
self.gstack.append(self.get_current_state())
def do_Q(self):
self.gstack and self.set_current_state(self.gstack.pop())
# def do_Td(self, tx, ty):
# x, y = self.textstate.linematrix
# # print((x,y), (tx,ty))
# (a, b, c, d, e, f) = self.textstate.matrix
# print((x,y), (tx,ty), (tx*a+ty*c+e, tx*b+ty*d+f))
# self.textstate.matrix = (a, b, c, d, tx*a+ty*c+e, tx*b+ty*d+f)
# self.textstate.linematrix = (0, 0)
| self.xobjects[objid] = font.embedFont | conditional_block |
converter.py | from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfdevice import PDFDevice
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, PDFTextState, PDFGraphicState
from pdfminer.pdftypes import list_value, dict_value, stream_value, PDFStream
from pdfminer.psparser import LIT, PSLiteral
from pdfminer.pdftypes import PDFObjRef, resolve1
from pdfminer.utils import mult_matrix
from pdftext import TextAnalyzer, textSpanBox
import pdffonts
import colorspace
def literal(name): return LIT(
name) if not isinstance(name, PSLiteral) else name
def render_type(ftype):
def render_function(func):
def render_arguments(self, *args, **kwargs):
if ftype in self.filtered:
return
return func(self, *args, **kwargs)
return render_arguments
return render_function
def get_default(res_type):
def binding(func):
def get_arguments(self, objid, obj=None):
res_list = getattr(self, res_type+'s', None)
if res_list is None:
return
if objid is not None:
objid = literal(objid)
if objid in res_list:
return res_list[objid]
elif obj is None:
return
func(self, objid, obj=obj)
if objid is not None:
return res_list.get(objid)
return get_arguments
return binding
class Paint:
def __init__(self, cs, value):
self.cs = cs
self.value = value
def draw(self):
return self.cs.getRGB(*self.value)
class TextState(PDFTextState):
def __init__(self):
super().__init__()
self.fill = None
self.extState = {}
def copy(self):
obj = self.__class__()
obj.font = self.font
obj.fontsize = self.fontsize
obj.charspace = self.charspace
obj.wordspace = self.wordspace
obj.scaling = self.scaling
obj.leading = self.leading
obj.render = self.render
obj.rise = self.rise
obj.matrix = self.matrix
obj.linematrix = self.linematrix
obj.fill = self.fill
obj.extState = self.extState
return obj
def __setattr__(self, key, value):
if key in ['charspace', 'wordspace']:
value *= getattr(self, 'scaling', 100) * 0.01
return object.__setattr__(self, key, value)
class GraphicState(PDFGraphicState):
def __init__(self):
super().__init__()
self.stroke = self.fill = None
self.extState = {}
def copy(self):
obj = self.__class__()
obj.linewidth = self.linewidth
obj.linecap = self.linecap
obj.linejoin = self.linejoin
obj.miterlimit = self.miterlimit
obj.dash = self.dash
obj.intent = self.intent
obj.flatness = self.flatness
obj.stroke = self.stroke
obj.fill = self.fill
obj.extState = self.extState
return obj
class Device(PDFDevice):
def __init__(self, filtered=None, laparams=None, check_visible=True):
super().__init__(None)
self.filtered = filtered or []
self.check_visible = check_visible
self.analyzer = TextAnalyzer(**(laparams or {}))
self.pageno = 1
self.reset()
self.viewBox = [0, 0, 0, 0]
def reset(self):
self.images = {}
self.text_layer = []
self.layers = {}
self.layer_stack = []
def begin_page(self, page, ctm):
self.reset()
self.layers[LIT('Page')] = (page.cropbox, ctm)
self.layer_stack = [LIT('Page')]
self.viewBox = page.cropbox
self.ymax = page.mediabox[3] - page.mediabox[1]
def is_visible(self, span, bbox):
boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))
if len(boxset) < len(span.bbox):
return False
xmin, ymin, xmax, ymax = bbox
return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)
def get_current_layer(self):
i = -1
depth = 0
while True:
layerName = self.layer_stack[i]
if layerName == 'end':
depth += 1
else:
depth -= 1
if depth < 0:
break
i -= 1
return layerName, self.layers[layerName]
def end_page(self, page):
self.text_layer = filter(lambda x: not self.check_visible
or self.is_visible(x, self.viewBox), self.text_layer)
lines = self.analyzer.group_lines(self.text_layer)
paras = self.analyzer.group_paras(lines)
self.text_layer = paras
self.pageno += 1
def begin_figure(self, name, bbox, matrix):
x, y, w, h = bbox
self.layers[name] = ([x, y, x+w, y+h], matrix)
self.layer_stack.append(name)
def end_figure(self, name):
self.layer_stack.append('end')
@render_type('path')
def paint_path(self, graphicstate, stroke, fill, evenodd, path):
# path handling suspended
return path
@render_type('image')
def render_image(self, name, stream, anchored=False, textstate=None):
bbox, matrix = self.get_current_layer()[1]
self.images.setdefault(stream.objid, (name, stream, bbox, matrix))
@render_type('text')
def render_string(self, textstate, seq, *args):
layerName = self.get_current_layer()[0]
x, y = textstate.linematrix
a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)
matrix = a, b, c, d, e, self.ymax - f
box = textSpanBox((x, y), seq, textstate, layerName=layerName, matrix=matrix)
# check if text is visible
if not textstate.extState.get('OP', False) or not textstate.extState.get('OPM', 0):
self.text_layer.append(box)
elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):
self.text_layer.append(box)
textstate.linematrix = box.originbox[2]
class ResourceManager(PDFResourceManager):
def __init__(self):
self.fonts = {}
self.colorspaces = colorspace.defaults.copy()
self.xobjects = {}
self.cache = {}
self.stream_objects = []
def clear(self):
for res in self.fonts:
stream_to_close = getattr(res, 'embedFont', None)
stream_to_close and stream_to_close.close()
self.fonts.clear()
self.colorspaces.clear()
self.xobjects.clear()
def render_resource(self, res_type, res_obj):
get_function = getattr(self, 'get_' + res_type.lower(), None)
return get_function and get_function(None, obj=res_obj)
@get_default('font')
def get_font(self, objid, obj=None):
for (fontid, spec) in dict_value(obj).items():
spec = dict_value(spec)
spec, fontType, embedFont, opentype = pdffonts.getType(spec)
if fontType:
font = fontType(spec, embedFont=embedFont and self.xobjects.get(
embedFont.objid, embedFont), opentype=opentype)
if embedFont:
objid = literal(embedFont.objid)
if not objid in self.xobjects:
self.xobjects[objid] = font.embedFont
self.fonts[literal(fontid)] = font
@get_default('colorspace')
def get_colorspace(self, objid, obj=None):
for (csid, spec) in dict_value(obj).items():
cs = colorspace.parse(spec)
if cs:
self.colorspaces[literal(csid)] = cs
def get_procset(self, objid, obj=None):
# procset handling suspended
pass
@get_default('xobject')
def get_xobject(self, objid, obj=None):
for (xobjid, xobjstrm) in dict_value(obj).items():
self.xobjects[literal(xobjid)] = xobjstrm
class Interpreter(PDFPageInterpreter):
def __init__(self, device):
self.rsrcmgr = ResourceManager()
self.device = device
# custom logging here
def log(self, message):
pass
def dup(self):
return self.__class__(self.device)
def close(self):
self.rsrcmgr.clear()
def init_resources(self, resources):
self.resources = resources
if resources:
for (k, v) in dict_value(resources).items():
self.debug and self.log('Resource: %r: %r' % (k, v))
self.rsrcmgr.render_resource(k, v)
def init_state(self, ctm):
self.gstack = []
self.ctm = ctm
self.device.set_ctm(self.ctm)
self.textstate = TextState()
self.graphicstate = GraphicState()
self.curpath = []
self.argstack = []
self.scs = self.ncs = colorspace.CMYKColorSpace()
def do_CS(self, name):
self.scs = self.rsrcmgr.get_colorspace(literal(name))
def do_cs(self, name):
self.ncs = self.rsrcmgr.get_colorspace(literal(name))
def do_SCN(self):
n = len(self.scs.mode)
pattern = self.argstack[-n:]
self.graphicstate.stroke = Paint(self.scs, pattern)
self.argstack = self.argstack[:-n]
def do_scn(self):
n = len(self.ncs.mode)
pattern = self.argstack[-n:]
self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)
self.argstack = self.argstack[:-n]
def do_G(self, gray):
cs = colorspace.GrayColorSpace()
self.graphicstate.stroke = Paint(cs, gray)
def do_g(self, gray):
cs = colorspace.GrayColorSpace()
self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)
def do_RG(self, r, g, b):
cs = colorspace.RGBColorSpace()
self.graphicstate.stroke = Paint(cs, (r, g, b))
def do_rg(self, r, g, b):
cs = colorspace.RGBColorSpace()
self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))
def do_K(self, c, m, y, k):
cs = colorspace.CMYKColorSpace()
self.graphicstate.stroke = Paint(cs, (c, m, y, k))
def do_k(self, c, m, y, k):
cs = colorspace.CMYKColorSpace()
self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))
def do_Tf(self, fontid, fontsize):
self.textstate.font = self.rsrcmgr.get_font(literal(fontid))
self.textstate.fontsize = fontsize
def do_Do(self, xobjid):
xobj = self.rsrcmgr.get_xobject(literal(xobjid))
if not xobj:
return
self.debug and self.log('Processing xobj: %r' % xobj)
xobj = stream_value(xobj)
subtype = xobj.get('Subtype')
if subtype is LIT('Form') and 'BBox' in xobj:
interpreter = self.dup()
bbox = list_value(xobj['BBox'])
matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))
# According to PDF reference 1.7 section 4.9.1, XObjects in
# earlier PDFs (prior to v1.2) use the page's Resources entry
# instead of having their own Resources entry.
resources = dict_value(xobj.get('Resources')
) or self.resources.copy()
self.device.begin_figure(xobjid, bbox, matrix)
interpreter.render_contents(
resources, [xobj], ctm=mult_matrix(matrix, self.ctm))
self.device.end_figure(xobjid)
elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:
self.device.render_image(xobjid, xobj, anchored=True)
else:
# unsupported xobject type.
pass
def do_EI(self, obj):
if 'W' in obj and 'H' in obj:
self.device.render_image(
str(id(obj)), obj, anchored=False, state=self.textstate)
def do_gs(self, name):
if isinstance(name, PSLiteral):
name = name.name
gstate = self.resources['ExtGState'].get(name)
if gstate and not self.textstate.extState:
gstate = resolve1(gstate)
self.textstate.extState = gstate
def do_q(self):
self.gstack.append(self.get_current_state()) |
def do_Q(self):
self.gstack and self.set_current_state(self.gstack.pop())
# def do_Td(self, tx, ty):
# x, y = self.textstate.linematrix
# # print((x,y), (tx,ty))
# (a, b, c, d, e, f) = self.textstate.matrix
# print((x,y), (tx,ty), (tx*a+ty*c+e, tx*b+ty*d+f))
# self.textstate.matrix = (a, b, c, d, tx*a+ty*c+e, tx*b+ty*d+f)
# self.textstate.linematrix = (0, 0) | random_line_split | |
diagnostic_server.rs | //! A small TCP server to handle collection of diagnostics information in a
//! cross-platform way for the `cargo fix` command.
use std::collections::HashSet;
use std::io::{BufReader, Read, Write};
use std::net::{Shutdown, SocketAddr, TcpListener, TcpStream};
use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{self, JoinHandle};
use anyhow::{Context, Error};
use cargo_util::ProcessBuilder;
use serde::{Deserialize, Serialize};
use tracing::warn;
use crate::core::Edition;
use crate::util::errors::CargoResult;
use crate::util::Config;
const DIAGNOSTICS_SERVER_VAR: &str = "__CARGO_FIX_DIAGNOSTICS_SERVER";
#[derive(Deserialize, Serialize, Hash, Eq, PartialEq, Clone)]
pub enum Message {
Migrating {
file: String,
from_edition: Edition,
to_edition: Edition,
},
Fixing {
file: String,
},
Fixed {
file: String,
fixes: u32,
},
FixFailed {
files: Vec<String>,
krate: Option<String>,
errors: Vec<String>,
abnormal_exit: Option<String>,
},
ReplaceFailed {
file: String,
message: String,
},
EditionAlreadyEnabled {
message: String,
edition: Edition,
},
}
impl Message {
pub fn post(&self, config: &Config) -> Result<(), Error> {
let addr = config
.get_env(DIAGNOSTICS_SERVER_VAR)
.context("diagnostics collector misconfigured")?;
let mut client =
TcpStream::connect(&addr).context("failed to connect to parent diagnostics target")?;
let s = serde_json::to_string(self).context("failed to serialize message")?;
client
.write_all(s.as_bytes())
.context("failed to write message to diagnostics target")?;
client
.shutdown(Shutdown::Write)
.context("failed to shutdown")?;
client
.read_to_end(&mut Vec::new())
.context("failed to receive a disconnect")?;
Ok(())
}
}
/// A printer that will print diagnostics messages to the shell.
pub struct DiagnosticPrinter<'a> {
/// The config to get the shell to print to.
config: &'a Config,
/// An optional wrapper to be used in addition to `rustc.wrapper` for workspace crates.
/// This is used to get the correct bug report URL. For instance,
/// if `clippy-driver` is set as the value for the wrapper,
/// then the correct bug report URL for `clippy` can be obtained.
workspace_wrapper: &'a Option<PathBuf>,
// A set of messages that have already been printed.
dedupe: HashSet<Message>,
}
impl<'a> DiagnosticPrinter<'a> {
pub fn new(
config: &'a Config,
workspace_wrapper: &'a Option<PathBuf>,
) -> DiagnosticPrinter<'a> {
DiagnosticPrinter {
config,
workspace_wrapper,
dedupe: HashSet::new(),
}
}
pub fn print(&mut self, msg: &Message) -> CargoResult<()> {
match msg {
Message::Migrating {
file,
from_edition,
to_edition,
} => {
if !self.dedupe.insert(msg.clone()) {
return Ok(());
}
self.config.shell().status(
"Migrating",
&format!("{} from {} edition to {}", file, from_edition, to_edition),
)
}
Message::Fixing { file } => self
.config
.shell()
.verbose(|shell| shell.status("Fixing", file)),
Message::Fixed { file, fixes } => {
let msg = if *fixes == 1 { "fix" } else { "fixes" };
let msg = format!("{} ({} {})", file, fixes, msg);
self.config.shell().status("Fixed", msg)
}
Message::ReplaceFailed { file, message } => {
let msg = format!("error applying suggestions to `{}`\n", file);
self.config.shell().warn(&msg)?;
write!(
self.config.shell().err(),
"The full error message was:\n\n> {}\n\n",
message,
)?;
let issue_link = get_bug_report_url(self.workspace_wrapper);
write!(
self.config.shell().err(),
"{}",
gen_please_report_this_bug_text(issue_link)
)?;
Ok(())
}
Message::FixFailed {
files,
krate,
errors,
abnormal_exit,
} => {
if let Some(ref krate) = *krate {
self.config.shell().warn(&format!(
"failed to automatically apply fixes suggested by rustc \
to crate `{}`",
krate,
))?;
} else {
self.config
.shell()
.warn("failed to automatically apply fixes suggested by rustc")?;
}
if !files.is_empty() {
writeln!(
self.config.shell().err(),
"\nafter fixes were automatically applied the compiler \
reported errors within these files:\n"
)?;
for file in files {
writeln!(self.config.shell().err(), " * {}", file)?;
}
writeln!(self.config.shell().err())?;
}
let issue_link = get_bug_report_url(self.workspace_wrapper);
write!(
self.config.shell().err(),
"{}",
gen_please_report_this_bug_text(issue_link)
)?;
if !errors.is_empty() {
writeln!(
self.config.shell().err(),
"The following errors were reported:"
)?;
for error in errors {
write!(self.config.shell().err(), "{}", error)?;
if !error.ends_with('\n') {
writeln!(self.config.shell().err())?;
}
}
}
if let Some(exit) = abnormal_exit {
writeln!(
self.config.shell().err(),
"rustc exited abnormally: {}",
exit
)?;
}
writeln!(
self.config.shell().err(),
"Original diagnostics will follow.\n"
)?;
Ok(())
}
Message::EditionAlreadyEnabled { message, edition } => {
if !self.dedupe.insert(msg.clone()) {
return Ok(());
}
// Don't give a really verbose warning if it has already been issued.
if self.dedupe.insert(Message::EditionAlreadyEnabled {
message: "".to_string(), // Dummy, so that this only long-warns once.
edition: *edition,
}) {
self.config.shell().warn(&format!("\
{}
If you are trying to migrate from the previous edition ({prev_edition}), the
process requires following these steps:
1. Start with `edition = \"{prev_edition}\"` in `Cargo.toml`
2. Run `cargo fix --edition`
3. Modify `Cargo.toml` to set `edition = \"{this_edition}\"`
4. Run `cargo build` or `cargo test` to verify the fixes worked
More details may be found at
https://doc.rust-lang.org/edition-guide/editions/transitioning-an-existing-project-to-a-new-edition.html
",
message, this_edition=edition, prev_edition=edition.previous().unwrap()
))
} else {
self.config.shell().warn(message)
}
}
}
}
}
fn gen_please_report_this_bug_text(url: &str) -> String {
format!(
"This likely indicates a bug in either rustc or cargo itself,\n\
and we would appreciate a bug report! You're likely to see \n\
a number of compiler warnings after this message which cargo\n\
attempted to fix but failed. If you could open an issue at\n\
{}\n\
quoting the full output of this command we'd be very appreciative!\n\
Note that you may be able to make some more progress in the near-term\n\
fixing code with the `--broken-code` flag\n\n\
",
url
)
}
fn get_bug_report_url(rustc_workspace_wrapper: &Option<PathBuf>) -> &str {
let clippy = std::ffi::OsStr::new("clippy-driver");
let issue_link = match rustc_workspace_wrapper.as_ref().and_then(|x| x.file_stem()) {
Some(wrapper) if wrapper == clippy => "https://github.com/rust-lang/rust-clippy/issues",
_ => "https://github.com/rust-lang/rust/issues",
};
issue_link
}
#[derive(Debug)]
pub struct | {
listener: TcpListener,
addr: SocketAddr,
}
pub struct StartedServer {
addr: SocketAddr,
done: Arc<AtomicBool>,
thread: Option<JoinHandle<()>>,
}
impl RustfixDiagnosticServer {
pub fn new() -> Result<Self, Error> {
let listener = TcpListener::bind("127.0.0.1:0")
.with_context(|| "failed to bind TCP listener to manage locking")?;
let addr = listener.local_addr()?;
Ok(RustfixDiagnosticServer { listener, addr })
}
pub fn configure(&self, process: &mut ProcessBuilder) {
process.env(DIAGNOSTICS_SERVER_VAR, self.addr.to_string());
}
pub fn start<F>(self, on_message: F) -> Result<StartedServer, Error>
where
F: Fn(Message) + Send + 'static,
{
let addr = self.addr;
let done = Arc::new(AtomicBool::new(false));
let done2 = done.clone();
let thread = thread::spawn(move || {
self.run(&on_message, &done2);
});
Ok(StartedServer {
addr,
thread: Some(thread),
done,
})
}
fn run(self, on_message: &dyn Fn(Message), done: &AtomicBool) {
while let Ok((client, _)) = self.listener.accept() {
if done.load(Ordering::SeqCst) {
break;
}
let mut client = BufReader::new(client);
let mut s = String::new();
if let Err(e) = client.read_to_string(&mut s) {
warn!("diagnostic server failed to read: {}", e);
} else {
match serde_json::from_str(&s) {
Ok(message) => on_message(message),
Err(e) => warn!("invalid diagnostics message: {}", e),
}
}
// The client should be kept alive until after `on_message` is
// called to ensure that the client doesn't exit too soon (and
// Message::Finish getting posted before Message::FixDiagnostic).
drop(client);
}
}
}
impl Drop for StartedServer {
fn drop(&mut self) {
self.done.store(true, Ordering::SeqCst);
// Ignore errors here as this is largely best-effort
if TcpStream::connect(&self.addr).is_err() {
return;
}
drop(self.thread.take().unwrap().join());
}
}
| RustfixDiagnosticServer | identifier_name |
diagnostic_server.rs | //! A small TCP server to handle collection of diagnostics information in a
//! cross-platform way for the `cargo fix` command.
use std::collections::HashSet;
use std::io::{BufReader, Read, Write};
use std::net::{Shutdown, SocketAddr, TcpListener, TcpStream};
use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{self, JoinHandle};
use anyhow::{Context, Error};
use cargo_util::ProcessBuilder;
use serde::{Deserialize, Serialize};
use tracing::warn;
use crate::core::Edition;
use crate::util::errors::CargoResult;
use crate::util::Config;
const DIAGNOSTICS_SERVER_VAR: &str = "__CARGO_FIX_DIAGNOSTICS_SERVER";
#[derive(Deserialize, Serialize, Hash, Eq, PartialEq, Clone)]
pub enum Message {
Migrating {
file: String,
from_edition: Edition,
to_edition: Edition,
},
Fixing {
file: String,
},
Fixed {
file: String,
fixes: u32,
},
FixFailed {
files: Vec<String>,
krate: Option<String>,
errors: Vec<String>,
abnormal_exit: Option<String>,
},
ReplaceFailed {
file: String,
message: String,
},
EditionAlreadyEnabled {
message: String,
edition: Edition,
},
}
impl Message {
pub fn post(&self, config: &Config) -> Result<(), Error> {
let addr = config
.get_env(DIAGNOSTICS_SERVER_VAR)
.context("diagnostics collector misconfigured")?;
let mut client =
TcpStream::connect(&addr).context("failed to connect to parent diagnostics target")?;
let s = serde_json::to_string(self).context("failed to serialize message")?;
client
.write_all(s.as_bytes())
.context("failed to write message to diagnostics target")?;
client
.shutdown(Shutdown::Write)
.context("failed to shutdown")?;
client
.read_to_end(&mut Vec::new())
.context("failed to receive a disconnect")?;
Ok(())
}
}
/// A printer that will print diagnostics messages to the shell.
pub struct DiagnosticPrinter<'a> {
/// The config to get the shell to print to.
config: &'a Config,
/// An optional wrapper to be used in addition to `rustc.wrapper` for workspace crates.
/// This is used to get the correct bug report URL. For instance,
/// if `clippy-driver` is set as the value for the wrapper,
/// then the correct bug report URL for `clippy` can be obtained.
workspace_wrapper: &'a Option<PathBuf>,
// A set of messages that have already been printed.
dedupe: HashSet<Message>,
}
impl<'a> DiagnosticPrinter<'a> {
pub fn new(
config: &'a Config,
workspace_wrapper: &'a Option<PathBuf>,
) -> DiagnosticPrinter<'a> {
DiagnosticPrinter {
config,
workspace_wrapper,
dedupe: HashSet::new(),
}
}
pub fn print(&mut self, msg: &Message) -> CargoResult<()> {
match msg {
Message::Migrating {
file,
from_edition,
to_edition,
} => {
if !self.dedupe.insert(msg.clone()) {
return Ok(());
}
self.config.shell().status(
"Migrating",
&format!("{} from {} edition to {}", file, from_edition, to_edition),
)
}
Message::Fixing { file } => self
.config
.shell()
.verbose(|shell| shell.status("Fixing", file)),
Message::Fixed { file, fixes } => {
let msg = if *fixes == 1 { "fix" } else { "fixes" };
let msg = format!("{} ({} {})", file, fixes, msg);
self.config.shell().status("Fixed", msg)
}
Message::ReplaceFailed { file, message } => {
let msg = format!("error applying suggestions to `{}`\n", file);
self.config.shell().warn(&msg)?;
write!(
self.config.shell().err(),
"The full error message was:\n\n> {}\n\n",
message,
)?;
let issue_link = get_bug_report_url(self.workspace_wrapper);
write!(
self.config.shell().err(),
"{}",
gen_please_report_this_bug_text(issue_link)
)?;
Ok(())
}
Message::FixFailed {
files,
krate,
errors,
abnormal_exit,
} => {
if let Some(ref krate) = *krate {
self.config.shell().warn(&format!(
"failed to automatically apply fixes suggested by rustc \
to crate `{}`",
krate,
))?;
} else |
if !files.is_empty() {
writeln!(
self.config.shell().err(),
"\nafter fixes were automatically applied the compiler \
reported errors within these files:\n"
)?;
for file in files {
writeln!(self.config.shell().err(), " * {}", file)?;
}
writeln!(self.config.shell().err())?;
}
let issue_link = get_bug_report_url(self.workspace_wrapper);
write!(
self.config.shell().err(),
"{}",
gen_please_report_this_bug_text(issue_link)
)?;
if !errors.is_empty() {
writeln!(
self.config.shell().err(),
"The following errors were reported:"
)?;
for error in errors {
write!(self.config.shell().err(), "{}", error)?;
if !error.ends_with('\n') {
writeln!(self.config.shell().err())?;
}
}
}
if let Some(exit) = abnormal_exit {
writeln!(
self.config.shell().err(),
"rustc exited abnormally: {}",
exit
)?;
}
writeln!(
self.config.shell().err(),
"Original diagnostics will follow.\n"
)?;
Ok(())
}
Message::EditionAlreadyEnabled { message, edition } => {
if !self.dedupe.insert(msg.clone()) {
return Ok(());
}
// Don't give a really verbose warning if it has already been issued.
if self.dedupe.insert(Message::EditionAlreadyEnabled {
message: "".to_string(), // Dummy, so that this only long-warns once.
edition: *edition,
}) {
self.config.shell().warn(&format!("\
{}
If you are trying to migrate from the previous edition ({prev_edition}), the
process requires following these steps:
1. Start with `edition = \"{prev_edition}\"` in `Cargo.toml`
2. Run `cargo fix --edition`
3. Modify `Cargo.toml` to set `edition = \"{this_edition}\"`
4. Run `cargo build` or `cargo test` to verify the fixes worked
More details may be found at
https://doc.rust-lang.org/edition-guide/editions/transitioning-an-existing-project-to-a-new-edition.html
",
message, this_edition=edition, prev_edition=edition.previous().unwrap()
))
} else {
self.config.shell().warn(message)
}
}
}
}
}
fn gen_please_report_this_bug_text(url: &str) -> String {
format!(
"This likely indicates a bug in either rustc or cargo itself,\n\
and we would appreciate a bug report! You're likely to see \n\
a number of compiler warnings after this message which cargo\n\
attempted to fix but failed. If you could open an issue at\n\
{}\n\
quoting the full output of this command we'd be very appreciative!\n\
Note that you may be able to make some more progress in the near-term\n\
fixing code with the `--broken-code` flag\n\n\
",
url
)
}
fn get_bug_report_url(rustc_workspace_wrapper: &Option<PathBuf>) -> &str {
let clippy = std::ffi::OsStr::new("clippy-driver");
let issue_link = match rustc_workspace_wrapper.as_ref().and_then(|x| x.file_stem()) {
Some(wrapper) if wrapper == clippy => "https://github.com/rust-lang/rust-clippy/issues",
_ => "https://github.com/rust-lang/rust/issues",
};
issue_link
}
#[derive(Debug)]
pub struct RustfixDiagnosticServer {
listener: TcpListener,
addr: SocketAddr,
}
pub struct StartedServer {
addr: SocketAddr,
done: Arc<AtomicBool>,
thread: Option<JoinHandle<()>>,
}
impl RustfixDiagnosticServer {
pub fn new() -> Result<Self, Error> {
let listener = TcpListener::bind("127.0.0.1:0")
.with_context(|| "failed to bind TCP listener to manage locking")?;
let addr = listener.local_addr()?;
Ok(RustfixDiagnosticServer { listener, addr })
}
pub fn configure(&self, process: &mut ProcessBuilder) {
process.env(DIAGNOSTICS_SERVER_VAR, self.addr.to_string());
}
pub fn start<F>(self, on_message: F) -> Result<StartedServer, Error>
where
F: Fn(Message) + Send + 'static,
{
let addr = self.addr;
let done = Arc::new(AtomicBool::new(false));
let done2 = done.clone();
let thread = thread::spawn(move || {
self.run(&on_message, &done2);
});
Ok(StartedServer {
addr,
thread: Some(thread),
done,
})
}
fn run(self, on_message: &dyn Fn(Message), done: &AtomicBool) {
while let Ok((client, _)) = self.listener.accept() {
if done.load(Ordering::SeqCst) {
break;
}
let mut client = BufReader::new(client);
let mut s = String::new();
if let Err(e) = client.read_to_string(&mut s) {
warn!("diagnostic server failed to read: {}", e);
} else {
match serde_json::from_str(&s) {
Ok(message) => on_message(message),
Err(e) => warn!("invalid diagnostics message: {}", e),
}
}
// The client should be kept alive until after `on_message` is
// called to ensure that the client doesn't exit too soon (and
// Message::Finish getting posted before Message::FixDiagnostic).
drop(client);
}
}
}
impl Drop for StartedServer {
fn drop(&mut self) {
self.done.store(true, Ordering::SeqCst);
// Ignore errors here as this is largely best-effort
if TcpStream::connect(&self.addr).is_err() {
return;
}
drop(self.thread.take().unwrap().join());
}
}
| {
self.config
.shell()
.warn("failed to automatically apply fixes suggested by rustc")?;
} | conditional_block |
diagnostic_server.rs | //! A small TCP server to handle collection of diagnostics information in a
//! cross-platform way for the `cargo fix` command.
use std::collections::HashSet;
use std::io::{BufReader, Read, Write};
use std::net::{Shutdown, SocketAddr, TcpListener, TcpStream};
use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{self, JoinHandle};
use anyhow::{Context, Error};
use cargo_util::ProcessBuilder;
use serde::{Deserialize, Serialize};
use tracing::warn;
use crate::core::Edition;
use crate::util::errors::CargoResult;
use crate::util::Config;
const DIAGNOSTICS_SERVER_VAR: &str = "__CARGO_FIX_DIAGNOSTICS_SERVER";
#[derive(Deserialize, Serialize, Hash, Eq, PartialEq, Clone)]
pub enum Message {
Migrating {
file: String,
from_edition: Edition,
to_edition: Edition,
},
Fixing {
file: String,
},
Fixed {
file: String,
fixes: u32,
},
FixFailed {
files: Vec<String>,
krate: Option<String>,
errors: Vec<String>,
abnormal_exit: Option<String>,
},
ReplaceFailed {
file: String,
message: String,
},
EditionAlreadyEnabled {
message: String,
edition: Edition,
},
}
impl Message {
pub fn post(&self, config: &Config) -> Result<(), Error> {
let addr = config
.get_env(DIAGNOSTICS_SERVER_VAR)
.context("diagnostics collector misconfigured")?;
let mut client =
TcpStream::connect(&addr).context("failed to connect to parent diagnostics target")?;
let s = serde_json::to_string(self).context("failed to serialize message")?;
client
.write_all(s.as_bytes())
.context("failed to write message to diagnostics target")?;
client
.shutdown(Shutdown::Write)
.context("failed to shutdown")?;
client
.read_to_end(&mut Vec::new())
.context("failed to receive a disconnect")?;
Ok(())
}
}
/// A printer that will print diagnostics messages to the shell.
pub struct DiagnosticPrinter<'a> {
/// The config to get the shell to print to.
config: &'a Config,
/// An optional wrapper to be used in addition to `rustc.wrapper` for workspace crates.
/// This is used to get the correct bug report URL. For instance,
/// if `clippy-driver` is set as the value for the wrapper,
/// then the correct bug report URL for `clippy` can be obtained.
workspace_wrapper: &'a Option<PathBuf>,
// A set of messages that have already been printed.
dedupe: HashSet<Message>,
}
impl<'a> DiagnosticPrinter<'a> {
pub fn new(
config: &'a Config,
workspace_wrapper: &'a Option<PathBuf>,
) -> DiagnosticPrinter<'a> {
DiagnosticPrinter {
config,
workspace_wrapper,
dedupe: HashSet::new(),
}
}
pub fn print(&mut self, msg: &Message) -> CargoResult<()> {
match msg {
Message::Migrating {
file,
from_edition,
to_edition,
} => {
if !self.dedupe.insert(msg.clone()) {
return Ok(());
}
self.config.shell().status(
"Migrating",
&format!("{} from {} edition to {}", file, from_edition, to_edition),
)
}
Message::Fixing { file } => self
.config
.shell()
.verbose(|shell| shell.status("Fixing", file)),
Message::Fixed { file, fixes } => {
let msg = if *fixes == 1 { "fix" } else { "fixes" };
let msg = format!("{} ({} {})", file, fixes, msg);
self.config.shell().status("Fixed", msg)
}
Message::ReplaceFailed { file, message } => {
let msg = format!("error applying suggestions to `{}`\n", file);
self.config.shell().warn(&msg)?;
write!(
self.config.shell().err(),
"The full error message was:\n\n> {}\n\n",
message,
)?;
let issue_link = get_bug_report_url(self.workspace_wrapper);
write!(
self.config.shell().err(),
"{}",
gen_please_report_this_bug_text(issue_link)
)?;
Ok(())
}
Message::FixFailed {
files,
krate,
errors,
abnormal_exit,
} => {
if let Some(ref krate) = *krate {
self.config.shell().warn(&format!(
"failed to automatically apply fixes suggested by rustc \
to crate `{}`",
krate,
))?;
} else {
self.config
.shell()
.warn("failed to automatically apply fixes suggested by rustc")?;
}
if !files.is_empty() {
writeln!(
self.config.shell().err(),
"\nafter fixes were automatically applied the compiler \
reported errors within these files:\n"
)?;
for file in files {
writeln!(self.config.shell().err(), " * {}", file)?;
}
writeln!(self.config.shell().err())?;
}
let issue_link = get_bug_report_url(self.workspace_wrapper);
write!(
self.config.shell().err(),
"{}",
gen_please_report_this_bug_text(issue_link)
)?;
if !errors.is_empty() {
writeln!(
self.config.shell().err(),
"The following errors were reported:"
)?;
for error in errors {
write!(self.config.shell().err(), "{}", error)?;
if !error.ends_with('\n') {
writeln!(self.config.shell().err())?;
}
}
}
if let Some(exit) = abnormal_exit {
writeln!(
self.config.shell().err(),
"rustc exited abnormally: {}",
exit
)?;
}
writeln!(
self.config.shell().err(),
"Original diagnostics will follow.\n"
)?;
Ok(())
}
Message::EditionAlreadyEnabled { message, edition } => {
if !self.dedupe.insert(msg.clone()) {
return Ok(());
}
// Don't give a really verbose warning if it has already been issued.
if self.dedupe.insert(Message::EditionAlreadyEnabled { | self.config.shell().warn(&format!("\
{}
If you are trying to migrate from the previous edition ({prev_edition}), the
process requires following these steps:
1. Start with `edition = \"{prev_edition}\"` in `Cargo.toml`
2. Run `cargo fix --edition`
3. Modify `Cargo.toml` to set `edition = \"{this_edition}\"`
4. Run `cargo build` or `cargo test` to verify the fixes worked
More details may be found at
https://doc.rust-lang.org/edition-guide/editions/transitioning-an-existing-project-to-a-new-edition.html
",
message, this_edition=edition, prev_edition=edition.previous().unwrap()
))
} else {
self.config.shell().warn(message)
}
}
}
}
}
fn gen_please_report_this_bug_text(url: &str) -> String {
format!(
"This likely indicates a bug in either rustc or cargo itself,\n\
and we would appreciate a bug report! You're likely to see \n\
a number of compiler warnings after this message which cargo\n\
attempted to fix but failed. If you could open an issue at\n\
{}\n\
quoting the full output of this command we'd be very appreciative!\n\
Note that you may be able to make some more progress in the near-term\n\
fixing code with the `--broken-code` flag\n\n\
",
url
)
}
fn get_bug_report_url(rustc_workspace_wrapper: &Option<PathBuf>) -> &str {
let clippy = std::ffi::OsStr::new("clippy-driver");
let issue_link = match rustc_workspace_wrapper.as_ref().and_then(|x| x.file_stem()) {
Some(wrapper) if wrapper == clippy => "https://github.com/rust-lang/rust-clippy/issues",
_ => "https://github.com/rust-lang/rust/issues",
};
issue_link
}
#[derive(Debug)]
pub struct RustfixDiagnosticServer {
listener: TcpListener,
addr: SocketAddr,
}
pub struct StartedServer {
addr: SocketAddr,
done: Arc<AtomicBool>,
thread: Option<JoinHandle<()>>,
}
impl RustfixDiagnosticServer {
pub fn new() -> Result<Self, Error> {
let listener = TcpListener::bind("127.0.0.1:0")
.with_context(|| "failed to bind TCP listener to manage locking")?;
let addr = listener.local_addr()?;
Ok(RustfixDiagnosticServer { listener, addr })
}
pub fn configure(&self, process: &mut ProcessBuilder) {
process.env(DIAGNOSTICS_SERVER_VAR, self.addr.to_string());
}
pub fn start<F>(self, on_message: F) -> Result<StartedServer, Error>
where
F: Fn(Message) + Send + 'static,
{
let addr = self.addr;
let done = Arc::new(AtomicBool::new(false));
let done2 = done.clone();
let thread = thread::spawn(move || {
self.run(&on_message, &done2);
});
Ok(StartedServer {
addr,
thread: Some(thread),
done,
})
}
fn run(self, on_message: &dyn Fn(Message), done: &AtomicBool) {
while let Ok((client, _)) = self.listener.accept() {
if done.load(Ordering::SeqCst) {
break;
}
let mut client = BufReader::new(client);
let mut s = String::new();
if let Err(e) = client.read_to_string(&mut s) {
warn!("diagnostic server failed to read: {}", e);
} else {
match serde_json::from_str(&s) {
Ok(message) => on_message(message),
Err(e) => warn!("invalid diagnostics message: {}", e),
}
}
// The client should be kept alive until after `on_message` is
// called to ensure that the client doesn't exit too soon (and
// Message::Finish getting posted before Message::FixDiagnostic).
drop(client);
}
}
}
impl Drop for StartedServer {
fn drop(&mut self) {
self.done.store(true, Ordering::SeqCst);
// Ignore errors here as this is largely best-effort
if TcpStream::connect(&self.addr).is_err() {
return;
}
drop(self.thread.take().unwrap().join());
}
} | message: "".to_string(), // Dummy, so that this only long-warns once.
edition: *edition,
}) { | random_line_split |
getast.go | package parse
import (
"errors"
"fmt"
"github.com/philhofer/msgp/gen"
"github.com/ttacon/chalk"
"go/ast"
"go/parser"
"go/token"
"os"
"reflect"
"strings"
)
type Identity uint8
const (
IDENT Identity = iota
Struct
Builtin
Map
Unsupported
)
var (
// this records a set of all the
// identifiers in the file that are
// not go builtins. identities not
// in this set after the first pass
// of processing are "unknown" identifiers.
globalIdents map[string]gen.Base
// this records the set of all
// processed types (types for which we created code)
globalProcessed map[string]struct{}
)
func init() {
globalIdents = make(map[string]gen.Base)
globalProcessed = make(map[string]struct{})
}
// GetAST simply creates the ast out of a filename and filters
// out non-exported elements.
func GetAST(filename string) (files []*ast.File, pkgName string, err error) {
var (
f *ast.File
fInfo os.FileInfo
)
fset := token.NewFileSet()
fInfo, err = os.Stat(filename)
if err != nil {
return
}
if fInfo.IsDir() {
var pkgs map[string]*ast.Package
pkgs, err = parser.ParseDir(fset, filename, nil, parser.AllErrors)
if err != nil {
return
}
// we'll assume one package per dir
var pkg *ast.Package
for _, pkg = range pkgs {
pkgName = pkg.Name
}
files = make([]*ast.File, len(pkg.Files))
var i = 0
for _, file := range pkg.Files {
files[i] = file
i++
}
return
}
f, err = parser.ParseFile(fset, filename, nil, parser.AllErrors)
if err != nil {
return
}
if !ast.FileExports(f) {
f, err = nil, errors.New("no exports in file")
}
files = []*ast.File{f}
if f != nil {
pkgName = f.Name.Name
}
return
}
// GetElems gets the generator elements out of a file (may be nil)
func GetElems(filename string) ([]gen.Elem, string, error) {
f, pkg, err := GetAST(filename)
if err != nil {
return nil, "", err
}
var specs []*ast.TypeSpec
for _, file := range f {
specs = append(specs, GetTypeSpecs(file)...)
}
if specs == nil {
return nil, "", nil
}
var out []gen.Elem
for i := range specs {
el := GenElem(specs[i])
if el != nil {
out = append(out, el)
}
}
var ptd bool
for _, o := range out {
unr := findUnresolved(o)
if unr != nil {
if !ptd {
fmt.Println(chalk.Yellow.Color("Non-local or unresolved identifiers:"))
ptd = true
}
for _, u := range unr {
fmt.Printf(chalk.Yellow.Color(" -> %q\n"), u)
}
}
}
return out, pkg, nil
}
// should return a list of *ast.TypeSpec we are interested in
func GetTypeSpecs(f *ast.File) []*ast.TypeSpec {
var out []*ast.TypeSpec
// check all declarations...
for i := range f.Decls {
// for GenDecls...
if g, ok := f.Decls[i].(*ast.GenDecl); ok {
// and check the specs...
for _, s := range g.Specs {
// for ast.TypeSpecs....
if ts, ok := s.(*ast.TypeSpec); ok {
out = append(out, ts)
// record identifier
switch ts.Type.(type) {
case *ast.StructType:
globalIdents[ts.Name.Name] = gen.IDENT
case *ast.Ident:
// we will resolve this later
globalIdents[ts.Name.Name] = pullIdent(ts.Type.(*ast.Ident).Name)
case *ast.ArrayType:
a := ts.Type.(*ast.ArrayType)
switch a.Elt.(type) {
case *ast.Ident:
if a.Elt.(*ast.Ident).Name == "byte" {
globalIdents[ts.Name.Name] = gen.Bytes
} else {
globalIdents[ts.Name.Name] = gen.IDENT
}
default:
globalIdents[ts.Name.Name] = gen.IDENT
}
case *ast.StarExpr:
globalIdents[ts.Name.Name] = gen.IDENT
case *ast.MapType:
globalIdents[ts.Name.Name] = gen.IDENT
}
}
}
}
}
return out
}
// GenElem creates the gen.Elem out of an
// ast.TypeSpec. Right now the only supported
// TypeSpec.Type is *ast.StructType
func GenElem(in *ast.TypeSpec) gen.Elem {
// handle supported types
switch in.Type.(type) {
case *ast.StructType:
v := in.Type.(*ast.StructType)
fmt.Printf(chalk.Green.Color("parsing %s..."), in.Name.Name)
p := &gen.Ptr{
Value: &gen.Struct{
Name: in.Name.Name, // ast.Ident
Fields: parseFieldList(v.Fields),
},
}
// mark type as processed
globalProcessed[in.Name.Name] = struct{}{}
if len(p.Value.(*gen.Struct).Fields) == 0 {
fmt.Printf(chalk.Red.Color(" has no exported fields \u2717\n")) // X
return nil
}
fmt.Print(chalk.Green.Color(" \u2713\n")) // check
return p
default:
return nil
}
}
func parseFieldList(fl *ast.FieldList) []gen.StructField {
if fl == nil || fl.NumFields() == 0 {
return nil
}
out := make([]gen.StructField, 0, fl.NumFields())
for_fields:
for _, field := range fl.List {
var sf gen.StructField
// field name
switch len(field.Names) {
case 1:
sf.FieldName = field.Names[0].Name
case 0:
sf.FieldName = embedded(field.Type)
if sf.FieldName == "" {
// means it's a selector expr., or
// something else unsupported
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %v unsupported)"), field.Type)
continue for_fields
}
default:
// inline multiple field declaration
for _, nm := range field.Names {
el := parseExpr(field.Type)
if el == nil {
// skip
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q unsupported)"), sf.FieldName)
continue for_fields
}
out = append(out, gen.StructField{
FieldTag: nm.Name,
FieldName: nm.Name,
FieldElem: el,
})
}
continue for_fields
}
// field tag
var flagExtension bool
if field.Tag != nil {
// we need to trim the leading and trailing ` characters for
// to convert to reflect.StructTag
body := reflect.StructTag(strings.Trim(field.Tag.Value, "`")).Get("msg")
// check for a tag like `msg:"name,extension"`
tags := strings.Split(body, ",")
if len(tags) > 1 && tags[1] == "extension" {
flagExtension = true
}
sf.FieldTag = tags[0]
}
if sf.FieldTag == "" {
sf.FieldTag = sf.FieldName
} else if sf.FieldTag == "-" {
// deliberately ignore field
continue for_fields
}
e := parseExpr(field.Type)
if e == nil {
// unsupported type
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q unsupported)"), sf.FieldName)
continue
}
// mark as extension
if flagExtension {
// an extension can be
// a pointer or base type
switch e.Type() {
case gen.PtrType:
if e.Ptr().Value.Type() == gen.BaseType {
e.Ptr().Value.Base().Value = gen.Ext
} else {
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q couldn't be cast as an extension"), sf.FieldName)
continue
}
case gen.BaseType:
e.Base().Value = gen.Ext
default:
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q couldn't be cast as an extension"), sf.FieldName)
continue
}
}
sf.FieldElem = e
out = append(out, sf)
}
return out
}
// extract embedded field name
func embedded(f ast.Expr) string {
switch f.(type) {
case *ast.Ident:
return f.(*ast.Ident).Name
case *ast.StarExpr:
return embedded(f.(*ast.StarExpr).X)
default:
// other possibilities (like selector expressions)
// are disallowed; we can't reasonably know
// their type
return ""
}
}
// go from ast.Expr to gen.Elem; nil means type not supported
func parseExpr(e ast.Expr) gen.Elem {
switch e.(type) {
case *ast.MapType:
switch e.(*ast.MapType).Key.(type) {
case *ast.Ident:
switch e.(*ast.MapType).Key.(*ast.Ident).Name {
case "string":
inner := parseExpr(e.(*ast.MapType).Value)
if inner == nil {
return nil
}
return &gen.Map{
Value: inner,
}
default:
return nil
}
default:
// we don't support non-string map keys
return nil
}
case *ast.Ident:
b := &gen.BaseElem{
Value: pullIdent(e.(*ast.Ident).Name),
}
if b.Value == gen.IDENT {
b.Ident = (e.(*ast.Ident).Name)
}
return b
case *ast.ArrayType:
arr := e.(*ast.ArrayType)
// array and not a slice
if arr.Len != nil {
switch arr.Len.(type) {
case *ast.BasicLit:
return &gen.Array{
Size: arr.Len.(*ast.BasicLit).Value,
Els: parseExpr(arr.Elt),
}
case *ast.Ident:
return &gen.Array{
Size: arr.Len.(*ast.Ident).String(),
Els: parseExpr(arr.Elt),
}
default:
return nil
}
}
// special case for []byte; others go to gen.Slice
switch arr.Elt.(type) {
case *ast.Ident:
i := arr.Elt.(*ast.Ident)
if i.Name == "byte" {
return &gen.BaseElem{
Value: gen.Bytes,
}
} else {
e := parseExpr(arr.Elt)
if e == nil {
return nil
}
return &gen.Slice{
Els: e,
}
}
default:
e := parseExpr(arr.Elt)
if e == nil {
return nil
}
return &gen.Slice{
Els: e,
}
}
case *ast.StarExpr:
v := parseExpr(e.(*ast.StarExpr).X)
if v == nil {
return nil
}
return &gen.Ptr{
Value: v,
}
case *ast.StructType:
return &gen.Struct{
Fields: parseFieldList(e.(*ast.StructType).Fields),
}
case *ast.SelectorExpr:
v := e.(*ast.SelectorExpr)
if im, ok := v.X.(*ast.Ident); ok {
if v.Sel.Name == "Time" && im.Name == "time" {
return &gen.BaseElem{
Value: gen.Time,
}
} else {
return &gen.BaseElem{
Value: gen.IDENT,
Ident: im.Name + "." + v.Sel.Name,
}
}
} | return nil
case *ast.InterfaceType:
// support `interface{}`
if len(e.(*ast.InterfaceType).Methods.List) == 0 {
return &gen.BaseElem{
Value: gen.Intf,
}
}
return nil
default: // other types not supported
return nil
}
}
func pullIdent(name string) gen.Base {
switch name {
case "string":
return gen.String
case "byte":
return gen.Byte
case "int":
return gen.Int
case "int8":
return gen.Int8
case "int16":
return gen.Int16
case "int32":
return gen.Int32
case "int64":
return gen.Int64
case "uint":
return gen.Uint
case "uint8":
return gen.Uint8
case "uint16":
return gen.Uint16
case "uint32":
return gen.Uint32
case "uint64":
return gen.Uint64
case "bool":
return gen.Bool
case "float64":
return gen.Float64
case "float32":
return gen.Float32
case "complex64":
return gen.Complex64
case "complex128":
return gen.Complex128
case "time.Time":
return gen.Time
case "interface{}":
return gen.Intf
default:
// unrecognized identity
return gen.IDENT
}
} | random_line_split | |
getast.go | package parse
import (
"errors"
"fmt"
"github.com/philhofer/msgp/gen"
"github.com/ttacon/chalk"
"go/ast"
"go/parser"
"go/token"
"os"
"reflect"
"strings"
)
type Identity uint8
const (
IDENT Identity = iota
Struct
Builtin
Map
Unsupported
)
var (
// this records a set of all the
// identifiers in the file that are
// not go builtins. identities not
// in this set after the first pass
// of processing are "unknown" identifiers.
globalIdents map[string]gen.Base
// this records the set of all
// processed types (types for which we created code)
globalProcessed map[string]struct{}
)
func init() {
globalIdents = make(map[string]gen.Base)
globalProcessed = make(map[string]struct{})
}
// GetAST simply creates the ast out of a filename and filters
// out non-exported elements.
func | (filename string) (files []*ast.File, pkgName string, err error) {
var (
f *ast.File
fInfo os.FileInfo
)
fset := token.NewFileSet()
fInfo, err = os.Stat(filename)
if err != nil {
return
}
if fInfo.IsDir() {
var pkgs map[string]*ast.Package
pkgs, err = parser.ParseDir(fset, filename, nil, parser.AllErrors)
if err != nil {
return
}
// we'll assume one package per dir
var pkg *ast.Package
for _, pkg = range pkgs {
pkgName = pkg.Name
}
files = make([]*ast.File, len(pkg.Files))
var i = 0
for _, file := range pkg.Files {
files[i] = file
i++
}
return
}
f, err = parser.ParseFile(fset, filename, nil, parser.AllErrors)
if err != nil {
return
}
if !ast.FileExports(f) {
f, err = nil, errors.New("no exports in file")
}
files = []*ast.File{f}
if f != nil {
pkgName = f.Name.Name
}
return
}
// GetElems gets the generator elements out of a file (may be nil)
func GetElems(filename string) ([]gen.Elem, string, error) {
f, pkg, err := GetAST(filename)
if err != nil {
return nil, "", err
}
var specs []*ast.TypeSpec
for _, file := range f {
specs = append(specs, GetTypeSpecs(file)...)
}
if specs == nil {
return nil, "", nil
}
var out []gen.Elem
for i := range specs {
el := GenElem(specs[i])
if el != nil {
out = append(out, el)
}
}
var ptd bool
for _, o := range out {
unr := findUnresolved(o)
if unr != nil {
if !ptd {
fmt.Println(chalk.Yellow.Color("Non-local or unresolved identifiers:"))
ptd = true
}
for _, u := range unr {
fmt.Printf(chalk.Yellow.Color(" -> %q\n"), u)
}
}
}
return out, pkg, nil
}
// should return a list of *ast.TypeSpec we are interested in
func GetTypeSpecs(f *ast.File) []*ast.TypeSpec {
var out []*ast.TypeSpec
// check all declarations...
for i := range f.Decls {
// for GenDecls...
if g, ok := f.Decls[i].(*ast.GenDecl); ok {
// and check the specs...
for _, s := range g.Specs {
// for ast.TypeSpecs....
if ts, ok := s.(*ast.TypeSpec); ok {
out = append(out, ts)
// record identifier
switch ts.Type.(type) {
case *ast.StructType:
globalIdents[ts.Name.Name] = gen.IDENT
case *ast.Ident:
// we will resolve this later
globalIdents[ts.Name.Name] = pullIdent(ts.Type.(*ast.Ident).Name)
case *ast.ArrayType:
a := ts.Type.(*ast.ArrayType)
switch a.Elt.(type) {
case *ast.Ident:
if a.Elt.(*ast.Ident).Name == "byte" {
globalIdents[ts.Name.Name] = gen.Bytes
} else {
globalIdents[ts.Name.Name] = gen.IDENT
}
default:
globalIdents[ts.Name.Name] = gen.IDENT
}
case *ast.StarExpr:
globalIdents[ts.Name.Name] = gen.IDENT
case *ast.MapType:
globalIdents[ts.Name.Name] = gen.IDENT
}
}
}
}
}
return out
}
// GenElem creates the gen.Elem out of an
// ast.TypeSpec. Right now the only supported
// TypeSpec.Type is *ast.StructType
func GenElem(in *ast.TypeSpec) gen.Elem {
// handle supported types
switch in.Type.(type) {
case *ast.StructType:
v := in.Type.(*ast.StructType)
fmt.Printf(chalk.Green.Color("parsing %s..."), in.Name.Name)
p := &gen.Ptr{
Value: &gen.Struct{
Name: in.Name.Name, // ast.Ident
Fields: parseFieldList(v.Fields),
},
}
// mark type as processed
globalProcessed[in.Name.Name] = struct{}{}
if len(p.Value.(*gen.Struct).Fields) == 0 {
fmt.Printf(chalk.Red.Color(" has no exported fields \u2717\n")) // X
return nil
}
fmt.Print(chalk.Green.Color(" \u2713\n")) // check
return p
default:
return nil
}
}
func parseFieldList(fl *ast.FieldList) []gen.StructField {
if fl == nil || fl.NumFields() == 0 {
return nil
}
out := make([]gen.StructField, 0, fl.NumFields())
for_fields:
for _, field := range fl.List {
var sf gen.StructField
// field name
switch len(field.Names) {
case 1:
sf.FieldName = field.Names[0].Name
case 0:
sf.FieldName = embedded(field.Type)
if sf.FieldName == "" {
// means it's a selector expr., or
// something else unsupported
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %v unsupported)"), field.Type)
continue for_fields
}
default:
// inline multiple field declaration
for _, nm := range field.Names {
el := parseExpr(field.Type)
if el == nil {
// skip
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q unsupported)"), sf.FieldName)
continue for_fields
}
out = append(out, gen.StructField{
FieldTag: nm.Name,
FieldName: nm.Name,
FieldElem: el,
})
}
continue for_fields
}
// field tag
var flagExtension bool
if field.Tag != nil {
// we need to trim the leading and trailing ` characters for
// to convert to reflect.StructTag
body := reflect.StructTag(strings.Trim(field.Tag.Value, "`")).Get("msg")
// check for a tag like `msg:"name,extension"`
tags := strings.Split(body, ",")
if len(tags) > 1 && tags[1] == "extension" {
flagExtension = true
}
sf.FieldTag = tags[0]
}
if sf.FieldTag == "" {
sf.FieldTag = sf.FieldName
} else if sf.FieldTag == "-" {
// deliberately ignore field
continue for_fields
}
e := parseExpr(field.Type)
if e == nil {
// unsupported type
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q unsupported)"), sf.FieldName)
continue
}
// mark as extension
if flagExtension {
// an extension can be
// a pointer or base type
switch e.Type() {
case gen.PtrType:
if e.Ptr().Value.Type() == gen.BaseType {
e.Ptr().Value.Base().Value = gen.Ext
} else {
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q couldn't be cast as an extension"), sf.FieldName)
continue
}
case gen.BaseType:
e.Base().Value = gen.Ext
default:
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q couldn't be cast as an extension"), sf.FieldName)
continue
}
}
sf.FieldElem = e
out = append(out, sf)
}
return out
}
// extract embedded field name
func embedded(f ast.Expr) string {
switch f.(type) {
case *ast.Ident:
return f.(*ast.Ident).Name
case *ast.StarExpr:
return embedded(f.(*ast.StarExpr).X)
default:
// other possibilities (like selector expressions)
// are disallowed; we can't reasonably know
// their type
return ""
}
}
// go from ast.Expr to gen.Elem; nil means type not supported
func parseExpr(e ast.Expr) gen.Elem {
switch e.(type) {
case *ast.MapType:
switch e.(*ast.MapType).Key.(type) {
case *ast.Ident:
switch e.(*ast.MapType).Key.(*ast.Ident).Name {
case "string":
inner := parseExpr(e.(*ast.MapType).Value)
if inner == nil {
return nil
}
return &gen.Map{
Value: inner,
}
default:
return nil
}
default:
// we don't support non-string map keys
return nil
}
case *ast.Ident:
b := &gen.BaseElem{
Value: pullIdent(e.(*ast.Ident).Name),
}
if b.Value == gen.IDENT {
b.Ident = (e.(*ast.Ident).Name)
}
return b
case *ast.ArrayType:
arr := e.(*ast.ArrayType)
// array and not a slice
if arr.Len != nil {
switch arr.Len.(type) {
case *ast.BasicLit:
return &gen.Array{
Size: arr.Len.(*ast.BasicLit).Value,
Els: parseExpr(arr.Elt),
}
case *ast.Ident:
return &gen.Array{
Size: arr.Len.(*ast.Ident).String(),
Els: parseExpr(arr.Elt),
}
default:
return nil
}
}
// special case for []byte; others go to gen.Slice
switch arr.Elt.(type) {
case *ast.Ident:
i := arr.Elt.(*ast.Ident)
if i.Name == "byte" {
return &gen.BaseElem{
Value: gen.Bytes,
}
} else {
e := parseExpr(arr.Elt)
if e == nil {
return nil
}
return &gen.Slice{
Els: e,
}
}
default:
e := parseExpr(arr.Elt)
if e == nil {
return nil
}
return &gen.Slice{
Els: e,
}
}
case *ast.StarExpr:
v := parseExpr(e.(*ast.StarExpr).X)
if v == nil {
return nil
}
return &gen.Ptr{
Value: v,
}
case *ast.StructType:
return &gen.Struct{
Fields: parseFieldList(e.(*ast.StructType).Fields),
}
case *ast.SelectorExpr:
v := e.(*ast.SelectorExpr)
if im, ok := v.X.(*ast.Ident); ok {
if v.Sel.Name == "Time" && im.Name == "time" {
return &gen.BaseElem{
Value: gen.Time,
}
} else {
return &gen.BaseElem{
Value: gen.IDENT,
Ident: im.Name + "." + v.Sel.Name,
}
}
}
return nil
case *ast.InterfaceType:
// support `interface{}`
if len(e.(*ast.InterfaceType).Methods.List) == 0 {
return &gen.BaseElem{
Value: gen.Intf,
}
}
return nil
default: // other types not supported
return nil
}
}
func pullIdent(name string) gen.Base {
switch name {
case "string":
return gen.String
case "byte":
return gen.Byte
case "int":
return gen.Int
case "int8":
return gen.Int8
case "int16":
return gen.Int16
case "int32":
return gen.Int32
case "int64":
return gen.Int64
case "uint":
return gen.Uint
case "uint8":
return gen.Uint8
case "uint16":
return gen.Uint16
case "uint32":
return gen.Uint32
case "uint64":
return gen.Uint64
case "bool":
return gen.Bool
case "float64":
return gen.Float64
case "float32":
return gen.Float32
case "complex64":
return gen.Complex64
case "complex128":
return gen.Complex128
case "time.Time":
return gen.Time
case "interface{}":
return gen.Intf
default:
// unrecognized identity
return gen.IDENT
}
}
| GetAST | identifier_name |
getast.go | package parse
import (
"errors"
"fmt"
"github.com/philhofer/msgp/gen"
"github.com/ttacon/chalk"
"go/ast"
"go/parser"
"go/token"
"os"
"reflect"
"strings"
)
type Identity uint8
const (
IDENT Identity = iota
Struct
Builtin
Map
Unsupported
)
var (
// this records a set of all the
// identifiers in the file that are
// not go builtins. identities not
// in this set after the first pass
// of processing are "unknown" identifiers.
globalIdents map[string]gen.Base
// this records the set of all
// processed types (types for which we created code)
globalProcessed map[string]struct{}
)
func init() |
// GetAST simply creates the ast out of a filename and filters
// out non-exported elements.
func GetAST(filename string) (files []*ast.File, pkgName string, err error) {
var (
f *ast.File
fInfo os.FileInfo
)
fset := token.NewFileSet()
fInfo, err = os.Stat(filename)
if err != nil {
return
}
if fInfo.IsDir() {
var pkgs map[string]*ast.Package
pkgs, err = parser.ParseDir(fset, filename, nil, parser.AllErrors)
if err != nil {
return
}
// we'll assume one package per dir
var pkg *ast.Package
for _, pkg = range pkgs {
pkgName = pkg.Name
}
files = make([]*ast.File, len(pkg.Files))
var i = 0
for _, file := range pkg.Files {
files[i] = file
i++
}
return
}
f, err = parser.ParseFile(fset, filename, nil, parser.AllErrors)
if err != nil {
return
}
if !ast.FileExports(f) {
f, err = nil, errors.New("no exports in file")
}
files = []*ast.File{f}
if f != nil {
pkgName = f.Name.Name
}
return
}
// GetElems gets the generator elements out of a file (may be nil)
func GetElems(filename string) ([]gen.Elem, string, error) {
f, pkg, err := GetAST(filename)
if err != nil {
return nil, "", err
}
var specs []*ast.TypeSpec
for _, file := range f {
specs = append(specs, GetTypeSpecs(file)...)
}
if specs == nil {
return nil, "", nil
}
var out []gen.Elem
for i := range specs {
el := GenElem(specs[i])
if el != nil {
out = append(out, el)
}
}
var ptd bool
for _, o := range out {
unr := findUnresolved(o)
if unr != nil {
if !ptd {
fmt.Println(chalk.Yellow.Color("Non-local or unresolved identifiers:"))
ptd = true
}
for _, u := range unr {
fmt.Printf(chalk.Yellow.Color(" -> %q\n"), u)
}
}
}
return out, pkg, nil
}
// should return a list of *ast.TypeSpec we are interested in
func GetTypeSpecs(f *ast.File) []*ast.TypeSpec {
var out []*ast.TypeSpec
// check all declarations...
for i := range f.Decls {
// for GenDecls...
if g, ok := f.Decls[i].(*ast.GenDecl); ok {
// and check the specs...
for _, s := range g.Specs {
// for ast.TypeSpecs....
if ts, ok := s.(*ast.TypeSpec); ok {
out = append(out, ts)
// record identifier
switch ts.Type.(type) {
case *ast.StructType:
globalIdents[ts.Name.Name] = gen.IDENT
case *ast.Ident:
// we will resolve this later
globalIdents[ts.Name.Name] = pullIdent(ts.Type.(*ast.Ident).Name)
case *ast.ArrayType:
a := ts.Type.(*ast.ArrayType)
switch a.Elt.(type) {
case *ast.Ident:
if a.Elt.(*ast.Ident).Name == "byte" {
globalIdents[ts.Name.Name] = gen.Bytes
} else {
globalIdents[ts.Name.Name] = gen.IDENT
}
default:
globalIdents[ts.Name.Name] = gen.IDENT
}
case *ast.StarExpr:
globalIdents[ts.Name.Name] = gen.IDENT
case *ast.MapType:
globalIdents[ts.Name.Name] = gen.IDENT
}
}
}
}
}
return out
}
// GenElem creates the gen.Elem out of an
// ast.TypeSpec. Right now the only supported
// TypeSpec.Type is *ast.StructType
func GenElem(in *ast.TypeSpec) gen.Elem {
// handle supported types
switch in.Type.(type) {
case *ast.StructType:
v := in.Type.(*ast.StructType)
fmt.Printf(chalk.Green.Color("parsing %s..."), in.Name.Name)
p := &gen.Ptr{
Value: &gen.Struct{
Name: in.Name.Name, // ast.Ident
Fields: parseFieldList(v.Fields),
},
}
// mark type as processed
globalProcessed[in.Name.Name] = struct{}{}
if len(p.Value.(*gen.Struct).Fields) == 0 {
fmt.Printf(chalk.Red.Color(" has no exported fields \u2717\n")) // X
return nil
}
fmt.Print(chalk.Green.Color(" \u2713\n")) // check
return p
default:
return nil
}
}
func parseFieldList(fl *ast.FieldList) []gen.StructField {
if fl == nil || fl.NumFields() == 0 {
return nil
}
out := make([]gen.StructField, 0, fl.NumFields())
for_fields:
for _, field := range fl.List {
var sf gen.StructField
// field name
switch len(field.Names) {
case 1:
sf.FieldName = field.Names[0].Name
case 0:
sf.FieldName = embedded(field.Type)
if sf.FieldName == "" {
// means it's a selector expr., or
// something else unsupported
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %v unsupported)"), field.Type)
continue for_fields
}
default:
// inline multiple field declaration
for _, nm := range field.Names {
el := parseExpr(field.Type)
if el == nil {
// skip
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q unsupported)"), sf.FieldName)
continue for_fields
}
out = append(out, gen.StructField{
FieldTag: nm.Name,
FieldName: nm.Name,
FieldElem: el,
})
}
continue for_fields
}
// field tag
var flagExtension bool
if field.Tag != nil {
// we need to trim the leading and trailing ` characters for
// to convert to reflect.StructTag
body := reflect.StructTag(strings.Trim(field.Tag.Value, "`")).Get("msg")
// check for a tag like `msg:"name,extension"`
tags := strings.Split(body, ",")
if len(tags) > 1 && tags[1] == "extension" {
flagExtension = true
}
sf.FieldTag = tags[0]
}
if sf.FieldTag == "" {
sf.FieldTag = sf.FieldName
} else if sf.FieldTag == "-" {
// deliberately ignore field
continue for_fields
}
e := parseExpr(field.Type)
if e == nil {
// unsupported type
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q unsupported)"), sf.FieldName)
continue
}
// mark as extension
if flagExtension {
// an extension can be
// a pointer or base type
switch e.Type() {
case gen.PtrType:
if e.Ptr().Value.Type() == gen.BaseType {
e.Ptr().Value.Base().Value = gen.Ext
} else {
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q couldn't be cast as an extension"), sf.FieldName)
continue
}
case gen.BaseType:
e.Base().Value = gen.Ext
default:
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q couldn't be cast as an extension"), sf.FieldName)
continue
}
}
sf.FieldElem = e
out = append(out, sf)
}
return out
}
// extract embedded field name
func embedded(f ast.Expr) string {
switch f.(type) {
case *ast.Ident:
return f.(*ast.Ident).Name
case *ast.StarExpr:
return embedded(f.(*ast.StarExpr).X)
default:
// other possibilities (like selector expressions)
// are disallowed; we can't reasonably know
// their type
return ""
}
}
// go from ast.Expr to gen.Elem; nil means type not supported
func parseExpr(e ast.Expr) gen.Elem {
switch e.(type) {
case *ast.MapType:
switch e.(*ast.MapType).Key.(type) {
case *ast.Ident:
switch e.(*ast.MapType).Key.(*ast.Ident).Name {
case "string":
inner := parseExpr(e.(*ast.MapType).Value)
if inner == nil {
return nil
}
return &gen.Map{
Value: inner,
}
default:
return nil
}
default:
// we don't support non-string map keys
return nil
}
case *ast.Ident:
b := &gen.BaseElem{
Value: pullIdent(e.(*ast.Ident).Name),
}
if b.Value == gen.IDENT {
b.Ident = (e.(*ast.Ident).Name)
}
return b
case *ast.ArrayType:
arr := e.(*ast.ArrayType)
// array and not a slice
if arr.Len != nil {
switch arr.Len.(type) {
case *ast.BasicLit:
return &gen.Array{
Size: arr.Len.(*ast.BasicLit).Value,
Els: parseExpr(arr.Elt),
}
case *ast.Ident:
return &gen.Array{
Size: arr.Len.(*ast.Ident).String(),
Els: parseExpr(arr.Elt),
}
default:
return nil
}
}
// special case for []byte; others go to gen.Slice
switch arr.Elt.(type) {
case *ast.Ident:
i := arr.Elt.(*ast.Ident)
if i.Name == "byte" {
return &gen.BaseElem{
Value: gen.Bytes,
}
} else {
e := parseExpr(arr.Elt)
if e == nil {
return nil
}
return &gen.Slice{
Els: e,
}
}
default:
e := parseExpr(arr.Elt)
if e == nil {
return nil
}
return &gen.Slice{
Els: e,
}
}
case *ast.StarExpr:
v := parseExpr(e.(*ast.StarExpr).X)
if v == nil {
return nil
}
return &gen.Ptr{
Value: v,
}
case *ast.StructType:
return &gen.Struct{
Fields: parseFieldList(e.(*ast.StructType).Fields),
}
case *ast.SelectorExpr:
v := e.(*ast.SelectorExpr)
if im, ok := v.X.(*ast.Ident); ok {
if v.Sel.Name == "Time" && im.Name == "time" {
return &gen.BaseElem{
Value: gen.Time,
}
} else {
return &gen.BaseElem{
Value: gen.IDENT,
Ident: im.Name + "." + v.Sel.Name,
}
}
}
return nil
case *ast.InterfaceType:
// support `interface{}`
if len(e.(*ast.InterfaceType).Methods.List) == 0 {
return &gen.BaseElem{
Value: gen.Intf,
}
}
return nil
default: // other types not supported
return nil
}
}
func pullIdent(name string) gen.Base {
switch name {
case "string":
return gen.String
case "byte":
return gen.Byte
case "int":
return gen.Int
case "int8":
return gen.Int8
case "int16":
return gen.Int16
case "int32":
return gen.Int32
case "int64":
return gen.Int64
case "uint":
return gen.Uint
case "uint8":
return gen.Uint8
case "uint16":
return gen.Uint16
case "uint32":
return gen.Uint32
case "uint64":
return gen.Uint64
case "bool":
return gen.Bool
case "float64":
return gen.Float64
case "float32":
return gen.Float32
case "complex64":
return gen.Complex64
case "complex128":
return gen.Complex128
case "time.Time":
return gen.Time
case "interface{}":
return gen.Intf
default:
// unrecognized identity
return gen.IDENT
}
}
| {
globalIdents = make(map[string]gen.Base)
globalProcessed = make(map[string]struct{})
} | identifier_body |
getast.go | package parse
import (
"errors"
"fmt"
"github.com/philhofer/msgp/gen"
"github.com/ttacon/chalk"
"go/ast"
"go/parser"
"go/token"
"os"
"reflect"
"strings"
)
type Identity uint8
const (
IDENT Identity = iota
Struct
Builtin
Map
Unsupported
)
var (
// this records a set of all the
// identifiers in the file that are
// not go builtins. identities not
// in this set after the first pass
// of processing are "unknown" identifiers.
globalIdents map[string]gen.Base
// this records the set of all
// processed types (types for which we created code)
globalProcessed map[string]struct{}
)
func init() {
globalIdents = make(map[string]gen.Base)
globalProcessed = make(map[string]struct{})
}
// GetAST simply creates the ast out of a filename and filters
// out non-exported elements.
func GetAST(filename string) (files []*ast.File, pkgName string, err error) {
var (
f *ast.File
fInfo os.FileInfo
)
fset := token.NewFileSet()
fInfo, err = os.Stat(filename)
if err != nil {
return
}
if fInfo.IsDir() {
var pkgs map[string]*ast.Package
pkgs, err = parser.ParseDir(fset, filename, nil, parser.AllErrors)
if err != nil {
return
}
// we'll assume one package per dir
var pkg *ast.Package
for _, pkg = range pkgs {
pkgName = pkg.Name
}
files = make([]*ast.File, len(pkg.Files))
var i = 0
for _, file := range pkg.Files {
files[i] = file
i++
}
return
}
f, err = parser.ParseFile(fset, filename, nil, parser.AllErrors)
if err != nil {
return
}
if !ast.FileExports(f) {
f, err = nil, errors.New("no exports in file")
}
files = []*ast.File{f}
if f != nil {
pkgName = f.Name.Name
}
return
}
// GetElems gets the generator elements out of a file (may be nil)
func GetElems(filename string) ([]gen.Elem, string, error) {
f, pkg, err := GetAST(filename)
if err != nil {
return nil, "", err
}
var specs []*ast.TypeSpec
for _, file := range f {
specs = append(specs, GetTypeSpecs(file)...)
}
if specs == nil {
return nil, "", nil
}
var out []gen.Elem
for i := range specs {
el := GenElem(specs[i])
if el != nil {
out = append(out, el)
}
}
var ptd bool
for _, o := range out {
unr := findUnresolved(o)
if unr != nil {
if !ptd {
fmt.Println(chalk.Yellow.Color("Non-local or unresolved identifiers:"))
ptd = true
}
for _, u := range unr {
fmt.Printf(chalk.Yellow.Color(" -> %q\n"), u)
}
}
}
return out, pkg, nil
}
// should return a list of *ast.TypeSpec we are interested in
func GetTypeSpecs(f *ast.File) []*ast.TypeSpec {
var out []*ast.TypeSpec
// check all declarations...
for i := range f.Decls {
// for GenDecls...
if g, ok := f.Decls[i].(*ast.GenDecl); ok {
// and check the specs...
for _, s := range g.Specs {
// for ast.TypeSpecs....
if ts, ok := s.(*ast.TypeSpec); ok {
out = append(out, ts)
// record identifier
switch ts.Type.(type) {
case *ast.StructType:
globalIdents[ts.Name.Name] = gen.IDENT
case *ast.Ident:
// we will resolve this later
globalIdents[ts.Name.Name] = pullIdent(ts.Type.(*ast.Ident).Name)
case *ast.ArrayType:
a := ts.Type.(*ast.ArrayType)
switch a.Elt.(type) {
case *ast.Ident:
if a.Elt.(*ast.Ident).Name == "byte" {
globalIdents[ts.Name.Name] = gen.Bytes
} else {
globalIdents[ts.Name.Name] = gen.IDENT
}
default:
globalIdents[ts.Name.Name] = gen.IDENT
}
case *ast.StarExpr:
globalIdents[ts.Name.Name] = gen.IDENT
case *ast.MapType:
globalIdents[ts.Name.Name] = gen.IDENT
}
}
}
}
}
return out
}
// GenElem creates the gen.Elem out of an
// ast.TypeSpec. Right now the only supported
// TypeSpec.Type is *ast.StructType
func GenElem(in *ast.TypeSpec) gen.Elem {
// handle supported types
switch in.Type.(type) {
case *ast.StructType:
v := in.Type.(*ast.StructType)
fmt.Printf(chalk.Green.Color("parsing %s..."), in.Name.Name)
p := &gen.Ptr{
Value: &gen.Struct{
Name: in.Name.Name, // ast.Ident
Fields: parseFieldList(v.Fields),
},
}
// mark type as processed
globalProcessed[in.Name.Name] = struct{}{}
if len(p.Value.(*gen.Struct).Fields) == 0 {
fmt.Printf(chalk.Red.Color(" has no exported fields \u2717\n")) // X
return nil
}
fmt.Print(chalk.Green.Color(" \u2713\n")) // check
return p
default:
return nil
}
}
func parseFieldList(fl *ast.FieldList) []gen.StructField {
if fl == nil || fl.NumFields() == 0 {
return nil
}
out := make([]gen.StructField, 0, fl.NumFields())
for_fields:
for _, field := range fl.List |
return out
}
// extract embedded field name
func embedded(f ast.Expr) string {
switch f.(type) {
case *ast.Ident:
return f.(*ast.Ident).Name
case *ast.StarExpr:
return embedded(f.(*ast.StarExpr).X)
default:
// other possibilities (like selector expressions)
// are disallowed; we can't reasonably know
// their type
return ""
}
}
// go from ast.Expr to gen.Elem; nil means type not supported
func parseExpr(e ast.Expr) gen.Elem {
switch e.(type) {
case *ast.MapType:
switch e.(*ast.MapType).Key.(type) {
case *ast.Ident:
switch e.(*ast.MapType).Key.(*ast.Ident).Name {
case "string":
inner := parseExpr(e.(*ast.MapType).Value)
if inner == nil {
return nil
}
return &gen.Map{
Value: inner,
}
default:
return nil
}
default:
// we don't support non-string map keys
return nil
}
case *ast.Ident:
b := &gen.BaseElem{
Value: pullIdent(e.(*ast.Ident).Name),
}
if b.Value == gen.IDENT {
b.Ident = (e.(*ast.Ident).Name)
}
return b
case *ast.ArrayType:
arr := e.(*ast.ArrayType)
// array and not a slice
if arr.Len != nil {
switch arr.Len.(type) {
case *ast.BasicLit:
return &gen.Array{
Size: arr.Len.(*ast.BasicLit).Value,
Els: parseExpr(arr.Elt),
}
case *ast.Ident:
return &gen.Array{
Size: arr.Len.(*ast.Ident).String(),
Els: parseExpr(arr.Elt),
}
default:
return nil
}
}
// special case for []byte; others go to gen.Slice
switch arr.Elt.(type) {
case *ast.Ident:
i := arr.Elt.(*ast.Ident)
if i.Name == "byte" {
return &gen.BaseElem{
Value: gen.Bytes,
}
} else {
e := parseExpr(arr.Elt)
if e == nil {
return nil
}
return &gen.Slice{
Els: e,
}
}
default:
e := parseExpr(arr.Elt)
if e == nil {
return nil
}
return &gen.Slice{
Els: e,
}
}
case *ast.StarExpr:
v := parseExpr(e.(*ast.StarExpr).X)
if v == nil {
return nil
}
return &gen.Ptr{
Value: v,
}
case *ast.StructType:
return &gen.Struct{
Fields: parseFieldList(e.(*ast.StructType).Fields),
}
case *ast.SelectorExpr:
v := e.(*ast.SelectorExpr)
if im, ok := v.X.(*ast.Ident); ok {
if v.Sel.Name == "Time" && im.Name == "time" {
return &gen.BaseElem{
Value: gen.Time,
}
} else {
return &gen.BaseElem{
Value: gen.IDENT,
Ident: im.Name + "." + v.Sel.Name,
}
}
}
return nil
case *ast.InterfaceType:
// support `interface{}`
if len(e.(*ast.InterfaceType).Methods.List) == 0 {
return &gen.BaseElem{
Value: gen.Intf,
}
}
return nil
default: // other types not supported
return nil
}
}
func pullIdent(name string) gen.Base {
switch name {
case "string":
return gen.String
case "byte":
return gen.Byte
case "int":
return gen.Int
case "int8":
return gen.Int8
case "int16":
return gen.Int16
case "int32":
return gen.Int32
case "int64":
return gen.Int64
case "uint":
return gen.Uint
case "uint8":
return gen.Uint8
case "uint16":
return gen.Uint16
case "uint32":
return gen.Uint32
case "uint64":
return gen.Uint64
case "bool":
return gen.Bool
case "float64":
return gen.Float64
case "float32":
return gen.Float32
case "complex64":
return gen.Complex64
case "complex128":
return gen.Complex128
case "time.Time":
return gen.Time
case "interface{}":
return gen.Intf
default:
// unrecognized identity
return gen.IDENT
}
}
| {
var sf gen.StructField
// field name
switch len(field.Names) {
case 1:
sf.FieldName = field.Names[0].Name
case 0:
sf.FieldName = embedded(field.Type)
if sf.FieldName == "" {
// means it's a selector expr., or
// something else unsupported
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %v unsupported)"), field.Type)
continue for_fields
}
default:
// inline multiple field declaration
for _, nm := range field.Names {
el := parseExpr(field.Type)
if el == nil {
// skip
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q unsupported)"), sf.FieldName)
continue for_fields
}
out = append(out, gen.StructField{
FieldTag: nm.Name,
FieldName: nm.Name,
FieldElem: el,
})
}
continue for_fields
}
// field tag
var flagExtension bool
if field.Tag != nil {
// we need to trim the leading and trailing ` characters for
// to convert to reflect.StructTag
body := reflect.StructTag(strings.Trim(field.Tag.Value, "`")).Get("msg")
// check for a tag like `msg:"name,extension"`
tags := strings.Split(body, ",")
if len(tags) > 1 && tags[1] == "extension" {
flagExtension = true
}
sf.FieldTag = tags[0]
}
if sf.FieldTag == "" {
sf.FieldTag = sf.FieldName
} else if sf.FieldTag == "-" {
// deliberately ignore field
continue for_fields
}
e := parseExpr(field.Type)
if e == nil {
// unsupported type
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q unsupported)"), sf.FieldName)
continue
}
// mark as extension
if flagExtension {
// an extension can be
// a pointer or base type
switch e.Type() {
case gen.PtrType:
if e.Ptr().Value.Type() == gen.BaseType {
e.Ptr().Value.Base().Value = gen.Ext
} else {
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q couldn't be cast as an extension"), sf.FieldName)
continue
}
case gen.BaseType:
e.Base().Value = gen.Ext
default:
fmt.Printf(chalk.Yellow.Color(" (\u26a0 field %q couldn't be cast as an extension"), sf.FieldName)
continue
}
}
sf.FieldElem = e
out = append(out, sf)
} | conditional_block |
main.go | package main
import (
"database/sql"
"encoding/json"
"fmt"
"github.com/oschwald/geoip2-golang"
"github.com/umahmood/haversine"
"log"
"net"
"net/http"
"reflect"
"strconv"
"time"
_ "github.com/mattn/go-sqlite3"
)
// NullString is an alias for sql.NullString data type
type NullString sql.NullString
// Scan implements the Scanner interface for NullString
func (ns *NullString) Scan(value interface{}) error {
var s sql.NullString
if err := s.Scan(value); err != nil {
return err
}
// if nil then make Valid false
if reflect.TypeOf(value) == nil | else {
*ns = NullString{s.String, true}
}
return nil
}
type Env struct {
sqlDb *sql.DB
}
// json object to map the endpoint input data
type RequestInput struct {
Event_UUID string `json:"event_uuid"`
Username string `json:"username"`
Unix_timestamp int64 `json:"unix_timestamp"`
IP_Address string `json:"ip_address"`
}
// geo location of the IP in the request
type currentGeo struct {
Lat float64 `json:"lat"`
Lon float64 `json:"lon"`
Radius uint16 `json:"radius"`
}
// json object representing the preceeding or succeeding IP
type ipResponse struct {
Ip string `json:"ip,omitempty"'`
Speed *float32 `json:"speed,omitempty"`
Lat float64 `json:"lat,omitempty"`
Lon float64 `json:"lon,omitempty"`
Radius uint16 `json:"radius,omitempty"`
Timestamp int64 `json:"unix_timestamp,omitempty"`
}
// Error response json object
type errResponse struct {
Error string `json:"error"`
}
// Response json object
type Response struct {
CurrentGeo currentGeo `json:"currentGeo"`
TravelToCurrentGeoSuspicious *bool `json:"travelToCurrentGeoSuspicious,omitempty"`
TravelFromCurrentGeoSuspicious *bool `json:"travelFromCurrentGeoSuspicious,omitempty"`
PrecedingIpAccess ipResponse `json:"precedingIpAccess,omitempty"`
SubsequentIpAccess ipResponse `json:"subsequentIpAccess,omitempty"`
}
//var sqlDb *sql.DB
var tm *time.Time
var currHaversineCoord haversine.Coord
func (env *Env) home(writer http.ResponseWriter, req *http.Request){
decoder := json.NewDecoder(req.Body)
var input RequestInput
writer.Header().Set("Content-Type", "application/json")
err := decoder.Decode(&input)
if err != nil {
respondWithError(err.Error(), writer)
fmt.Println("handling ", req.RequestURI, ": ", err)
return
}
tm := time.Unix(input.Unix_timestamp, 0)
fmt.Println("UUID: ", input.Event_UUID)
fmt.Println("Username: ", input.Username)
fmt.Println("IP Address: ", input.IP_Address)
fmt.Println("Time: ", tm)
// Check for valid IP address
valid_ip := net.ParseIP(input.IP_Address)
if valid_ip == nil {
respondWithError("Invalid IP Address", writer)
fmt.Println("Invalid IP Address: ", input.IP_Address)
return
}
// building a temp table that appends row_number column which is used in join condition
selectStatement := fmt.Sprintf(`with new_table
AS (select uuid, username, ipaddress, date_time,ROW_NUMBER() OVER (order by date_time) row_no FROM request where username="%s")
select t.uuid, t.username, t.ipaddress, t.date_time, t1.uuid, t1.username, t1.ipaddress, t1.date_time, t2.uuid, t2.username, t2.ipaddress, t2.date_time from (select * from new_table where date_time="%s" and ipaddress="%s") as t
LEFT JOIN (select * from new_table) as t1 ON t1.row_no = t.row_no-1
LEFT JOIN (select * from new_table) as t2 ON t2.row_no = t.row_no+1;`, input.Username, strconv.FormatInt(input.Unix_timestamp, 10), input.IP_Address)
tx, err := env.sqlDb.Begin()
if err != nil {
log.Fatal(err)
panic(err)
}
// insert the request data into the database
_, err = env.sqlDb.Exec("insert into request(uuid, username, ipaddress, date_time) values(?, ?, ?, ?)", input.Event_UUID, input.Username, input.IP_Address, input.Unix_timestamp)
if err != nil {
log.Fatal(err)
panic(err)
}
tx.Commit()
// Defined the paramters as NullString to handle dereferencing issue with nil values returned from the database.
type sqlrow struct {
uuid NullString
username NullString
ipaddress NullString
date_time NullString
}
// variables to hold the current, preceeding and succeeding database records
var t, t1, t2 sqlrow
row := env.sqlDb.QueryRow(selectStatement)
switch err := row.Scan(&t.uuid, &t.username, &t.ipaddress, &t.date_time, &t1.uuid, &t1.username, &t1.ipaddress, &t1.date_time, &t2.uuid, &t2.username, &t2.ipaddress, &t2.date_time); err {
case sql.ErrNoRows:
fmt.Println("No rows were returned!")
case nil:
var current = currentGeo{} // json object referencing the request data geo location
var preceeding = ipResponse{} // json object referencing the preceeding immediate request w.r.t the request data
var succeeding = ipResponse{}// json object referencing the succeeding immediate request w.r.t the request data
var resp = Response{} // response json that is returned to the end user
var preceedingHaversineCoord haversine.Coord
var succeedingHaversineCoord haversine.Coord
current.Lat, current.Lon, current.Radius = GetLatitudeAndLongitude(input.IP_Address)
if current.Lat != -10000 {
resp.CurrentGeo = current
currHaversineCoord = haversine.Coord{Lat:current.Lat, Lon:current.Lon}
} else {
respondWithError(fmt.Sprintf("Error retreiving geo location for ip %s", input.IP_Address), writer)
return
}
// if t1.uuid is not null
if t1.uuid.Valid {
preceeding.Lat, preceeding.Lon, preceeding.Radius = GetLatitudeAndLongitude(t1.ipaddress.String)
if preceeding.Lat != -10000 {
// creating a bool and speed object so that the json keys can map to these objects so that they are not ignored
// when displaying 0 or nil values because of omitempty flag set on the key
tr := new(bool)
speed := new(float32)
tm1, _ := strconv.ParseInt(t1.date_time.String, 10, 64)
preceeding.Timestamp = tm1
preceeding.Ip = t1.ipaddress.String
preceedingHaversineCoord = haversine.Coord{Lat: preceeding.Lat, Lon: preceeding.Lon}
dist, _ := haversine.Distance(currHaversineCoord, preceedingHaversineCoord)
// Deducting the accuracy radius for both the locations from the previous haversine distance, considering the
// location can be anywhere within the radius. In this case i'm assuming its on the circle. Before deducting
// convert radius in kilometers to miles by multiplying with conversion factor 0.6214
finalDist := dist - (float64(preceeding.Radius) + float64(current.Radius)) * 0.6214
if finalDist < 0 {
finalDist = 0
resp.TravelToCurrentGeoSuspicious = tr
} else {
tmDiff := tm.Sub(time.Unix(tm1, 0)).Hours()
// if hour is less than 0 assume hour = 1
if tmDiff == 0 {
tmDiff = 1
}
// Setting the suspicion flag to true if the speed to travel from preceeding location to the current location is greater than 500
*speed = float32(dist / tmDiff)
if *speed > 500 {
*tr = true
}
resp.TravelToCurrentGeoSuspicious = tr
}
preceeding.Speed = speed
resp.PrecedingIpAccess = preceeding
} else {
respondWithError(fmt.Sprintf("Error retreiving geo location for preceeding ip %s", t1.ipaddress.String), writer)
return
}
}
// if t2.uuid is not null
if t2.uuid.Valid {
succeeding.Lat, succeeding.Lon, succeeding.Radius = GetLatitudeAndLongitude(t2.ipaddress.String)
if succeeding.Lat != -10000 {
tr := new(bool)
speed := new(float32)
tm1, _ := strconv.ParseInt(t2.date_time.String, 10, 64)
succeeding.Timestamp = tm1
succeeding.Ip = t2.ipaddress.String
succeedingHaversineCoord = haversine.Coord{Lat: succeeding.Lat, Lon: succeeding.Lon}
dist, _ := haversine.Distance(currHaversineCoord, succeedingHaversineCoord)
// Deducting the accuracy radius for both the locations from the previous haversine distance, considering the
// location can be anywhere within the radius. In this case i'm assuming its on the circle. Before deducting
// convert radius in kilometers to miles by multiplying with conversion factor 0.6214
finalDist := dist - (float64(succeeding.Radius) + float64(current.Radius)) * 0.6214
if finalDist < 0 {
finalDist = 0
resp.TravelFromCurrentGeoSuspicious = tr
} else {
tmDiff := time.Unix(tm1, 0).Sub(tm).Hours()
// if hour is less than 0 assume hour = 1
if tmDiff == 0 {
tmDiff = 1
}
// Setting the suspicion flag to true if the speed to travel from current location to subsequent location is greater than 500
*speed = float32(finalDist / tmDiff)
if *speed > 500 {
*tr = true
}
resp.TravelFromCurrentGeoSuspicious = tr
}
succeeding.Speed = speed
resp.SubsequentIpAccess = succeeding
} else {
respondWithError(fmt.Sprintf("Error retreiving geo location for subsequent ip %s", t1.ipaddress.String), writer)
return
}
}
respJson, respErr := json.Marshal(resp)
if respErr != nil {
log.Fatal(respErr)
respondWithError(respErr.Error(), writer)
return
} else {
// Setting header content-type to application/json
writer.Header().Set("Content-Type", "application/json")
writer.WriteHeader(http.StatusOK)
writer.Write(respJson)
return
}
default:
fmt.Println(err)
}
}
// this function return latitude, longitude and radius give an IPAddress. If it couldnt find the ip it returns dummy value of -10000
func GetLatitudeAndLongitude(ip string) (lat, lon float64, radius uint16) {
geoIpdb, err := geoip2.Open("databases/GeoLite2-City.mmdb")
if err != nil {
log.Fatal(err)
}
defer geoIpdb.Close()
ipAddress := net.ParseIP(ip)
record, err := geoIpdb.City(ipAddress)
if err != nil {
log.Fatal(err)
return -10000, -10000, 65535
}
lat = record.Location.Latitude
lon = record.Location.Longitude
radius = record.Location.AccuracyRadius
return
}
func respondWithError(str string, writer http.ResponseWriter) {
errResp := errResponse{str}
errRespJson, _ := json.Marshal(errResp)
writer.Header().Set("Content-Type", "application/json")
writer.WriteHeader(http.StatusBadRequest)
writer.Write(errRespJson)
}
func handleRequests() {
// Opens a db connection to login.db that keeps track of all the incoming requests
sqlDb, sqlErr := sql.Open("sqlite3", "databases/login.db")
if sqlErr != nil {
log.Fatal(sqlErr)
panic(sqlErr)
}
env := &Env{sqlDb: sqlDb}
defer env.sqlDb.Close()
http.HandleFunc("/", env.home)
log.Fatal(http.ListenAndServe(":10000", nil))
}
func main() {
//defer sqlDb.Close()
handleRequests()
}
| {
*ns = NullString{s.String, false}
} | conditional_block |
main.go | package main
import (
"database/sql"
"encoding/json"
"fmt"
"github.com/oschwald/geoip2-golang"
"github.com/umahmood/haversine"
"log"
"net"
"net/http"
"reflect"
"strconv"
"time"
_ "github.com/mattn/go-sqlite3"
)
// NullString is an alias for sql.NullString data type
type NullString sql.NullString
// Scan implements the Scanner interface for NullString
func (ns *NullString) Scan(value interface{}) error {
var s sql.NullString
if err := s.Scan(value); err != nil {
return err
}
// if nil then make Valid false
if reflect.TypeOf(value) == nil {
*ns = NullString{s.String, false}
} else {
*ns = NullString{s.String, true}
}
return nil
}
type Env struct {
sqlDb *sql.DB
}
// json object to map the endpoint input data
type RequestInput struct {
Event_UUID string `json:"event_uuid"`
Username string `json:"username"`
Unix_timestamp int64 `json:"unix_timestamp"`
IP_Address string `json:"ip_address"`
}
// geo location of the IP in the request
type currentGeo struct {
Lat float64 `json:"lat"`
Lon float64 `json:"lon"`
Radius uint16 `json:"radius"`
}
// json object representing the preceeding or succeeding IP
type ipResponse struct {
Ip string `json:"ip,omitempty"'`
Speed *float32 `json:"speed,omitempty"`
Lat float64 `json:"lat,omitempty"`
Lon float64 `json:"lon,omitempty"`
Radius uint16 `json:"radius,omitempty"`
Timestamp int64 `json:"unix_timestamp,omitempty"`
}
// Error response json object
type errResponse struct {
Error string `json:"error"`
}
// Response json object
type Response struct {
CurrentGeo currentGeo `json:"currentGeo"`
TravelToCurrentGeoSuspicious *bool `json:"travelToCurrentGeoSuspicious,omitempty"`
TravelFromCurrentGeoSuspicious *bool `json:"travelFromCurrentGeoSuspicious,omitempty"`
PrecedingIpAccess ipResponse `json:"precedingIpAccess,omitempty"`
SubsequentIpAccess ipResponse `json:"subsequentIpAccess,omitempty"`
}
//var sqlDb *sql.DB
var tm *time.Time
var currHaversineCoord haversine.Coord
func (env *Env) home(writer http.ResponseWriter, req *http.Request) |
// this function return latitude, longitude and radius give an IPAddress. If it couldnt find the ip it returns dummy value of -10000
func GetLatitudeAndLongitude(ip string) (lat, lon float64, radius uint16) {
geoIpdb, err := geoip2.Open("databases/GeoLite2-City.mmdb")
if err != nil {
log.Fatal(err)
}
defer geoIpdb.Close()
ipAddress := net.ParseIP(ip)
record, err := geoIpdb.City(ipAddress)
if err != nil {
log.Fatal(err)
return -10000, -10000, 65535
}
lat = record.Location.Latitude
lon = record.Location.Longitude
radius = record.Location.AccuracyRadius
return
}
func respondWithError(str string, writer http.ResponseWriter) {
errResp := errResponse{str}
errRespJson, _ := json.Marshal(errResp)
writer.Header().Set("Content-Type", "application/json")
writer.WriteHeader(http.StatusBadRequest)
writer.Write(errRespJson)
}
func handleRequests() {
// Opens a db connection to login.db that keeps track of all the incoming requests
sqlDb, sqlErr := sql.Open("sqlite3", "databases/login.db")
if sqlErr != nil {
log.Fatal(sqlErr)
panic(sqlErr)
}
env := &Env{sqlDb: sqlDb}
defer env.sqlDb.Close()
http.HandleFunc("/", env.home)
log.Fatal(http.ListenAndServe(":10000", nil))
}
func main() {
//defer sqlDb.Close()
handleRequests()
}
| {
decoder := json.NewDecoder(req.Body)
var input RequestInput
writer.Header().Set("Content-Type", "application/json")
err := decoder.Decode(&input)
if err != nil {
respondWithError(err.Error(), writer)
fmt.Println("handling ", req.RequestURI, ": ", err)
return
}
tm := time.Unix(input.Unix_timestamp, 0)
fmt.Println("UUID: ", input.Event_UUID)
fmt.Println("Username: ", input.Username)
fmt.Println("IP Address: ", input.IP_Address)
fmt.Println("Time: ", tm)
// Check for valid IP address
valid_ip := net.ParseIP(input.IP_Address)
if valid_ip == nil {
respondWithError("Invalid IP Address", writer)
fmt.Println("Invalid IP Address: ", input.IP_Address)
return
}
// building a temp table that appends row_number column which is used in join condition
selectStatement := fmt.Sprintf(`with new_table
AS (select uuid, username, ipaddress, date_time,ROW_NUMBER() OVER (order by date_time) row_no FROM request where username="%s")
select t.uuid, t.username, t.ipaddress, t.date_time, t1.uuid, t1.username, t1.ipaddress, t1.date_time, t2.uuid, t2.username, t2.ipaddress, t2.date_time from (select * from new_table where date_time="%s" and ipaddress="%s") as t
LEFT JOIN (select * from new_table) as t1 ON t1.row_no = t.row_no-1
LEFT JOIN (select * from new_table) as t2 ON t2.row_no = t.row_no+1;`, input.Username, strconv.FormatInt(input.Unix_timestamp, 10), input.IP_Address)
tx, err := env.sqlDb.Begin()
if err != nil {
log.Fatal(err)
panic(err)
}
// insert the request data into the database
_, err = env.sqlDb.Exec("insert into request(uuid, username, ipaddress, date_time) values(?, ?, ?, ?)", input.Event_UUID, input.Username, input.IP_Address, input.Unix_timestamp)
if err != nil {
log.Fatal(err)
panic(err)
}
tx.Commit()
// Defined the paramters as NullString to handle dereferencing issue with nil values returned from the database.
type sqlrow struct {
uuid NullString
username NullString
ipaddress NullString
date_time NullString
}
// variables to hold the current, preceeding and succeeding database records
var t, t1, t2 sqlrow
row := env.sqlDb.QueryRow(selectStatement)
switch err := row.Scan(&t.uuid, &t.username, &t.ipaddress, &t.date_time, &t1.uuid, &t1.username, &t1.ipaddress, &t1.date_time, &t2.uuid, &t2.username, &t2.ipaddress, &t2.date_time); err {
case sql.ErrNoRows:
fmt.Println("No rows were returned!")
case nil:
var current = currentGeo{} // json object referencing the request data geo location
var preceeding = ipResponse{} // json object referencing the preceeding immediate request w.r.t the request data
var succeeding = ipResponse{}// json object referencing the succeeding immediate request w.r.t the request data
var resp = Response{} // response json that is returned to the end user
var preceedingHaversineCoord haversine.Coord
var succeedingHaversineCoord haversine.Coord
current.Lat, current.Lon, current.Radius = GetLatitudeAndLongitude(input.IP_Address)
if current.Lat != -10000 {
resp.CurrentGeo = current
currHaversineCoord = haversine.Coord{Lat:current.Lat, Lon:current.Lon}
} else {
respondWithError(fmt.Sprintf("Error retreiving geo location for ip %s", input.IP_Address), writer)
return
}
// if t1.uuid is not null
if t1.uuid.Valid {
preceeding.Lat, preceeding.Lon, preceeding.Radius = GetLatitudeAndLongitude(t1.ipaddress.String)
if preceeding.Lat != -10000 {
// creating a bool and speed object so that the json keys can map to these objects so that they are not ignored
// when displaying 0 or nil values because of omitempty flag set on the key
tr := new(bool)
speed := new(float32)
tm1, _ := strconv.ParseInt(t1.date_time.String, 10, 64)
preceeding.Timestamp = tm1
preceeding.Ip = t1.ipaddress.String
preceedingHaversineCoord = haversine.Coord{Lat: preceeding.Lat, Lon: preceeding.Lon}
dist, _ := haversine.Distance(currHaversineCoord, preceedingHaversineCoord)
// Deducting the accuracy radius for both the locations from the previous haversine distance, considering the
// location can be anywhere within the radius. In this case i'm assuming its on the circle. Before deducting
// convert radius in kilometers to miles by multiplying with conversion factor 0.6214
finalDist := dist - (float64(preceeding.Radius) + float64(current.Radius)) * 0.6214
if finalDist < 0 {
finalDist = 0
resp.TravelToCurrentGeoSuspicious = tr
} else {
tmDiff := tm.Sub(time.Unix(tm1, 0)).Hours()
// if hour is less than 0 assume hour = 1
if tmDiff == 0 {
tmDiff = 1
}
// Setting the suspicion flag to true if the speed to travel from preceeding location to the current location is greater than 500
*speed = float32(dist / tmDiff)
if *speed > 500 {
*tr = true
}
resp.TravelToCurrentGeoSuspicious = tr
}
preceeding.Speed = speed
resp.PrecedingIpAccess = preceeding
} else {
respondWithError(fmt.Sprintf("Error retreiving geo location for preceeding ip %s", t1.ipaddress.String), writer)
return
}
}
// if t2.uuid is not null
if t2.uuid.Valid {
succeeding.Lat, succeeding.Lon, succeeding.Radius = GetLatitudeAndLongitude(t2.ipaddress.String)
if succeeding.Lat != -10000 {
tr := new(bool)
speed := new(float32)
tm1, _ := strconv.ParseInt(t2.date_time.String, 10, 64)
succeeding.Timestamp = tm1
succeeding.Ip = t2.ipaddress.String
succeedingHaversineCoord = haversine.Coord{Lat: succeeding.Lat, Lon: succeeding.Lon}
dist, _ := haversine.Distance(currHaversineCoord, succeedingHaversineCoord)
// Deducting the accuracy radius for both the locations from the previous haversine distance, considering the
// location can be anywhere within the radius. In this case i'm assuming its on the circle. Before deducting
// convert radius in kilometers to miles by multiplying with conversion factor 0.6214
finalDist := dist - (float64(succeeding.Radius) + float64(current.Radius)) * 0.6214
if finalDist < 0 {
finalDist = 0
resp.TravelFromCurrentGeoSuspicious = tr
} else {
tmDiff := time.Unix(tm1, 0).Sub(tm).Hours()
// if hour is less than 0 assume hour = 1
if tmDiff == 0 {
tmDiff = 1
}
// Setting the suspicion flag to true if the speed to travel from current location to subsequent location is greater than 500
*speed = float32(finalDist / tmDiff)
if *speed > 500 {
*tr = true
}
resp.TravelFromCurrentGeoSuspicious = tr
}
succeeding.Speed = speed
resp.SubsequentIpAccess = succeeding
} else {
respondWithError(fmt.Sprintf("Error retreiving geo location for subsequent ip %s", t1.ipaddress.String), writer)
return
}
}
respJson, respErr := json.Marshal(resp)
if respErr != nil {
log.Fatal(respErr)
respondWithError(respErr.Error(), writer)
return
} else {
// Setting header content-type to application/json
writer.Header().Set("Content-Type", "application/json")
writer.WriteHeader(http.StatusOK)
writer.Write(respJson)
return
}
default:
fmt.Println(err)
}
} | identifier_body |
main.go | package main
import (
"database/sql"
"encoding/json"
"fmt"
"github.com/oschwald/geoip2-golang"
"github.com/umahmood/haversine"
"log"
"net"
"net/http"
"reflect"
"strconv"
"time"
_ "github.com/mattn/go-sqlite3"
)
// NullString is an alias for sql.NullString data type
type NullString sql.NullString
// Scan implements the Scanner interface for NullString
func (ns *NullString) Scan(value interface{}) error {
var s sql.NullString
if err := s.Scan(value); err != nil {
return err
}
// if nil then make Valid false
if reflect.TypeOf(value) == nil {
*ns = NullString{s.String, false}
} else {
*ns = NullString{s.String, true}
}
return nil
}
type Env struct {
sqlDb *sql.DB
}
// json object to map the endpoint input data
type RequestInput struct {
Event_UUID string `json:"event_uuid"`
Username string `json:"username"`
Unix_timestamp int64 `json:"unix_timestamp"`
IP_Address string `json:"ip_address"`
}
// geo location of the IP in the request
type currentGeo struct {
Lat float64 `json:"lat"`
Lon float64 `json:"lon"`
Radius uint16 `json:"radius"`
}
// json object representing the preceeding or succeeding IP
type ipResponse struct {
Ip string `json:"ip,omitempty"'`
Speed *float32 `json:"speed,omitempty"`
Lat float64 `json:"lat,omitempty"`
Lon float64 `json:"lon,omitempty"`
Radius uint16 `json:"radius,omitempty"`
Timestamp int64 `json:"unix_timestamp,omitempty"`
}
// Error response json object
type errResponse struct {
Error string `json:"error"`
}
// Response json object
type Response struct {
CurrentGeo currentGeo `json:"currentGeo"`
TravelToCurrentGeoSuspicious *bool `json:"travelToCurrentGeoSuspicious,omitempty"`
TravelFromCurrentGeoSuspicious *bool `json:"travelFromCurrentGeoSuspicious,omitempty"`
PrecedingIpAccess ipResponse `json:"precedingIpAccess,omitempty"`
SubsequentIpAccess ipResponse `json:"subsequentIpAccess,omitempty"`
}
//var sqlDb *sql.DB
var tm *time.Time
var currHaversineCoord haversine.Coord
func (env *Env) | (writer http.ResponseWriter, req *http.Request){
decoder := json.NewDecoder(req.Body)
var input RequestInput
writer.Header().Set("Content-Type", "application/json")
err := decoder.Decode(&input)
if err != nil {
respondWithError(err.Error(), writer)
fmt.Println("handling ", req.RequestURI, ": ", err)
return
}
tm := time.Unix(input.Unix_timestamp, 0)
fmt.Println("UUID: ", input.Event_UUID)
fmt.Println("Username: ", input.Username)
fmt.Println("IP Address: ", input.IP_Address)
fmt.Println("Time: ", tm)
// Check for valid IP address
valid_ip := net.ParseIP(input.IP_Address)
if valid_ip == nil {
respondWithError("Invalid IP Address", writer)
fmt.Println("Invalid IP Address: ", input.IP_Address)
return
}
// building a temp table that appends row_number column which is used in join condition
selectStatement := fmt.Sprintf(`with new_table
AS (select uuid, username, ipaddress, date_time,ROW_NUMBER() OVER (order by date_time) row_no FROM request where username="%s")
select t.uuid, t.username, t.ipaddress, t.date_time, t1.uuid, t1.username, t1.ipaddress, t1.date_time, t2.uuid, t2.username, t2.ipaddress, t2.date_time from (select * from new_table where date_time="%s" and ipaddress="%s") as t
LEFT JOIN (select * from new_table) as t1 ON t1.row_no = t.row_no-1
LEFT JOIN (select * from new_table) as t2 ON t2.row_no = t.row_no+1;`, input.Username, strconv.FormatInt(input.Unix_timestamp, 10), input.IP_Address)
tx, err := env.sqlDb.Begin()
if err != nil {
log.Fatal(err)
panic(err)
}
// insert the request data into the database
_, err = env.sqlDb.Exec("insert into request(uuid, username, ipaddress, date_time) values(?, ?, ?, ?)", input.Event_UUID, input.Username, input.IP_Address, input.Unix_timestamp)
if err != nil {
log.Fatal(err)
panic(err)
}
tx.Commit()
// Defined the paramters as NullString to handle dereferencing issue with nil values returned from the database.
type sqlrow struct {
uuid NullString
username NullString
ipaddress NullString
date_time NullString
}
// variables to hold the current, preceeding and succeeding database records
var t, t1, t2 sqlrow
row := env.sqlDb.QueryRow(selectStatement)
switch err := row.Scan(&t.uuid, &t.username, &t.ipaddress, &t.date_time, &t1.uuid, &t1.username, &t1.ipaddress, &t1.date_time, &t2.uuid, &t2.username, &t2.ipaddress, &t2.date_time); err {
case sql.ErrNoRows:
fmt.Println("No rows were returned!")
case nil:
var current = currentGeo{} // json object referencing the request data geo location
var preceeding = ipResponse{} // json object referencing the preceeding immediate request w.r.t the request data
var succeeding = ipResponse{}// json object referencing the succeeding immediate request w.r.t the request data
var resp = Response{} // response json that is returned to the end user
var preceedingHaversineCoord haversine.Coord
var succeedingHaversineCoord haversine.Coord
current.Lat, current.Lon, current.Radius = GetLatitudeAndLongitude(input.IP_Address)
if current.Lat != -10000 {
resp.CurrentGeo = current
currHaversineCoord = haversine.Coord{Lat:current.Lat, Lon:current.Lon}
} else {
respondWithError(fmt.Sprintf("Error retreiving geo location for ip %s", input.IP_Address), writer)
return
}
// if t1.uuid is not null
if t1.uuid.Valid {
preceeding.Lat, preceeding.Lon, preceeding.Radius = GetLatitudeAndLongitude(t1.ipaddress.String)
if preceeding.Lat != -10000 {
// creating a bool and speed object so that the json keys can map to these objects so that they are not ignored
// when displaying 0 or nil values because of omitempty flag set on the key
tr := new(bool)
speed := new(float32)
tm1, _ := strconv.ParseInt(t1.date_time.String, 10, 64)
preceeding.Timestamp = tm1
preceeding.Ip = t1.ipaddress.String
preceedingHaversineCoord = haversine.Coord{Lat: preceeding.Lat, Lon: preceeding.Lon}
dist, _ := haversine.Distance(currHaversineCoord, preceedingHaversineCoord)
// Deducting the accuracy radius for both the locations from the previous haversine distance, considering the
// location can be anywhere within the radius. In this case i'm assuming its on the circle. Before deducting
// convert radius in kilometers to miles by multiplying with conversion factor 0.6214
finalDist := dist - (float64(preceeding.Radius) + float64(current.Radius)) * 0.6214
if finalDist < 0 {
finalDist = 0
resp.TravelToCurrentGeoSuspicious = tr
} else {
tmDiff := tm.Sub(time.Unix(tm1, 0)).Hours()
// if hour is less than 0 assume hour = 1
if tmDiff == 0 {
tmDiff = 1
}
// Setting the suspicion flag to true if the speed to travel from preceeding location to the current location is greater than 500
*speed = float32(dist / tmDiff)
if *speed > 500 {
*tr = true
}
resp.TravelToCurrentGeoSuspicious = tr
}
preceeding.Speed = speed
resp.PrecedingIpAccess = preceeding
} else {
respondWithError(fmt.Sprintf("Error retreiving geo location for preceeding ip %s", t1.ipaddress.String), writer)
return
}
}
// if t2.uuid is not null
if t2.uuid.Valid {
succeeding.Lat, succeeding.Lon, succeeding.Radius = GetLatitudeAndLongitude(t2.ipaddress.String)
if succeeding.Lat != -10000 {
tr := new(bool)
speed := new(float32)
tm1, _ := strconv.ParseInt(t2.date_time.String, 10, 64)
succeeding.Timestamp = tm1
succeeding.Ip = t2.ipaddress.String
succeedingHaversineCoord = haversine.Coord{Lat: succeeding.Lat, Lon: succeeding.Lon}
dist, _ := haversine.Distance(currHaversineCoord, succeedingHaversineCoord)
// Deducting the accuracy radius for both the locations from the previous haversine distance, considering the
// location can be anywhere within the radius. In this case i'm assuming its on the circle. Before deducting
// convert radius in kilometers to miles by multiplying with conversion factor 0.6214
finalDist := dist - (float64(succeeding.Radius) + float64(current.Radius)) * 0.6214
if finalDist < 0 {
finalDist = 0
resp.TravelFromCurrentGeoSuspicious = tr
} else {
tmDiff := time.Unix(tm1, 0).Sub(tm).Hours()
// if hour is less than 0 assume hour = 1
if tmDiff == 0 {
tmDiff = 1
}
// Setting the suspicion flag to true if the speed to travel from current location to subsequent location is greater than 500
*speed = float32(finalDist / tmDiff)
if *speed > 500 {
*tr = true
}
resp.TravelFromCurrentGeoSuspicious = tr
}
succeeding.Speed = speed
resp.SubsequentIpAccess = succeeding
} else {
respondWithError(fmt.Sprintf("Error retreiving geo location for subsequent ip %s", t1.ipaddress.String), writer)
return
}
}
respJson, respErr := json.Marshal(resp)
if respErr != nil {
log.Fatal(respErr)
respondWithError(respErr.Error(), writer)
return
} else {
// Setting header content-type to application/json
writer.Header().Set("Content-Type", "application/json")
writer.WriteHeader(http.StatusOK)
writer.Write(respJson)
return
}
default:
fmt.Println(err)
}
}
// this function return latitude, longitude and radius give an IPAddress. If it couldnt find the ip it returns dummy value of -10000
func GetLatitudeAndLongitude(ip string) (lat, lon float64, radius uint16) {
geoIpdb, err := geoip2.Open("databases/GeoLite2-City.mmdb")
if err != nil {
log.Fatal(err)
}
defer geoIpdb.Close()
ipAddress := net.ParseIP(ip)
record, err := geoIpdb.City(ipAddress)
if err != nil {
log.Fatal(err)
return -10000, -10000, 65535
}
lat = record.Location.Latitude
lon = record.Location.Longitude
radius = record.Location.AccuracyRadius
return
}
func respondWithError(str string, writer http.ResponseWriter) {
errResp := errResponse{str}
errRespJson, _ := json.Marshal(errResp)
writer.Header().Set("Content-Type", "application/json")
writer.WriteHeader(http.StatusBadRequest)
writer.Write(errRespJson)
}
func handleRequests() {
// Opens a db connection to login.db that keeps track of all the incoming requests
sqlDb, sqlErr := sql.Open("sqlite3", "databases/login.db")
if sqlErr != nil {
log.Fatal(sqlErr)
panic(sqlErr)
}
env := &Env{sqlDb: sqlDb}
defer env.sqlDb.Close()
http.HandleFunc("/", env.home)
log.Fatal(http.ListenAndServe(":10000", nil))
}
func main() {
//defer sqlDb.Close()
handleRequests()
}
| home | identifier_name |
main.go | package main
import (
"database/sql"
"encoding/json"
"fmt"
"github.com/oschwald/geoip2-golang"
"github.com/umahmood/haversine"
"log"
"net"
"net/http"
"reflect"
"strconv"
"time"
_ "github.com/mattn/go-sqlite3"
)
// NullString is an alias for sql.NullString data type
type NullString sql.NullString
// Scan implements the Scanner interface for NullString
func (ns *NullString) Scan(value interface{}) error {
var s sql.NullString
if err := s.Scan(value); err != nil {
return err
}
// if nil then make Valid false
if reflect.TypeOf(value) == nil {
*ns = NullString{s.String, false}
} else {
*ns = NullString{s.String, true}
}
return nil
}
type Env struct {
sqlDb *sql.DB
}
// json object to map the endpoint input data
type RequestInput struct {
Event_UUID string `json:"event_uuid"`
Username string `json:"username"`
Unix_timestamp int64 `json:"unix_timestamp"`
IP_Address string `json:"ip_address"`
}
// geo location of the IP in the request
type currentGeo struct {
Lat float64 `json:"lat"`
Lon float64 `json:"lon"`
Radius uint16 `json:"radius"`
}
// json object representing the preceeding or succeeding IP
type ipResponse struct {
Ip string `json:"ip,omitempty"'`
Speed *float32 `json:"speed,omitempty"`
Lat float64 `json:"lat,omitempty"`
Lon float64 `json:"lon,omitempty"`
Radius uint16 `json:"radius,omitempty"`
Timestamp int64 `json:"unix_timestamp,omitempty"`
}
// Error response json object
type errResponse struct {
Error string `json:"error"`
}
// Response json object
type Response struct {
CurrentGeo currentGeo `json:"currentGeo"`
TravelToCurrentGeoSuspicious *bool `json:"travelToCurrentGeoSuspicious,omitempty"`
TravelFromCurrentGeoSuspicious *bool `json:"travelFromCurrentGeoSuspicious,omitempty"`
PrecedingIpAccess ipResponse `json:"precedingIpAccess,omitempty"`
SubsequentIpAccess ipResponse `json:"subsequentIpAccess,omitempty"`
}
//var sqlDb *sql.DB
var tm *time.Time
var currHaversineCoord haversine.Coord
func (env *Env) home(writer http.ResponseWriter, req *http.Request){
decoder := json.NewDecoder(req.Body)
var input RequestInput
writer.Header().Set("Content-Type", "application/json")
err := decoder.Decode(&input)
if err != nil {
respondWithError(err.Error(), writer)
fmt.Println("handling ", req.RequestURI, ": ", err)
return
}
tm := time.Unix(input.Unix_timestamp, 0)
fmt.Println("UUID: ", input.Event_UUID)
fmt.Println("Username: ", input.Username)
fmt.Println("IP Address: ", input.IP_Address)
fmt.Println("Time: ", tm)
// Check for valid IP address
valid_ip := net.ParseIP(input.IP_Address)
if valid_ip == nil {
respondWithError("Invalid IP Address", writer)
fmt.Println("Invalid IP Address: ", input.IP_Address)
return
}
// building a temp table that appends row_number column which is used in join condition
selectStatement := fmt.Sprintf(`with new_table
AS (select uuid, username, ipaddress, date_time,ROW_NUMBER() OVER (order by date_time) row_no FROM request where username="%s")
select t.uuid, t.username, t.ipaddress, t.date_time, t1.uuid, t1.username, t1.ipaddress, t1.date_time, t2.uuid, t2.username, t2.ipaddress, t2.date_time from (select * from new_table where date_time="%s" and ipaddress="%s") as t
LEFT JOIN (select * from new_table) as t1 ON t1.row_no = t.row_no-1
LEFT JOIN (select * from new_table) as t2 ON t2.row_no = t.row_no+1;`, input.Username, strconv.FormatInt(input.Unix_timestamp, 10), input.IP_Address)
tx, err := env.sqlDb.Begin()
if err != nil {
log.Fatal(err)
panic(err)
}
// insert the request data into the database | tx.Commit()
// Defined the paramters as NullString to handle dereferencing issue with nil values returned from the database.
type sqlrow struct {
uuid NullString
username NullString
ipaddress NullString
date_time NullString
}
// variables to hold the current, preceeding and succeeding database records
var t, t1, t2 sqlrow
row := env.sqlDb.QueryRow(selectStatement)
switch err := row.Scan(&t.uuid, &t.username, &t.ipaddress, &t.date_time, &t1.uuid, &t1.username, &t1.ipaddress, &t1.date_time, &t2.uuid, &t2.username, &t2.ipaddress, &t2.date_time); err {
case sql.ErrNoRows:
fmt.Println("No rows were returned!")
case nil:
var current = currentGeo{} // json object referencing the request data geo location
var preceeding = ipResponse{} // json object referencing the preceeding immediate request w.r.t the request data
var succeeding = ipResponse{}// json object referencing the succeeding immediate request w.r.t the request data
var resp = Response{} // response json that is returned to the end user
var preceedingHaversineCoord haversine.Coord
var succeedingHaversineCoord haversine.Coord
current.Lat, current.Lon, current.Radius = GetLatitudeAndLongitude(input.IP_Address)
if current.Lat != -10000 {
resp.CurrentGeo = current
currHaversineCoord = haversine.Coord{Lat:current.Lat, Lon:current.Lon}
} else {
respondWithError(fmt.Sprintf("Error retreiving geo location for ip %s", input.IP_Address), writer)
return
}
// if t1.uuid is not null
if t1.uuid.Valid {
preceeding.Lat, preceeding.Lon, preceeding.Radius = GetLatitudeAndLongitude(t1.ipaddress.String)
if preceeding.Lat != -10000 {
// creating a bool and speed object so that the json keys can map to these objects so that they are not ignored
// when displaying 0 or nil values because of omitempty flag set on the key
tr := new(bool)
speed := new(float32)
tm1, _ := strconv.ParseInt(t1.date_time.String, 10, 64)
preceeding.Timestamp = tm1
preceeding.Ip = t1.ipaddress.String
preceedingHaversineCoord = haversine.Coord{Lat: preceeding.Lat, Lon: preceeding.Lon}
dist, _ := haversine.Distance(currHaversineCoord, preceedingHaversineCoord)
// Deducting the accuracy radius for both the locations from the previous haversine distance, considering the
// location can be anywhere within the radius. In this case i'm assuming its on the circle. Before deducting
// convert radius in kilometers to miles by multiplying with conversion factor 0.6214
finalDist := dist - (float64(preceeding.Radius) + float64(current.Radius)) * 0.6214
if finalDist < 0 {
finalDist = 0
resp.TravelToCurrentGeoSuspicious = tr
} else {
tmDiff := tm.Sub(time.Unix(tm1, 0)).Hours()
// if hour is less than 0 assume hour = 1
if tmDiff == 0 {
tmDiff = 1
}
// Setting the suspicion flag to true if the speed to travel from preceeding location to the current location is greater than 500
*speed = float32(dist / tmDiff)
if *speed > 500 {
*tr = true
}
resp.TravelToCurrentGeoSuspicious = tr
}
preceeding.Speed = speed
resp.PrecedingIpAccess = preceeding
} else {
respondWithError(fmt.Sprintf("Error retreiving geo location for preceeding ip %s", t1.ipaddress.String), writer)
return
}
}
// if t2.uuid is not null
if t2.uuid.Valid {
succeeding.Lat, succeeding.Lon, succeeding.Radius = GetLatitudeAndLongitude(t2.ipaddress.String)
if succeeding.Lat != -10000 {
tr := new(bool)
speed := new(float32)
tm1, _ := strconv.ParseInt(t2.date_time.String, 10, 64)
succeeding.Timestamp = tm1
succeeding.Ip = t2.ipaddress.String
succeedingHaversineCoord = haversine.Coord{Lat: succeeding.Lat, Lon: succeeding.Lon}
dist, _ := haversine.Distance(currHaversineCoord, succeedingHaversineCoord)
// Deducting the accuracy radius for both the locations from the previous haversine distance, considering the
// location can be anywhere within the radius. In this case i'm assuming its on the circle. Before deducting
// convert radius in kilometers to miles by multiplying with conversion factor 0.6214
finalDist := dist - (float64(succeeding.Radius) + float64(current.Radius)) * 0.6214
if finalDist < 0 {
finalDist = 0
resp.TravelFromCurrentGeoSuspicious = tr
} else {
tmDiff := time.Unix(tm1, 0).Sub(tm).Hours()
// if hour is less than 0 assume hour = 1
if tmDiff == 0 {
tmDiff = 1
}
// Setting the suspicion flag to true if the speed to travel from current location to subsequent location is greater than 500
*speed = float32(finalDist / tmDiff)
if *speed > 500 {
*tr = true
}
resp.TravelFromCurrentGeoSuspicious = tr
}
succeeding.Speed = speed
resp.SubsequentIpAccess = succeeding
} else {
respondWithError(fmt.Sprintf("Error retreiving geo location for subsequent ip %s", t1.ipaddress.String), writer)
return
}
}
respJson, respErr := json.Marshal(resp)
if respErr != nil {
log.Fatal(respErr)
respondWithError(respErr.Error(), writer)
return
} else {
// Setting header content-type to application/json
writer.Header().Set("Content-Type", "application/json")
writer.WriteHeader(http.StatusOK)
writer.Write(respJson)
return
}
default:
fmt.Println(err)
}
}
// this function return latitude, longitude and radius give an IPAddress. If it couldnt find the ip it returns dummy value of -10000
func GetLatitudeAndLongitude(ip string) (lat, lon float64, radius uint16) {
geoIpdb, err := geoip2.Open("databases/GeoLite2-City.mmdb")
if err != nil {
log.Fatal(err)
}
defer geoIpdb.Close()
ipAddress := net.ParseIP(ip)
record, err := geoIpdb.City(ipAddress)
if err != nil {
log.Fatal(err)
return -10000, -10000, 65535
}
lat = record.Location.Latitude
lon = record.Location.Longitude
radius = record.Location.AccuracyRadius
return
}
func respondWithError(str string, writer http.ResponseWriter) {
errResp := errResponse{str}
errRespJson, _ := json.Marshal(errResp)
writer.Header().Set("Content-Type", "application/json")
writer.WriteHeader(http.StatusBadRequest)
writer.Write(errRespJson)
}
func handleRequests() {
// Opens a db connection to login.db that keeps track of all the incoming requests
sqlDb, sqlErr := sql.Open("sqlite3", "databases/login.db")
if sqlErr != nil {
log.Fatal(sqlErr)
panic(sqlErr)
}
env := &Env{sqlDb: sqlDb}
defer env.sqlDb.Close()
http.HandleFunc("/", env.home)
log.Fatal(http.ListenAndServe(":10000", nil))
}
func main() {
//defer sqlDb.Close()
handleRequests()
} | _, err = env.sqlDb.Exec("insert into request(uuid, username, ipaddress, date_time) values(?, ?, ?, ?)", input.Event_UUID, input.Username, input.IP_Address, input.Unix_timestamp)
if err != nil {
log.Fatal(err)
panic(err)
} | random_line_split |
btc-arbitrage.ts | import * as GTT from 'gdax-trading-toolkit';
import { GDAXFeed } from "gdax-trading-toolkit/build/src/exchanges";
import { StreamMessage, TradeMessage } from "gdax-trading-toolkit/build/src/core";
import { LiveOrder } from "gdax-trading-toolkit/build/src/lib";
import { PlaceOrderMessage } from 'gdax-trading-toolkit/build/src/core';
import {AvailableBalance, Balances} from "./gdax-tt/src/exchanges/AuthenticatedExchangeAPI";
import { Big, BigJS } from "gdax-trading-toolkit/build/src/lib/types";
const products = ['BTC-USD', 'ETH-USD', 'ETH-BTC', 'LTC-USD', 'LTC-BTC'];
// I do this so that I can change the order of the products without consequence.
const LTC_BTC_i = products.indexOf('LTC-BTC');
const ETH_BTC_i = products.indexOf('ETH-BTC');
const LTC_USD_i = products.indexOf('LTC-USD');
const ETH_USD_i = products.indexOf('ETH-USD');
const BTC_USD_i = products.indexOf('BTC-USD');
// Create a single logger instance to pass around
const logger = GTT.utils.ConsoleLoggerFactory();
const padfloat = GTT.utils.padfloat;
const gdaxAPI = GTT.Factories.GDAX.DefaultAPI(logger);
const diff: Array<BigJS> = [Big(-100000), Big(-100000)];
let ready = [false, false]; // track if we've fired the loop for each exchange.
let latest_eth_btc_conversion = Big(0);
const latest = new Array<BigJS>(products.length);
const zero = Big(0);
GTT.Factories.GDAX.FeedFactory(logger, products).then((feed: GDAXFeed) => {
// Configure all message streams to use the same websocket feed
// Create the source message streams by creating a MessageStream for each product, using the same WS feed for each
const streams = products.map((product) => new GTT.Core.ProductFilter({ logger: logger, productId: product }));
const outStream = new Array(products.length);
for (let i = 0; i < products.length; i++) {
outStream[i] = feed.pipe(streams[i]);
latest[i] = Big(-100000);
}
for (let i = 0; i < latest.length; i++) {
outStream[i].on('data', (msg: StreamMessage) => {
if (msg.type === 'trade') {
mangeTradeMessage(i, msg as TradeMessage);
}
if(msg.type !== 'level' && msg.type !== 'myOrderPlaced' && msg.type !== 'tradeExecuted' && msg.type !== 'tradeFinalized' && msg.type !== 'trade' && msg.type !== 'snapshot' && msg.type !== 'ticker' && msg.type !== 'unknown'){
console.log('Message Type: ' + msg.type)
}
if (msg.type === 'tradeExecuted') {//|| msg.type === 'tradeFinalized'){
console.log('Message Type: ' + msg.type)
process.stdout.write("\x07");
}
});
}
});
function mangeTradeMessage(index: number, msg: TradeMessage) {
let price = Big(msg.price);
// make the two exchange rates (ETH-BTC, LTC-BTC)relative to BTC-USD
if (products[index] === 'ETH-BTC' || products[index] === 'LTC-BTC'){
if (products[index] === 'ETH-BTC' ) {
latest_eth_btc_conversion = price;
}
let btc_usd = latest[BTC_USD_i];
if (btc_usd.gt(zero)) {
latest[index] = price.times(btc_usd);
}
// once this next line has run once you are all necessary infomation is in place to trade for the respective currency.
products[index] === 'ETH-BTC' ? ready[0] = true : ready[1] = true;
} else {
latest[index] = price;
}
// calculate the diff in prices between buying currency x in BTC vs USD
diff[0] = latest[ETH_USD_i].minus(latest[ETH_BTC_i]);
diff[1] = latest[LTC_USD_i].minus( latest[LTC_BTC_i]);
// if were not ready yet dont try to process the arbitrage.
if (!ready[0] || latest[ETH_BTC_i].lt(zero) || latest[ETH_USD_i].lt(zero) || latest[BTC_USD_i].lt(zero)) {
return
}
printUnderline(latest, index);
printLatestPrices(diff);
let randNumber = getRandomInt(0, 200);
if(randNumber != 10) {
// console.log(`skip order: ${randNumber}`);
return
}
process.stdout.write('🎯');
let eth_diff = diff[0];
processArbitrage(eth_diff, latest[ETH_BTC_i], latest[ETH_USD_i], latest[BTC_USD_i], latest_eth_btc_conversion);
processArbitrage2(eth_diff, latest[ETH_BTC_i], latest[ETH_USD_i], latest[BTC_USD_i], latest_eth_btc_conversion);
}
function printLatestPrices(prices: BigJS[]) {
// const cur = ['BTC-USD', 'ETH-BTC', 'LTC-BTC'];
const diffLabels = ['ETH', 'LTC'];
const pstr = diffLabels.map((c, i) => `${c} ${padfloat(prices[i], 6, 6)}`);
const msg = pstr.join(' | ');
// process.stdout.write(msg + ' | ');
console.log(' | ' + msg);
}
function printUnderline(prices: BigJS[], changeIndex: number) {
const pstr = products.map((c, i) => `${c}${i === changeIndex ? "\x1b[36m" : ""} ${padfloat(prices[i], 6, 6)} ${i === changeIndex ? "\x1b[0m" : ""}`);
const msg = pstr.join(' | ');
process.stdout.write(msg)
}
function orderMessageWithREST(side: string, product: string, amount: string, price: string) {
const [base, quote] = product.split('-');
console.log(side + ' ' + base + ' ' + amount + ' ' + product + '@ ' + price);
const order: PlaceOrderMessage = {
time: new Date(),
type: 'placeOrder',
productId: product,
size: amount,
price: price,
side: side,
orderType: 'limit',
postOnly: true
};
gdaxAPI.placeOrder(order).then((result: LiveOrder) => {
console.log('Order executed', `Order to ${order.side} ${amount} ${base} for ${price} ${quote} placed. Result: ${result.status}`);
processOrderResult(result, order)
}).catch(logError);
return order;
}
function orderMarketMessage(side: string, product: string, amount: string, price: string) {
console.log(side + ' ' + amount + ' ' + product + '@ ' + price + ' - ' + amount);
const [base, quote] = product.split('-');
const order: PlaceOrderMessage = {
type: 'order',
time: new Date(),
productId: product,
orderType: 'market',
side: side,
size: amount
};
gdaxAPI.placeOrder(order).then((result: LiveOrder) => {
console.log('Market order executed ', `Order to ${order.side} ${amount} ${base} placed. Result: ${result.status}`);
});
return order;
}
let executedArbitrage = false;
function processArbitrage(arbitrageValue: BigJS, eth_price: BigJS, eth_price2: BigJS, btc_price: BigJS, eth_btc: BigJS) {
// arbitrageValue = eth_price2 - eth_price
let arbitrageLimit = Big(1);
let min_increment = Big(0.01);
if (arbitrageValue.lte(zero)) {return}
if (eth_btc.isZero() || eth_btc.lte(zero)) {return}
if (arbitrageValue.lt(arbitrageLimit)) {return}
// if (executedArbitrage === true) {return}
let buy_price = eth_btc;
let btc_min_amount = Big(0.01);
let amount = btc_min_amount.div(eth_btc); //0.01 / 0.04472 = 0.22361359
let sell_price = eth_price2.plus(min_increment);
let buy_price2 = btc_price.minus(min_increment);
let eth_needed = btc_min_amount.div(eth_btc); |
console.log(`arbitrageValue: ${arbitrageValue}`);
sufficentBalances(btc_needed, eth_needed, usd_needed).then(result => {
if (!result) {
console.log('Insufficient Balances');
return
}
// alert me.
process.stdout.write("\x07");
// buy eth with bitcoin
orderMessageWithREST('buy', 'ETH-BTC', amount.toFixed(8), buy_price.toFixed(5));
console.log('buy ' + amount + ' eth @ ' + buy_price);
// sell eth for usd
orderMessageWithREST('sell', 'ETH-USD', amount.toFixed(8), sell_price.toFixed(2));
console.log('sell 0.01 eth for this many usd: ' + sell_price.toFixed(2));
// buy bitcoin with usd
orderMessageWithREST('buy', 'BTC-USD', `${btc_min_amount.toFixed(2)}`, buy_price2.toFixed(2));
console.log('buy bitcoin with usd: ' + buy_price2.toFixed(2));
executedArbitrage = true
});
executedArbitrage = true
}
function processArbitrage2(arbitrageValue: BigJS, eth_price: BigJS, eth_price2: BigJS, btc_price: BigJS, eth_btc: BigJS) {
// arbitrageValue = eth_price2 - eth_price
let arbitrageLimit = Big(-1);
let min_increment = Big(0.01);
if (arbitrageValue.gte(zero)) {return}
if (eth_btc.isZero() || eth_btc.lt(zero)) {return}
if (arbitrageValue.gt(arbitrageLimit)) {return}
// if (executedArbitrage === true) {return}
// i need 0.01 BTC
// I need 0.01 / btc_eth exchange rate
// I need 0.01 * btc_usd exchange rate
let buy_price = eth_btc;
let btc_min_amount = Big(0.01);
let amount = btc_min_amount.div(eth_btc); //0.01 / 0.04472 = 0.22361359
let sell_price = eth_price2.minus(min_increment);
let buy_price2 = btc_price.plus(min_increment);
let eth_needed = amount;
let btc_needed = eth_btc.times(amount);
let usd_needed = btc_min_amount.times(btc_price.minus(min_increment));
console.log(`arbitrageValue: ${arbitrageValue}`);
sufficentBalances(btc_needed, eth_needed, usd_needed).then(result => {
if (!result) {
console.log('😩 Insufficient Balance!');
return
}
// alert me.
process.stdout.write("\x07");
// sell eth for bitcoin
orderMessageWithREST('sell', 'ETH-BTC', amount.toFixed(8), buy_price.toFixed(5));
console.log('sell ' + amount.toFixed(8) + ' eth @ ' + buy_price.toFixed(5));
// buy eth using usd
orderMessageWithREST('buy', 'ETH-USD', amount.toFixed(8), sell_price.toFixed(2));
console.log('buy 0.01 eth for this many usd: ' + sell_price.toFixed(2));
// sell bitcoin for usd
orderMessageWithREST('sell', 'BTC-USD', `${btc_min_amount}`, buy_price2.toFixed(2));
console.log('sell bitcoin with usd: ' + buy_price2.toFixed(2));
executedArbitrage = true
});
executedArbitrage = true
}
function sufficentBalances(btc_needed: BigJS, eth_needed: BigJS, usd_needed: BigJS ): Promise<boolean | void> {
// I need at least eth_usd_sell of ether to sell
// I need at least eth_btc_buy of btc to buy ether
// I need at least btc_usd_buy of usd to buy btc
console.log(`btc_needed: ${btc_needed}, eth_needed: ${eth_needed}, usd_needed: ${usd_needed}`);
return gdaxAPI.loadBalances().then((balances: Balances) => {
let result: boolean;
for (const profile in balances) {
const eth_bal: AvailableBalance = balances[profile]['ETH'];
const btc_bal: AvailableBalance = balances[profile]['BTC'];
const usd_bal: AvailableBalance = balances[profile]['USD'];
const eth_avail = Big(eth_bal.available);
const btc_avail = Big(btc_bal.available);
const usd_avail = Big(usd_bal.available);
console.log(`Enough Ethereum? ${eth_avail.gte(eth_needed)} Enough Bitcoin? ${btc_avail.gte(btc_needed)} Enough USD? ${usd_avail.gte(usd_needed)}`);
result = eth_avail.gte(eth_needed) && btc_avail.gte(btc_needed) && usd_avail.gte(usd_needed);
for (const cur in balances[profile]) {
const bal: AvailableBalance = balances[profile][cur];
console.log(`${cur}: Balance = ${bal.balance.toFixed(6)}, Available = ${bal.available.toFixed(6)}`);
}
}
return Promise.resolve(result);
}).catch(logError);
}
function processOrderResult(result: LiveOrder, order: PlaceOrderMessage) {
if (result.status === 'rejected') {
console.log('order failed - placing market message.');
orderMarketMessage(order.side, order.productId, order.size, order.price)
}
}
function logError(err: any): void {
console.log(err);
console.log(err.message, err.response ? `${err.response.status}: ${err.response.body.message}` : '');
}
function getRandomInt(min: number, max: number) {
return Math.floor(Math.random() * (max - min + 1)) + min;
} | let btc_needed = eth_btc.times(amount);
let usd_needed = btc_min_amount.times(btc_price.minus(min_increment)); | random_line_split |
btc-arbitrage.ts |
import * as GTT from 'gdax-trading-toolkit';
import { GDAXFeed } from "gdax-trading-toolkit/build/src/exchanges";
import { StreamMessage, TradeMessage } from "gdax-trading-toolkit/build/src/core";
import { LiveOrder } from "gdax-trading-toolkit/build/src/lib";
import { PlaceOrderMessage } from 'gdax-trading-toolkit/build/src/core';
import {AvailableBalance, Balances} from "./gdax-tt/src/exchanges/AuthenticatedExchangeAPI";
import { Big, BigJS } from "gdax-trading-toolkit/build/src/lib/types";
const products = ['BTC-USD', 'ETH-USD', 'ETH-BTC', 'LTC-USD', 'LTC-BTC'];
// I do this so that I can change the order of the products without consequence.
const LTC_BTC_i = products.indexOf('LTC-BTC');
const ETH_BTC_i = products.indexOf('ETH-BTC');
const LTC_USD_i = products.indexOf('LTC-USD');
const ETH_USD_i = products.indexOf('ETH-USD');
const BTC_USD_i = products.indexOf('BTC-USD');
// Create a single logger instance to pass around
const logger = GTT.utils.ConsoleLoggerFactory();
const padfloat = GTT.utils.padfloat;
const gdaxAPI = GTT.Factories.GDAX.DefaultAPI(logger);
const diff: Array<BigJS> = [Big(-100000), Big(-100000)];
let ready = [false, false]; // track if we've fired the loop for each exchange.
let latest_eth_btc_conversion = Big(0);
const latest = new Array<BigJS>(products.length);
const zero = Big(0);
GTT.Factories.GDAX.FeedFactory(logger, products).then((feed: GDAXFeed) => {
// Configure all message streams to use the same websocket feed
// Create the source message streams by creating a MessageStream for each product, using the same WS feed for each
const streams = products.map((product) => new GTT.Core.ProductFilter({ logger: logger, productId: product }));
const outStream = new Array(products.length);
for (let i = 0; i < products.length; i++) {
outStream[i] = feed.pipe(streams[i]);
latest[i] = Big(-100000);
}
for (let i = 0; i < latest.length; i++) {
outStream[i].on('data', (msg: StreamMessage) => {
if (msg.type === 'trade') {
mangeTradeMessage(i, msg as TradeMessage);
}
if(msg.type !== 'level' && msg.type !== 'myOrderPlaced' && msg.type !== 'tradeExecuted' && msg.type !== 'tradeFinalized' && msg.type !== 'trade' && msg.type !== 'snapshot' && msg.type !== 'ticker' && msg.type !== 'unknown'){
console.log('Message Type: ' + msg.type)
}
if (msg.type === 'tradeExecuted') {//|| msg.type === 'tradeFinalized'){
console.log('Message Type: ' + msg.type)
process.stdout.write("\x07");
}
});
}
});
function mangeTradeMessage(index: number, msg: TradeMessage) {
let price = Big(msg.price);
// make the two exchange rates (ETH-BTC, LTC-BTC)relative to BTC-USD
if (products[index] === 'ETH-BTC' || products[index] === 'LTC-BTC'){
if (products[index] === 'ETH-BTC' ) {
latest_eth_btc_conversion = price;
}
let btc_usd = latest[BTC_USD_i];
if (btc_usd.gt(zero)) {
latest[index] = price.times(btc_usd);
}
// once this next line has run once you are all necessary infomation is in place to trade for the respective currency.
products[index] === 'ETH-BTC' ? ready[0] = true : ready[1] = true;
} else {
latest[index] = price;
}
// calculate the diff in prices between buying currency x in BTC vs USD
diff[0] = latest[ETH_USD_i].minus(latest[ETH_BTC_i]);
diff[1] = latest[LTC_USD_i].minus( latest[LTC_BTC_i]);
// if were not ready yet dont try to process the arbitrage.
if (!ready[0] || latest[ETH_BTC_i].lt(zero) || latest[ETH_USD_i].lt(zero) || latest[BTC_USD_i].lt(zero)) {
return
}
printUnderline(latest, index);
printLatestPrices(diff);
let randNumber = getRandomInt(0, 200);
if(randNumber != 10) {
// console.log(`skip order: ${randNumber}`);
return
}
process.stdout.write('🎯');
let eth_diff = diff[0];
processArbitrage(eth_diff, latest[ETH_BTC_i], latest[ETH_USD_i], latest[BTC_USD_i], latest_eth_btc_conversion);
processArbitrage2(eth_diff, latest[ETH_BTC_i], latest[ETH_USD_i], latest[BTC_USD_i], latest_eth_btc_conversion);
}
function printLatestPrices(prices: BigJS[]) {
// const cur = ['BTC-USD', 'ETH-BTC', 'LTC-BTC'];
const diffLabels = ['ETH', 'LTC'];
const pstr = diffLabels.map((c, i) => `${c} ${padfloat(prices[i], 6, 6)}`);
const msg = pstr.join(' | ');
// process.stdout.write(msg + ' | ');
console.log(' | ' + msg);
}
function printUnderline(prices: BigJS[], changeIndex: number) {
const pstr = products.map((c, i) => `${c}${i === changeIndex ? "\x1b[36m" : ""} ${padfloat(prices[i], 6, 6)} ${i === changeIndex ? "\x1b[0m" : ""}`);
const msg = pstr.join(' | ');
process.stdout.write(msg)
}
function ord | de: string, product: string, amount: string, price: string) {
const [base, quote] = product.split('-');
console.log(side + ' ' + base + ' ' + amount + ' ' + product + '@ ' + price);
const order: PlaceOrderMessage = {
time: new Date(),
type: 'placeOrder',
productId: product,
size: amount,
price: price,
side: side,
orderType: 'limit',
postOnly: true
};
gdaxAPI.placeOrder(order).then((result: LiveOrder) => {
console.log('Order executed', `Order to ${order.side} ${amount} ${base} for ${price} ${quote} placed. Result: ${result.status}`);
processOrderResult(result, order)
}).catch(logError);
return order;
}
function orderMarketMessage(side: string, product: string, amount: string, price: string) {
console.log(side + ' ' + amount + ' ' + product + '@ ' + price + ' - ' + amount);
const [base, quote] = product.split('-');
const order: PlaceOrderMessage = {
type: 'order',
time: new Date(),
productId: product,
orderType: 'market',
side: side,
size: amount
};
gdaxAPI.placeOrder(order).then((result: LiveOrder) => {
console.log('Market order executed ', `Order to ${order.side} ${amount} ${base} placed. Result: ${result.status}`);
});
return order;
}
let executedArbitrage = false;
function processArbitrage(arbitrageValue: BigJS, eth_price: BigJS, eth_price2: BigJS, btc_price: BigJS, eth_btc: BigJS) {
// arbitrageValue = eth_price2 - eth_price
let arbitrageLimit = Big(1);
let min_increment = Big(0.01);
if (arbitrageValue.lte(zero)) {return}
if (eth_btc.isZero() || eth_btc.lte(zero)) {return}
if (arbitrageValue.lt(arbitrageLimit)) {return}
// if (executedArbitrage === true) {return}
let buy_price = eth_btc;
let btc_min_amount = Big(0.01);
let amount = btc_min_amount.div(eth_btc); //0.01 / 0.04472 = 0.22361359
let sell_price = eth_price2.plus(min_increment);
let buy_price2 = btc_price.minus(min_increment);
let eth_needed = btc_min_amount.div(eth_btc);
let btc_needed = eth_btc.times(amount);
let usd_needed = btc_min_amount.times(btc_price.minus(min_increment));
console.log(`arbitrageValue: ${arbitrageValue}`);
sufficentBalances(btc_needed, eth_needed, usd_needed).then(result => {
if (!result) {
console.log('Insufficient Balances');
return
}
// alert me.
process.stdout.write("\x07");
// buy eth with bitcoin
orderMessageWithREST('buy', 'ETH-BTC', amount.toFixed(8), buy_price.toFixed(5));
console.log('buy ' + amount + ' eth @ ' + buy_price);
// sell eth for usd
orderMessageWithREST('sell', 'ETH-USD', amount.toFixed(8), sell_price.toFixed(2));
console.log('sell 0.01 eth for this many usd: ' + sell_price.toFixed(2));
// buy bitcoin with usd
orderMessageWithREST('buy', 'BTC-USD', `${btc_min_amount.toFixed(2)}`, buy_price2.toFixed(2));
console.log('buy bitcoin with usd: ' + buy_price2.toFixed(2));
executedArbitrage = true
});
executedArbitrage = true
}
function processArbitrage2(arbitrageValue: BigJS, eth_price: BigJS, eth_price2: BigJS, btc_price: BigJS, eth_btc: BigJS) {
// arbitrageValue = eth_price2 - eth_price
let arbitrageLimit = Big(-1);
let min_increment = Big(0.01);
if (arbitrageValue.gte(zero)) {return}
if (eth_btc.isZero() || eth_btc.lt(zero)) {return}
if (arbitrageValue.gt(arbitrageLimit)) {return}
// if (executedArbitrage === true) {return}
// i need 0.01 BTC
// I need 0.01 / btc_eth exchange rate
// I need 0.01 * btc_usd exchange rate
let buy_price = eth_btc;
let btc_min_amount = Big(0.01);
let amount = btc_min_amount.div(eth_btc); //0.01 / 0.04472 = 0.22361359
let sell_price = eth_price2.minus(min_increment);
let buy_price2 = btc_price.plus(min_increment);
let eth_needed = amount;
let btc_needed = eth_btc.times(amount);
let usd_needed = btc_min_amount.times(btc_price.minus(min_increment));
console.log(`arbitrageValue: ${arbitrageValue}`);
sufficentBalances(btc_needed, eth_needed, usd_needed).then(result => {
if (!result) {
console.log('😩 Insufficient Balance!');
return
}
// alert me.
process.stdout.write("\x07");
// sell eth for bitcoin
orderMessageWithREST('sell', 'ETH-BTC', amount.toFixed(8), buy_price.toFixed(5));
console.log('sell ' + amount.toFixed(8) + ' eth @ ' + buy_price.toFixed(5));
// buy eth using usd
orderMessageWithREST('buy', 'ETH-USD', amount.toFixed(8), sell_price.toFixed(2));
console.log('buy 0.01 eth for this many usd: ' + sell_price.toFixed(2));
// sell bitcoin for usd
orderMessageWithREST('sell', 'BTC-USD', `${btc_min_amount}`, buy_price2.toFixed(2));
console.log('sell bitcoin with usd: ' + buy_price2.toFixed(2));
executedArbitrage = true
});
executedArbitrage = true
}
function sufficentBalances(btc_needed: BigJS, eth_needed: BigJS, usd_needed: BigJS ): Promise<boolean | void> {
// I need at least eth_usd_sell of ether to sell
// I need at least eth_btc_buy of btc to buy ether
// I need at least btc_usd_buy of usd to buy btc
console.log(`btc_needed: ${btc_needed}, eth_needed: ${eth_needed}, usd_needed: ${usd_needed}`);
return gdaxAPI.loadBalances().then((balances: Balances) => {
let result: boolean;
for (const profile in balances) {
const eth_bal: AvailableBalance = balances[profile]['ETH'];
const btc_bal: AvailableBalance = balances[profile]['BTC'];
const usd_bal: AvailableBalance = balances[profile]['USD'];
const eth_avail = Big(eth_bal.available);
const btc_avail = Big(btc_bal.available);
const usd_avail = Big(usd_bal.available);
console.log(`Enough Ethereum? ${eth_avail.gte(eth_needed)} Enough Bitcoin? ${btc_avail.gte(btc_needed)} Enough USD? ${usd_avail.gte(usd_needed)}`);
result = eth_avail.gte(eth_needed) && btc_avail.gte(btc_needed) && usd_avail.gte(usd_needed);
for (const cur in balances[profile]) {
const bal: AvailableBalance = balances[profile][cur];
console.log(`${cur}: Balance = ${bal.balance.toFixed(6)}, Available = ${bal.available.toFixed(6)}`);
}
}
return Promise.resolve(result);
}).catch(logError);
}
function processOrderResult(result: LiveOrder, order: PlaceOrderMessage) {
if (result.status === 'rejected') {
console.log('order failed - placing market message.');
orderMarketMessage(order.side, order.productId, order.size, order.price)
}
}
function logError(err: any): void {
console.log(err);
console.log(err.message, err.response ? `${err.response.status}: ${err.response.body.message}` : '');
}
function getRandomInt(min: number, max: number) {
return Math.floor(Math.random() * (max - min + 1)) + min;
} | erMessageWithREST(si | identifier_name |
btc-arbitrage.ts |
import * as GTT from 'gdax-trading-toolkit';
import { GDAXFeed } from "gdax-trading-toolkit/build/src/exchanges";
import { StreamMessage, TradeMessage } from "gdax-trading-toolkit/build/src/core";
import { LiveOrder } from "gdax-trading-toolkit/build/src/lib";
import { PlaceOrderMessage } from 'gdax-trading-toolkit/build/src/core';
import {AvailableBalance, Balances} from "./gdax-tt/src/exchanges/AuthenticatedExchangeAPI";
import { Big, BigJS } from "gdax-trading-toolkit/build/src/lib/types";
const products = ['BTC-USD', 'ETH-USD', 'ETH-BTC', 'LTC-USD', 'LTC-BTC'];
// I do this so that I can change the order of the products without consequence.
const LTC_BTC_i = products.indexOf('LTC-BTC');
const ETH_BTC_i = products.indexOf('ETH-BTC');
const LTC_USD_i = products.indexOf('LTC-USD');
const ETH_USD_i = products.indexOf('ETH-USD');
const BTC_USD_i = products.indexOf('BTC-USD');
// Create a single logger instance to pass around
const logger = GTT.utils.ConsoleLoggerFactory();
const padfloat = GTT.utils.padfloat;
const gdaxAPI = GTT.Factories.GDAX.DefaultAPI(logger);
const diff: Array<BigJS> = [Big(-100000), Big(-100000)];
let ready = [false, false]; // track if we've fired the loop for each exchange.
let latest_eth_btc_conversion = Big(0);
const latest = new Array<BigJS>(products.length);
const zero = Big(0);
GTT.Factories.GDAX.FeedFactory(logger, products).then((feed: GDAXFeed) => {
// Configure all message streams to use the same websocket feed
// Create the source message streams by creating a MessageStream for each product, using the same WS feed for each
const streams = products.map((product) => new GTT.Core.ProductFilter({ logger: logger, productId: product }));
const outStream = new Array(products.length);
for (let i = 0; i < products.length; i++) {
outStream[i] = feed.pipe(streams[i]);
latest[i] = Big(-100000);
}
for (let i = 0; i < latest.length; i++) {
outStream[i].on('data', (msg: StreamMessage) => {
if (msg.type === 'trade') {
mangeTradeMessage(i, msg as TradeMessage);
}
if(msg.type !== 'level' && msg.type !== 'myOrderPlaced' && msg.type !== 'tradeExecuted' && msg.type !== 'tradeFinalized' && msg.type !== 'trade' && msg.type !== 'snapshot' && msg.type !== 'ticker' && msg.type !== 'unknown'){
console.log('Message Type: ' + msg.type)
}
if (msg.type === 'tradeExecuted') {//|| msg.type === 'tradeFinalized'){
console.log('Message Type: ' + msg.type)
process.stdout.write("\x07");
}
});
}
});
function mangeTradeMessage(index: number, msg: TradeMessage) {
let price = Big(msg.price);
// make the two exchange rates (ETH-BTC, LTC-BTC)relative to BTC-USD
if (products[index] === 'ETH-BTC' || products[index] === 'LTC-BTC'){
if (products[index] === 'ETH-BTC' ) {
latest_eth_btc_conversion = price;
}
let btc_usd = latest[BTC_USD_i];
if (btc_usd.gt(zero)) {
latest[index] = price.times(btc_usd);
}
// once this next line has run once you are all necessary infomation is in place to trade for the respective currency.
products[index] === 'ETH-BTC' ? ready[0] = true : ready[1] = true;
} else {
latest[index] = price;
}
// calculate the diff in prices between buying currency x in BTC vs USD
diff[0] = latest[ETH_USD_i].minus(latest[ETH_BTC_i]);
diff[1] = latest[LTC_USD_i].minus( latest[LTC_BTC_i]);
// if were not ready yet dont try to process the arbitrage.
if (!ready[0] || latest[ETH_BTC_i].lt(zero) || latest[ETH_USD_i].lt(zero) || latest[BTC_USD_i].lt(zero)) {
return
}
printUnderline(latest, index);
printLatestPrices(diff);
let randNumber = getRandomInt(0, 200);
if(randNumber != 10) {
// console.log(`skip order: ${randNumber}`);
return
}
process.stdout.write('🎯');
let eth_diff = diff[0];
processArbitrage(eth_diff, latest[ETH_BTC_i], latest[ETH_USD_i], latest[BTC_USD_i], latest_eth_btc_conversion);
processArbitrage2(eth_diff, latest[ETH_BTC_i], latest[ETH_USD_i], latest[BTC_USD_i], latest_eth_btc_conversion);
}
function printLatestPrices(prices: BigJS[]) {
// const cur = ['BTC-USD', 'ETH-BTC', 'LTC-BTC'];
const diffLabels = ['ETH', 'LTC'];
const pstr = diffLabels.map((c, i) => `${c} ${padfloat(prices[i], 6, 6)}`);
const msg = pstr.join(' | ');
// process.stdout.write(msg + ' | ');
console.log(' | ' + msg);
}
function printUnderline(prices: BigJS[], changeIndex: number) {
const pstr = products.map((c, i) => `${c}${i === changeIndex ? "\x1b[36m" : ""} ${padfloat(prices[i], 6, 6)} ${i === changeIndex ? "\x1b[0m" : ""}`);
const msg = pstr.join(' | ');
process.stdout.write(msg)
}
function orderMessageWithREST(side: string, product: string, amount: string, price: string) {
const [base, quote] = product.split('-');
console.log(side + ' ' + base + ' ' + amount + ' ' + product + '@ ' + price);
const order: PlaceOrderMessage = {
time: new Date(),
type: 'placeOrder',
productId: product,
size: amount,
price: price,
side: side,
orderType: 'limit',
postOnly: true
};
gdaxAPI.placeOrder(order).then((result: LiveOrder) => {
console.log('Order executed', `Order to ${order.side} ${amount} ${base} for ${price} ${quote} placed. Result: ${result.status}`);
processOrderResult(result, order)
}).catch(logError);
return order;
}
function orderMarketMessage(side: string, product: string, amount: string, price: string) {
console.log(side + ' ' + amount + ' ' + product + '@ ' + price + ' - ' + amount);
const [base, quote] = product.split('-');
const order: PlaceOrderMessage = {
type: 'order',
time: new Date(),
productId: product,
orderType: 'market',
side: side,
size: amount
};
gdaxAPI.placeOrder(order).then((result: LiveOrder) => {
console.log('Market order executed ', `Order to ${order.side} ${amount} ${base} placed. Result: ${result.status}`);
});
return order;
}
let executedArbitrage = false;
function processArbitrage(arbitrageValue: BigJS, eth_price: BigJS, eth_price2: BigJS, btc_price: BigJS, eth_btc: BigJS) {
// arbitrageValue = eth_price2 - eth_price
let arbitrageLimit = Big(1);
let min_increment = Big(0.01);
if (arbitrageValue.lte(zero)) {return}
if (eth_btc.isZero() || eth_btc.lte(zero)) {return}
if (arbitrageValue.lt(arbitrageLimit)) {return}
// if (executedArbitrage === true) {return}
let buy_price = eth_btc;
let btc_min_amount = Big(0.01);
let amount = btc_min_amount.div(eth_btc); //0.01 / 0.04472 = 0.22361359
let sell_price = eth_price2.plus(min_increment);
let buy_price2 = btc_price.minus(min_increment);
let eth_needed = btc_min_amount.div(eth_btc);
let btc_needed = eth_btc.times(amount);
let usd_needed = btc_min_amount.times(btc_price.minus(min_increment));
console.log(`arbitrageValue: ${arbitrageValue}`);
sufficentBalances(btc_needed, eth_needed, usd_needed).then(result => {
if (!result) {
console.log('Insufficient Balances');
return
}
// alert me.
process.stdout.write("\x07");
// buy eth with bitcoin
orderMessageWithREST('buy', 'ETH-BTC', amount.toFixed(8), buy_price.toFixed(5));
console.log('buy ' + amount + ' eth @ ' + buy_price);
// sell eth for usd
orderMessageWithREST('sell', 'ETH-USD', amount.toFixed(8), sell_price.toFixed(2));
console.log('sell 0.01 eth for this many usd: ' + sell_price.toFixed(2));
// buy bitcoin with usd
orderMessageWithREST('buy', 'BTC-USD', `${btc_min_amount.toFixed(2)}`, buy_price2.toFixed(2));
console.log('buy bitcoin with usd: ' + buy_price2.toFixed(2));
executedArbitrage = true
});
executedArbitrage = true
}
function processArbitrage2(arbitrageValue: BigJS, eth_price: BigJS, eth_price2: BigJS, btc_price: BigJS, eth_btc: BigJS) {
// arbitrageValue = eth_price2 - eth_price
let arbitrageLimit = Big(-1);
let min_increment = Big(0.01);
if (arbitrageValue.gte(zero)) {return}
if (eth_btc.isZero() || eth_btc.lt(zero)) {re | if (arbitrageValue.gt(arbitrageLimit)) {return}
// if (executedArbitrage === true) {return}
// i need 0.01 BTC
// I need 0.01 / btc_eth exchange rate
// I need 0.01 * btc_usd exchange rate
let buy_price = eth_btc;
let btc_min_amount = Big(0.01);
let amount = btc_min_amount.div(eth_btc); //0.01 / 0.04472 = 0.22361359
let sell_price = eth_price2.minus(min_increment);
let buy_price2 = btc_price.plus(min_increment);
let eth_needed = amount;
let btc_needed = eth_btc.times(amount);
let usd_needed = btc_min_amount.times(btc_price.minus(min_increment));
console.log(`arbitrageValue: ${arbitrageValue}`);
sufficentBalances(btc_needed, eth_needed, usd_needed).then(result => {
if (!result) {
console.log('😩 Insufficient Balance!');
return
}
// alert me.
process.stdout.write("\x07");
// sell eth for bitcoin
orderMessageWithREST('sell', 'ETH-BTC', amount.toFixed(8), buy_price.toFixed(5));
console.log('sell ' + amount.toFixed(8) + ' eth @ ' + buy_price.toFixed(5));
// buy eth using usd
orderMessageWithREST('buy', 'ETH-USD', amount.toFixed(8), sell_price.toFixed(2));
console.log('buy 0.01 eth for this many usd: ' + sell_price.toFixed(2));
// sell bitcoin for usd
orderMessageWithREST('sell', 'BTC-USD', `${btc_min_amount}`, buy_price2.toFixed(2));
console.log('sell bitcoin with usd: ' + buy_price2.toFixed(2));
executedArbitrage = true
});
executedArbitrage = true
}
function sufficentBalances(btc_needed: BigJS, eth_needed: BigJS, usd_needed: BigJS ): Promise<boolean | void> {
// I need at least eth_usd_sell of ether to sell
// I need at least eth_btc_buy of btc to buy ether
// I need at least btc_usd_buy of usd to buy btc
console.log(`btc_needed: ${btc_needed}, eth_needed: ${eth_needed}, usd_needed: ${usd_needed}`);
return gdaxAPI.loadBalances().then((balances: Balances) => {
let result: boolean;
for (const profile in balances) {
const eth_bal: AvailableBalance = balances[profile]['ETH'];
const btc_bal: AvailableBalance = balances[profile]['BTC'];
const usd_bal: AvailableBalance = balances[profile]['USD'];
const eth_avail = Big(eth_bal.available);
const btc_avail = Big(btc_bal.available);
const usd_avail = Big(usd_bal.available);
console.log(`Enough Ethereum? ${eth_avail.gte(eth_needed)} Enough Bitcoin? ${btc_avail.gte(btc_needed)} Enough USD? ${usd_avail.gte(usd_needed)}`);
result = eth_avail.gte(eth_needed) && btc_avail.gte(btc_needed) && usd_avail.gte(usd_needed);
for (const cur in balances[profile]) {
const bal: AvailableBalance = balances[profile][cur];
console.log(`${cur}: Balance = ${bal.balance.toFixed(6)}, Available = ${bal.available.toFixed(6)}`);
}
}
return Promise.resolve(result);
}).catch(logError);
}
function processOrderResult(result: LiveOrder, order: PlaceOrderMessage) {
if (result.status === 'rejected') {
console.log('order failed - placing market message.');
orderMarketMessage(order.side, order.productId, order.size, order.price)
}
}
function logError(err: any): void {
console.log(err);
console.log(err.message, err.response ? `${err.response.status}: ${err.response.body.message}` : '');
}
function getRandomInt(min: number, max: number) {
return Math.floor(Math.random() * (max - min + 1)) + min;
} | turn}
| conditional_block |
btc-arbitrage.ts |
import * as GTT from 'gdax-trading-toolkit';
import { GDAXFeed } from "gdax-trading-toolkit/build/src/exchanges";
import { StreamMessage, TradeMessage } from "gdax-trading-toolkit/build/src/core";
import { LiveOrder } from "gdax-trading-toolkit/build/src/lib";
import { PlaceOrderMessage } from 'gdax-trading-toolkit/build/src/core';
import {AvailableBalance, Balances} from "./gdax-tt/src/exchanges/AuthenticatedExchangeAPI";
import { Big, BigJS } from "gdax-trading-toolkit/build/src/lib/types";
const products = ['BTC-USD', 'ETH-USD', 'ETH-BTC', 'LTC-USD', 'LTC-BTC'];
// I do this so that I can change the order of the products without consequence.
const LTC_BTC_i = products.indexOf('LTC-BTC');
const ETH_BTC_i = products.indexOf('ETH-BTC');
const LTC_USD_i = products.indexOf('LTC-USD');
const ETH_USD_i = products.indexOf('ETH-USD');
const BTC_USD_i = products.indexOf('BTC-USD');
// Create a single logger instance to pass around
const logger = GTT.utils.ConsoleLoggerFactory();
const padfloat = GTT.utils.padfloat;
const gdaxAPI = GTT.Factories.GDAX.DefaultAPI(logger);
const diff: Array<BigJS> = [Big(-100000), Big(-100000)];
let ready = [false, false]; // track if we've fired the loop for each exchange.
let latest_eth_btc_conversion = Big(0);
const latest = new Array<BigJS>(products.length);
const zero = Big(0);
GTT.Factories.GDAX.FeedFactory(logger, products).then((feed: GDAXFeed) => {
// Configure all message streams to use the same websocket feed
// Create the source message streams by creating a MessageStream for each product, using the same WS feed for each
const streams = products.map((product) => new GTT.Core.ProductFilter({ logger: logger, productId: product }));
const outStream = new Array(products.length);
for (let i = 0; i < products.length; i++) {
outStream[i] = feed.pipe(streams[i]);
latest[i] = Big(-100000);
}
for (let i = 0; i < latest.length; i++) {
outStream[i].on('data', (msg: StreamMessage) => {
if (msg.type === 'trade') {
mangeTradeMessage(i, msg as TradeMessage);
}
if(msg.type !== 'level' && msg.type !== 'myOrderPlaced' && msg.type !== 'tradeExecuted' && msg.type !== 'tradeFinalized' && msg.type !== 'trade' && msg.type !== 'snapshot' && msg.type !== 'ticker' && msg.type !== 'unknown'){
console.log('Message Type: ' + msg.type)
}
if (msg.type === 'tradeExecuted') {//|| msg.type === 'tradeFinalized'){
console.log('Message Type: ' + msg.type)
process.stdout.write("\x07");
}
});
}
});
function mangeTradeMessage(index: number, msg: TradeMessage) {
let price = Big(msg.price);
// make the two exchange rates (ETH-BTC, LTC-BTC)relative to BTC-USD
if (products[index] === 'ETH-BTC' || products[index] === 'LTC-BTC'){
if (products[index] === 'ETH-BTC' ) {
latest_eth_btc_conversion = price;
}
let btc_usd = latest[BTC_USD_i];
if (btc_usd.gt(zero)) {
latest[index] = price.times(btc_usd);
}
// once this next line has run once you are all necessary infomation is in place to trade for the respective currency.
products[index] === 'ETH-BTC' ? ready[0] = true : ready[1] = true;
} else {
latest[index] = price;
}
// calculate the diff in prices between buying currency x in BTC vs USD
diff[0] = latest[ETH_USD_i].minus(latest[ETH_BTC_i]);
diff[1] = latest[LTC_USD_i].minus( latest[LTC_BTC_i]);
// if were not ready yet dont try to process the arbitrage.
if (!ready[0] || latest[ETH_BTC_i].lt(zero) || latest[ETH_USD_i].lt(zero) || latest[BTC_USD_i].lt(zero)) {
return
}
printUnderline(latest, index);
printLatestPrices(diff);
let randNumber = getRandomInt(0, 200);
if(randNumber != 10) {
// console.log(`skip order: ${randNumber}`);
return
}
process.stdout.write('🎯');
let eth_diff = diff[0];
processArbitrage(eth_diff, latest[ETH_BTC_i], latest[ETH_USD_i], latest[BTC_USD_i], latest_eth_btc_conversion);
processArbitrage2(eth_diff, latest[ETH_BTC_i], latest[ETH_USD_i], latest[BTC_USD_i], latest_eth_btc_conversion);
}
function printLatestPrices(prices: BigJS[]) {
// const cur = ['BTC-USD', 'ETH-BTC', 'LTC-BTC'];
const diffLabels = ['ETH', 'LTC'];
const pstr = diffLabels.map((c, i) => `${c} ${padfloat(prices[i], 6, 6)}`);
const msg = pstr.join(' | ');
// process.stdout.write(msg + ' | ');
console.log(' | ' + msg);
}
function printUnderline(prices: BigJS[], changeIndex: number) {
const pstr = products.map((c, i) => `${c}${i === changeIndex ? "\x1b[36m" : ""} ${padfloat(prices[i], 6, 6)} ${i === changeIndex ? "\x1b[0m" : ""}`);
const msg = pstr.join(' | ');
process.stdout.write(msg)
}
function orderMessageWithREST(side: string, product: string, amount: string, price: string) {
const [base, quote] = product.split('-');
console.log(side + ' ' + base + ' ' + amount + ' ' + product + '@ ' + price);
const order: PlaceOrderMessage = {
time: new Date(),
type: 'placeOrder',
productId: product,
size: amount,
price: price,
side: side,
orderType: 'limit',
postOnly: true
};
gdaxAPI.placeOrder(order).then((result: LiveOrder) => {
console.log('Order executed', `Order to ${order.side} ${amount} ${base} for ${price} ${quote} placed. Result: ${result.status}`);
processOrderResult(result, order)
}).catch(logError);
return order;
}
function orderMarketMessage(side: string, product: string, amount: string, price: string) {
console.log(side + ' ' + amount + ' ' + product + '@ ' + price + ' - ' + amount);
const [base, quote] = product.split('-');
const order: PlaceOrderMessage = {
type: 'order',
time: new Date(),
productId: product,
orderType: 'market',
side: side,
size: amount
};
gdaxAPI.placeOrder(order).then((result: LiveOrder) => {
console.log('Market order executed ', `Order to ${order.side} ${amount} ${base} placed. Result: ${result.status}`);
});
return order;
}
let executedArbitrage = false;
function processArbitrage(arbitrageValue: BigJS, eth_price: BigJS, eth_price2: BigJS, btc_price: BigJS, eth_btc: BigJS) {
// arbitrageValue = eth_price2 - eth_price
let arbitrageLimit = Big(1);
let min_increment = Big(0.01);
if (arbitrageValue.lte(zero)) {return}
if (eth_btc.isZero() || eth_btc.lte(zero)) {return}
if (arbitrageValue.lt(arbitrageLimit)) {return}
// if (executedArbitrage === true) {return}
let buy_price = eth_btc;
let btc_min_amount = Big(0.01);
let amount = btc_min_amount.div(eth_btc); //0.01 / 0.04472 = 0.22361359
let sell_price = eth_price2.plus(min_increment);
let buy_price2 = btc_price.minus(min_increment);
let eth_needed = btc_min_amount.div(eth_btc);
let btc_needed = eth_btc.times(amount);
let usd_needed = btc_min_amount.times(btc_price.minus(min_increment));
console.log(`arbitrageValue: ${arbitrageValue}`);
sufficentBalances(btc_needed, eth_needed, usd_needed).then(result => {
if (!result) {
console.log('Insufficient Balances');
return
}
// alert me.
process.stdout.write("\x07");
// buy eth with bitcoin
orderMessageWithREST('buy', 'ETH-BTC', amount.toFixed(8), buy_price.toFixed(5));
console.log('buy ' + amount + ' eth @ ' + buy_price);
// sell eth for usd
orderMessageWithREST('sell', 'ETH-USD', amount.toFixed(8), sell_price.toFixed(2));
console.log('sell 0.01 eth for this many usd: ' + sell_price.toFixed(2));
// buy bitcoin with usd
orderMessageWithREST('buy', 'BTC-USD', `${btc_min_amount.toFixed(2)}`, buy_price2.toFixed(2));
console.log('buy bitcoin with usd: ' + buy_price2.toFixed(2));
executedArbitrage = true
});
executedArbitrage = true
}
function processArbitrage2(arbitrageValue: BigJS, eth_price: BigJS, eth_price2: BigJS, btc_price: BigJS, eth_btc: BigJS) {
| tion sufficentBalances(btc_needed: BigJS, eth_needed: BigJS, usd_needed: BigJS ): Promise<boolean | void> {
// I need at least eth_usd_sell of ether to sell
// I need at least eth_btc_buy of btc to buy ether
// I need at least btc_usd_buy of usd to buy btc
console.log(`btc_needed: ${btc_needed}, eth_needed: ${eth_needed}, usd_needed: ${usd_needed}`);
return gdaxAPI.loadBalances().then((balances: Balances) => {
let result: boolean;
for (const profile in balances) {
const eth_bal: AvailableBalance = balances[profile]['ETH'];
const btc_bal: AvailableBalance = balances[profile]['BTC'];
const usd_bal: AvailableBalance = balances[profile]['USD'];
const eth_avail = Big(eth_bal.available);
const btc_avail = Big(btc_bal.available);
const usd_avail = Big(usd_bal.available);
console.log(`Enough Ethereum? ${eth_avail.gte(eth_needed)} Enough Bitcoin? ${btc_avail.gte(btc_needed)} Enough USD? ${usd_avail.gte(usd_needed)}`);
result = eth_avail.gte(eth_needed) && btc_avail.gte(btc_needed) && usd_avail.gte(usd_needed);
for (const cur in balances[profile]) {
const bal: AvailableBalance = balances[profile][cur];
console.log(`${cur}: Balance = ${bal.balance.toFixed(6)}, Available = ${bal.available.toFixed(6)}`);
}
}
return Promise.resolve(result);
}).catch(logError);
}
function processOrderResult(result: LiveOrder, order: PlaceOrderMessage) {
if (result.status === 'rejected') {
console.log('order failed - placing market message.');
orderMarketMessage(order.side, order.productId, order.size, order.price)
}
}
function logError(err: any): void {
console.log(err);
console.log(err.message, err.response ? `${err.response.status}: ${err.response.body.message}` : '');
}
function getRandomInt(min: number, max: number) {
return Math.floor(Math.random() * (max - min + 1)) + min;
} | // arbitrageValue = eth_price2 - eth_price
let arbitrageLimit = Big(-1);
let min_increment = Big(0.01);
if (arbitrageValue.gte(zero)) {return}
if (eth_btc.isZero() || eth_btc.lt(zero)) {return}
if (arbitrageValue.gt(arbitrageLimit)) {return}
// if (executedArbitrage === true) {return}
// i need 0.01 BTC
// I need 0.01 / btc_eth exchange rate
// I need 0.01 * btc_usd exchange rate
let buy_price = eth_btc;
let btc_min_amount = Big(0.01);
let amount = btc_min_amount.div(eth_btc); //0.01 / 0.04472 = 0.22361359
let sell_price = eth_price2.minus(min_increment);
let buy_price2 = btc_price.plus(min_increment);
let eth_needed = amount;
let btc_needed = eth_btc.times(amount);
let usd_needed = btc_min_amount.times(btc_price.minus(min_increment));
console.log(`arbitrageValue: ${arbitrageValue}`);
sufficentBalances(btc_needed, eth_needed, usd_needed).then(result => {
if (!result) {
console.log('😩 Insufficient Balance!');
return
}
// alert me.
process.stdout.write("\x07");
// sell eth for bitcoin
orderMessageWithREST('sell', 'ETH-BTC', amount.toFixed(8), buy_price.toFixed(5));
console.log('sell ' + amount.toFixed(8) + ' eth @ ' + buy_price.toFixed(5));
// buy eth using usd
orderMessageWithREST('buy', 'ETH-USD', amount.toFixed(8), sell_price.toFixed(2));
console.log('buy 0.01 eth for this many usd: ' + sell_price.toFixed(2));
// sell bitcoin for usd
orderMessageWithREST('sell', 'BTC-USD', `${btc_min_amount}`, buy_price2.toFixed(2));
console.log('sell bitcoin with usd: ' + buy_price2.toFixed(2));
executedArbitrage = true
});
executedArbitrage = true
}
func | identifier_body |
asset.rs | // RGB20 Library: fungible digital assets for bitcoin & lightning
// Written in 2020-2021 by
// Dr. Maxim Orlovsky <orlovsky@pandoracore.com>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the MIT License
// along with this software.
// If not, see <https://opensource.org/licenses/MIT>.
//! Data structures and APIs for working with RGB20 assets
use chrono::{DateTime, NaiveDateTime, Utc};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "serde")]
use serde_with::{As, DisplayFromStr};
use std::collections::BTreeMap;
use std::convert::{TryFrom, TryInto};
use amplify::Wrapper;
use bitcoin::{OutPoint, Txid};
use lnpbp::chain::Chain;
use rgb::prelude::*;
use rgb::seal::WitnessVoutError;
use super::schema::{self, FieldType, OwnedRightType, TransitionType};
use crate::{
BurnReplace, Epoch, FractionalAmount, Issue, Nomination, PreciseAmount,
Renomination, Supply, SupplyMeasure,
};
/// Errors generated during RGB20 asset information parsing from the underlying
/// genesis or consignment data
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Display, From, Error)]
#[display(doc_comments)]
pub enum Error {
/// can't read asset data, since the provided information does not satisfy
/// schema requirements
UnsatisfiedSchemaRequirement,
/// genesis schema id does not match any of RGB20 schemata
WrongSchemaId,
/// genesis defines a seal referencing witness transaction while there
/// can't be a witness transaction for genesis
#[from(WitnessVoutError)]
GenesisSeal,
/// epoch seal definition for node {0} contains confidential data
EpochSealConfidential(NodeId),
/// nurn & replace seal definition for node {0} contains confidential data
BurnSealConfidential(NodeId),
/// inflation assignment (seal or state) for node {0} contains confidential
/// data
InflationAssignmentConfidential(NodeId),
/// Internal data inconsistency, as returned by the [`rgb::GraphAPI`]
/// methods
#[display(inner)]
#[from]
Inconsistency(rgb::ConsistencyError),
/// not of all epochs referenced in burn or burn & replace operation
/// history are known from the consignment
NotAllEpochsExposed,
}
/// Detailed RGB20 asset information
///
/// Structure presents complete set of RGB20 asset-related data which can be
/// extracted from the genesis or a consignment. It is not the source of the
/// truth, and the presence of the data in the structure does not imply their
/// validity, since the structure constructor does not validates blockchain or
/// LN-based transaction commitments or satisfaction of schema requirements.
///
/// The main reason of the structure is:
/// 1) to persist *cached* copy of the asset data without the requirement to
/// parse all stash transition each time in order to extract allocation
/// information;
/// 2) to present data from asset genesis or consignment for UI in convenient
/// form.
/// 3) to orchestrate generation of new state transitions taking into account
/// known asset information.
///
/// (1) is important for wallets, (2) is for more generic software, like
/// client-side-validated data explorers, developer & debugging tools etc and
/// (3) for asset-management software.
///
/// In both (2) and (3) case there is no need to persist the structure; genesis
/// /consignment should be persisted instead and the structure must be
/// reconstructed each time from that data upon the launch
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate", rename_all = "camelCase")
)]
#[derive(
Clone, Getters, PartialEq, Debug, Display, StrictEncode, StrictDecode,
)]
#[display("{genesis_nomination} ({id})")]
pub struct Asset {
/// Bech32-representation of the asset genesis
genesis: String,
/// Asset ID, which is equal to Contract ID and genesis ID
///
/// It can be used as a unique primary kep
id: ContractId,
/// Chain with which the asset is issued
#[cfg_attr(feature = "serde", serde(with = "As::<DisplayFromStr>"))]
chain: Chain,
/// Asset creation data
date: DateTime<Utc>,
/// Names assigned to the asset at the issue time
///
/// Nomination is a set of asset metadata assigned by the issuer, which
/// define core asset properties: ticker, name, decimal precision, contract
/// text.
#[cfg_attr(feature = "serde", serde(flatten))]
genesis_nomination: Nomination,
/// List of all known renominations.
///
/// This list does not include genesis nomination, which can be accessed
/// via [`Asset::genesis_nomination`]. The last item in the list contains
/// [`Asset::last_nomination`] data as a part of its renomination operation
/// details.
known_renominations: Vec<Renomination>,
/// All issues known from the available data (stash and/or provided
/// consignments)
///
/// Primary issue is always the first one; the rest are provided in
/// arbitrary order
known_issues: Vec<Issue>,
/// Single-use-seal controlling the beginning of the first epoch
epoch_opening_seal: Option<OutPoint>,
/// Burn & replacement epochs, organized according to the witness txid.
///
/// Witness transaction must be mined for the epoch to be real.
/// One of the inputs of this transaction MUST spend UTXO defined as a
/// seal closed by this epoch ([`Epoch::closes`])
epochs: Vec<Epoch>,
/// Detailed information about the asset supply (aggregated from the issue
/// and burning information kept inside the epochs data)
#[cfg_attr(feature = "serde", serde(flatten))]
supply: Supply,
/// Specifies outpoints controlling certain amounts of assets.
///
/// NB: Information here does not imply that the outputs are owned by the
/// current user or the owning transactions are mined/exist; this must be
/// determined by the wallet and depends on specific medium (blockchain,
/// LN)
known_allocations: Vec<Allocation>,
}
impl Asset {
/// Current asset ticker
///
/// Current value determined by the last known renomination operation –
/// or, by the genesis nomination, if no renomination are known
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn ticker(&self) -> &str {
&self.active_nomination().ticker()
}
/// Current asset name
///
/// Current value determined by the last known renomination operation –
/// or, by the genesis nomination, if no renomination are known
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn name(&self) -> &str {
&self.active_nomination().ticker()
}
/// Current version of the asset contract, represented in Ricardian form
///
/// Current value determined by the last known renomination operation –
/// or, by the genesis nomination, if no renomination are known
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn ricardian_contract(&self) -> &str {
&self.active_nomination().ticker()
}
/// Current decimal precision of the asset value
///
/// Current value determined by the last known renomination operation –
/// or, by the genesis nomination, if no renomination are known
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn decimal_precision(&self) -> u8 {
*self.active_nomination().decimal_precision()
}
/// Returns information (in atomic value units) about specific measure of
/// the asset supply, if known, or `None` otherwise
pub fn precise_supply(
&self,
measure: SupplyMeasure,
) -> Option<AtomicValue> {
Some(match measure {
SupplyMeasure::KnownCirculating => *self.supply.known_circulating(),
SupplyMeasure::TotalCirculating => {
match self.supply.total_circulating() {
None => return None,
Some(supply) => supply,
}
}
SupplyMeasure::IssueLimit => *self.supply.issue_limit(),
})
}
/// Returns information in form of a float number about specific measure of
/// the asset supply, if known, or [`f64::NAN`] otherwise
pub fn fractional_supply(
&self,
measure: SupplyMeasure,
) -> FractionalAmount {
let value = match self.precise_supply(measure) {
None => return FractionalAmount::NAN,
Some(supply) => supply,
};
PreciseAmount::transmutate_into(value, self.decimal_precision())
}
/// Nomination resulting from the last known renomination
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn last_nomination(&self) -> Option<&Nomination> {
self.known_renominations.last().map(|o| o.nomination())
}
/// Active nomination data.
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn active_nomination(&self) -> &Nomination {
self.last_nomination().unwrap_or(&self.genesis_nomination)
}
/// Returns sum of all known allocations, in atomic value units
#[inline]
pub fn known_value(&self) -> AtomicValue {
self.known_allocations.iter().map(Allocation::value).sum()
}
/// Returns sum of known allocation after applying `filter` function. Useful
/// for filtering UTXOs owned by the current wallet. The returned value is
/// in atomic units (see [`AtomicValue`]
pub fn known_filtered_value<F>(&self, filter: F) -> AtomicValue
where
F: Fn(&Allocation) -> bool,
{
self.known_allocations
.iter()
.filter(|allocation| filter(*allocation))
.map(Allocation::value)
.sum()
}
/// Returns sum of all known allocations, as a floating point value (see
/// [`FractionalAmount`])
pub fn known_amount(&self) -> FractionalAmount {
self.known_allocations
.iter()
.map(Allocation::value)
.map(|atomic| {
PreciseAmount::transmutate_into(
atomic,
self.decimal_precision(),
)
})
.sum()
}
/// Returns sum of known allocation after applying `filter` function. Useful
/// for filtering UTXOs owned by the current wallet. The returned amount is
/// a floating point number (see [`FractionalAmount`])
pub fn known_filtered_amount<F>(&self, filter: F) -> FractionalAmount
where
F: Fn(&Allocation) -> bool,
{
self.known_allocations
.iter()
.filter(|allocation| filter(*allocation))
.map(Allocation::value)
.map(|atomic| {
PreciseAmount::transmutate_into(
atomic,
self.decimal_precision(),
)
})
.sum()
}
/// Returns outpoints which when spent may indicate inflation happening
/// up to specific amount.
///
/// NB: Not of all inflation controlling points may be known
pub fn known_inflation(
&self,
) -> BTreeMap<OutPoint, (AtomicValue, Vec<u16>)> {
let mut inflation_list = BTreeMap::new();
for issue in self.known_issues() {
for (seal, data) in issue.inflation_assignments() {
inflation_list.insert(*seal, data.clone());
}
}
inflation_list
}
#[inline]
/// Lists all known allocations for the given bitcoin transaction
/// [`OutPoint`]
pub fn outpoint_allocations(&self, outpoint: OutPoint) -> Vec<Allocation> {
self.known_allocations
.iter()
.filter(|a| *a.outpoint() == outpoint)
.copied()
.collect()
}
/// Adds new allocation to the list of known allocations
pub fn add_allocation(
&mut self,
outpoint: OutPoint,
node_id: NodeId,
index: u16,
value: value::Revealed,
) -> bool {
let new_allocation = Allocation::with(node_id, index, outpoint, value);
if !self.known_allocations.contains(&new_allocation) {
self.known_allocations.push(new_allocation);
true
} else {
false
}
}
/// Adds issue to the list of known issues. This is an internal function
/// which should not be used directly; instead construct the asset structure
/// from the [`Consignment`] using [`Asset::try_from`] method.
fn add_issue(
&mut self,
consignment: &Consignment,
transition: &Transition,
witness: Txid,
) -> Result<(), Error> {
let closed_seals = consignment.seals_closed_with(
transition.node_id(),
OwnedRightType::Inflation,
witness,
)?;
let issue = Issue::with(self.id, closed_seals, transition, witness)?;
self.known_issues.push(issue);
Ok(())
}
/// Adds an epoch to the list of known epochs. This is an internal function
/// which should not be used directly; instead construct the asset structure
/// from the [`Consignment`] using [`Asset::try_from`] method.
fn add_epoch(
&mut self,
consignment: &Consignment,
transition: &Transition,
no: usize,
operations: Vec<BurnReplace>,
witness: Txid,
) -> Result<(), Error> {
let id = transition.node_id();
// 1. It must correctly extend known state, i.e. close UTXO for a seal
// defined by a state transition already belonging to the asset
let closed_seal = consignment
.seals_closed_with(id, OwnedRightType::OpenEpoch, witness)?
.into_iter()
.next()
.ok_or(Error::Inconsistency(
rgb::ConsistencyError::NoSealsClosed(
OwnedRightType::OpenEpoch.into(),
id,
),
))?;
let epoch = Epoch::with(
self.id,
no,
closed_seal,
transition,
operations,
witness,
)?;
self.epochs.insert(no as usize, epoch);
Ok(())
}
}
impl TryFrom<Genesis> for Asset {
type Error = Error;
fn try_from(genesis: Genesis) -> Result<Self, Self::Error> {
if genesis.schema_id() != schema::schema().schema_id() {
Err(Error::WrongSchemaId)?;
}
let genesis_meta = genesis.metadata(); | let supply = *genesis_meta
.u64(FieldType::IssuedSupply)
.first()
.ok_or(Error::UnsatisfiedSchemaRequirement)?;
let mut issue_limit = 0;
// Check if issue limit can be known
for assignment in
genesis.owned_rights_by_type(OwnedRightType::Inflation.into())
{
for state in assignment.to_data_assignment_vec() {
match state {
Assignment::Revealed { assigned_state, .. }
| Assignment::ConfidentialSeal { assigned_state, .. } => {
if issue_limit < core::u64::MAX {
issue_limit += assigned_state
.u64()
.ok_or(Error::UnsatisfiedSchemaRequirement)?
};
}
_ => issue_limit = core::u64::MAX,
}
}
}
let epoch_opening_seal = genesis
.revealed_seals_by_type(OwnedRightType::OpenEpoch.into())
.map_err(|_| Error::EpochSealConfidential(genesis.node_id()))?
.first()
.copied()
.map(|seal| seal.try_into())
.transpose()?;
let issue = Issue::try_from(&genesis)?;
let node_id = NodeId::from_inner(genesis.contract_id().into_inner());
let mut known_allocations = Vec::<Allocation>::new();
for assignment in
genesis.owned_rights_by_type(OwnedRightType::Assets.into())
{
assignment
.to_value_assignment_vec()
.into_iter()
.enumerate()
.for_each(|(index, assign)| {
if let Assignment::Revealed {
seal_definition:
seal::Revealed::TxOutpoint(outpoint_reveal),
assigned_state,
} = assign
{
known_allocations.push(Allocation::with(
node_id,
index as u16,
outpoint_reveal.into(),
assigned_state,
))
}
});
}
Ok(Asset {
genesis: genesis.to_string(),
id: genesis.contract_id(),
chain: genesis.chain().clone(),
genesis_nomination: Nomination::try_from(&genesis)?,
supply: Supply::with(supply, None, issue_limit),
date: DateTime::from_utc(
NaiveDateTime::from_timestamp(
*genesis_meta
.i64(FieldType::Timestamp)
.first()
.ok_or(Error::UnsatisfiedSchemaRequirement)?,
0,
),
Utc,
),
known_renominations: empty!(),
known_issues: vec![issue],
// we assume that each genesis allocation with revealed amount
// and known seal (they are always revealed together) belongs to us
known_allocations,
epochs: empty!(),
epoch_opening_seal,
})
}
}
impl TryFrom<Consignment> for Asset {
type Error = Error;
fn try_from(consignment: Consignment) -> Result<Self, Self::Error> {
// 1. Parse genesis
let mut asset: Asset = consignment.genesis.clone().try_into()?;
// 2. Parse burn & replacement operations
let mut epoch_operations: BTreeMap<NodeId, Vec<BurnReplace>> = empty!();
for transition in consignment.endpoint_transitions_by_types(&[
TransitionType::BurnAndReplace.into(),
TransitionType::Burn.into(),
]) {
let mut ops = consignment
.chain_iter(
transition.node_id(),
OwnedRightType::BurnReplace.into(),
)
.collect::<Vec<_>>();
ops.reverse();
if let Some((epoch, _)) = ops.pop() {
let epoch_id = epoch.node_id();
let mut operations = vec![];
for (no, (transition, witness)) in ops.into_iter().enumerate() {
let id = transition.node_id();
let closed_seal = consignment
.seals_closed_with(
id,
OwnedRightType::BurnReplace,
witness,
)?
.into_iter()
.next()
.ok_or(Error::Inconsistency(
rgb::ConsistencyError::NoSealsClosed(
OwnedRightType::BurnReplace.into(),
id,
),
))?;
operations.push(BurnReplace::with(
asset.id,
epoch_id,
no,
closed_seal,
transition,
witness,
)?)
}
epoch_operations.insert(epoch_id, operations);
}
}
// 3. Parse epochs
let epoch_transition = consignment
.endpoint_transitions_by_type(TransitionType::Epoch.into())
.into_iter()
.next();
if let Some(epoch_transition) = epoch_transition {
let mut chain = consignment
.chain_iter(
epoch_transition.node_id(),
OwnedRightType::OpenEpoch.into(),
)
.collect::<Vec<_>>();
chain.reverse();
for (no, (transition, witness)) in chain.into_iter().enumerate() {
let epoch_id = transition.node_id();
asset.add_epoch(
&consignment,
transition,
no,
epoch_operations.remove(&epoch_id).unwrap_or_default(),
witness,
)?;
}
}
if !epoch_operations.is_empty() {
return Err(Error::NotAllEpochsExposed);
}
// 4. Parse secondary issues
for (transition, witness) in
consignment.transition_witness_iter(&[TransitionType::Issue.into()])
{
asset.add_issue(&consignment, transition, witness)?;
}
// 5. Parse renominations
// TODO: Parse renominations
// 6. Parse allocations
for (transaction, witness) in consignment.transition_witness_iter(&[
TransitionType::Issue.into(),
TransitionType::BurnAndReplace.into(),
TransitionType::Transfer.into(),
TransitionType::RightsSplit.into(),
]) {
for assignments in
transaction.owned_rights_by_type(OwnedRightType::Assets.into())
{
for (index, (seal, state)) in assignments
.to_value_assignment_vec()
.into_iter()
.filter_map(Assignment::into_revealed)
.enumerate()
{
asset.add_allocation(
seal.to_outpoint_reveal(witness).into(),
transaction.node_id(),
index as u16,
state,
);
}
}
}
Ok(asset)
}
} | random_line_split | |
asset.rs | // RGB20 Library: fungible digital assets for bitcoin & lightning
// Written in 2020-2021 by
// Dr. Maxim Orlovsky <orlovsky@pandoracore.com>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the MIT License
// along with this software.
// If not, see <https://opensource.org/licenses/MIT>.
//! Data structures and APIs for working with RGB20 assets
use chrono::{DateTime, NaiveDateTime, Utc};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "serde")]
use serde_with::{As, DisplayFromStr};
use std::collections::BTreeMap;
use std::convert::{TryFrom, TryInto};
use amplify::Wrapper;
use bitcoin::{OutPoint, Txid};
use lnpbp::chain::Chain;
use rgb::prelude::*;
use rgb::seal::WitnessVoutError;
use super::schema::{self, FieldType, OwnedRightType, TransitionType};
use crate::{
BurnReplace, Epoch, FractionalAmount, Issue, Nomination, PreciseAmount,
Renomination, Supply, SupplyMeasure,
};
/// Errors generated during RGB20 asset information parsing from the underlying
/// genesis or consignment data
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Display, From, Error)]
#[display(doc_comments)]
pub enum Error {
/// can't read asset data, since the provided information does not satisfy
/// schema requirements
UnsatisfiedSchemaRequirement,
/// genesis schema id does not match any of RGB20 schemata
WrongSchemaId,
/// genesis defines a seal referencing witness transaction while there
/// can't be a witness transaction for genesis
#[from(WitnessVoutError)]
GenesisSeal,
/// epoch seal definition for node {0} contains confidential data
EpochSealConfidential(NodeId),
/// nurn & replace seal definition for node {0} contains confidential data
BurnSealConfidential(NodeId),
/// inflation assignment (seal or state) for node {0} contains confidential
/// data
InflationAssignmentConfidential(NodeId),
/// Internal data inconsistency, as returned by the [`rgb::GraphAPI`]
/// methods
#[display(inner)]
#[from]
Inconsistency(rgb::ConsistencyError),
/// not of all epochs referenced in burn or burn & replace operation
/// history are known from the consignment
NotAllEpochsExposed,
}
/// Detailed RGB20 asset information
///
/// Structure presents complete set of RGB20 asset-related data which can be
/// extracted from the genesis or a consignment. It is not the source of the
/// truth, and the presence of the data in the structure does not imply their
/// validity, since the structure constructor does not validates blockchain or
/// LN-based transaction commitments or satisfaction of schema requirements.
///
/// The main reason of the structure is:
/// 1) to persist *cached* copy of the asset data without the requirement to
/// parse all stash transition each time in order to extract allocation
/// information;
/// 2) to present data from asset genesis or consignment for UI in convenient
/// form.
/// 3) to orchestrate generation of new state transitions taking into account
/// known asset information.
///
/// (1) is important for wallets, (2) is for more generic software, like
/// client-side-validated data explorers, developer & debugging tools etc and
/// (3) for asset-management software.
///
/// In both (2) and (3) case there is no need to persist the structure; genesis
/// /consignment should be persisted instead and the structure must be
/// reconstructed each time from that data upon the launch
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate", rename_all = "camelCase")
)]
#[derive(
Clone, Getters, PartialEq, Debug, Display, StrictEncode, StrictDecode,
)]
#[display("{genesis_nomination} ({id})")]
pub struct Asset {
/// Bech32-representation of the asset genesis
genesis: String,
/// Asset ID, which is equal to Contract ID and genesis ID
///
/// It can be used as a unique primary kep
id: ContractId,
/// Chain with which the asset is issued
#[cfg_attr(feature = "serde", serde(with = "As::<DisplayFromStr>"))]
chain: Chain,
/// Asset creation data
date: DateTime<Utc>,
/// Names assigned to the asset at the issue time
///
/// Nomination is a set of asset metadata assigned by the issuer, which
/// define core asset properties: ticker, name, decimal precision, contract
/// text.
#[cfg_attr(feature = "serde", serde(flatten))]
genesis_nomination: Nomination,
/// List of all known renominations.
///
/// This list does not include genesis nomination, which can be accessed
/// via [`Asset::genesis_nomination`]. The last item in the list contains
/// [`Asset::last_nomination`] data as a part of its renomination operation
/// details.
known_renominations: Vec<Renomination>,
/// All issues known from the available data (stash and/or provided
/// consignments)
///
/// Primary issue is always the first one; the rest are provided in
/// arbitrary order
known_issues: Vec<Issue>,
/// Single-use-seal controlling the beginning of the first epoch
epoch_opening_seal: Option<OutPoint>,
/// Burn & replacement epochs, organized according to the witness txid.
///
/// Witness transaction must be mined for the epoch to be real.
/// One of the inputs of this transaction MUST spend UTXO defined as a
/// seal closed by this epoch ([`Epoch::closes`])
epochs: Vec<Epoch>,
/// Detailed information about the asset supply (aggregated from the issue
/// and burning information kept inside the epochs data)
#[cfg_attr(feature = "serde", serde(flatten))]
supply: Supply,
/// Specifies outpoints controlling certain amounts of assets.
///
/// NB: Information here does not imply that the outputs are owned by the
/// current user or the owning transactions are mined/exist; this must be
/// determined by the wallet and depends on specific medium (blockchain,
/// LN)
known_allocations: Vec<Allocation>,
}
impl Asset {
/// Current asset ticker
///
/// Current value determined by the last known renomination operation –
/// or, by the genesis nomination, if no renomination are known
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn ticker(&self) -> &str {
&self.active_nomination().ticker()
}
/// Current asset name
///
/// Current value determined by the last known renomination operation –
/// or, by the genesis nomination, if no renomination are known
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn name | lf) -> &str {
&self.active_nomination().ticker()
}
/// Current version of the asset contract, represented in Ricardian form
///
/// Current value determined by the last known renomination operation –
/// or, by the genesis nomination, if no renomination are known
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn ricardian_contract(&self) -> &str {
&self.active_nomination().ticker()
}
/// Current decimal precision of the asset value
///
/// Current value determined by the last known renomination operation –
/// or, by the genesis nomination, if no renomination are known
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn decimal_precision(&self) -> u8 {
*self.active_nomination().decimal_precision()
}
/// Returns information (in atomic value units) about specific measure of
/// the asset supply, if known, or `None` otherwise
pub fn precise_supply(
&self,
measure: SupplyMeasure,
) -> Option<AtomicValue> {
Some(match measure {
SupplyMeasure::KnownCirculating => *self.supply.known_circulating(),
SupplyMeasure::TotalCirculating => {
match self.supply.total_circulating() {
None => return None,
Some(supply) => supply,
}
}
SupplyMeasure::IssueLimit => *self.supply.issue_limit(),
})
}
/// Returns information in form of a float number about specific measure of
/// the asset supply, if known, or [`f64::NAN`] otherwise
pub fn fractional_supply(
&self,
measure: SupplyMeasure,
) -> FractionalAmount {
let value = match self.precise_supply(measure) {
None => return FractionalAmount::NAN,
Some(supply) => supply,
};
PreciseAmount::transmutate_into(value, self.decimal_precision())
}
/// Nomination resulting from the last known renomination
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn last_nomination(&self) -> Option<&Nomination> {
self.known_renominations.last().map(|o| o.nomination())
}
/// Active nomination data.
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn active_nomination(&self) -> &Nomination {
self.last_nomination().unwrap_or(&self.genesis_nomination)
}
/// Returns sum of all known allocations, in atomic value units
#[inline]
pub fn known_value(&self) -> AtomicValue {
self.known_allocations.iter().map(Allocation::value).sum()
}
/// Returns sum of known allocation after applying `filter` function. Useful
/// for filtering UTXOs owned by the current wallet. The returned value is
/// in atomic units (see [`AtomicValue`]
pub fn known_filtered_value<F>(&self, filter: F) -> AtomicValue
where
F: Fn(&Allocation) -> bool,
{
self.known_allocations
.iter()
.filter(|allocation| filter(*allocation))
.map(Allocation::value)
.sum()
}
/// Returns sum of all known allocations, as a floating point value (see
/// [`FractionalAmount`])
pub fn known_amount(&self) -> FractionalAmount {
self.known_allocations
.iter()
.map(Allocation::value)
.map(|atomic| {
PreciseAmount::transmutate_into(
atomic,
self.decimal_precision(),
)
})
.sum()
}
/// Returns sum of known allocation after applying `filter` function. Useful
/// for filtering UTXOs owned by the current wallet. The returned amount is
/// a floating point number (see [`FractionalAmount`])
pub fn known_filtered_amount<F>(&self, filter: F) -> FractionalAmount
where
F: Fn(&Allocation) -> bool,
{
self.known_allocations
.iter()
.filter(|allocation| filter(*allocation))
.map(Allocation::value)
.map(|atomic| {
PreciseAmount::transmutate_into(
atomic,
self.decimal_precision(),
)
})
.sum()
}
/// Returns outpoints which when spent may indicate inflation happening
/// up to specific amount.
///
/// NB: Not of all inflation controlling points may be known
pub fn known_inflation(
&self,
) -> BTreeMap<OutPoint, (AtomicValue, Vec<u16>)> {
let mut inflation_list = BTreeMap::new();
for issue in self.known_issues() {
for (seal, data) in issue.inflation_assignments() {
inflation_list.insert(*seal, data.clone());
}
}
inflation_list
}
#[inline]
/// Lists all known allocations for the given bitcoin transaction
/// [`OutPoint`]
pub fn outpoint_allocations(&self, outpoint: OutPoint) -> Vec<Allocation> {
self.known_allocations
.iter()
.filter(|a| *a.outpoint() == outpoint)
.copied()
.collect()
}
/// Adds new allocation to the list of known allocations
pub fn add_allocation(
&mut self,
outpoint: OutPoint,
node_id: NodeId,
index: u16,
value: value::Revealed,
) -> bool {
let new_allocation = Allocation::with(node_id, index, outpoint, value);
if !self.known_allocations.contains(&new_allocation) {
self.known_allocations.push(new_allocation);
true
} else {
false
}
}
/// Adds issue to the list of known issues. This is an internal function
/// which should not be used directly; instead construct the asset structure
/// from the [`Consignment`] using [`Asset::try_from`] method.
fn add_issue(
&mut self,
consignment: &Consignment,
transition: &Transition,
witness: Txid,
) -> Result<(), Error> {
let closed_seals = consignment.seals_closed_with(
transition.node_id(),
OwnedRightType::Inflation,
witness,
)?;
let issue = Issue::with(self.id, closed_seals, transition, witness)?;
self.known_issues.push(issue);
Ok(())
}
/// Adds an epoch to the list of known epochs. This is an internal function
/// which should not be used directly; instead construct the asset structure
/// from the [`Consignment`] using [`Asset::try_from`] method.
fn add_epoch(
&mut self,
consignment: &Consignment,
transition: &Transition,
no: usize,
operations: Vec<BurnReplace>,
witness: Txid,
) -> Result<(), Error> {
let id = transition.node_id();
// 1. It must correctly extend known state, i.e. close UTXO for a seal
// defined by a state transition already belonging to the asset
let closed_seal = consignment
.seals_closed_with(id, OwnedRightType::OpenEpoch, witness)?
.into_iter()
.next()
.ok_or(Error::Inconsistency(
rgb::ConsistencyError::NoSealsClosed(
OwnedRightType::OpenEpoch.into(),
id,
),
))?;
let epoch = Epoch::with(
self.id,
no,
closed_seal,
transition,
operations,
witness,
)?;
self.epochs.insert(no as usize, epoch);
Ok(())
}
}
impl TryFrom<Genesis> for Asset {
type Error = Error;
fn try_from(genesis: Genesis) -> Result<Self, Self::Error> {
if genesis.schema_id() != schema::schema().schema_id() {
Err(Error::WrongSchemaId)?;
}
let genesis_meta = genesis.metadata();
let supply = *genesis_meta
.u64(FieldType::IssuedSupply)
.first()
.ok_or(Error::UnsatisfiedSchemaRequirement)?;
let mut issue_limit = 0;
// Check if issue limit can be known
for assignment in
genesis.owned_rights_by_type(OwnedRightType::Inflation.into())
{
for state in assignment.to_data_assignment_vec() {
match state {
Assignment::Revealed { assigned_state, .. }
| Assignment::ConfidentialSeal { assigned_state, .. } => {
if issue_limit < core::u64::MAX {
issue_limit += assigned_state
.u64()
.ok_or(Error::UnsatisfiedSchemaRequirement)?
};
}
_ => issue_limit = core::u64::MAX,
}
}
}
let epoch_opening_seal = genesis
.revealed_seals_by_type(OwnedRightType::OpenEpoch.into())
.map_err(|_| Error::EpochSealConfidential(genesis.node_id()))?
.first()
.copied()
.map(|seal| seal.try_into())
.transpose()?;
let issue = Issue::try_from(&genesis)?;
let node_id = NodeId::from_inner(genesis.contract_id().into_inner());
let mut known_allocations = Vec::<Allocation>::new();
for assignment in
genesis.owned_rights_by_type(OwnedRightType::Assets.into())
{
assignment
.to_value_assignment_vec()
.into_iter()
.enumerate()
.for_each(|(index, assign)| {
if let Assignment::Revealed {
seal_definition:
seal::Revealed::TxOutpoint(outpoint_reveal),
assigned_state,
} = assign
{
known_allocations.push(Allocation::with(
node_id,
index as u16,
outpoint_reveal.into(),
assigned_state,
))
}
});
}
Ok(Asset {
genesis: genesis.to_string(),
id: genesis.contract_id(),
chain: genesis.chain().clone(),
genesis_nomination: Nomination::try_from(&genesis)?,
supply: Supply::with(supply, None, issue_limit),
date: DateTime::from_utc(
NaiveDateTime::from_timestamp(
*genesis_meta
.i64(FieldType::Timestamp)
.first()
.ok_or(Error::UnsatisfiedSchemaRequirement)?,
0,
),
Utc,
),
known_renominations: empty!(),
known_issues: vec![issue],
// we assume that each genesis allocation with revealed amount
// and known seal (they are always revealed together) belongs to us
known_allocations,
epochs: empty!(),
epoch_opening_seal,
})
}
}
impl TryFrom<Consignment> for Asset {
type Error = Error;
fn try_from(consignment: Consignment) -> Result<Self, Self::Error> {
// 1. Parse genesis
let mut asset: Asset = consignment.genesis.clone().try_into()?;
// 2. Parse burn & replacement operations
let mut epoch_operations: BTreeMap<NodeId, Vec<BurnReplace>> = empty!();
for transition in consignment.endpoint_transitions_by_types(&[
TransitionType::BurnAndReplace.into(),
TransitionType::Burn.into(),
]) {
let mut ops = consignment
.chain_iter(
transition.node_id(),
OwnedRightType::BurnReplace.into(),
)
.collect::<Vec<_>>();
ops.reverse();
if let Some((epoch, _)) = ops.pop() {
let epoch_id = epoch.node_id();
let mut operations = vec![];
for (no, (transition, witness)) in ops.into_iter().enumerate() {
let id = transition.node_id();
let closed_seal = consignment
.seals_closed_with(
id,
OwnedRightType::BurnReplace,
witness,
)?
.into_iter()
.next()
.ok_or(Error::Inconsistency(
rgb::ConsistencyError::NoSealsClosed(
OwnedRightType::BurnReplace.into(),
id,
),
))?;
operations.push(BurnReplace::with(
asset.id,
epoch_id,
no,
closed_seal,
transition,
witness,
)?)
}
epoch_operations.insert(epoch_id, operations);
}
}
// 3. Parse epochs
let epoch_transition = consignment
.endpoint_transitions_by_type(TransitionType::Epoch.into())
.into_iter()
.next();
if let Some(epoch_transition) = epoch_transition {
let mut chain = consignment
.chain_iter(
epoch_transition.node_id(),
OwnedRightType::OpenEpoch.into(),
)
.collect::<Vec<_>>();
chain.reverse();
for (no, (transition, witness)) in chain.into_iter().enumerate() {
let epoch_id = transition.node_id();
asset.add_epoch(
&consignment,
transition,
no,
epoch_operations.remove(&epoch_id).unwrap_or_default(),
witness,
)?;
}
}
if !epoch_operations.is_empty() {
return Err(Error::NotAllEpochsExposed);
}
// 4. Parse secondary issues
for (transition, witness) in
consignment.transition_witness_iter(&[TransitionType::Issue.into()])
{
asset.add_issue(&consignment, transition, witness)?;
}
// 5. Parse renominations
// TODO: Parse renominations
// 6. Parse allocations
for (transaction, witness) in consignment.transition_witness_iter(&[
TransitionType::Issue.into(),
TransitionType::BurnAndReplace.into(),
TransitionType::Transfer.into(),
TransitionType::RightsSplit.into(),
]) {
for assignments in
transaction.owned_rights_by_type(OwnedRightType::Assets.into())
{
for (index, (seal, state)) in assignments
.to_value_assignment_vec()
.into_iter()
.filter_map(Assignment::into_revealed)
.enumerate()
{
asset.add_allocation(
seal.to_outpoint_reveal(witness).into(),
transaction.node_id(),
index as u16,
state,
);
}
}
}
Ok(asset)
}
}
| (&se | identifier_name |
asset.rs | // RGB20 Library: fungible digital assets for bitcoin & lightning
// Written in 2020-2021 by
// Dr. Maxim Orlovsky <orlovsky@pandoracore.com>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the MIT License
// along with this software.
// If not, see <https://opensource.org/licenses/MIT>.
//! Data structures and APIs for working with RGB20 assets
use chrono::{DateTime, NaiveDateTime, Utc};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "serde")]
use serde_with::{As, DisplayFromStr};
use std::collections::BTreeMap;
use std::convert::{TryFrom, TryInto};
use amplify::Wrapper;
use bitcoin::{OutPoint, Txid};
use lnpbp::chain::Chain;
use rgb::prelude::*;
use rgb::seal::WitnessVoutError;
use super::schema::{self, FieldType, OwnedRightType, TransitionType};
use crate::{
BurnReplace, Epoch, FractionalAmount, Issue, Nomination, PreciseAmount,
Renomination, Supply, SupplyMeasure,
};
/// Errors generated during RGB20 asset information parsing from the underlying
/// genesis or consignment data
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Display, From, Error)]
#[display(doc_comments)]
pub enum Error {
/// can't read asset data, since the provided information does not satisfy
/// schema requirements
UnsatisfiedSchemaRequirement,
/// genesis schema id does not match any of RGB20 schemata
WrongSchemaId,
/// genesis defines a seal referencing witness transaction while there
/// can't be a witness transaction for genesis
#[from(WitnessVoutError)]
GenesisSeal,
/// epoch seal definition for node {0} contains confidential data
EpochSealConfidential(NodeId),
/// nurn & replace seal definition for node {0} contains confidential data
BurnSealConfidential(NodeId),
/// inflation assignment (seal or state) for node {0} contains confidential
/// data
InflationAssignmentConfidential(NodeId),
/// Internal data inconsistency, as returned by the [`rgb::GraphAPI`]
/// methods
#[display(inner)]
#[from]
Inconsistency(rgb::ConsistencyError),
/// not of all epochs referenced in burn or burn & replace operation
/// history are known from the consignment
NotAllEpochsExposed,
}
/// Detailed RGB20 asset information
///
/// Structure presents complete set of RGB20 asset-related data which can be
/// extracted from the genesis or a consignment. It is not the source of the
/// truth, and the presence of the data in the structure does not imply their
/// validity, since the structure constructor does not validates blockchain or
/// LN-based transaction commitments or satisfaction of schema requirements.
///
/// The main reason of the structure is:
/// 1) to persist *cached* copy of the asset data without the requirement to
/// parse all stash transition each time in order to extract allocation
/// information;
/// 2) to present data from asset genesis or consignment for UI in convenient
/// form.
/// 3) to orchestrate generation of new state transitions taking into account
/// known asset information.
///
/// (1) is important for wallets, (2) is for more generic software, like
/// client-side-validated data explorers, developer & debugging tools etc and
/// (3) for asset-management software.
///
/// In both (2) and (3) case there is no need to persist the structure; genesis
/// /consignment should be persisted instead and the structure must be
/// reconstructed each time from that data upon the launch
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_crate", rename_all = "camelCase")
)]
#[derive(
Clone, Getters, PartialEq, Debug, Display, StrictEncode, StrictDecode,
)]
#[display("{genesis_nomination} ({id})")]
pub struct Asset {
/// Bech32-representation of the asset genesis
genesis: String,
/// Asset ID, which is equal to Contract ID and genesis ID
///
/// It can be used as a unique primary kep
id: ContractId,
/// Chain with which the asset is issued
#[cfg_attr(feature = "serde", serde(with = "As::<DisplayFromStr>"))]
chain: Chain,
/// Asset creation data
date: DateTime<Utc>,
/// Names assigned to the asset at the issue time
///
/// Nomination is a set of asset metadata assigned by the issuer, which
/// define core asset properties: ticker, name, decimal precision, contract
/// text.
#[cfg_attr(feature = "serde", serde(flatten))]
genesis_nomination: Nomination,
/// List of all known renominations.
///
/// This list does not include genesis nomination, which can be accessed
/// via [`Asset::genesis_nomination`]. The last item in the list contains
/// [`Asset::last_nomination`] data as a part of its renomination operation
/// details.
known_renominations: Vec<Renomination>,
/// All issues known from the available data (stash and/or provided
/// consignments)
///
/// Primary issue is always the first one; the rest are provided in
/// arbitrary order
known_issues: Vec<Issue>,
/// Single-use-seal controlling the beginning of the first epoch
epoch_opening_seal: Option<OutPoint>,
/// Burn & replacement epochs, organized according to the witness txid.
///
/// Witness transaction must be mined for the epoch to be real.
/// One of the inputs of this transaction MUST spend UTXO defined as a
/// seal closed by this epoch ([`Epoch::closes`])
epochs: Vec<Epoch>,
/// Detailed information about the asset supply (aggregated from the issue
/// and burning information kept inside the epochs data)
#[cfg_attr(feature = "serde", serde(flatten))]
supply: Supply,
/// Specifies outpoints controlling certain amounts of assets.
///
/// NB: Information here does not imply that the outputs are owned by the
/// current user or the owning transactions are mined/exist; this must be
/// determined by the wallet and depends on specific medium (blockchain,
/// LN)
known_allocations: Vec<Allocation>,
}
impl Asset {
/// Current asset ticker
///
/// Current value determined by the last known renomination operation –
/// or, by the genesis nomination, if no renomination are known
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn ticker(&self) -> &str {
&self.active_nomination().ticker()
}
/// Current asset name
///
/// Current value determined by the last known renomination operation –
/// or, by the genesis nomination, if no renomination are known
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn name(&self) -> &str {
&self.active_nomination().ticker()
}
/// Current version of the asset contract, represented in Ricardian form
///
/// Current value determined by the last known renomination operation –
/// or, by the genesis nomination, if no renomination are known
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn ricardian_contract(&self) -> &str {
&self.active_nomination().ticker()
}
/// Current decimal precision of the asset value
///
/// Current value determined by the last known renomination operation –
/// or, by the genesis nomination, if no renomination are known
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn decimal_precision(&self) -> u8 {
*self.active_nomination().decimal_precision()
}
/// Returns information (in atomic value units) about specific measure of
/// the asset supply, if known, or `None` otherwise
pub fn precise_supply(
&self,
measure: SupplyMeasure,
) -> Option<AtomicValue> {
Some(match measure {
SupplyMeasure::KnownCirculating => *self.supply.known_circulating(),
SupplyMeasure::TotalCirculating => {
match self.supply.total_circulating() {
None => return None,
Some(supply) => supply,
}
}
SupplyMeasure::IssueLimit => *self.supply.issue_limit(),
})
}
/// Returns information in form of a float number about specific measure of
/// the asset supply, if known, or [`f64::NAN`] otherwise
pub fn fractional_supply(
&self,
measure: SupplyMeasure,
) -> FractionalAmount {
let value = match self.precise_supply(measure) {
None => return FractionalAmount::NAN,
Some(supply) => supply,
};
PreciseAmount::transmutate_into(value, self.decimal_precision())
}
/// Nomination resulting from the last known renomination
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn last_nomination(&self) -> Option<&Nomination> {
self.known_renominations.last().map(|o| o.nomination())
}
/// Active nomination data.
///
/// NB: the returned result may not match the current valid nomination,
/// since if there were further not yet known nominations the value
/// returned by this function will not match the valid data
#[inline]
pub fn active_nomination(&self) -> &Nomination {
| / Returns sum of all known allocations, in atomic value units
#[inline]
pub fn known_value(&self) -> AtomicValue {
self.known_allocations.iter().map(Allocation::value).sum()
}
/// Returns sum of known allocation after applying `filter` function. Useful
/// for filtering UTXOs owned by the current wallet. The returned value is
/// in atomic units (see [`AtomicValue`]
pub fn known_filtered_value<F>(&self, filter: F) -> AtomicValue
where
F: Fn(&Allocation) -> bool,
{
self.known_allocations
.iter()
.filter(|allocation| filter(*allocation))
.map(Allocation::value)
.sum()
}
/// Returns sum of all known allocations, as a floating point value (see
/// [`FractionalAmount`])
pub fn known_amount(&self) -> FractionalAmount {
self.known_allocations
.iter()
.map(Allocation::value)
.map(|atomic| {
PreciseAmount::transmutate_into(
atomic,
self.decimal_precision(),
)
})
.sum()
}
/// Returns sum of known allocation after applying `filter` function. Useful
/// for filtering UTXOs owned by the current wallet. The returned amount is
/// a floating point number (see [`FractionalAmount`])
pub fn known_filtered_amount<F>(&self, filter: F) -> FractionalAmount
where
F: Fn(&Allocation) -> bool,
{
self.known_allocations
.iter()
.filter(|allocation| filter(*allocation))
.map(Allocation::value)
.map(|atomic| {
PreciseAmount::transmutate_into(
atomic,
self.decimal_precision(),
)
})
.sum()
}
/// Returns outpoints which when spent may indicate inflation happening
/// up to specific amount.
///
/// NB: Not of all inflation controlling points may be known
pub fn known_inflation(
&self,
) -> BTreeMap<OutPoint, (AtomicValue, Vec<u16>)> {
let mut inflation_list = BTreeMap::new();
for issue in self.known_issues() {
for (seal, data) in issue.inflation_assignments() {
inflation_list.insert(*seal, data.clone());
}
}
inflation_list
}
#[inline]
/// Lists all known allocations for the given bitcoin transaction
/// [`OutPoint`]
pub fn outpoint_allocations(&self, outpoint: OutPoint) -> Vec<Allocation> {
self.known_allocations
.iter()
.filter(|a| *a.outpoint() == outpoint)
.copied()
.collect()
}
/// Adds new allocation to the list of known allocations
pub fn add_allocation(
&mut self,
outpoint: OutPoint,
node_id: NodeId,
index: u16,
value: value::Revealed,
) -> bool {
let new_allocation = Allocation::with(node_id, index, outpoint, value);
if !self.known_allocations.contains(&new_allocation) {
self.known_allocations.push(new_allocation);
true
} else {
false
}
}
/// Adds issue to the list of known issues. This is an internal function
/// which should not be used directly; instead construct the asset structure
/// from the [`Consignment`] using [`Asset::try_from`] method.
fn add_issue(
&mut self,
consignment: &Consignment,
transition: &Transition,
witness: Txid,
) -> Result<(), Error> {
let closed_seals = consignment.seals_closed_with(
transition.node_id(),
OwnedRightType::Inflation,
witness,
)?;
let issue = Issue::with(self.id, closed_seals, transition, witness)?;
self.known_issues.push(issue);
Ok(())
}
/// Adds an epoch to the list of known epochs. This is an internal function
/// which should not be used directly; instead construct the asset structure
/// from the [`Consignment`] using [`Asset::try_from`] method.
fn add_epoch(
&mut self,
consignment: &Consignment,
transition: &Transition,
no: usize,
operations: Vec<BurnReplace>,
witness: Txid,
) -> Result<(), Error> {
let id = transition.node_id();
// 1. It must correctly extend known state, i.e. close UTXO for a seal
// defined by a state transition already belonging to the asset
let closed_seal = consignment
.seals_closed_with(id, OwnedRightType::OpenEpoch, witness)?
.into_iter()
.next()
.ok_or(Error::Inconsistency(
rgb::ConsistencyError::NoSealsClosed(
OwnedRightType::OpenEpoch.into(),
id,
),
))?;
let epoch = Epoch::with(
self.id,
no,
closed_seal,
transition,
operations,
witness,
)?;
self.epochs.insert(no as usize, epoch);
Ok(())
}
}
impl TryFrom<Genesis> for Asset {
type Error = Error;
fn try_from(genesis: Genesis) -> Result<Self, Self::Error> {
if genesis.schema_id() != schema::schema().schema_id() {
Err(Error::WrongSchemaId)?;
}
let genesis_meta = genesis.metadata();
let supply = *genesis_meta
.u64(FieldType::IssuedSupply)
.first()
.ok_or(Error::UnsatisfiedSchemaRequirement)?;
let mut issue_limit = 0;
// Check if issue limit can be known
for assignment in
genesis.owned_rights_by_type(OwnedRightType::Inflation.into())
{
for state in assignment.to_data_assignment_vec() {
match state {
Assignment::Revealed { assigned_state, .. }
| Assignment::ConfidentialSeal { assigned_state, .. } => {
if issue_limit < core::u64::MAX {
issue_limit += assigned_state
.u64()
.ok_or(Error::UnsatisfiedSchemaRequirement)?
};
}
_ => issue_limit = core::u64::MAX,
}
}
}
let epoch_opening_seal = genesis
.revealed_seals_by_type(OwnedRightType::OpenEpoch.into())
.map_err(|_| Error::EpochSealConfidential(genesis.node_id()))?
.first()
.copied()
.map(|seal| seal.try_into())
.transpose()?;
let issue = Issue::try_from(&genesis)?;
let node_id = NodeId::from_inner(genesis.contract_id().into_inner());
let mut known_allocations = Vec::<Allocation>::new();
for assignment in
genesis.owned_rights_by_type(OwnedRightType::Assets.into())
{
assignment
.to_value_assignment_vec()
.into_iter()
.enumerate()
.for_each(|(index, assign)| {
if let Assignment::Revealed {
seal_definition:
seal::Revealed::TxOutpoint(outpoint_reveal),
assigned_state,
} = assign
{
known_allocations.push(Allocation::with(
node_id,
index as u16,
outpoint_reveal.into(),
assigned_state,
))
}
});
}
Ok(Asset {
genesis: genesis.to_string(),
id: genesis.contract_id(),
chain: genesis.chain().clone(),
genesis_nomination: Nomination::try_from(&genesis)?,
supply: Supply::with(supply, None, issue_limit),
date: DateTime::from_utc(
NaiveDateTime::from_timestamp(
*genesis_meta
.i64(FieldType::Timestamp)
.first()
.ok_or(Error::UnsatisfiedSchemaRequirement)?,
0,
),
Utc,
),
known_renominations: empty!(),
known_issues: vec![issue],
// we assume that each genesis allocation with revealed amount
// and known seal (they are always revealed together) belongs to us
known_allocations,
epochs: empty!(),
epoch_opening_seal,
})
}
}
impl TryFrom<Consignment> for Asset {
type Error = Error;
fn try_from(consignment: Consignment) -> Result<Self, Self::Error> {
// 1. Parse genesis
let mut asset: Asset = consignment.genesis.clone().try_into()?;
// 2. Parse burn & replacement operations
let mut epoch_operations: BTreeMap<NodeId, Vec<BurnReplace>> = empty!();
for transition in consignment.endpoint_transitions_by_types(&[
TransitionType::BurnAndReplace.into(),
TransitionType::Burn.into(),
]) {
let mut ops = consignment
.chain_iter(
transition.node_id(),
OwnedRightType::BurnReplace.into(),
)
.collect::<Vec<_>>();
ops.reverse();
if let Some((epoch, _)) = ops.pop() {
let epoch_id = epoch.node_id();
let mut operations = vec![];
for (no, (transition, witness)) in ops.into_iter().enumerate() {
let id = transition.node_id();
let closed_seal = consignment
.seals_closed_with(
id,
OwnedRightType::BurnReplace,
witness,
)?
.into_iter()
.next()
.ok_or(Error::Inconsistency(
rgb::ConsistencyError::NoSealsClosed(
OwnedRightType::BurnReplace.into(),
id,
),
))?;
operations.push(BurnReplace::with(
asset.id,
epoch_id,
no,
closed_seal,
transition,
witness,
)?)
}
epoch_operations.insert(epoch_id, operations);
}
}
// 3. Parse epochs
let epoch_transition = consignment
.endpoint_transitions_by_type(TransitionType::Epoch.into())
.into_iter()
.next();
if let Some(epoch_transition) = epoch_transition {
let mut chain = consignment
.chain_iter(
epoch_transition.node_id(),
OwnedRightType::OpenEpoch.into(),
)
.collect::<Vec<_>>();
chain.reverse();
for (no, (transition, witness)) in chain.into_iter().enumerate() {
let epoch_id = transition.node_id();
asset.add_epoch(
&consignment,
transition,
no,
epoch_operations.remove(&epoch_id).unwrap_or_default(),
witness,
)?;
}
}
if !epoch_operations.is_empty() {
return Err(Error::NotAllEpochsExposed);
}
// 4. Parse secondary issues
for (transition, witness) in
consignment.transition_witness_iter(&[TransitionType::Issue.into()])
{
asset.add_issue(&consignment, transition, witness)?;
}
// 5. Parse renominations
// TODO: Parse renominations
// 6. Parse allocations
for (transaction, witness) in consignment.transition_witness_iter(&[
TransitionType::Issue.into(),
TransitionType::BurnAndReplace.into(),
TransitionType::Transfer.into(),
TransitionType::RightsSplit.into(),
]) {
for assignments in
transaction.owned_rights_by_type(OwnedRightType::Assets.into())
{
for (index, (seal, state)) in assignments
.to_value_assignment_vec()
.into_iter()
.filter_map(Assignment::into_revealed)
.enumerate()
{
asset.add_allocation(
seal.to_outpoint_reveal(witness).into(),
transaction.node_id(),
index as u16,
state,
);
}
}
}
Ok(asset)
}
}
| self.last_nomination().unwrap_or(&self.genesis_nomination)
}
// | identifier_body |
csv_import_accts_txns.rs | // Copyright (c) 2017-2020, scoobybejesus
// Redistributions must include the license: https://github.com/scoobybejesus/cryptools/blob/master/LEGAL.txt
use std::error::Error;
use std::process;
use std::fs::File;
use std::cell::{RefCell};
use std::collections::{HashMap};
use std::path::PathBuf;
use chrono::NaiveDate;
use decimal::d128;
use crate::transaction::{Transaction, ActionRecord};
use crate::account::{Account, RawAccount};
use crate::decimal_utils::{round_d128_1e8};
pub fn import_from_csv(
import_file_path: PathBuf,
iso_date_style: bool,
separator: &String,
raw_acct_map: &mut HashMap<u16, RawAccount>,
acct_map: &mut HashMap<u16, Account>,
action_records: &mut HashMap<u32, ActionRecord>,
transactions_map: &mut HashMap<u32, Transaction>,
) -> Result<(), Box<dyn Error>> {
let file = match File::open(import_file_path) {
Ok(x) => {
// println!("\nCSV ledger file opened successfully.\n");
x
},
Err(e) => {
println!("Invalid import_file_path");
eprintln!("System error: {}", e);
std::process::exit(1);
}
};
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_reader(file);
import_accounts(&mut rdr, raw_acct_map, acct_map)?;
import_transactions(
&mut rdr,
iso_date_style,
&separator,
action_records,
transactions_map,
)?;
Ok(())
}
fn import_accounts(
rdr: &mut csv::Reader<File>,
raw_acct_map: &mut HashMap<u16, RawAccount>,
acct_map: &mut HashMap<u16, Account>,
) -> Result<(), Box<dyn Error>> {
let header1 = rdr.headers()?.clone(); // account_num
let mut header2: csv::StringRecord = csv::StringRecord::new(); // name
let mut header3: csv::StringRecord = csv::StringRecord::new(); // ticker
let header4: csv::StringRecord; // is_margin
// Account Creation loop. With rdr.has_headers() set to true above, the first record here is the second row of the CSV
for result in rdr.records() {
// This initial iteration through records will break after the 4th row, after accounts have been created
let record = result?;
if header2.len() == 0 {
header2 = record.clone();
continue // After header2 is set, continue to next record
}
else if header3.len() == 0 {
header3 = record.clone();
continue // After header3 is set, continue to next record
}
else {
header4 = record.clone();
// println!("Assigned last header, record: {:?}", record);
| // A StringRecord doesn't accept the same range indexing needed below, so a Vec of Strings will be used
let headerstrings: Vec<String> = header1.into_iter().map(|field| field.to_string()).collect();
let acct_num_warn = "Transactions will not import correctly if account numbers in the CSV import file aren't
ordered chronologically (i.e., beginning in column 4 - the 1st account column - the value should be 1.
The next column's value should be 2, then 3, etc, until the final account).";
// Header row variables have been set. It's now time to set up the accounts.
println!("\nCreating accounts...");
let length = &headerstrings.len();
for (idx, field) in headerstrings[3..*length].iter().enumerate() {
// Parse account numbers.
let account_num = field.trim().parse::<u16>().expect(&format!("Header row account number should parse into u16: {}", field));
// For now, their columns aren't remembered. Instead, they must have a particular index. 0th idx is the 1st account, and so on.
if account_num != ((idx + 1) as u16) {
println!("FATAL: CSV Import: {}", acct_num_warn);
std::process::exit(1);
}
let ind = idx+3; // Add three because the idx skips the first three 'key' columns
let name:String = header2[ind].trim().to_string();
let ticker:String = header3[ind].trim().to_string(); // no .to_uppercase() b/c margin...
let margin_string = &header4.clone()[ind];
let is_margin:bool = match margin_string.to_lowercase().trim() {
"no" | "non" | "false" => false,
"yes" | "margin" | "true" => true,
_ => {
println!("\n FATAL: CSV Import: Couldn't parse margin value for account {} {} \n",account_num, name);
process::exit(1)
}
};
let just_account: RawAccount = RawAccount {
account_num,
name,
ticker,
is_margin,
};
raw_acct_map.insert(account_num, just_account);
let account: Account = Account {
raw_key: account_num,
list_of_lots: RefCell::new([].to_vec())
};
acct_map.insert(account_num, account);
}
break // This `break` exits this scope so `accounts` can be accessed in `import_transactions`. The rdr stays put.
}
};
Ok(())
}
fn import_transactions(
rdr: &mut csv::Reader<File>,
iso_date_style: bool,
separator: &String,
action_records: &mut HashMap<u32, ActionRecord>,
txns_map: &mut HashMap<u32, Transaction>,
) -> Result<(), Box<dyn Error>> {
let mut this_tx_number = 0;
let mut this_ar_number = 0;
let mut changed_action_records = 0;
let mut changed_txn_num = Vec::new();
println!("Creating transactions...");
for result in rdr.records() {
// rdr's cursor is at row 5, which is the first transaction row
let record = result?;
this_tx_number += 1;
// First, initialize metadata fields.
let mut this_tx_date: &str = "";
let mut this_proceeds: &str;
let mut this_memo: &str = "";
let mut proceeds_parsed = 0f32;
// Next, create action_records.
let mut action_records_map_keys_vec: Vec<u32> = Vec::with_capacity(2);
let mut outgoing_ar: Option<ActionRecord> = None;
let mut incoming_ar: Option<ActionRecord> = None;
let mut outgoing_ar_num: Option<u32> = None;
let mut incoming_ar_num: Option<u32> = None;
for (idx, field) in record.iter().enumerate() {
// Set metadata fields on first three fields.
if idx == 0 { this_tx_date = field; }
else if idx == 1 {
let no_comma_string = field.replace(",", "");
proceeds_parsed = no_comma_string.parse::<f32>()?;
}
else if idx == 2 { this_memo = field; }
// Check for empty strings. If not empty, it's a value for an action_record.
else if field != "" {
this_ar_number += 1;
let ind = idx; // starts at 3, which is the fourth field
let acct_idx = ind - 2; // acct_num and acct_key would be idx + 1, so subtract 2 from ind to get 1
let account_key = acct_idx as u16;
let amount_str = field.replace(",", "");
let mut amount = amount_str.parse::<d128>().unwrap();
// When parsing to a d128, it won't error; rather it'll return a NaN. It must now check for NaN,
// and, if found, attempt to sanitize. These checks will convert accounting/comma format to the expected
// format by removing parentheses from negatives and adding a minus sign in the front. It will also
// attempt to remove empty spaces and currency symbols or designations (e.g. $ or USD).
if amount.is_nan() {
let b = sanitize_string_for_d128_parsing_basic(field).parse::<d128>().unwrap();
amount = b;
};
if amount.is_nan() {
let c = sanitize_string_for_d128_parsing_full(field).parse::<d128>().unwrap();
amount = c;
};
if amount.is_nan() {
println!("FATAL: Couldn't convert amount to d128 for transaction:\n{:#?}", record);
std::process::exit(1);
}
let amount_rounded = round_d128_1e8(&amount);
if amount != amount_rounded { changed_action_records += 1; changed_txn_num.push(this_tx_number); }
let action_record = ActionRecord {
account_key,
amount: amount_rounded,
tx_key: this_tx_number,
self_ar_key: this_ar_number,
movements: RefCell::new([].to_vec()),
};
if amount > d128!(0.0) {
incoming_ar = Some(action_record);
incoming_ar_num = Some(this_ar_number);
action_records_map_keys_vec.push(incoming_ar_num.unwrap())
} else {
outgoing_ar = Some(action_record);
outgoing_ar_num = Some(this_ar_number);
action_records_map_keys_vec.insert(0, outgoing_ar_num.unwrap())
};
}
}
// Note: the rust Trait implementation of FromStr for f32 is capable of parsing:
// '3.14'
// '-3.14'
// '2.5E10', or equivalently, '2.5e10'
// '2.5E-10'
// '5.'
// '.5', or, equivalently, '0.5'
// 'inf', '-inf', 'NaN'
// Notable observations from the list:
// (a) scientific notation is accepted
// (b) accounting format (numbers in parens representing negative numbers) is not explicitly accepted
// Additionally notable:
// (a) the decimal separator must be a period
// (b) there can be no commas
// (c) there can be no currency info ($120 or 120USD, etc. will fail to parse)
// In summary, it appears to only allow: (i) numeric chars, (ii) a period, and/or (iii) a minus sign
//
// The Decimal::d128 implementation of FromStr calls into a C library, and that lib hasn't
// been reviewed (by me), but it is thought/hoped to follow similar parsing conventions,
// though there's no guarantee. Nevertheless, the above notes *appear* to hold true for d128.
fn sanitize_string_for_d128_parsing_basic(field: &str) -> String {
// First, remove commas.
let no_comma_string = field.replace(",", "");
let almost_done = no_comma_string.replace(" ", "");
// Next, if ASCII (better be), check for accounting formatting
if almost_done.is_ascii() {
if almost_done.as_bytes()[0] == "(".as_bytes()[0] {
let half_fixed = almost_done.replace("(", "-");
let negative_with_minus = half_fixed.replace(")", "");
return negative_with_minus
}
}
almost_done
}
fn sanitize_string_for_d128_parsing_full(field: &str) -> String {
let mut near_done = "".to_string();
// First, remove commas.
let no_comma_string = field.replace(",", "");
let almost_done = no_comma_string.replace(" ", "");
// Next, if ASCII (better be), check for accounting formating
if almost_done.is_ascii() {
if almost_done.as_bytes()[0] == "(".as_bytes()[0] {
let half_fixed = almost_done.replace("(", "-");
let negative_with_minus = half_fixed.replace(")", "");
near_done = negative_with_minus;
} else {
near_done = almost_done;
}
} else {
near_done = almost_done;
}
// Strip non-numeric and non-period characters
let all_done: String = near_done.chars()
.filter(|x|
x.is_numeric() |
(x == &(".".as_bytes()[0] as char)) |
(x == &("-".as_bytes()[0] as char)))
.collect();
all_done
}
if let Some(incoming_ar) = incoming_ar {
let x = incoming_ar_num.unwrap();
action_records.insert(x, incoming_ar);
}
if let Some(outgoing_ar) = outgoing_ar {
let y = outgoing_ar_num.unwrap();
action_records.insert(y, outgoing_ar);
}
let format_yy: String;
let format_yyyy: String;
if iso_date_style {
format_yyyy = "%Y".to_owned() + separator + "%m" + separator + "%d";
format_yy = "%y".to_owned() + separator + "%m" + separator + "%d";
} else {
format_yyyy = "%m".to_owned() + separator + "%d" + separator + "%Y";
format_yy = "%m".to_owned() + separator + "%d" + separator + "%y";
}
let tx_date = NaiveDate::parse_from_str(this_tx_date, &format_yy)
.unwrap_or_else(|_| NaiveDate::parse_from_str(this_tx_date, &format_yyyy)
.expect("
FATAL: Transaction date parsing failed. You must tell the program the format of the date in your CSV Input File. The date separator \
is expected to be a hyphen. The dating format is expected to be \"American\" (%m-%d-%y), not ISO 8601 (%y-%m-%d). You may set different \
date format options via command line flag, environment variable or .env file. Perhaps first run with `--help` or see `.env.example.`\n")
);
let transaction = Transaction {
tx_number: this_tx_number,
date_as_string: this_tx_date.to_string(),
date: tx_date,
user_memo: this_memo.to_string(),
proceeds: proceeds_parsed,
action_record_idx_vec: action_records_map_keys_vec,
};
txns_map.insert(this_tx_number, transaction);
};
if changed_action_records > 0 {
println!(" Changed actionrecord amounts due to rounding precision: {}. Changed txn numbers: {:?}.", changed_action_records, changed_txn_num);
}
Ok(())
} | random_line_split | |
csv_import_accts_txns.rs | // Copyright (c) 2017-2020, scoobybejesus
// Redistributions must include the license: https://github.com/scoobybejesus/cryptools/blob/master/LEGAL.txt
use std::error::Error;
use std::process;
use std::fs::File;
use std::cell::{RefCell};
use std::collections::{HashMap};
use std::path::PathBuf;
use chrono::NaiveDate;
use decimal::d128;
use crate::transaction::{Transaction, ActionRecord};
use crate::account::{Account, RawAccount};
use crate::decimal_utils::{round_d128_1e8};
pub fn import_from_csv(
import_file_path: PathBuf,
iso_date_style: bool,
separator: &String,
raw_acct_map: &mut HashMap<u16, RawAccount>,
acct_map: &mut HashMap<u16, Account>,
action_records: &mut HashMap<u32, ActionRecord>,
transactions_map: &mut HashMap<u32, Transaction>,
) -> Result<(), Box<dyn Error>> {
let file = match File::open(import_file_path) {
Ok(x) => {
// println!("\nCSV ledger file opened successfully.\n");
x
},
Err(e) => {
println!("Invalid import_file_path");
eprintln!("System error: {}", e);
std::process::exit(1);
}
};
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_reader(file);
import_accounts(&mut rdr, raw_acct_map, acct_map)?;
import_transactions(
&mut rdr,
iso_date_style,
&separator,
action_records,
transactions_map,
)?;
Ok(())
}
fn import_accounts(
rdr: &mut csv::Reader<File>,
raw_acct_map: &mut HashMap<u16, RawAccount>,
acct_map: &mut HashMap<u16, Account>,
) -> Result<(), Box<dyn Error>> {
let header1 = rdr.headers()?.clone(); // account_num
let mut header2: csv::StringRecord = csv::StringRecord::new(); // name
let mut header3: csv::StringRecord = csv::StringRecord::new(); // ticker
let header4: csv::StringRecord; // is_margin
// Account Creation loop. With rdr.has_headers() set to true above, the first record here is the second row of the CSV
for result in rdr.records() {
// This initial iteration through records will break after the 4th row, after accounts have been created
let record = result?;
if header2.len() == 0 {
header2 = record.clone();
continue // After header2 is set, continue to next record
}
else if header3.len() == 0 {
header3 = record.clone();
continue // After header3 is set, continue to next record
}
else {
header4 = record.clone();
// println!("Assigned last header, record: {:?}", record);
// A StringRecord doesn't accept the same range indexing needed below, so a Vec of Strings will be used
let headerstrings: Vec<String> = header1.into_iter().map(|field| field.to_string()).collect();
let acct_num_warn = "Transactions will not import correctly if account numbers in the CSV import file aren't
ordered chronologically (i.e., beginning in column 4 - the 1st account column - the value should be 1.
The next column's value should be 2, then 3, etc, until the final account).";
// Header row variables have been set. It's now time to set up the accounts.
println!("\nCreating accounts...");
let length = &headerstrings.len();
for (idx, field) in headerstrings[3..*length].iter().enumerate() {
// Parse account numbers.
let account_num = field.trim().parse::<u16>().expect(&format!("Header row account number should parse into u16: {}", field));
// For now, their columns aren't remembered. Instead, they must have a particular index. 0th idx is the 1st account, and so on.
if account_num != ((idx + 1) as u16) {
println!("FATAL: CSV Import: {}", acct_num_warn);
std::process::exit(1);
}
let ind = idx+3; // Add three because the idx skips the first three 'key' columns
let name:String = header2[ind].trim().to_string();
let ticker:String = header3[ind].trim().to_string(); // no .to_uppercase() b/c margin...
let margin_string = &header4.clone()[ind];
let is_margin:bool = match margin_string.to_lowercase().trim() {
"no" | "non" | "false" => false,
"yes" | "margin" | "true" => true,
_ => {
println!("\n FATAL: CSV Import: Couldn't parse margin value for account {} {} \n",account_num, name);
process::exit(1)
}
};
let just_account: RawAccount = RawAccount {
account_num,
name,
ticker,
is_margin,
};
raw_acct_map.insert(account_num, just_account);
let account: Account = Account {
raw_key: account_num,
list_of_lots: RefCell::new([].to_vec())
};
acct_map.insert(account_num, account);
}
break // This `break` exits this scope so `accounts` can be accessed in `import_transactions`. The rdr stays put.
}
};
Ok(())
}
fn import_transactions(
rdr: &mut csv::Reader<File>,
iso_date_style: bool,
separator: &String,
action_records: &mut HashMap<u32, ActionRecord>,
txns_map: &mut HashMap<u32, Transaction>,
) -> Result<(), Box<dyn Error>> {
let mut this_tx_number = 0;
let mut this_ar_number = 0;
let mut changed_action_records = 0;
let mut changed_txn_num = Vec::new();
println!("Creating transactions...");
for result in rdr.records() {
// rdr's cursor is at row 5, which is the first transaction row
let record = result?;
this_tx_number += 1;
// First, initialize metadata fields.
let mut this_tx_date: &str = "";
let mut this_proceeds: &str;
let mut this_memo: &str = "";
let mut proceeds_parsed = 0f32;
// Next, create action_records.
let mut action_records_map_keys_vec: Vec<u32> = Vec::with_capacity(2);
let mut outgoing_ar: Option<ActionRecord> = None;
let mut incoming_ar: Option<ActionRecord> = None;
let mut outgoing_ar_num: Option<u32> = None;
let mut incoming_ar_num: Option<u32> = None;
for (idx, field) in record.iter().enumerate() {
// Set metadata fields on first three fields.
if idx == 0 { this_tx_date = field; }
else if idx == 1 {
let no_comma_string = field.replace(",", "");
proceeds_parsed = no_comma_string.parse::<f32>()?;
}
else if idx == 2 { this_memo = field; }
// Check for empty strings. If not empty, it's a value for an action_record.
else if field != "" {
this_ar_number += 1;
let ind = idx; // starts at 3, which is the fourth field
let acct_idx = ind - 2; // acct_num and acct_key would be idx + 1, so subtract 2 from ind to get 1
let account_key = acct_idx as u16;
let amount_str = field.replace(",", "");
let mut amount = amount_str.parse::<d128>().unwrap();
// When parsing to a d128, it won't error; rather it'll return a NaN. It must now check for NaN,
// and, if found, attempt to sanitize. These checks will convert accounting/comma format to the expected
// format by removing parentheses from negatives and adding a minus sign in the front. It will also
// attempt to remove empty spaces and currency symbols or designations (e.g. $ or USD).
if amount.is_nan() {
let b = sanitize_string_for_d128_parsing_basic(field).parse::<d128>().unwrap();
amount = b;
};
if amount.is_nan() {
let c = sanitize_string_for_d128_parsing_full(field).parse::<d128>().unwrap();
amount = c;
};
if amount.is_nan() {
println!("FATAL: Couldn't convert amount to d128 for transaction:\n{:#?}", record);
std::process::exit(1);
}
let amount_rounded = round_d128_1e8(&amount);
if amount != amount_rounded { changed_action_records += 1; changed_txn_num.push(this_tx_number); }
let action_record = ActionRecord {
account_key,
amount: amount_rounded,
tx_key: this_tx_number,
self_ar_key: this_ar_number,
movements: RefCell::new([].to_vec()),
};
if amount > d128!(0.0) {
incoming_ar = Some(action_record);
incoming_ar_num = Some(this_ar_number);
action_records_map_keys_vec.push(incoming_ar_num.unwrap())
} else {
outgoing_ar = Some(action_record);
outgoing_ar_num = Some(this_ar_number);
action_records_map_keys_vec.insert(0, outgoing_ar_num.unwrap())
};
}
}
// Note: the rust Trait implementation of FromStr for f32 is capable of parsing:
// '3.14'
// '-3.14'
// '2.5E10', or equivalently, '2.5e10'
// '2.5E-10'
// '5.'
// '.5', or, equivalently, '0.5'
// 'inf', '-inf', 'NaN'
// Notable observations from the list:
// (a) scientific notation is accepted
// (b) accounting format (numbers in parens representing negative numbers) is not explicitly accepted
// Additionally notable:
// (a) the decimal separator must be a period
// (b) there can be no commas
// (c) there can be no currency info ($120 or 120USD, etc. will fail to parse)
// In summary, it appears to only allow: (i) numeric chars, (ii) a period, and/or (iii) a minus sign
//
// The Decimal::d128 implementation of FromStr calls into a C library, and that lib hasn't
// been reviewed (by me), but it is thought/hoped to follow similar parsing conventions,
// though there's no guarantee. Nevertheless, the above notes *appear* to hold true for d128.
fn | (field: &str) -> String {
// First, remove commas.
let no_comma_string = field.replace(",", "");
let almost_done = no_comma_string.replace(" ", "");
// Next, if ASCII (better be), check for accounting formatting
if almost_done.is_ascii() {
if almost_done.as_bytes()[0] == "(".as_bytes()[0] {
let half_fixed = almost_done.replace("(", "-");
let negative_with_minus = half_fixed.replace(")", "");
return negative_with_minus
}
}
almost_done
}
fn sanitize_string_for_d128_parsing_full(field: &str) -> String {
let mut near_done = "".to_string();
// First, remove commas.
let no_comma_string = field.replace(",", "");
let almost_done = no_comma_string.replace(" ", "");
// Next, if ASCII (better be), check for accounting formating
if almost_done.is_ascii() {
if almost_done.as_bytes()[0] == "(".as_bytes()[0] {
let half_fixed = almost_done.replace("(", "-");
let negative_with_minus = half_fixed.replace(")", "");
near_done = negative_with_minus;
} else {
near_done = almost_done;
}
} else {
near_done = almost_done;
}
// Strip non-numeric and non-period characters
let all_done: String = near_done.chars()
.filter(|x|
x.is_numeric() |
(x == &(".".as_bytes()[0] as char)) |
(x == &("-".as_bytes()[0] as char)))
.collect();
all_done
}
if let Some(incoming_ar) = incoming_ar {
let x = incoming_ar_num.unwrap();
action_records.insert(x, incoming_ar);
}
if let Some(outgoing_ar) = outgoing_ar {
let y = outgoing_ar_num.unwrap();
action_records.insert(y, outgoing_ar);
}
let format_yy: String;
let format_yyyy: String;
if iso_date_style {
format_yyyy = "%Y".to_owned() + separator + "%m" + separator + "%d";
format_yy = "%y".to_owned() + separator + "%m" + separator + "%d";
} else {
format_yyyy = "%m".to_owned() + separator + "%d" + separator + "%Y";
format_yy = "%m".to_owned() + separator + "%d" + separator + "%y";
}
let tx_date = NaiveDate::parse_from_str(this_tx_date, &format_yy)
.unwrap_or_else(|_| NaiveDate::parse_from_str(this_tx_date, &format_yyyy)
.expect("
FATAL: Transaction date parsing failed. You must tell the program the format of the date in your CSV Input File. The date separator \
is expected to be a hyphen. The dating format is expected to be \"American\" (%m-%d-%y), not ISO 8601 (%y-%m-%d). You may set different \
date format options via command line flag, environment variable or .env file. Perhaps first run with `--help` or see `.env.example.`\n")
);
let transaction = Transaction {
tx_number: this_tx_number,
date_as_string: this_tx_date.to_string(),
date: tx_date,
user_memo: this_memo.to_string(),
proceeds: proceeds_parsed,
action_record_idx_vec: action_records_map_keys_vec,
};
txns_map.insert(this_tx_number, transaction);
};
if changed_action_records > 0 {
println!(" Changed actionrecord amounts due to rounding precision: {}. Changed txn numbers: {:?}.", changed_action_records, changed_txn_num);
}
Ok(())
}
| sanitize_string_for_d128_parsing_basic | identifier_name |
csv_import_accts_txns.rs | // Copyright (c) 2017-2020, scoobybejesus
// Redistributions must include the license: https://github.com/scoobybejesus/cryptools/blob/master/LEGAL.txt
use std::error::Error;
use std::process;
use std::fs::File;
use std::cell::{RefCell};
use std::collections::{HashMap};
use std::path::PathBuf;
use chrono::NaiveDate;
use decimal::d128;
use crate::transaction::{Transaction, ActionRecord};
use crate::account::{Account, RawAccount};
use crate::decimal_utils::{round_d128_1e8};
pub fn import_from_csv(
import_file_path: PathBuf,
iso_date_style: bool,
separator: &String,
raw_acct_map: &mut HashMap<u16, RawAccount>,
acct_map: &mut HashMap<u16, Account>,
action_records: &mut HashMap<u32, ActionRecord>,
transactions_map: &mut HashMap<u32, Transaction>,
) -> Result<(), Box<dyn Error>> {
let file = match File::open(import_file_path) {
Ok(x) => {
// println!("\nCSV ledger file opened successfully.\n");
x
},
Err(e) => {
println!("Invalid import_file_path");
eprintln!("System error: {}", e);
std::process::exit(1);
}
};
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_reader(file);
import_accounts(&mut rdr, raw_acct_map, acct_map)?;
import_transactions(
&mut rdr,
iso_date_style,
&separator,
action_records,
transactions_map,
)?;
Ok(())
}
fn import_accounts(
rdr: &mut csv::Reader<File>,
raw_acct_map: &mut HashMap<u16, RawAccount>,
acct_map: &mut HashMap<u16, Account>,
) -> Result<(), Box<dyn Error>> {
let header1 = rdr.headers()?.clone(); // account_num
let mut header2: csv::StringRecord = csv::StringRecord::new(); // name
let mut header3: csv::StringRecord = csv::StringRecord::new(); // ticker
let header4: csv::StringRecord; // is_margin
// Account Creation loop. With rdr.has_headers() set to true above, the first record here is the second row of the CSV
for result in rdr.records() {
// This initial iteration through records will break after the 4th row, after accounts have been created
let record = result?;
if header2.len() == 0 {
header2 = record.clone();
continue // After header2 is set, continue to next record
}
else if header3.len() == 0 {
header3 = record.clone();
continue // After header3 is set, continue to next record
}
else {
header4 = record.clone();
// println!("Assigned last header, record: {:?}", record);
// A StringRecord doesn't accept the same range indexing needed below, so a Vec of Strings will be used
let headerstrings: Vec<String> = header1.into_iter().map(|field| field.to_string()).collect();
let acct_num_warn = "Transactions will not import correctly if account numbers in the CSV import file aren't
ordered chronologically (i.e., beginning in column 4 - the 1st account column - the value should be 1.
The next column's value should be 2, then 3, etc, until the final account).";
// Header row variables have been set. It's now time to set up the accounts.
println!("\nCreating accounts...");
let length = &headerstrings.len();
for (idx, field) in headerstrings[3..*length].iter().enumerate() {
// Parse account numbers.
let account_num = field.trim().parse::<u16>().expect(&format!("Header row account number should parse into u16: {}", field));
// For now, their columns aren't remembered. Instead, they must have a particular index. 0th idx is the 1st account, and so on.
if account_num != ((idx + 1) as u16) {
println!("FATAL: CSV Import: {}", acct_num_warn);
std::process::exit(1);
}
let ind = idx+3; // Add three because the idx skips the first three 'key' columns
let name:String = header2[ind].trim().to_string();
let ticker:String = header3[ind].trim().to_string(); // no .to_uppercase() b/c margin...
let margin_string = &header4.clone()[ind];
let is_margin:bool = match margin_string.to_lowercase().trim() {
"no" | "non" | "false" => false,
"yes" | "margin" | "true" => true,
_ => {
println!("\n FATAL: CSV Import: Couldn't parse margin value for account {} {} \n",account_num, name);
process::exit(1)
}
};
let just_account: RawAccount = RawAccount {
account_num,
name,
ticker,
is_margin,
};
raw_acct_map.insert(account_num, just_account);
let account: Account = Account {
raw_key: account_num,
list_of_lots: RefCell::new([].to_vec())
};
acct_map.insert(account_num, account);
}
break // This `break` exits this scope so `accounts` can be accessed in `import_transactions`. The rdr stays put.
}
};
Ok(())
}
fn import_transactions(
rdr: &mut csv::Reader<File>,
iso_date_style: bool,
separator: &String,
action_records: &mut HashMap<u32, ActionRecord>,
txns_map: &mut HashMap<u32, Transaction>,
) -> Result<(), Box<dyn Error>> {
let mut this_tx_number = 0;
let mut this_ar_number = 0;
let mut changed_action_records = 0;
let mut changed_txn_num = Vec::new();
println!("Creating transactions...");
for result in rdr.records() {
// rdr's cursor is at row 5, which is the first transaction row
let record = result?;
this_tx_number += 1;
// First, initialize metadata fields.
let mut this_tx_date: &str = "";
let mut this_proceeds: &str;
let mut this_memo: &str = "";
let mut proceeds_parsed = 0f32;
// Next, create action_records.
let mut action_records_map_keys_vec: Vec<u32> = Vec::with_capacity(2);
let mut outgoing_ar: Option<ActionRecord> = None;
let mut incoming_ar: Option<ActionRecord> = None;
let mut outgoing_ar_num: Option<u32> = None;
let mut incoming_ar_num: Option<u32> = None;
for (idx, field) in record.iter().enumerate() {
// Set metadata fields on first three fields.
if idx == 0 { this_tx_date = field; }
else if idx == 1 {
let no_comma_string = field.replace(",", "");
proceeds_parsed = no_comma_string.parse::<f32>()?;
}
else if idx == 2 { this_memo = field; }
// Check for empty strings. If not empty, it's a value for an action_record.
else if field != "" {
this_ar_number += 1;
let ind = idx; // starts at 3, which is the fourth field
let acct_idx = ind - 2; // acct_num and acct_key would be idx + 1, so subtract 2 from ind to get 1
let account_key = acct_idx as u16;
let amount_str = field.replace(",", "");
let mut amount = amount_str.parse::<d128>().unwrap();
// When parsing to a d128, it won't error; rather it'll return a NaN. It must now check for NaN,
// and, if found, attempt to sanitize. These checks will convert accounting/comma format to the expected
// format by removing parentheses from negatives and adding a minus sign in the front. It will also
// attempt to remove empty spaces and currency symbols or designations (e.g. $ or USD).
if amount.is_nan() {
let b = sanitize_string_for_d128_parsing_basic(field).parse::<d128>().unwrap();
amount = b;
};
if amount.is_nan() {
let c = sanitize_string_for_d128_parsing_full(field).parse::<d128>().unwrap();
amount = c;
};
if amount.is_nan() {
println!("FATAL: Couldn't convert amount to d128 for transaction:\n{:#?}", record);
std::process::exit(1);
}
let amount_rounded = round_d128_1e8(&amount);
if amount != amount_rounded { changed_action_records += 1; changed_txn_num.push(this_tx_number); }
let action_record = ActionRecord {
account_key,
amount: amount_rounded,
tx_key: this_tx_number,
self_ar_key: this_ar_number,
movements: RefCell::new([].to_vec()),
};
if amount > d128!(0.0) {
incoming_ar = Some(action_record);
incoming_ar_num = Some(this_ar_number);
action_records_map_keys_vec.push(incoming_ar_num.unwrap())
} else {
outgoing_ar = Some(action_record);
outgoing_ar_num = Some(this_ar_number);
action_records_map_keys_vec.insert(0, outgoing_ar_num.unwrap())
};
}
}
// Note: the rust Trait implementation of FromStr for f32 is capable of parsing:
// '3.14'
// '-3.14'
// '2.5E10', or equivalently, '2.5e10'
// '2.5E-10'
// '5.'
// '.5', or, equivalently, '0.5'
// 'inf', '-inf', 'NaN'
// Notable observations from the list:
// (a) scientific notation is accepted
// (b) accounting format (numbers in parens representing negative numbers) is not explicitly accepted
// Additionally notable:
// (a) the decimal separator must be a period
// (b) there can be no commas
// (c) there can be no currency info ($120 or 120USD, etc. will fail to parse)
// In summary, it appears to only allow: (i) numeric chars, (ii) a period, and/or (iii) a minus sign
//
// The Decimal::d128 implementation of FromStr calls into a C library, and that lib hasn't
// been reviewed (by me), but it is thought/hoped to follow similar parsing conventions,
// though there's no guarantee. Nevertheless, the above notes *appear* to hold true for d128.
fn sanitize_string_for_d128_parsing_basic(field: &str) -> String {
// First, remove commas.
let no_comma_string = field.replace(",", "");
let almost_done = no_comma_string.replace(" ", "");
// Next, if ASCII (better be), check for accounting formatting
if almost_done.is_ascii() {
if almost_done.as_bytes()[0] == "(".as_bytes()[0] {
let half_fixed = almost_done.replace("(", "-");
let negative_with_minus = half_fixed.replace(")", "");
return negative_with_minus
}
}
almost_done
}
fn sanitize_string_for_d128_parsing_full(field: &str) -> String |
if let Some(incoming_ar) = incoming_ar {
let x = incoming_ar_num.unwrap();
action_records.insert(x, incoming_ar);
}
if let Some(outgoing_ar) = outgoing_ar {
let y = outgoing_ar_num.unwrap();
action_records.insert(y, outgoing_ar);
}
let format_yy: String;
let format_yyyy: String;
if iso_date_style {
format_yyyy = "%Y".to_owned() + separator + "%m" + separator + "%d";
format_yy = "%y".to_owned() + separator + "%m" + separator + "%d";
} else {
format_yyyy = "%m".to_owned() + separator + "%d" + separator + "%Y";
format_yy = "%m".to_owned() + separator + "%d" + separator + "%y";
}
let tx_date = NaiveDate::parse_from_str(this_tx_date, &format_yy)
.unwrap_or_else(|_| NaiveDate::parse_from_str(this_tx_date, &format_yyyy)
.expect("
FATAL: Transaction date parsing failed. You must tell the program the format of the date in your CSV Input File. The date separator \
is expected to be a hyphen. The dating format is expected to be \"American\" (%m-%d-%y), not ISO 8601 (%y-%m-%d). You may set different \
date format options via command line flag, environment variable or .env file. Perhaps first run with `--help` or see `.env.example.`\n")
);
let transaction = Transaction {
tx_number: this_tx_number,
date_as_string: this_tx_date.to_string(),
date: tx_date,
user_memo: this_memo.to_string(),
proceeds: proceeds_parsed,
action_record_idx_vec: action_records_map_keys_vec,
};
txns_map.insert(this_tx_number, transaction);
};
if changed_action_records > 0 {
println!(" Changed actionrecord amounts due to rounding precision: {}. Changed txn numbers: {:?}.", changed_action_records, changed_txn_num);
}
Ok(())
}
| {
let mut near_done = "".to_string();
// First, remove commas.
let no_comma_string = field.replace(",", "");
let almost_done = no_comma_string.replace(" ", "");
// Next, if ASCII (better be), check for accounting formating
if almost_done.is_ascii() {
if almost_done.as_bytes()[0] == "(".as_bytes()[0] {
let half_fixed = almost_done.replace("(", "-");
let negative_with_minus = half_fixed.replace(")", "");
near_done = negative_with_minus;
} else {
near_done = almost_done;
}
} else {
near_done = almost_done;
}
// Strip non-numeric and non-period characters
let all_done: String = near_done.chars()
.filter(|x|
x.is_numeric() |
(x == &(".".as_bytes()[0] as char)) |
(x == &("-".as_bytes()[0] as char)))
.collect();
all_done
} | identifier_body |
csv_import_accts_txns.rs | // Copyright (c) 2017-2020, scoobybejesus
// Redistributions must include the license: https://github.com/scoobybejesus/cryptools/blob/master/LEGAL.txt
use std::error::Error;
use std::process;
use std::fs::File;
use std::cell::{RefCell};
use std::collections::{HashMap};
use std::path::PathBuf;
use chrono::NaiveDate;
use decimal::d128;
use crate::transaction::{Transaction, ActionRecord};
use crate::account::{Account, RawAccount};
use crate::decimal_utils::{round_d128_1e8};
pub fn import_from_csv(
import_file_path: PathBuf,
iso_date_style: bool,
separator: &String,
raw_acct_map: &mut HashMap<u16, RawAccount>,
acct_map: &mut HashMap<u16, Account>,
action_records: &mut HashMap<u32, ActionRecord>,
transactions_map: &mut HashMap<u32, Transaction>,
) -> Result<(), Box<dyn Error>> {
let file = match File::open(import_file_path) {
Ok(x) => {
// println!("\nCSV ledger file opened successfully.\n");
x
},
Err(e) => {
println!("Invalid import_file_path");
eprintln!("System error: {}", e);
std::process::exit(1);
}
};
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_reader(file);
import_accounts(&mut rdr, raw_acct_map, acct_map)?;
import_transactions(
&mut rdr,
iso_date_style,
&separator,
action_records,
transactions_map,
)?;
Ok(())
}
fn import_accounts(
rdr: &mut csv::Reader<File>,
raw_acct_map: &mut HashMap<u16, RawAccount>,
acct_map: &mut HashMap<u16, Account>,
) -> Result<(), Box<dyn Error>> {
let header1 = rdr.headers()?.clone(); // account_num
let mut header2: csv::StringRecord = csv::StringRecord::new(); // name
let mut header3: csv::StringRecord = csv::StringRecord::new(); // ticker
let header4: csv::StringRecord; // is_margin
// Account Creation loop. With rdr.has_headers() set to true above, the first record here is the second row of the CSV
for result in rdr.records() {
// This initial iteration through records will break after the 4th row, after accounts have been created
let record = result?;
if header2.len() == 0 {
header2 = record.clone();
continue // After header2 is set, continue to next record
}
else if header3.len() == 0 {
header3 = record.clone();
continue // After header3 is set, continue to next record
}
else {
header4 = record.clone();
// println!("Assigned last header, record: {:?}", record);
// A StringRecord doesn't accept the same range indexing needed below, so a Vec of Strings will be used
let headerstrings: Vec<String> = header1.into_iter().map(|field| field.to_string()).collect();
let acct_num_warn = "Transactions will not import correctly if account numbers in the CSV import file aren't
ordered chronologically (i.e., beginning in column 4 - the 1st account column - the value should be 1.
The next column's value should be 2, then 3, etc, until the final account).";
// Header row variables have been set. It's now time to set up the accounts.
println!("\nCreating accounts...");
let length = &headerstrings.len();
for (idx, field) in headerstrings[3..*length].iter().enumerate() {
// Parse account numbers.
let account_num = field.trim().parse::<u16>().expect(&format!("Header row account number should parse into u16: {}", field));
// For now, their columns aren't remembered. Instead, they must have a particular index. 0th idx is the 1st account, and so on.
if account_num != ((idx + 1) as u16) {
println!("FATAL: CSV Import: {}", acct_num_warn);
std::process::exit(1);
}
let ind = idx+3; // Add three because the idx skips the first three 'key' columns
let name:String = header2[ind].trim().to_string();
let ticker:String = header3[ind].trim().to_string(); // no .to_uppercase() b/c margin...
let margin_string = &header4.clone()[ind];
let is_margin:bool = match margin_string.to_lowercase().trim() {
"no" | "non" | "false" => false,
"yes" | "margin" | "true" => true,
_ => {
println!("\n FATAL: CSV Import: Couldn't parse margin value for account {} {} \n",account_num, name);
process::exit(1)
}
};
let just_account: RawAccount = RawAccount {
account_num,
name,
ticker,
is_margin,
};
raw_acct_map.insert(account_num, just_account);
let account: Account = Account {
raw_key: account_num,
list_of_lots: RefCell::new([].to_vec())
};
acct_map.insert(account_num, account);
}
break // This `break` exits this scope so `accounts` can be accessed in `import_transactions`. The rdr stays put.
}
};
Ok(())
}
fn import_transactions(
rdr: &mut csv::Reader<File>,
iso_date_style: bool,
separator: &String,
action_records: &mut HashMap<u32, ActionRecord>,
txns_map: &mut HashMap<u32, Transaction>,
) -> Result<(), Box<dyn Error>> {
let mut this_tx_number = 0;
let mut this_ar_number = 0;
let mut changed_action_records = 0;
let mut changed_txn_num = Vec::new();
println!("Creating transactions...");
for result in rdr.records() {
// rdr's cursor is at row 5, which is the first transaction row
let record = result?;
this_tx_number += 1;
// First, initialize metadata fields.
let mut this_tx_date: &str = "";
let mut this_proceeds: &str;
let mut this_memo: &str = "";
let mut proceeds_parsed = 0f32;
// Next, create action_records.
let mut action_records_map_keys_vec: Vec<u32> = Vec::with_capacity(2);
let mut outgoing_ar: Option<ActionRecord> = None;
let mut incoming_ar: Option<ActionRecord> = None;
let mut outgoing_ar_num: Option<u32> = None;
let mut incoming_ar_num: Option<u32> = None;
for (idx, field) in record.iter().enumerate() {
// Set metadata fields on first three fields.
if idx == 0 { this_tx_date = field; }
else if idx == 1 {
let no_comma_string = field.replace(",", "");
proceeds_parsed = no_comma_string.parse::<f32>()?;
}
else if idx == 2 { this_memo = field; }
// Check for empty strings. If not empty, it's a value for an action_record.
else if field != "" {
this_ar_number += 1;
let ind = idx; // starts at 3, which is the fourth field
let acct_idx = ind - 2; // acct_num and acct_key would be idx + 1, so subtract 2 from ind to get 1
let account_key = acct_idx as u16;
let amount_str = field.replace(",", "");
let mut amount = amount_str.parse::<d128>().unwrap();
// When parsing to a d128, it won't error; rather it'll return a NaN. It must now check for NaN,
// and, if found, attempt to sanitize. These checks will convert accounting/comma format to the expected
// format by removing parentheses from negatives and adding a minus sign in the front. It will also
// attempt to remove empty spaces and currency symbols or designations (e.g. $ or USD).
if amount.is_nan() {
let b = sanitize_string_for_d128_parsing_basic(field).parse::<d128>().unwrap();
amount = b;
};
if amount.is_nan() | ;
if amount.is_nan() {
println!("FATAL: Couldn't convert amount to d128 for transaction:\n{:#?}", record);
std::process::exit(1);
}
let amount_rounded = round_d128_1e8(&amount);
if amount != amount_rounded { changed_action_records += 1; changed_txn_num.push(this_tx_number); }
let action_record = ActionRecord {
account_key,
amount: amount_rounded,
tx_key: this_tx_number,
self_ar_key: this_ar_number,
movements: RefCell::new([].to_vec()),
};
if amount > d128!(0.0) {
incoming_ar = Some(action_record);
incoming_ar_num = Some(this_ar_number);
action_records_map_keys_vec.push(incoming_ar_num.unwrap())
} else {
outgoing_ar = Some(action_record);
outgoing_ar_num = Some(this_ar_number);
action_records_map_keys_vec.insert(0, outgoing_ar_num.unwrap())
};
}
}
// Note: the rust Trait implementation of FromStr for f32 is capable of parsing:
// '3.14'
// '-3.14'
// '2.5E10', or equivalently, '2.5e10'
// '2.5E-10'
// '5.'
// '.5', or, equivalently, '0.5'
// 'inf', '-inf', 'NaN'
// Notable observations from the list:
// (a) scientific notation is accepted
// (b) accounting format (numbers in parens representing negative numbers) is not explicitly accepted
// Additionally notable:
// (a) the decimal separator must be a period
// (b) there can be no commas
// (c) there can be no currency info ($120 or 120USD, etc. will fail to parse)
// In summary, it appears to only allow: (i) numeric chars, (ii) a period, and/or (iii) a minus sign
//
// The Decimal::d128 implementation of FromStr calls into a C library, and that lib hasn't
// been reviewed (by me), but it is thought/hoped to follow similar parsing conventions,
// though there's no guarantee. Nevertheless, the above notes *appear* to hold true for d128.
fn sanitize_string_for_d128_parsing_basic(field: &str) -> String {
// First, remove commas.
let no_comma_string = field.replace(",", "");
let almost_done = no_comma_string.replace(" ", "");
// Next, if ASCII (better be), check for accounting formatting
if almost_done.is_ascii() {
if almost_done.as_bytes()[0] == "(".as_bytes()[0] {
let half_fixed = almost_done.replace("(", "-");
let negative_with_minus = half_fixed.replace(")", "");
return negative_with_minus
}
}
almost_done
}
fn sanitize_string_for_d128_parsing_full(field: &str) -> String {
let mut near_done = "".to_string();
// First, remove commas.
let no_comma_string = field.replace(",", "");
let almost_done = no_comma_string.replace(" ", "");
// Next, if ASCII (better be), check for accounting formating
if almost_done.is_ascii() {
if almost_done.as_bytes()[0] == "(".as_bytes()[0] {
let half_fixed = almost_done.replace("(", "-");
let negative_with_minus = half_fixed.replace(")", "");
near_done = negative_with_minus;
} else {
near_done = almost_done;
}
} else {
near_done = almost_done;
}
// Strip non-numeric and non-period characters
let all_done: String = near_done.chars()
.filter(|x|
x.is_numeric() |
(x == &(".".as_bytes()[0] as char)) |
(x == &("-".as_bytes()[0] as char)))
.collect();
all_done
}
if let Some(incoming_ar) = incoming_ar {
let x = incoming_ar_num.unwrap();
action_records.insert(x, incoming_ar);
}
if let Some(outgoing_ar) = outgoing_ar {
let y = outgoing_ar_num.unwrap();
action_records.insert(y, outgoing_ar);
}
let format_yy: String;
let format_yyyy: String;
if iso_date_style {
format_yyyy = "%Y".to_owned() + separator + "%m" + separator + "%d";
format_yy = "%y".to_owned() + separator + "%m" + separator + "%d";
} else {
format_yyyy = "%m".to_owned() + separator + "%d" + separator + "%Y";
format_yy = "%m".to_owned() + separator + "%d" + separator + "%y";
}
let tx_date = NaiveDate::parse_from_str(this_tx_date, &format_yy)
.unwrap_or_else(|_| NaiveDate::parse_from_str(this_tx_date, &format_yyyy)
.expect("
FATAL: Transaction date parsing failed. You must tell the program the format of the date in your CSV Input File. The date separator \
is expected to be a hyphen. The dating format is expected to be \"American\" (%m-%d-%y), not ISO 8601 (%y-%m-%d). You may set different \
date format options via command line flag, environment variable or .env file. Perhaps first run with `--help` or see `.env.example.`\n")
);
let transaction = Transaction {
tx_number: this_tx_number,
date_as_string: this_tx_date.to_string(),
date: tx_date,
user_memo: this_memo.to_string(),
proceeds: proceeds_parsed,
action_record_idx_vec: action_records_map_keys_vec,
};
txns_map.insert(this_tx_number, transaction);
};
if changed_action_records > 0 {
println!(" Changed actionrecord amounts due to rounding precision: {}. Changed txn numbers: {:?}.", changed_action_records, changed_txn_num);
}
Ok(())
}
| {
let c = sanitize_string_for_d128_parsing_full(field).parse::<d128>().unwrap();
amount = c;
} | conditional_block |
servergroup.go | package servergroup
import (
"context"
"fmt"
"net"
"net/http"
"net/url"
"path"
"strings"
"sync/atomic"
"time"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/api"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/client_golang/prometheus"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/storage/remote"
"github.com/sirupsen/logrus"
"github.com/jacksontj/promxy/pkg/middleware"
"github.com/jacksontj/promxy/pkg/promclient"
// sd_config "github.com/prometheus/prometheus/discovery/config"
)
var (
// TODO: have a marker for "which" servergroup
serverGroupSummary = prometheus.NewSummaryVec(prometheus.SummaryOpts{
Name: "server_group_request_duration_seconds",
Help: "Summary of calls to servergroup instances",
}, []string{"host", "call", "status"})
)
func init() {
prometheus.MustRegister(serverGroupSummary)
}
// New creates a new servergroup
func NewServerGroup() (*ServerGroup, error) {
ctx, ctxCancel := context.WithCancel(context.Background())
// Create the targetSet (which will maintain all of the updating etc. in the background)
sg := &ServerGroup{
ctx: ctx,
ctxCancel: ctxCancel,
Ready: make(chan struct{}),
}
logCfg := &promlog.Config{
Level: &promlog.AllowedLevel{},
Format: &promlog.AllowedFormat{},
}
if err := logCfg.Level.Set("info"); err != nil {
return nil, err
}
sg.targetManager = discovery.NewManager(ctx, promlog.New(logCfg))
// Background the updating
go sg.targetManager.Run()
go sg.Sync()
return sg, nil
}
// ServerGroupState encapsulates the state of a serverGroup from service discovery
type ServerGroupState struct {
// Targets is the list of target URLs for this discovery round
Targets []string
apiClient promclient.API
ctx context.Context
ctxCancel context.CancelFunc
}
// ServerGroup encapsulates a set of prometheus downstreams to query/aggregate
type ServerGroup struct {
ctx context.Context
ctxCancel context.CancelFunc
loaded bool
Ready chan struct{}
// TODO: lock/atomics on cfg and client
Cfg *Config
client *http.Client
targetManager *discovery.Manager
OriginalURLs []string
state atomic.Value
}
// Cancel stops backround processes (e.g. discovery manager)
func (s *ServerGroup) Cancel() {
s.ctxCancel()
}
// RoundTrip allows us to intercept and mutate downstream HTTP requests at the transport level
func (s *ServerGroup) RoundTrip(r *http.Request) (*http.Response, error) {
for k, v := range middleware.GetHeaders(r.Context()) {
r.Header.Set(k, v)
}
return s.client.Transport.RoundTrip(r)
}
// Sync updates the targets from our discovery manager
func (s *ServerGroup) Sync() {
syncCh := s.targetManager.SyncCh()
for {
select {
case <-s.ctx.Done():
return
case targetGroupMap := <-syncCh:
logrus.Debug("Updating targets from discovery manager")
// TODO: retry and error handling
err := s.loadTargetGroupMap(targetGroupMap)
for err != nil {
logrus.Errorf("Error loading servergroup, retrying: %v", err)
// TODO: configurable backoff
select {
case <-time.After(time.Second):
err = s.loadTargetGroupMap(targetGroupMap)
case <-s.ctx.Done():
return
}
}
}
}
}
func (s *ServerGroup) loadTargetGroupMap(targetGroupMap map[string][]*targetgroup.Group) (err error) {
targets := make([]string, 0)
apiClients := make([]promclient.API, 0)
ctx, ctxCancel := context.WithCancel(context.Background())
defer func() {
if err != nil {
ctxCancel()
}
}()
for _, targetGroupList := range targetGroupMap {
for _, targetGroup := range targetGroupList {
for _, target := range targetGroup.Targets {
lbls := make([]labels.Label, 0, len(target)+len(targetGroup.Labels)+2)
for ln, lv := range target {
lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)})
}
for ln, lv := range targetGroup.Labels {
if _, ok := target[ln]; !ok {
lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)})
}
}
lbls = append(lbls, labels.Label{Name: model.SchemeLabel, Value: string(s.Cfg.Scheme)})
lbls = append(lbls, labels.Label{Name: PathPrefixLabel, Value: string(s.Cfg.PathPrefix)})
lset := labels.New(lbls...)
logrus.Tracef("Potential target pre-relabel: %v", lset)
lset = relabel.Process(lset, s.Cfg.RelabelConfigs...)
logrus.Tracef("Potential target post-relabel: %v", lset)
// Check if the target was dropped, if so we skip it
if len(lset) == 0 {
continue
}
// If there is no address, then we can't use this set of targets
if v := lset.Get(model.AddressLabel); v == "" {
return fmt.Errorf("discovery target is missing address label: %v", lset)
}
u := &url.URL{
Scheme: lset.Get(model.SchemeLabel),
Host: lset.Get(model.AddressLabel),
Path: lset.Get(PathPrefixLabel),
}
targets = append(targets, u.Host)
client, err := api.NewClient(api.Config{Address: u.String(), RoundTripper: s})
if err != nil {
return err
}
if len(s.Cfg.QueryParams) > 0 {
client = promclient.NewClientArgsWrap(client, s.Cfg.QueryParams)
}
var apiClient promclient.API
apiClient = &promclient.PromAPIV1{v1.NewAPI(client)}
// If debug logging is enabled, wrap the client with a debugAPI client
// Since these are called in the reverse order of what we add, we want
// to make sure that this is the first wrap of the client
if logrus.GetLevel() >= logrus.DebugLevel {
apiClient = &promclient.DebugAPI{apiClient, u.String()}
}
if s.Cfg.RemoteRead {
u.Path = path.Join(u.Path, s.Cfg.RemoteReadPath)
cfg := &remote.ClientConfig{
URL: &config_util.URL{u},
HTTPClientConfig: s.Cfg.HTTPConfig.HTTPConfig,
Timeout: model.Duration(time.Minute * 2),
}
remoteStorageClient, err := remote.NewReadClient("foo", cfg)
if err != nil {
return err
}
apiClient = &promclient.PromAPIRemoteRead{apiClient, remoteStorageClient}
}
// Optionally add time range layers
if s.Cfg.AbsoluteTimeRangeConfig != nil {
apiClient = &promclient.AbsoluteTimeFilter{
API: apiClient,
Start: s.Cfg.AbsoluteTimeRangeConfig.Start,
End: s.Cfg.AbsoluteTimeRangeConfig.End,
Truncate: s.Cfg.AbsoluteTimeRangeConfig.Truncate,
}
}
if s.Cfg.RelativeTimeRangeConfig != nil {
apiClient = &promclient.RelativeTimeFilter{
API: apiClient,
Start: s.Cfg.RelativeTimeRangeConfig.Start,
End: s.Cfg.RelativeTimeRangeConfig.End,
Truncate: s.Cfg.RelativeTimeRangeConfig.Truncate,
}
}
// We remove all private labels after we set the target entry
modelLabelSet := make(model.LabelSet, len(lset))
for _, lbl := range lset {
if !strings.HasPrefix(string(lbl.Name), model.ReservedLabelPrefix) {
modelLabelSet[model.LabelName(lbl.Name)] = model.LabelValue(lbl.Value)
}
}
// Add labels
apiClient = &promclient.AddLabelClient{apiClient, modelLabelSet.Merge(s.Cfg.Labels)}
// Add MetricRelabel if set
if len(s.Cfg.MetricsRelabelConfigs) > 0 {
tmp, err := promclient.NewMetricsRelabelClient(apiClient, s.Cfg.MetricsRelabelConfigs)
if err != nil {
return err
}
apiClient = tmp
}
// Add LabelFilter if configured
if s.Cfg.LabelFilterConfig != nil {
apiClient, err = promclient.NewLabelFilterClient(ctx, apiClient, s.Cfg.LabelFilterConfig)
if err != nil |
}
apiClients = append(apiClients, apiClient)
}
}
}
apiClientMetricFunc := func(i int, api, status string, took float64) {
serverGroupSummary.WithLabelValues(targets[i], api, status).Observe(took)
}
logrus.Debugf("Updating targets from discovery manager: %v", targets)
apiClient, err := promclient.NewMultiAPI(apiClients, s.Cfg.GetAntiAffinity(), apiClientMetricFunc, 1, s.Cfg.GetPreferMax())
if err != nil {
return err
}
newState := &ServerGroupState{
Targets: targets,
apiClient: apiClient,
ctx: ctx,
ctxCancel: ctxCancel,
}
if s.Cfg.IgnoreError {
newState.apiClient = &promclient.IgnoreErrorAPI{newState.apiClient}
}
oldState := s.State() // Fetch the current state (so we can stop it)
s.state.Store(newState) // Store new state
if oldState != nil {
oldState.ctxCancel() // Cancel the old state
}
if !s.loaded {
s.loaded = true
close(s.Ready)
}
return nil
}
// ApplyConfig applies new configuration to the ServerGroup
// TODO: move config + client into state object to be swapped with atomics
func (s *ServerGroup) ApplyConfig(cfg *Config) error {
s.Cfg = cfg
// Copy/paste from upstream prometheus/common until https://github.com/prometheus/common/issues/144 is resolved
tlsConfig, err := config_util.NewTLSConfig(&cfg.HTTPConfig.HTTPConfig.TLSConfig)
if err != nil {
return errors.Wrap(err, "error loading TLS client config")
}
// The only timeout we care about is the configured scrape timeout.
// It is applied on request. So we leave out any timings here.
var rt http.RoundTripper = &http.Transport{
Proxy: http.ProxyURL(cfg.HTTPConfig.HTTPConfig.ProxyURL.URL),
MaxIdleConns: 20000,
MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801
DisableKeepAlives: false,
TLSClientConfig: tlsConfig,
// 5 minutes is typically above the maximum sane scrape interval. So we can
// use keepalive for all configurations.
IdleConnTimeout: 5 * time.Minute,
DialContext: (&net.Dialer{Timeout: cfg.HTTPConfig.DialTimeout}).DialContext,
ResponseHeaderTimeout: cfg.Timeout,
}
// If a bearer token is provided, create a round tripper that will set the
// Authorization header correctly on each request.
if len(cfg.HTTPConfig.HTTPConfig.BearerToken) > 0 {
rt = config_util.NewAuthorizationCredentialsRoundTripper("Bearer", cfg.HTTPConfig.HTTPConfig.BearerToken, rt)
} else if len(cfg.HTTPConfig.HTTPConfig.BearerTokenFile) > 0 {
rt = config_util.NewAuthorizationCredentialsFileRoundTripper("Bearer", cfg.HTTPConfig.HTTPConfig.BearerTokenFile, rt)
}
if cfg.HTTPConfig.HTTPConfig.BasicAuth != nil {
rt = config_util.NewBasicAuthRoundTripper(cfg.HTTPConfig.HTTPConfig.BasicAuth.Username, cfg.HTTPConfig.HTTPConfig.BasicAuth.Password, cfg.HTTPConfig.HTTPConfig.BasicAuth.PasswordFile, rt)
}
s.client = &http.Client{Transport: rt}
if err := s.targetManager.ApplyConfig(map[string]discovery.Configs{"foo": cfg.ServiceDiscoveryConfigs}); err != nil {
return err
}
return nil
}
// State returns the current ServerGroupState
func (s *ServerGroup) State() *ServerGroupState {
tmp := s.state.Load()
if ret, ok := tmp.(*ServerGroupState); ok {
return ret
}
return nil
}
// GetValue loads the raw data for a given set of matchers in the time range
func (s *ServerGroup) GetValue(ctx context.Context, start, end time.Time, matchers []*labels.Matcher) (model.Value, v1.Warnings, error) {
return s.State().apiClient.GetValue(ctx, start, end, matchers)
}
// Query performs a query for the given time.
func (s *ServerGroup) Query(ctx context.Context, query string, ts time.Time) (model.Value, v1.Warnings, error) {
return s.State().apiClient.Query(ctx, query, ts)
}
// QueryRange performs a query for the given range.
func (s *ServerGroup) QueryRange(ctx context.Context, query string, r v1.Range) (model.Value, v1.Warnings, error) {
return s.State().apiClient.QueryRange(ctx, query, r)
}
// LabelValues performs a query for the values of the given label.
func (s *ServerGroup) LabelValues(ctx context.Context, label string, matchers []string, startTime time.Time, endTime time.Time) (model.LabelValues, v1.Warnings, error) {
return s.State().apiClient.LabelValues(ctx, label, matchers, startTime, endTime)
}
// LabelNames returns all the unique label names present in the block in sorted order.
func (s *ServerGroup) LabelNames(ctx context.Context, matchers []string, startTime time.Time, endTime time.Time) ([]string, v1.Warnings, error) {
return s.State().apiClient.LabelNames(ctx, matchers, startTime, endTime)
}
// Series finds series by label matchers.
func (s *ServerGroup) Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, v1.Warnings, error) {
return s.State().apiClient.Series(ctx, matches, startTime, endTime)
}
// Metadata returns metadata about metrics currently scraped by the metric name.
func (s *ServerGroup) Metadata(ctx context.Context, metric, limit string) (map[string][]v1.Metadata, error) {
return s.State().apiClient.Metadata(ctx, metric, limit)
}
| {
return err
} | conditional_block |
servergroup.go | package servergroup
import (
"context"
"fmt"
"net"
"net/http"
"net/url"
"path"
"strings"
"sync/atomic"
"time"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/api"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/client_golang/prometheus"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/storage/remote"
"github.com/sirupsen/logrus"
"github.com/jacksontj/promxy/pkg/middleware"
"github.com/jacksontj/promxy/pkg/promclient"
// sd_config "github.com/prometheus/prometheus/discovery/config"
)
var (
// TODO: have a marker for "which" servergroup
serverGroupSummary = prometheus.NewSummaryVec(prometheus.SummaryOpts{
Name: "server_group_request_duration_seconds",
Help: "Summary of calls to servergroup instances",
}, []string{"host", "call", "status"})
)
func init() {
prometheus.MustRegister(serverGroupSummary)
}
// New creates a new servergroup
func NewServerGroup() (*ServerGroup, error) {
ctx, ctxCancel := context.WithCancel(context.Background())
// Create the targetSet (which will maintain all of the updating etc. in the background)
sg := &ServerGroup{
ctx: ctx,
ctxCancel: ctxCancel,
Ready: make(chan struct{}),
}
logCfg := &promlog.Config{
Level: &promlog.AllowedLevel{},
Format: &promlog.AllowedFormat{},
}
if err := logCfg.Level.Set("info"); err != nil {
return nil, err
}
sg.targetManager = discovery.NewManager(ctx, promlog.New(logCfg))
// Background the updating
go sg.targetManager.Run()
go sg.Sync()
return sg, nil
}
// ServerGroupState encapsulates the state of a serverGroup from service discovery
type ServerGroupState struct {
// Targets is the list of target URLs for this discovery round
Targets []string
apiClient promclient.API
ctx context.Context
ctxCancel context.CancelFunc
}
// ServerGroup encapsulates a set of prometheus downstreams to query/aggregate
type ServerGroup struct {
ctx context.Context
ctxCancel context.CancelFunc
loaded bool
Ready chan struct{}
// TODO: lock/atomics on cfg and client
Cfg *Config
client *http.Client
targetManager *discovery.Manager
OriginalURLs []string
state atomic.Value
}
// Cancel stops backround processes (e.g. discovery manager)
func (s *ServerGroup) Cancel() {
s.ctxCancel()
}
// RoundTrip allows us to intercept and mutate downstream HTTP requests at the transport level
func (s *ServerGroup) RoundTrip(r *http.Request) (*http.Response, error) {
for k, v := range middleware.GetHeaders(r.Context()) {
r.Header.Set(k, v)
}
return s.client.Transport.RoundTrip(r)
}
// Sync updates the targets from our discovery manager
func (s *ServerGroup) Sync() {
syncCh := s.targetManager.SyncCh()
for {
select {
case <-s.ctx.Done():
return
case targetGroupMap := <-syncCh:
logrus.Debug("Updating targets from discovery manager")
// TODO: retry and error handling
err := s.loadTargetGroupMap(targetGroupMap)
for err != nil {
logrus.Errorf("Error loading servergroup, retrying: %v", err)
// TODO: configurable backoff
select {
case <-time.After(time.Second):
err = s.loadTargetGroupMap(targetGroupMap)
case <-s.ctx.Done():
return
}
}
}
}
}
func (s *ServerGroup) loadTargetGroupMap(targetGroupMap map[string][]*targetgroup.Group) (err error) {
targets := make([]string, 0)
apiClients := make([]promclient.API, 0)
ctx, ctxCancel := context.WithCancel(context.Background())
defer func() {
if err != nil {
ctxCancel()
}
}()
for _, targetGroupList := range targetGroupMap {
for _, targetGroup := range targetGroupList {
for _, target := range targetGroup.Targets {
lbls := make([]labels.Label, 0, len(target)+len(targetGroup.Labels)+2)
for ln, lv := range target {
lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)})
}
for ln, lv := range targetGroup.Labels {
if _, ok := target[ln]; !ok {
lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)})
}
}
lbls = append(lbls, labels.Label{Name: model.SchemeLabel, Value: string(s.Cfg.Scheme)})
lbls = append(lbls, labels.Label{Name: PathPrefixLabel, Value: string(s.Cfg.PathPrefix)})
lset := labels.New(lbls...)
logrus.Tracef("Potential target pre-relabel: %v", lset)
lset = relabel.Process(lset, s.Cfg.RelabelConfigs...)
logrus.Tracef("Potential target post-relabel: %v", lset)
// Check if the target was dropped, if so we skip it
if len(lset) == 0 {
continue
}
// If there is no address, then we can't use this set of targets
if v := lset.Get(model.AddressLabel); v == "" {
return fmt.Errorf("discovery target is missing address label: %v", lset)
}
u := &url.URL{
Scheme: lset.Get(model.SchemeLabel),
Host: lset.Get(model.AddressLabel),
Path: lset.Get(PathPrefixLabel),
}
targets = append(targets, u.Host)
client, err := api.NewClient(api.Config{Address: u.String(), RoundTripper: s})
if err != nil {
return err
}
if len(s.Cfg.QueryParams) > 0 {
client = promclient.NewClientArgsWrap(client, s.Cfg.QueryParams)
}
var apiClient promclient.API
apiClient = &promclient.PromAPIV1{v1.NewAPI(client)}
// If debug logging is enabled, wrap the client with a debugAPI client
// Since these are called in the reverse order of what we add, we want
// to make sure that this is the first wrap of the client
if logrus.GetLevel() >= logrus.DebugLevel {
apiClient = &promclient.DebugAPI{apiClient, u.String()}
}
if s.Cfg.RemoteRead {
u.Path = path.Join(u.Path, s.Cfg.RemoteReadPath)
cfg := &remote.ClientConfig{
URL: &config_util.URL{u},
HTTPClientConfig: s.Cfg.HTTPConfig.HTTPConfig,
Timeout: model.Duration(time.Minute * 2),
}
remoteStorageClient, err := remote.NewReadClient("foo", cfg)
if err != nil {
return err
}
apiClient = &promclient.PromAPIRemoteRead{apiClient, remoteStorageClient}
}
// Optionally add time range layers
if s.Cfg.AbsoluteTimeRangeConfig != nil {
apiClient = &promclient.AbsoluteTimeFilter{
API: apiClient,
Start: s.Cfg.AbsoluteTimeRangeConfig.Start,
End: s.Cfg.AbsoluteTimeRangeConfig.End,
Truncate: s.Cfg.AbsoluteTimeRangeConfig.Truncate,
}
}
if s.Cfg.RelativeTimeRangeConfig != nil {
apiClient = &promclient.RelativeTimeFilter{
API: apiClient,
Start: s.Cfg.RelativeTimeRangeConfig.Start,
End: s.Cfg.RelativeTimeRangeConfig.End,
Truncate: s.Cfg.RelativeTimeRangeConfig.Truncate,
}
}
// We remove all private labels after we set the target entry
modelLabelSet := make(model.LabelSet, len(lset))
for _, lbl := range lset {
if !strings.HasPrefix(string(lbl.Name), model.ReservedLabelPrefix) {
modelLabelSet[model.LabelName(lbl.Name)] = model.LabelValue(lbl.Value)
}
}
// Add labels
apiClient = &promclient.AddLabelClient{apiClient, modelLabelSet.Merge(s.Cfg.Labels)}
// Add MetricRelabel if set
if len(s.Cfg.MetricsRelabelConfigs) > 0 {
tmp, err := promclient.NewMetricsRelabelClient(apiClient, s.Cfg.MetricsRelabelConfigs)
if err != nil {
return err
}
apiClient = tmp
}
// Add LabelFilter if configured
if s.Cfg.LabelFilterConfig != nil {
apiClient, err = promclient.NewLabelFilterClient(ctx, apiClient, s.Cfg.LabelFilterConfig)
if err != nil {
return err
}
}
apiClients = append(apiClients, apiClient)
}
}
}
apiClientMetricFunc := func(i int, api, status string, took float64) {
serverGroupSummary.WithLabelValues(targets[i], api, status).Observe(took)
}
logrus.Debugf("Updating targets from discovery manager: %v", targets)
apiClient, err := promclient.NewMultiAPI(apiClients, s.Cfg.GetAntiAffinity(), apiClientMetricFunc, 1, s.Cfg.GetPreferMax())
if err != nil {
return err
}
newState := &ServerGroupState{
Targets: targets,
apiClient: apiClient,
ctx: ctx,
ctxCancel: ctxCancel,
}
if s.Cfg.IgnoreError {
newState.apiClient = &promclient.IgnoreErrorAPI{newState.apiClient}
}
oldState := s.State() // Fetch the current state (so we can stop it)
s.state.Store(newState) // Store new state
if oldState != nil {
oldState.ctxCancel() // Cancel the old state
}
if !s.loaded {
s.loaded = true
close(s.Ready)
}
return nil
}
// ApplyConfig applies new configuration to the ServerGroup
// TODO: move config + client into state object to be swapped with atomics
func (s *ServerGroup) ApplyConfig(cfg *Config) error {
s.Cfg = cfg
// Copy/paste from upstream prometheus/common until https://github.com/prometheus/common/issues/144 is resolved
tlsConfig, err := config_util.NewTLSConfig(&cfg.HTTPConfig.HTTPConfig.TLSConfig)
if err != nil {
return errors.Wrap(err, "error loading TLS client config")
}
// The only timeout we care about is the configured scrape timeout.
// It is applied on request. So we leave out any timings here.
var rt http.RoundTripper = &http.Transport{
Proxy: http.ProxyURL(cfg.HTTPConfig.HTTPConfig.ProxyURL.URL),
MaxIdleConns: 20000,
MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801
DisableKeepAlives: false,
TLSClientConfig: tlsConfig,
// 5 minutes is typically above the maximum sane scrape interval. So we can
// use keepalive for all configurations.
IdleConnTimeout: 5 * time.Minute,
DialContext: (&net.Dialer{Timeout: cfg.HTTPConfig.DialTimeout}).DialContext,
ResponseHeaderTimeout: cfg.Timeout,
}
// If a bearer token is provided, create a round tripper that will set the
// Authorization header correctly on each request.
if len(cfg.HTTPConfig.HTTPConfig.BearerToken) > 0 {
rt = config_util.NewAuthorizationCredentialsRoundTripper("Bearer", cfg.HTTPConfig.HTTPConfig.BearerToken, rt)
} else if len(cfg.HTTPConfig.HTTPConfig.BearerTokenFile) > 0 {
rt = config_util.NewAuthorizationCredentialsFileRoundTripper("Bearer", cfg.HTTPConfig.HTTPConfig.BearerTokenFile, rt)
}
if cfg.HTTPConfig.HTTPConfig.BasicAuth != nil {
rt = config_util.NewBasicAuthRoundTripper(cfg.HTTPConfig.HTTPConfig.BasicAuth.Username, cfg.HTTPConfig.HTTPConfig.BasicAuth.Password, cfg.HTTPConfig.HTTPConfig.BasicAuth.PasswordFile, rt)
}
s.client = &http.Client{Transport: rt}
if err := s.targetManager.ApplyConfig(map[string]discovery.Configs{"foo": cfg.ServiceDiscoveryConfigs}); err != nil {
return err
}
return nil
}
// State returns the current ServerGroupState
func (s *ServerGroup) State() *ServerGroupState {
tmp := s.state.Load()
if ret, ok := tmp.(*ServerGroupState); ok {
return ret
}
return nil
}
// GetValue loads the raw data for a given set of matchers in the time range
func (s *ServerGroup) GetValue(ctx context.Context, start, end time.Time, matchers []*labels.Matcher) (model.Value, v1.Warnings, error) {
return s.State().apiClient.GetValue(ctx, start, end, matchers)
}
// Query performs a query for the given time.
func (s *ServerGroup) Query(ctx context.Context, query string, ts time.Time) (model.Value, v1.Warnings, error) {
return s.State().apiClient.Query(ctx, query, ts)
}
// QueryRange performs a query for the given range.
func (s *ServerGroup) QueryRange(ctx context.Context, query string, r v1.Range) (model.Value, v1.Warnings, error) {
return s.State().apiClient.QueryRange(ctx, query, r)
}
// LabelValues performs a query for the values of the given label.
func (s *ServerGroup) LabelValues(ctx context.Context, label string, matchers []string, startTime time.Time, endTime time.Time) (model.LabelValues, v1.Warnings, error) {
return s.State().apiClient.LabelValues(ctx, label, matchers, startTime, endTime)
}
// LabelNames returns all the unique label names present in the block in sorted order.
func (s *ServerGroup) LabelNames(ctx context.Context, matchers []string, startTime time.Time, endTime time.Time) ([]string, v1.Warnings, error) {
return s.State().apiClient.LabelNames(ctx, matchers, startTime, endTime)
}
// Series finds series by label matchers.
func (s *ServerGroup) Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, v1.Warnings, error) {
return s.State().apiClient.Series(ctx, matches, startTime, endTime)
}
// Metadata returns metadata about metrics currently scraped by the metric name.
func (s *ServerGroup) Metadata(ctx context.Context, metric, limit string) (map[string][]v1.Metadata, error) | {
return s.State().apiClient.Metadata(ctx, metric, limit)
} | identifier_body | |
servergroup.go | package servergroup
import (
"context"
"fmt"
"net"
"net/http"
"net/url"
"path"
"strings"
"sync/atomic"
"time"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/api"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/client_golang/prometheus"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/storage/remote"
"github.com/sirupsen/logrus"
"github.com/jacksontj/promxy/pkg/middleware"
"github.com/jacksontj/promxy/pkg/promclient"
// sd_config "github.com/prometheus/prometheus/discovery/config"
)
var (
// TODO: have a marker for "which" servergroup
serverGroupSummary = prometheus.NewSummaryVec(prometheus.SummaryOpts{
Name: "server_group_request_duration_seconds",
Help: "Summary of calls to servergroup instances",
}, []string{"host", "call", "status"})
)
func init() {
prometheus.MustRegister(serverGroupSummary)
}
// New creates a new servergroup
func NewServerGroup() (*ServerGroup, error) {
ctx, ctxCancel := context.WithCancel(context.Background())
// Create the targetSet (which will maintain all of the updating etc. in the background)
sg := &ServerGroup{
ctx: ctx,
ctxCancel: ctxCancel,
Ready: make(chan struct{}),
}
logCfg := &promlog.Config{
Level: &promlog.AllowedLevel{},
Format: &promlog.AllowedFormat{},
}
if err := logCfg.Level.Set("info"); err != nil {
return nil, err
}
sg.targetManager = discovery.NewManager(ctx, promlog.New(logCfg))
// Background the updating
go sg.targetManager.Run()
go sg.Sync()
return sg, nil
}
// ServerGroupState encapsulates the state of a serverGroup from service discovery
type ServerGroupState struct {
// Targets is the list of target URLs for this discovery round
Targets []string
apiClient promclient.API
ctx context.Context
ctxCancel context.CancelFunc
}
// ServerGroup encapsulates a set of prometheus downstreams to query/aggregate
type ServerGroup struct {
ctx context.Context
ctxCancel context.CancelFunc
loaded bool
Ready chan struct{}
// TODO: lock/atomics on cfg and client
Cfg *Config
client *http.Client
targetManager *discovery.Manager
OriginalURLs []string
state atomic.Value
}
// Cancel stops backround processes (e.g. discovery manager)
func (s *ServerGroup) Cancel() {
s.ctxCancel()
}
// RoundTrip allows us to intercept and mutate downstream HTTP requests at the transport level
func (s *ServerGroup) RoundTrip(r *http.Request) (*http.Response, error) {
for k, v := range middleware.GetHeaders(r.Context()) {
r.Header.Set(k, v)
}
return s.client.Transport.RoundTrip(r)
}
// Sync updates the targets from our discovery manager
func (s *ServerGroup) Sync() {
syncCh := s.targetManager.SyncCh()
for {
select {
case <-s.ctx.Done():
return
case targetGroupMap := <-syncCh:
logrus.Debug("Updating targets from discovery manager")
// TODO: retry and error handling
err := s.loadTargetGroupMap(targetGroupMap)
for err != nil {
logrus.Errorf("Error loading servergroup, retrying: %v", err)
// TODO: configurable backoff
select {
case <-time.After(time.Second):
err = s.loadTargetGroupMap(targetGroupMap)
case <-s.ctx.Done():
return
}
}
}
}
}
func (s *ServerGroup) loadTargetGroupMap(targetGroupMap map[string][]*targetgroup.Group) (err error) {
targets := make([]string, 0)
apiClients := make([]promclient.API, 0)
ctx, ctxCancel := context.WithCancel(context.Background())
defer func() {
if err != nil {
ctxCancel()
}
}()
for _, targetGroupList := range targetGroupMap {
for _, targetGroup := range targetGroupList {
for _, target := range targetGroup.Targets {
lbls := make([]labels.Label, 0, len(target)+len(targetGroup.Labels)+2)
for ln, lv := range target {
lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)})
}
for ln, lv := range targetGroup.Labels {
if _, ok := target[ln]; !ok {
lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)})
}
}
lbls = append(lbls, labels.Label{Name: model.SchemeLabel, Value: string(s.Cfg.Scheme)})
lbls = append(lbls, labels.Label{Name: PathPrefixLabel, Value: string(s.Cfg.PathPrefix)})
lset := labels.New(lbls...)
logrus.Tracef("Potential target pre-relabel: %v", lset)
lset = relabel.Process(lset, s.Cfg.RelabelConfigs...)
logrus.Tracef("Potential target post-relabel: %v", lset)
// Check if the target was dropped, if so we skip it
if len(lset) == 0 {
continue
}
// If there is no address, then we can't use this set of targets
if v := lset.Get(model.AddressLabel); v == "" {
return fmt.Errorf("discovery target is missing address label: %v", lset)
}
u := &url.URL{
Scheme: lset.Get(model.SchemeLabel),
Host: lset.Get(model.AddressLabel),
Path: lset.Get(PathPrefixLabel),
}
targets = append(targets, u.Host)
client, err := api.NewClient(api.Config{Address: u.String(), RoundTripper: s})
if err != nil {
return err
}
if len(s.Cfg.QueryParams) > 0 {
client = promclient.NewClientArgsWrap(client, s.Cfg.QueryParams)
}
var apiClient promclient.API
apiClient = &promclient.PromAPIV1{v1.NewAPI(client)}
// If debug logging is enabled, wrap the client with a debugAPI client
// Since these are called in the reverse order of what we add, we want
// to make sure that this is the first wrap of the client
if logrus.GetLevel() >= logrus.DebugLevel {
apiClient = &promclient.DebugAPI{apiClient, u.String()}
}
if s.Cfg.RemoteRead {
u.Path = path.Join(u.Path, s.Cfg.RemoteReadPath)
cfg := &remote.ClientConfig{
URL: &config_util.URL{u},
HTTPClientConfig: s.Cfg.HTTPConfig.HTTPConfig,
Timeout: model.Duration(time.Minute * 2),
}
remoteStorageClient, err := remote.NewReadClient("foo", cfg)
if err != nil {
return err
}
apiClient = &promclient.PromAPIRemoteRead{apiClient, remoteStorageClient}
}
// Optionally add time range layers
if s.Cfg.AbsoluteTimeRangeConfig != nil {
apiClient = &promclient.AbsoluteTimeFilter{
API: apiClient,
Start: s.Cfg.AbsoluteTimeRangeConfig.Start,
End: s.Cfg.AbsoluteTimeRangeConfig.End,
Truncate: s.Cfg.AbsoluteTimeRangeConfig.Truncate,
}
}
if s.Cfg.RelativeTimeRangeConfig != nil {
apiClient = &promclient.RelativeTimeFilter{
API: apiClient,
Start: s.Cfg.RelativeTimeRangeConfig.Start,
End: s.Cfg.RelativeTimeRangeConfig.End,
Truncate: s.Cfg.RelativeTimeRangeConfig.Truncate,
}
}
// We remove all private labels after we set the target entry
modelLabelSet := make(model.LabelSet, len(lset))
for _, lbl := range lset {
if !strings.HasPrefix(string(lbl.Name), model.ReservedLabelPrefix) {
modelLabelSet[model.LabelName(lbl.Name)] = model.LabelValue(lbl.Value)
}
}
// Add labels
apiClient = &promclient.AddLabelClient{apiClient, modelLabelSet.Merge(s.Cfg.Labels)}
// Add MetricRelabel if set
if len(s.Cfg.MetricsRelabelConfigs) > 0 {
tmp, err := promclient.NewMetricsRelabelClient(apiClient, s.Cfg.MetricsRelabelConfigs)
if err != nil {
return err
}
apiClient = tmp
}
// Add LabelFilter if configured
if s.Cfg.LabelFilterConfig != nil {
apiClient, err = promclient.NewLabelFilterClient(ctx, apiClient, s.Cfg.LabelFilterConfig)
if err != nil {
return err
}
}
apiClients = append(apiClients, apiClient)
}
}
}
apiClientMetricFunc := func(i int, api, status string, took float64) {
serverGroupSummary.WithLabelValues(targets[i], api, status).Observe(took)
}
logrus.Debugf("Updating targets from discovery manager: %v", targets)
apiClient, err := promclient.NewMultiAPI(apiClients, s.Cfg.GetAntiAffinity(), apiClientMetricFunc, 1, s.Cfg.GetPreferMax())
if err != nil {
return err
}
newState := &ServerGroupState{
Targets: targets,
apiClient: apiClient,
ctx: ctx,
ctxCancel: ctxCancel,
}
if s.Cfg.IgnoreError {
newState.apiClient = &promclient.IgnoreErrorAPI{newState.apiClient}
}
oldState := s.State() // Fetch the current state (so we can stop it)
s.state.Store(newState) // Store new state
if oldState != nil {
oldState.ctxCancel() // Cancel the old state
}
if !s.loaded {
s.loaded = true
close(s.Ready)
}
return nil
}
// ApplyConfig applies new configuration to the ServerGroup
// TODO: move config + client into state object to be swapped with atomics
func (s *ServerGroup) ApplyConfig(cfg *Config) error {
s.Cfg = cfg
// Copy/paste from upstream prometheus/common until https://github.com/prometheus/common/issues/144 is resolved
tlsConfig, err := config_util.NewTLSConfig(&cfg.HTTPConfig.HTTPConfig.TLSConfig)
if err != nil {
return errors.Wrap(err, "error loading TLS client config")
}
// The only timeout we care about is the configured scrape timeout.
// It is applied on request. So we leave out any timings here.
var rt http.RoundTripper = &http.Transport{
Proxy: http.ProxyURL(cfg.HTTPConfig.HTTPConfig.ProxyURL.URL),
MaxIdleConns: 20000,
MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801
DisableKeepAlives: false,
TLSClientConfig: tlsConfig,
// 5 minutes is typically above the maximum sane scrape interval. So we can
// use keepalive for all configurations.
IdleConnTimeout: 5 * time.Minute,
DialContext: (&net.Dialer{Timeout: cfg.HTTPConfig.DialTimeout}).DialContext,
ResponseHeaderTimeout: cfg.Timeout,
}
// If a bearer token is provided, create a round tripper that will set the
// Authorization header correctly on each request.
if len(cfg.HTTPConfig.HTTPConfig.BearerToken) > 0 {
rt = config_util.NewAuthorizationCredentialsRoundTripper("Bearer", cfg.HTTPConfig.HTTPConfig.BearerToken, rt)
} else if len(cfg.HTTPConfig.HTTPConfig.BearerTokenFile) > 0 {
rt = config_util.NewAuthorizationCredentialsFileRoundTripper("Bearer", cfg.HTTPConfig.HTTPConfig.BearerTokenFile, rt)
}
if cfg.HTTPConfig.HTTPConfig.BasicAuth != nil {
rt = config_util.NewBasicAuthRoundTripper(cfg.HTTPConfig.HTTPConfig.BasicAuth.Username, cfg.HTTPConfig.HTTPConfig.BasicAuth.Password, cfg.HTTPConfig.HTTPConfig.BasicAuth.PasswordFile, rt)
}
s.client = &http.Client{Transport: rt}
if err := s.targetManager.ApplyConfig(map[string]discovery.Configs{"foo": cfg.ServiceDiscoveryConfigs}); err != nil {
return err
}
return nil
}
// State returns the current ServerGroupState
func (s *ServerGroup) State() *ServerGroupState {
tmp := s.state.Load()
if ret, ok := tmp.(*ServerGroupState); ok {
return ret
}
return nil
}
// GetValue loads the raw data for a given set of matchers in the time range
func (s *ServerGroup) GetValue(ctx context.Context, start, end time.Time, matchers []*labels.Matcher) (model.Value, v1.Warnings, error) {
return s.State().apiClient.GetValue(ctx, start, end, matchers)
}
// Query performs a query for the given time.
func (s *ServerGroup) Query(ctx context.Context, query string, ts time.Time) (model.Value, v1.Warnings, error) {
return s.State().apiClient.Query(ctx, query, ts)
}
// QueryRange performs a query for the given range.
func (s *ServerGroup) QueryRange(ctx context.Context, query string, r v1.Range) (model.Value, v1.Warnings, error) {
return s.State().apiClient.QueryRange(ctx, query, r)
}
// LabelValues performs a query for the values of the given label.
func (s *ServerGroup) LabelValues(ctx context.Context, label string, matchers []string, startTime time.Time, endTime time.Time) (model.LabelValues, v1.Warnings, error) {
return s.State().apiClient.LabelValues(ctx, label, matchers, startTime, endTime)
}
// LabelNames returns all the unique label names present in the block in sorted order.
func (s *ServerGroup) | (ctx context.Context, matchers []string, startTime time.Time, endTime time.Time) ([]string, v1.Warnings, error) {
return s.State().apiClient.LabelNames(ctx, matchers, startTime, endTime)
}
// Series finds series by label matchers.
func (s *ServerGroup) Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, v1.Warnings, error) {
return s.State().apiClient.Series(ctx, matches, startTime, endTime)
}
// Metadata returns metadata about metrics currently scraped by the metric name.
func (s *ServerGroup) Metadata(ctx context.Context, metric, limit string) (map[string][]v1.Metadata, error) {
return s.State().apiClient.Metadata(ctx, metric, limit)
}
| LabelNames | identifier_name |
servergroup.go | package servergroup
import (
"context"
"fmt"
"net"
"net/http"
"net/url"
"path"
"strings"
"sync/atomic"
"time"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/api"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/client_golang/prometheus"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/storage/remote"
"github.com/sirupsen/logrus"
"github.com/jacksontj/promxy/pkg/middleware"
"github.com/jacksontj/promxy/pkg/promclient"
// sd_config "github.com/prometheus/prometheus/discovery/config"
)
var (
// TODO: have a marker for "which" servergroup
serverGroupSummary = prometheus.NewSummaryVec(prometheus.SummaryOpts{
Name: "server_group_request_duration_seconds",
Help: "Summary of calls to servergroup instances",
}, []string{"host", "call", "status"})
)
func init() {
prometheus.MustRegister(serverGroupSummary)
}
// New creates a new servergroup
func NewServerGroup() (*ServerGroup, error) {
ctx, ctxCancel := context.WithCancel(context.Background())
// Create the targetSet (which will maintain all of the updating etc. in the background)
sg := &ServerGroup{
ctx: ctx,
ctxCancel: ctxCancel,
Ready: make(chan struct{}),
}
logCfg := &promlog.Config{
Level: &promlog.AllowedLevel{},
Format: &promlog.AllowedFormat{},
}
if err := logCfg.Level.Set("info"); err != nil {
return nil, err
}
sg.targetManager = discovery.NewManager(ctx, promlog.New(logCfg))
// Background the updating
go sg.targetManager.Run()
go sg.Sync()
return sg, nil
}
// ServerGroupState encapsulates the state of a serverGroup from service discovery
type ServerGroupState struct {
// Targets is the list of target URLs for this discovery round
Targets []string
apiClient promclient.API
ctx context.Context
ctxCancel context.CancelFunc
}
// ServerGroup encapsulates a set of prometheus downstreams to query/aggregate
type ServerGroup struct {
ctx context.Context
ctxCancel context.CancelFunc
loaded bool
Ready chan struct{}
// TODO: lock/atomics on cfg and client
Cfg *Config
client *http.Client
targetManager *discovery.Manager
OriginalURLs []string
state atomic.Value
}
// Cancel stops backround processes (e.g. discovery manager)
func (s *ServerGroup) Cancel() {
s.ctxCancel()
}
// RoundTrip allows us to intercept and mutate downstream HTTP requests at the transport level
func (s *ServerGroup) RoundTrip(r *http.Request) (*http.Response, error) {
for k, v := range middleware.GetHeaders(r.Context()) {
r.Header.Set(k, v)
}
return s.client.Transport.RoundTrip(r)
}
// Sync updates the targets from our discovery manager
func (s *ServerGroup) Sync() {
syncCh := s.targetManager.SyncCh()
for {
select {
case <-s.ctx.Done():
return
case targetGroupMap := <-syncCh:
logrus.Debug("Updating targets from discovery manager")
// TODO: retry and error handling
err := s.loadTargetGroupMap(targetGroupMap)
for err != nil {
logrus.Errorf("Error loading servergroup, retrying: %v", err)
// TODO: configurable backoff
select {
case <-time.After(time.Second):
err = s.loadTargetGroupMap(targetGroupMap)
case <-s.ctx.Done():
return
}
}
}
}
}
func (s *ServerGroup) loadTargetGroupMap(targetGroupMap map[string][]*targetgroup.Group) (err error) {
targets := make([]string, 0)
apiClients := make([]promclient.API, 0)
ctx, ctxCancel := context.WithCancel(context.Background())
defer func() {
if err != nil {
ctxCancel()
}
}()
for _, targetGroupList := range targetGroupMap {
for _, targetGroup := range targetGroupList {
for _, target := range targetGroup.Targets {
lbls := make([]labels.Label, 0, len(target)+len(targetGroup.Labels)+2)
for ln, lv := range target {
lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)})
}
for ln, lv := range targetGroup.Labels {
if _, ok := target[ln]; !ok {
lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)})
}
}
lbls = append(lbls, labels.Label{Name: model.SchemeLabel, Value: string(s.Cfg.Scheme)})
lbls = append(lbls, labels.Label{Name: PathPrefixLabel, Value: string(s.Cfg.PathPrefix)})
lset := labels.New(lbls...)
logrus.Tracef("Potential target pre-relabel: %v", lset)
lset = relabel.Process(lset, s.Cfg.RelabelConfigs...)
logrus.Tracef("Potential target post-relabel: %v", lset)
// Check if the target was dropped, if so we skip it
if len(lset) == 0 {
continue
}
// If there is no address, then we can't use this set of targets
if v := lset.Get(model.AddressLabel); v == "" {
return fmt.Errorf("discovery target is missing address label: %v", lset)
}
u := &url.URL{
Scheme: lset.Get(model.SchemeLabel),
Host: lset.Get(model.AddressLabel),
Path: lset.Get(PathPrefixLabel),
}
targets = append(targets, u.Host)
client, err := api.NewClient(api.Config{Address: u.String(), RoundTripper: s})
if err != nil {
return err
}
if len(s.Cfg.QueryParams) > 0 {
client = promclient.NewClientArgsWrap(client, s.Cfg.QueryParams)
}
var apiClient promclient.API
apiClient = &promclient.PromAPIV1{v1.NewAPI(client)}
// If debug logging is enabled, wrap the client with a debugAPI client
// Since these are called in the reverse order of what we add, we want
// to make sure that this is the first wrap of the client
if logrus.GetLevel() >= logrus.DebugLevel {
apiClient = &promclient.DebugAPI{apiClient, u.String()}
}
if s.Cfg.RemoteRead {
u.Path = path.Join(u.Path, s.Cfg.RemoteReadPath)
cfg := &remote.ClientConfig{
URL: &config_util.URL{u},
HTTPClientConfig: s.Cfg.HTTPConfig.HTTPConfig,
Timeout: model.Duration(time.Minute * 2),
}
remoteStorageClient, err := remote.NewReadClient("foo", cfg)
if err != nil {
return err
}
apiClient = &promclient.PromAPIRemoteRead{apiClient, remoteStorageClient}
}
// Optionally add time range layers
if s.Cfg.AbsoluteTimeRangeConfig != nil {
apiClient = &promclient.AbsoluteTimeFilter{ | End: s.Cfg.AbsoluteTimeRangeConfig.End,
Truncate: s.Cfg.AbsoluteTimeRangeConfig.Truncate,
}
}
if s.Cfg.RelativeTimeRangeConfig != nil {
apiClient = &promclient.RelativeTimeFilter{
API: apiClient,
Start: s.Cfg.RelativeTimeRangeConfig.Start,
End: s.Cfg.RelativeTimeRangeConfig.End,
Truncate: s.Cfg.RelativeTimeRangeConfig.Truncate,
}
}
// We remove all private labels after we set the target entry
modelLabelSet := make(model.LabelSet, len(lset))
for _, lbl := range lset {
if !strings.HasPrefix(string(lbl.Name), model.ReservedLabelPrefix) {
modelLabelSet[model.LabelName(lbl.Name)] = model.LabelValue(lbl.Value)
}
}
// Add labels
apiClient = &promclient.AddLabelClient{apiClient, modelLabelSet.Merge(s.Cfg.Labels)}
// Add MetricRelabel if set
if len(s.Cfg.MetricsRelabelConfigs) > 0 {
tmp, err := promclient.NewMetricsRelabelClient(apiClient, s.Cfg.MetricsRelabelConfigs)
if err != nil {
return err
}
apiClient = tmp
}
// Add LabelFilter if configured
if s.Cfg.LabelFilterConfig != nil {
apiClient, err = promclient.NewLabelFilterClient(ctx, apiClient, s.Cfg.LabelFilterConfig)
if err != nil {
return err
}
}
apiClients = append(apiClients, apiClient)
}
}
}
apiClientMetricFunc := func(i int, api, status string, took float64) {
serverGroupSummary.WithLabelValues(targets[i], api, status).Observe(took)
}
logrus.Debugf("Updating targets from discovery manager: %v", targets)
apiClient, err := promclient.NewMultiAPI(apiClients, s.Cfg.GetAntiAffinity(), apiClientMetricFunc, 1, s.Cfg.GetPreferMax())
if err != nil {
return err
}
newState := &ServerGroupState{
Targets: targets,
apiClient: apiClient,
ctx: ctx,
ctxCancel: ctxCancel,
}
if s.Cfg.IgnoreError {
newState.apiClient = &promclient.IgnoreErrorAPI{newState.apiClient}
}
oldState := s.State() // Fetch the current state (so we can stop it)
s.state.Store(newState) // Store new state
if oldState != nil {
oldState.ctxCancel() // Cancel the old state
}
if !s.loaded {
s.loaded = true
close(s.Ready)
}
return nil
}
// ApplyConfig applies new configuration to the ServerGroup
// TODO: move config + client into state object to be swapped with atomics
func (s *ServerGroup) ApplyConfig(cfg *Config) error {
s.Cfg = cfg
// Copy/paste from upstream prometheus/common until https://github.com/prometheus/common/issues/144 is resolved
tlsConfig, err := config_util.NewTLSConfig(&cfg.HTTPConfig.HTTPConfig.TLSConfig)
if err != nil {
return errors.Wrap(err, "error loading TLS client config")
}
// The only timeout we care about is the configured scrape timeout.
// It is applied on request. So we leave out any timings here.
var rt http.RoundTripper = &http.Transport{
Proxy: http.ProxyURL(cfg.HTTPConfig.HTTPConfig.ProxyURL.URL),
MaxIdleConns: 20000,
MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801
DisableKeepAlives: false,
TLSClientConfig: tlsConfig,
// 5 minutes is typically above the maximum sane scrape interval. So we can
// use keepalive for all configurations.
IdleConnTimeout: 5 * time.Minute,
DialContext: (&net.Dialer{Timeout: cfg.HTTPConfig.DialTimeout}).DialContext,
ResponseHeaderTimeout: cfg.Timeout,
}
// If a bearer token is provided, create a round tripper that will set the
// Authorization header correctly on each request.
if len(cfg.HTTPConfig.HTTPConfig.BearerToken) > 0 {
rt = config_util.NewAuthorizationCredentialsRoundTripper("Bearer", cfg.HTTPConfig.HTTPConfig.BearerToken, rt)
} else if len(cfg.HTTPConfig.HTTPConfig.BearerTokenFile) > 0 {
rt = config_util.NewAuthorizationCredentialsFileRoundTripper("Bearer", cfg.HTTPConfig.HTTPConfig.BearerTokenFile, rt)
}
if cfg.HTTPConfig.HTTPConfig.BasicAuth != nil {
rt = config_util.NewBasicAuthRoundTripper(cfg.HTTPConfig.HTTPConfig.BasicAuth.Username, cfg.HTTPConfig.HTTPConfig.BasicAuth.Password, cfg.HTTPConfig.HTTPConfig.BasicAuth.PasswordFile, rt)
}
s.client = &http.Client{Transport: rt}
if err := s.targetManager.ApplyConfig(map[string]discovery.Configs{"foo": cfg.ServiceDiscoveryConfigs}); err != nil {
return err
}
return nil
}
// State returns the current ServerGroupState
func (s *ServerGroup) State() *ServerGroupState {
tmp := s.state.Load()
if ret, ok := tmp.(*ServerGroupState); ok {
return ret
}
return nil
}
// GetValue loads the raw data for a given set of matchers in the time range
func (s *ServerGroup) GetValue(ctx context.Context, start, end time.Time, matchers []*labels.Matcher) (model.Value, v1.Warnings, error) {
return s.State().apiClient.GetValue(ctx, start, end, matchers)
}
// Query performs a query for the given time.
func (s *ServerGroup) Query(ctx context.Context, query string, ts time.Time) (model.Value, v1.Warnings, error) {
return s.State().apiClient.Query(ctx, query, ts)
}
// QueryRange performs a query for the given range.
func (s *ServerGroup) QueryRange(ctx context.Context, query string, r v1.Range) (model.Value, v1.Warnings, error) {
return s.State().apiClient.QueryRange(ctx, query, r)
}
// LabelValues performs a query for the values of the given label.
func (s *ServerGroup) LabelValues(ctx context.Context, label string, matchers []string, startTime time.Time, endTime time.Time) (model.LabelValues, v1.Warnings, error) {
return s.State().apiClient.LabelValues(ctx, label, matchers, startTime, endTime)
}
// LabelNames returns all the unique label names present in the block in sorted order.
func (s *ServerGroup) LabelNames(ctx context.Context, matchers []string, startTime time.Time, endTime time.Time) ([]string, v1.Warnings, error) {
return s.State().apiClient.LabelNames(ctx, matchers, startTime, endTime)
}
// Series finds series by label matchers.
func (s *ServerGroup) Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, v1.Warnings, error) {
return s.State().apiClient.Series(ctx, matches, startTime, endTime)
}
// Metadata returns metadata about metrics currently scraped by the metric name.
func (s *ServerGroup) Metadata(ctx context.Context, metric, limit string) (map[string][]v1.Metadata, error) {
return s.State().apiClient.Metadata(ctx, metric, limit)
} | API: apiClient,
Start: s.Cfg.AbsoluteTimeRangeConfig.Start, | random_line_split |
server.js | /*
* name: server
* version: 0.5.0
* update: bug fix/ add openSelecter
* date: 2017-04-28
*/
define(function(require, exports, module) {
"use strict";
var $ = app.util;
var etpl = require('etpl');
//资源路径处理
var _source = function(source, host) {
if (!$.trim(source)) {
return "";
}
host = host && host.split ? host : appcfg.host.source;
if (/^([\w-]+:)?\/\/([^\/]+)/.test(source)) {
//source = host + source.replace(/^([\w-]+:)?\/\/([^\/]+)/,'');
} else {
source = host + source;
}
return source.replace(/\\/g, '/');
};
//时间格式处理
var _getDate = function(source, ignore_minute, logfunction) {
var myDate;
var separate = '-';
var minute = '';
if (source === void(0)) {
source = new Date();
}
logfunction && logfunction(source);
if (source.split) {
source = source.replace(/\-/g, '/');
} else if (isNaN(parseInt(source))) {
source = source.toString().replace(/\-/g, '/');
} else {
source = new Date(source);
}
logfunction && logfunction(source);
if (new Date(source) && (new Date(source)).getDate) {
myDate = new Date(source);
logfunction && logfunction(myDate);
if (!ignore_minute) {
minute = (myDate.getHours() < 10 ? " 0" : " ") + myDate.getHours() + ":" + (myDate.getMinutes() < 10 ? "0" : "") + myDate.getMinutes();
}
return myDate.getFullYear() + separate + (myDate.getMonth() + 1) + separate + (myDate.getDate() < 10 ? '0' : '') + myDate.getDate() + minute;
} else {
return source.slice(0, 16);
}
};
//日期格式化
etpl.addFilter('date', function(source, ignore_minute) {
return _getDate(source, ignore_minute);
});
//图片域名处理
etpl.addFilter('source', function(source, host) {
return _source(source, host);
});
//货币小数点
etpl.addFilter('decimal', function(source, index) {
var num = parseFloat(source),
i = index ? index : 1;
if (isNaN(num)) {
return source;
}
return num.toFixed(i);
});
//退出登录
var _logout = function() {
app.storage.remove('user');
//注销推送
var ajpush = api.require('ajpush');
ajpush.bindAliasAndTags({
alias: '',
tags: []
}, function(ret) {
if (ret.statusCode) {
console.log('推送已注销');
}
});
app.openView({
closeback: true
}, 'member', 'login');
};
//存储用户信息
var _initUser = function(userData) {
if (!userData) {
return app.toast('初始化用户信息失败');
}
if (userData.photo && ($.trim(userData.photo) === '')) {
userData.photo = '';
} else {
userData.photo = _source(userData.photo);
}
// if ($.trim(userData.nowScore) === '') {
// userData.nowScore = 0;
// }
if ($.trim(userData.realName) === '') {
userData.realName = '';
}
app.storage.val('user', userData);
//app初始化
app.storage.val('appInit', 1);
//注册推送
if (userData.tag) {
var ajpush = api.require('ajpush');
ajpush.bindAliasAndTags({
alias: "user_" + userData.id,
tags: userData.tag.split(',')
}, function(ret) {
if (ret.statusCode) {
console.log("user_" + userData.id + "成功注册推送");
}
});
}
};
//推送开关
var _push = {
open: function(cb) {
var ajpush = api.require('ajpush');
if (ajpush) {
ajpush.resumePush(function(ret) {
if (typeof cb === 'function') {
cb(ret && ret.status);
}
});
} else {
console.log('ajpush插件未就绪');
}
},
close: function(cb) {
var ajpush = api.require('ajpush');
if (ajpush) {
ajpush.stopPush(function(ret) {
if (typeof cb === 'function') {
cb(ret && ret.status);
}
});
} else {
console.log('ajpush插件未就绪');
}
}
};
//获取用户信息
var _getUser = function(hold) {
//测试数据
return {
id: "0001",
headImg: seajs.root + '/res/img/avat.jpg',
nickName: '珊珊',
realName: '伐木累',
nowScore: 99,
mobile: '15067589521'
};
var _user = app.storage.val('user');
if (!$.isPlainObject(_user)) {
app.ready(function() {
app.alert('请先登录!', function() {
app.openView(null, 'member', 'login');
}, {
bgclose: false
});
});
return {};
}
return _user;
};
//坐标反查
var _getAddrByLoc = function(lat, lng, config) {
var def = {
callback: null,
silent: false
};
var opt = $.extend(def, config || {});
var map = api.require('bMap');
var getTimeout = setTimeout(function() {
app.loading.hide();
app.toast('检索超时,请重试', 2000);
}, appcfg.set.longtime);
if (!lat || !lng) {
return app.toast('坐标反查参数错误');
}
if (!opt.silent) {
app.loading.show('正在检索地址...');
}
map.getNameFromCoords({
lon: lng,
lat: lat
}, function(ret, err) {
app.loading.hide();
clearTimeout(getTimeout);
if (err) {
var baiduerrmap = ['', '检索词有岐义', '检索地址有岐义', '没有找到检索结果', 'key错误', '网络连接错误', '网络连接超时', '还未完成鉴权,请在鉴权通过后重试'];
return console.log('百度坐标反查:' + baiduerrmap[err.code]);
}
if (ret.status) {
opt.callback(ret);
} else {
app.toast('百度地图API错误', 2000);
}
});
};
//回传用户注册地
var _uploadifyLocation = function() {
var hasLocat;
var userData = _getUser();
var updateUser = function(location) {
app.ajax({
url: appcfg.api.uploadifyLocation,
data: {
"member_id": userData.id,
"province": location.province,
"city": location.city,
"area": location.district
},
success: function(res) {
if (res.status === 'Y') {
} else {
console.log('回传用户地理位置返回异常:' + res.msg);
}
},
error: function(o) {
console.log('回传用户地理位置发生错误');
}
});
};
_getLocation(function(lat, lng) {
_getAddrByLoc(lat, lng, {
silent: true,
callback: function(res) {
var location = {};
location.lng = lng;
location.lat = lat;
location.province = res.province;
location.city = res.city;
location.district = res.district;
location.streetName = res.streetName;
location.streetNumber = res.streetNumber;
updateUser(location);
}
});
});
};
//收集信息
var _collection = function() {
var oldInfo = app.storage.val('DeviceInfo') || {},
newInfo = {},
send = function(extraParam) {
var userData = app.storage.val('user'),
hasChange;
extraParam.saveDate = _getDate(false, true);
//日期过滤
if (oldInfo.saveDate && oldInfo.saveDate >= extraParam.saveDate) {
return null;
}
//信息改变过滤
$.each(extraParam, function(i, e) {
if (e !== oldInfo[i]) {
hasChange = true;
return null;
}
});
if (hasChange && $.isPlainObject(userData)) {
app.storage.val('DeviceInfo', extraParam);
var data = $.extend({
member_id: userData.id
}, extraParam);
app.ajax({
url: appcfg.api.loginLog,
data: data,
success: function(res) {
},
error: function() {
console.log('回传设备信息时发生错误');
}
});
}
};
newInfo.app_version = appcfg.set.version;
newInfo.os = api.systemType;
newInfo.connect_status = api.connectionType;
newInfo.mobile_operator_name = api.operator;
newInfo.model = api.deviceModel;
_getLocation(function(lat, lng) {
newInfo.latitude = lat;
newInfo.longitude = lng;
send(newInfo);
}, function() {
send(newInfo);
});
};
//数据预取
var _preGet = function(cb) {
var got = 0,
preGetList = _preGet.prototype.preGetList,
getOne = function() {
got++;
if (got >= preGetList.length && typeof(cb) === 'function') {
cb();
got = null;
getOne = null;
preGetList = null;
}
};
//开始加载
$.each(preGetList, function(i, e) {
app.ajax({
url: e.url,
data: e.data,
success: function(res) {
getOne();
if (res.status === 'Y') {
var data = res.data;
if (data.split) {
data = JSON.parse(data);
}
app.storage.val(e.key, data);
}
},
error: function() {}
});
});
};
_preGet.prototype.preGetList = [];
//预取配置信息
_preGet.prototype.preGetList.push({
key: 'websiteConfig',
url: appcfg.api.websiteConfig,
data: {}
});
//预取数据
var _checkPreget = function() {
var preGetList = _preGet.prototype.preGetList,
isDone = true;
$.each(preGetList, function(i, e) {
if (!app.storage.val(e.key)) {
_preGet();
isDone = false;
return false;
}
});
return isDone;
};
//检查升级
var _checkUpdate = function(platform, silence) {
var mam = api.require('mam');
mam.checkUpdate(function(ret, err) {
if (ret) {
var result = ret.result;
if (result.update === true && result.closed === false) {
app.confirm(ret.updateTip, function() {
if (platform == 'ios') {
api.installApp({
appUri: result.source
});
} else if (platform == 'android') {
api.download({
url: result.source,
report: true
}, function(ret, err) {
if (ret && 0 === ret.state) { /* 下载进度 */
app.toast("正在下载:" + ret.percent + "%", 1000);
}
if (ret && 1 === ret.state) { /* 下载完成 */
var savePath = ret.savePath;
api.installApp({
appUri: savePath
});
}
});
}
}, null, {
bar: true,
title: '升级到 V' + result.version
});
} else if (!silence) {
app.alert("暂无更新");
}
} else if (!silence) {
app.alert(err.msg);
}
});
};
//获取地理位置
var _getLocation = function(callback, errcb) {
var bMap = api.require('bMap');
var chaoshi = setTimeout(function() {
app.loading.hide();
bMap.stopLocation();
if (app.storage.val('gps')) { | } else {
if (typeof(errcb) === 'function') {
errcb();
} else {
app.toast('GPS定位超时!', 1000);
}
}
}, appcfg.set.outime);
bMap.getLocation({
accuracy: '10m',
autoStop: true,
filter: 1
}, function(ret, err) {
app.loading.hide();
if (ret && ret.status) {
chaoshi = clearTimeout(chaoshi);
if(ret.lat && ret.lon){
app.storage.val('gps', {
lat: ret.lat,
lng: ret.lon
});
}else{
console.log('bMap.getLocation定位异常');
}
bMap.stopLocation();
if (typeof(callback) === 'function') {
callback(ret.lat, ret.lon);
}
} else {
if (typeof(errcb) === 'function') {
errcb();
} else {
app.toast('GPS定位失败:' + JSON.stringify(err) );
}
}
});
};
//指定DOM打开地图
var _openBaiduMap = function(dom, data, refresh) {
if (!$.isPlainObject(data) || !data.longitude || !data.latitude) {
return app.toast('参数缺失,无法打开地图');
}
var bdMapParam = {
lat: data.latitude,
lng: data.longitude
};
app.storage.val('bdMapData', bdMapParam);
if (refresh) {
app.window.evaluate('', 'bdMapView', 'refresh()');
} else {
setTimeout(function() {
var offset = $("#" + dom)[0].getBoundingClientRect();
app.window.popoverElement({
id: dom,
name: 'bdMapView',
url: seajs.root + '/view/common/baiduMap/temp.html',
top: parseInt(window.selfTop) + offset.top,
bounce: false
});
}, 0);
}
};
//公用模板
var _commonTemp = function(tempName, data) {
var templateCache = app.storage.val('templateCache') || {};
if (!data) {
data = {};
}
if(templateCache[tempName]){
return templateCache[tempName];
}
var etplEngine = new etpl.Engine();
var template = api.readFile({
sync: true,
path: seajs.root + '/res/temp/template.html'
});
etplEngine.compile(template);
var Render = etplEngine.getRenderer(tempName);
if(Render){
var html = Render(data);
templateCache[tempName] = html;
app.storage.val('templateCache', templateCache);
return html;
} else {
console.log('找不到指定模板:' + tempName);
}
};
var cacheImg = function(element, callback) {
var placeholderPic = seajs.root + '/res/img/placeholder.jpg';
var remoteEle;
if ($(element)[0].getAttribute('data-remote')) {
remoteEle = $(element);
} else {
remoteEle = $(element)[0].querySelectorAll('[data-remote]');
}
app.ready(function() {
var cacheCount = 0;
$.each(remoteEle, function(i, ele) {
var remote = ele.getAttribute('data-remote') || placeholderPic;
api.imageCache({
url: remote,
policy: "cache_else_network"
}, function(ret, err) {
var url = ret.url;
if (ele.tagName.toLowerCase() === 'img') {
ele.setAttribute('src', url);
} else {
ele.style.backgroundImage = "url(" + url + ")";
}
ele.removeAttribute('data-remote');
cacheCount++;
if(cacheCount===remoteEle.length){
typeof callback === 'function' && callback();
}
});
});
});
return remoteEle;
};
module.exports = {
logout: _logout,
initUser: _initUser,
getUser: _getUser,
push: _push,
preGet: _preGet,
checkPreget: _checkPreget,
source: _source,
getDate: _getDate,
checkUpdate: _checkUpdate,
uploadifyLocation: _uploadifyLocation,
openBaiduMap: _openBaiduMap,
commonTemp: _commonTemp,
getAddrByLoc: _getAddrByLoc,
getLocation: _getLocation,
collection: _collection,
cacheImg: cacheImg
};
}); | var gpsCache = app.storage.val('gps');
if (typeof(callback) === 'function') {
callback(gpsCache.lat, gpsCache.lng);
}
console.log('定位超时,使用缓存数据'); | random_line_split |
server.js | /*
* name: server
* version: 0.5.0
* update: bug fix/ add openSelecter
* date: 2017-04-28
*/
define(function(require, exports, module) {
"use strict";
var $ = app.util;
var etpl = require('etpl');
//资源路径处理
var _source = function(source, host) {
if (!$.trim(source)) {
return "";
}
host = host && host.split ? host : appcfg.host.source;
if (/^([\w-]+:)?\/\/([^\/]+)/.test(source)) {
//source = host + source.replace(/^([\w-]+:)?\/\/([^\/]+)/,'');
} else {
source = host + source;
}
return source.replace(/\\/g, '/');
};
//时间格式处理
var _getDate = function(source, ignore_minute, logfunction) {
var myDate;
var separate = '-';
var minute = '';
if (source === void(0)) {
source = new Date();
}
logfunction && logfunction(source);
if (source.split) {
source = source.replace(/\-/g, '/');
} else if (isNaN(parseInt(source))) {
source = source.toString().replace(/\-/g, '/');
} else {
source = new Date(source);
}
logfunction && logfunction(source);
if (new Date(source) && (new Date(source)).getDate) {
myDate = new Date(source);
logfunction && logfunction(myDate);
if (!ignore_minute) {
minute = (myDate.getHours() < 10 ? " 0" : " ") + myDate.getHours() + ":" + (myDate.getMinutes() < 10 ? "0" : "") + myDate.getMinutes();
}
return myDate.getFullYear() + separate + (myDate.getMonth() + 1) + separate + (myDate.getDate() < 10 ? '0' : '') + myDate.getDate() + minute;
} else {
return source.slice(0, 16);
}
};
//日期格式化
etpl.addFilter('date', function(source, ignore_minute) {
return _getDate(source, ignore_minute);
});
//图片域名处理
etpl.addFilter('source', function(source, host) {
return _source(source, host);
});
//货币小数点
etpl.addFilter('decimal', function(source, index) {
var num = parseFloat(source),
i = index ? index : 1;
if (isNaN(num)) {
return source;
}
return num.toFixed(i);
});
//退出登录
var _logout = function() {
app.storage.remove('user');
//注销推送
var ajpush = api.require('ajpush');
ajpush.bindAliasAndTags({
alias: '',
tags: []
}, function(ret) {
if (ret.statusCode) {
console.log('推送已注销');
}
});
app.openView({
closeback: true
}, 'member', 'login');
};
//存储用户信息
var _initUser = function(userData) {
if (!userData) {
return app.toast('初始化用户信息失败');
}
if (userData.photo && ($.trim(userData.photo) === '')) {
userData.photo = '';
} else {
userData.photo = _source(userData.photo);
}
// if ($.trim(userData.nowScore) === '') {
// userData.nowScore = 0;
// }
if ($.trim(userData.realName) === '') {
userData.realName = '';
}
app.storage.val('user', userData);
//app初始化
app.storage.val('appInit', 1);
//注册推送
if (userData.tag) {
var ajpush = api.require('ajpush');
ajpush.bindAliasAndTags({
alias: "user_" + userData.id,
tags: userData.tag.split(',')
}, function(ret) {
if (ret.statusCode) {
console.log("user_" + userData.id + "成功注册推送");
}
});
}
};
//推送开关
var _push = {
open: function(cb) {
var ajpush = api.require('ajpush');
if (ajpush) {
ajpush.resumePush(function(ret) {
if (typeof cb === 'function') {
cb(ret && ret.status);
}
});
} else {
console.log('ajpush插件未就绪');
}
},
close: function(cb) {
var ajpush = api.require('ajpush');
if (ajpush) {
ajpush.stopPush(function(ret) {
if (typeof cb === 'function') {
cb(ret && ret.status);
}
});
} else {
console.log('ajpush插件未就绪');
}
}
};
//获取用户信息
var _getUser = function(hold) {
//测试数据
return {
id: "0001",
headImg: seajs.root + '/res/img/avat.jpg',
nickName: '珊珊',
realName: '伐木累',
nowScore: 99,
mobile: '15067589521'
};
var _user = app.storage.val('user');
if (!$.isPlainObject(_user)) {
app.ready(function() {
app.alert('请先登录!', function() {
app.openView(null, 'member', 'login');
}, {
bgclose: false
});
});
return {};
}
return _user;
};
//坐标反查
var _getAddrByLoc = function(lat, lng, config) {
var def = {
callback: null,
silent: false
};
var opt = $.extend(def, config || {});
var map = api.require('bMap');
var getTimeout = setTimeout(function() {
app.loading.hide();
app.toast('检索超时,请重试', 2000);
}, appcfg.set.longtime);
if (!lat || !lng) {
return app.toast('坐标反查参数错误');
}
if (!opt.silent) {
app.loading.show('正在检索地址...');
}
map.getNameFromCoords({
lon: lng,
lat: lat
}, function(ret, err) {
app.loading.hide();
clearTimeout(getTimeout);
if (err) {
var baiduerrmap = ['', '检索词有岐义', '检索地址有岐义', '没有找到检索结果', 'key错误', '网络连接错误', '网络连接超时', '还未完成鉴权,请在鉴权通过后重试'];
return console.log('百度坐标反查:' + baiduerrmap[err.code]);
}
if (ret.status) {
opt.callback(ret);
} else {
app.toast('百度地图API错误', 2000);
}
});
};
//回传用户注册地
var _uploadifyLocation = function() {
var hasLocat;
var userData = _getUser();
var updateUser = function(location) {
app.ajax({
url: appcfg.api.uploadifyLocation,
data: {
"member_id": userData.id,
"province": location.province,
"city": location.city,
"area": location.district
},
success: function(res) {
if (res.status === 'Y') {
} else {
console.log('回传用户地理位置返回异常:' + res.msg);
}
},
error: function(o) {
console.log('回传用户地理位置发生错误');
}
});
};
_getLocation(function(lat, lng) {
_getAddrByLoc(lat, lng, {
silent: true,
callback: function(res) {
var location = {};
location.lng = lng;
location.lat = lat;
location.province = res.province;
location.city = res.city;
location.district = res.district;
location.streetName = res.streetName;
location.streetNumber = res.streetNumber;
updateUser(location);
}
});
});
};
//收集信息
var _collection = function() {
var oldInfo = app.storage.val('DeviceInfo') || {},
newInfo = {},
send = function(extraParam) {
var userData = app.storage.val('user'),
hasChange;
extraParam.saveDate = _getDate(false, true);
//日期过滤
if (oldInfo.saveDate && oldInfo.saveDate >= extraParam.saveDate) {
return null;
}
//信息改变过滤
$.each(extraParam, function(i, e) {
if (e !== oldInfo[i]) {
hasChange = true;
return null;
}
});
if (hasChange && $.isPlainObject(userData)) {
app.storage.val('DeviceInfo', extraParam);
var data = $.extend({
member_id: userData.id
}, extraParam);
app.ajax({
url: appcfg.api.loginLog,
data: data,
success: function(res) {
},
error: function() {
console.log('回传设备信息时发生错误');
}
});
}
};
newInfo.app_version = appcfg.set.version;
newInfo.os = api.systemType;
newInfo.connect_status = api.connectionType;
newInfo.mobile_operator_name = api.operator;
newInfo.model = api.deviceModel;
_getLocation(function(lat, lng) {
newInfo.latitude = lat;
newInfo.longitude = lng;
send(newInfo);
}, function() {
send(newInfo);
});
};
//数据预取
var _preGet = function(cb) {
var got = 0,
preGetList = _preGet.prototype.preGetList,
getOne = function() {
got++;
if (got >= preGetList.length && typeof(cb) === 'function') {
cb();
got = null;
getOne = null;
preGetList = null;
}
};
//开始加载
$.each(preGetList, function(i, e) {
app.ajax({
url: e.url,
data: e.data,
success: function(res) {
getOne();
if (res.status === 'Y') {
var data = res.data;
if (data.split) {
data = JSON.parse(data);
}
app.storage.val(e.key, data);
}
},
error: function() {}
});
});
};
_preGet.prototype.preGetList = [];
//预取配置信息
_preGet.prototype.preGetList.push({
key: 'websiteConfig',
url: appcfg.api.websiteConfig,
data: {}
});
//预取数据
var _checkPreget = function() {
var preGetList = _preGet.prototype.preGetList,
isDone = true;
$.each(preGetList, function(i, e) {
if (!app.storage.val(e.key)) {
_preGet();
isDone = false;
return false;
}
});
return isDone;
};
//检查升级
var _checkUpdate = function(platform, silence) {
var mam = api.require('mam');
mam.checkUpdate(function(ret, err) {
if (ret) {
var result = ret.result;
if (result.update === true && result.closed === false) {
app.confirm(ret.updateTip, function() {
if (platform == 'ios') {
api.installApp({
appUri: result.source
});
} else if (platform == 'android') {
api.download({
url: result.source,
report: true
}, function(ret, err) {
if (ret && 0 === ret.state) { /* 下载进度 */
app.toast("正在下载:" + ret.percent + "%", 1000);
}
if (ret && 1 === ret.state) { /* 下载完成 */
var savePath = ret.savePath;
api.installApp({
appUri: savePath
});
}
});
}
}, null, {
bar: true,
title: '升级到 V' + result.version
});
} else if (!silence) {
app.alert("暂无更新");
}
} else if (!silence) {
app.alert(err.msg);
}
});
};
//获取地理位置
var _getLocation = function(callback, errcb) {
var bMap = api.require('bMap');
var chaoshi = setTimeout(function() {
app.loading.hide();
bMap.stopLocation();
if (app.storage.val('gps')) {
var gpsCache = app.storage.val('gps');
if (typeof(callback) === 'function') {
callback(gpsCache.lat, gpsCache.lng);
}
console.log('定位超时,使用缓存数据');
} else {
if (typeof(errcb) === 'function') {
errcb();
} else {
app.toast('GPS定位超时!', 1000);
}
}
}, appcfg.set.outime);
bMap.getLocation({
accuracy: '10m',
autoStop: true,
filter: 1
}, function(ret, err) {
app.loading.hide();
if (ret && ret.status) {
chaoshi = clearTimeout(chaoshi);
if(ret.lat && ret.lon){
app.storage.val('gps', {
lat: ret.lat,
lng: ret.lon
});
}else{
console.log('bMap.getLocation定位异常');
}
bMap.stopLocation();
if (typeof(callback) === 'function') {
callback(ret.lat, ret.lon);
}
} else {
if (typeof(errcb) === 'function') {
errcb();
} else {
app.toast('GPS定位失败:' + JSON.stringify(err) );
}
}
});
};
//指定DOM打开地图
var _openBaiduMap = function(dom, data, refresh) {
if (!$.isPlainObject(data) || !data.longitude || !data.latitude) {
return app.toast('参数缺失,无法打开地图');
}
var bdMapParam = {
lat: data.latitude,
lng: d | p.window.evaluate('', 'bdMapView', 'refresh()');
} else {
setTimeout(function() {
var offset = $("#" + dom)[0].getBoundingClientRect();
app.window.popoverElement({
id: dom,
name: 'bdMapView',
url: seajs.root + '/view/common/baiduMap/temp.html',
top: parseInt(window.selfTop) + offset.top,
bounce: false
});
}, 0);
}
};
//公用模板
var _commonTemp = function(tempName, data) {
var templateCache = app.storage.val('templateCache') || {};
if (!data) {
data = {};
}
if(templateCache[tempName]){
return templateCache[tempName];
}
var etplEngine = new etpl.Engine();
var template = api.readFile({
sync: true,
path: seajs.root + '/res/temp/template.html'
});
etplEngine.compile(template);
var Render = etplEngine.getRenderer(tempName);
if(Render){
var html = Render(data);
templateCache[tempName] = html;
app.storage.val('templateCache', templateCache);
return html;
} else {
console.log('找不到指定模板:' + tempName);
}
};
var cacheImg = function(element, callback) {
var placeholderPic = seajs.root + '/res/img/placeholder.jpg';
var remoteEle;
if ($(element)[0].getAttribute('data-remote')) {
remoteEle = $(element);
} else {
remoteEle = $(element)[0].querySelectorAll('[data-remote]');
}
app.ready(function() {
var cacheCount = 0;
$.each(remoteEle, function(i, ele) {
var remote = ele.getAttribute('data-remote') || placeholderPic;
api.imageCache({
url: remote,
policy: "cache_else_network"
}, function(ret, err) {
var url = ret.url;
if (ele.tagName.toLowerCase() === 'img') {
ele.setAttribute('src', url);
} else {
ele.style.backgroundImage = "url(" + url + ")";
}
ele.removeAttribute('data-remote');
cacheCount++;
if(cacheCount===remoteEle.length){
typeof callback === 'function' && callback();
}
});
});
});
return remoteEle;
};
module.exports = {
logout: _logout,
initUser: _initUser,
getUser: _getUser,
push: _push,
preGet: _preGet,
checkPreget: _checkPreget,
source: _source,
getDate: _getDate,
checkUpdate: _checkUpdate,
uploadifyLocation: _uploadifyLocation,
openBaiduMap: _openBaiduMap,
commonTemp: _commonTemp,
getAddrByLoc: _getAddrByLoc,
getLocation: _getLocation,
collection: _collection,
cacheImg: cacheImg
};
}); | ata.longitude
};
app.storage.val('bdMapData', bdMapParam);
if (refresh) {
ap | conditional_block |
spline.rs | //! Spline curves and operations.
#[cfg(feature = "serialization")] use serde_derive::{Deserialize, Serialize};
#[cfg(not(feature = "std"))] use alloc::vec::Vec;
#[cfg(feature = "std")] use std::cmp::Ordering;
#[cfg(feature = "std")] use std::ops::{Div, Mul};
#[cfg(not(feature = "std"))] use core::ops::{Div, Mul};
#[cfg(not(feature = "std"))] use core::cmp::Ordering;
use crate::interpolate::{Interpolate, Additive, One, Trigo};
use crate::interpolation::Interpolation;
use crate::key::Key;
/// Spline curve used to provide interpolation between control points (keys).
///
/// Splines are made out of control points ([`Key`]). When creating a [`Spline`] with
/// [`Spline::from_vec`] or [`Spline::from_iter`], the keys don’t have to be sorted (they are sorted
/// automatically by the sampling value).
///
/// You can sample from a spline with several functions:
///
/// - [`Spline::sample`]: allows you to sample from a spline. If not enough keys are available
/// for the required interpolation mode, you get `None`.
/// - [`Spline::clamped_sample`]: behaves like [`Spline::sample`] but will return either the first
/// or last key if out of bound; it will return `None` if not enough key.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serialization", derive(Deserialize, Serialize))]
pub struct Spline<T, V>(pub(crate) Vec<Key<T, V>>);
impl<T, V> Spline<T, V> {
/// Internal sort to ensure invariant of sorting keys is valid.
fn internal_sort(&mut self) where T: PartialOrd {
self.0.sort_by(|k0, k1| k0.t.partial_cmp(&k1.t).unwrap_or(Ordering::Less));
}
/// Create a new spline out of keys. The keys don’t have to be sorted even though it’s recommended
/// to provide ascending sorted ones (for performance purposes).
pub fn from_vec(keys: Vec<Key<T, V>>) -> Self where T: PartialOrd {
let mut spline = Spline(keys);
spline.internal_sort();
spline
}
/// Create a new spline by consuming an `Iterater<Item = Key<T>>`. They keys don’t have to be
/// sorted.
///
/// # Note on iterators
///
/// It’s valid to use any iterator that implements `Iterator<Item = Key<T>>`. However, you should
/// use [`Spline::from_vec`] if you are passing a [`Vec`].
pub fn from_iter<I>(iter: I) -> Self where I: Iterator<Item = Key<T, V>>, T: PartialOrd {
Self::from_vec(iter.collect())
}
/// Retrieve the keys of a spline.
pub fn keys(&self) -> &[Key<T, V>] {
&self.0
}
/// Number of keys.
#[inline(always)]
pub fn len(&self) -> usize {
self.0.len()
}
/// Check whether the spline has no key.
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Sample a spline at a given time, returning the interpolated value along with its associated
/// key.
///
/// The current implementation, based on immutability, cannot perform in constant time. This means
/// that sampling’s processing complexity is currently *O(log n)*. It’s possible to achieve *O(1)*
/// performance by using a slightly different spline type. If you are interested by this feature,
/// an implementation for a dedicated type is foreseen yet not started yet.
///
/// # Return
///
/// `None` if you try to sample a value at a time that has no key associated with. That can also
/// happen if you try to sample between two keys with a specific interpolation mode that makes the
/// sampling impossible. For instance, [`Interpolation::CatmullRom`] requires *four* keys. If
/// you’re near the beginning of the spline or its end, ensure you have enough keys around to make
/// the sampling.
pub fn sample_with_key(&self, t: T) -> Option<(V, &Key<T, V>, Option<&Key<T, V>>)>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
let keys = &self.0;
let i = search_lower_cp(keys, t)?;
let cp0 = &keys[i];
match cp0.interpolation {
Interpolation::Step(threshold) => {
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value = if nt < threshold { cp0.value } else { cp1.value };
Some((value, cp0, Some(cp1)))
}
Interpolation::Linear => {
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value = Interpolate::lerp(cp0.value, cp1.value, nt);
Some((value, cp0, Some(cp1)))
}
Interpolation::Cosine => {
let two_t = T::one() + T::one();
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let cos_nt = (T::one() - (nt * T::pi()).cos()) / two_t;
let value = Interpolate::lerp(cp0.value, cp1.value, cos_nt);
Some((value, cp0, Some(cp1)))
}
Interpolation::CatmullRom => {
// We need at least four points for Catmull Rom; ensure we have them, otherwise, return
// None.
if i == 0 || i >= keys.len() - 2 {
None
} else {
let cp1 = &keys[i + 1];
let cpm0 = &keys[i - 1];
let cpm1 = &keys[i + 2];
let nt = normalize_time(t, cp0, cp1);
let value = Interpolate::cubic_hermite((cpm0.value, cpm0.t), (cp0.value, cp0.t), (cp1.value, cp1.t), (cpm1.value, cpm1.t), nt);
Some((value, cp0, Some(cp1)))
}
}
Interpolation::Bezier(u) => {
// We need to check the next control point to see whether we want quadratic or cubic Bezier.
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value =
if let Interpolation::Bezier(v) = cp1.interpolation {
Interpolate::cubic_bezier(cp0.value, u, v, cp1.value, nt)
} else {
Interpolate::quadratic_bezier(cp0.value, u, cp1.value, nt)
};
Some((value, cp0, Some(cp1)))
}
Interpolation::StrokeBezier(input, output) => {
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value = Interpolate::cubic_bezier(cp0.value, input, output, cp1.value, nt);
Some((value, cp0, Some(cp1)))
}
Interpolation::__NonExhaustive => unreachable!(),
}
}
/// Sample a spline at a given time.
///
pub fn sample(&self, t: T) -> Option<V>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
self.sample_with_key(t).map(|(v, _, _)| v)
}
/// Sample a spline at a given time with clamping, returning the interpolated value along with its
/// associated key.
///
/// # Return
///
/// If you sample before the first key or after the last one, return the first key or the last
/// one, respectively. Otherwise, behave the same way as [`Spline::sample`].
///
/// # Error
///
/// This function returns [`None`] if you have no key.
pub fn clamped_sample_with_key(&self, t: T) -> Option<(V, &Key<T, V>, Option<&Key<T, V>>)>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
if self.0.is_empty() {
return None;
}
self.sample_with_key(t).or_else(move || {
let first = self.0.first().unwrap();
if t <= first.t {
let second = if self.0.len() >= 2 { Some(&self.0[1]) } else { None };
Some((first.value, &first, second))
} else {
let last = self.0.last().unwrap();
if t >= last.t {
Some((last.value, &last, None))
} else {
None
}
}
})
}
/// Sample a spline at a given time with clamping.
pub fn clamped_sample(&self, t: T) -> Option<V>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
self.clamped_sample_with_key(t).map(|(v, _, _)| v)
}
/// Add a key into the spline.
pub fn add(&mut self, key: Key<T, V>) where T: PartialOrd {
self.0.push(key);
self.internal_sort();
}
/// Remove a key from the spline.
pub fn remove(&mut self, index: usize) -> Option<Key<T, V>> {
if index > | key and return the key already present.
///
/// The key is updated — if present — with the provided function.
///
/// # Notes
///
/// That function makes sense only if you want to change the interpolator (i.e. [`Key::t`]) of
/// your key. If you just want to change the interpolation mode or the carried value, consider
/// using the [`Spline::get_mut`] method instead as it will be way faster.
pub fn replace<F>(
&mut self,
index: usize,
f: F
) -> Option<Key<T, V>>
where
F: FnOnce(&Key<T, V>) -> Key<T, V>,
T: PartialOrd
{
let key = self.remove(index)?;
self.add(f(&key));
Some(key)
}
/// Get a key at a given index.
pub fn get(&self, index: usize) -> Option<&Key<T, V>> {
self.0.get(index)
}
/// Mutably get a key at a given index.
pub fn get_mut(&mut self, index: usize) -> Option<KeyMut<T, V>> {
self.0.get_mut(index).map(|key| KeyMut {
value: &mut key.value,
interpolation: &mut key.interpolation
})
}
}
/// A mutable [`Key`].
///
/// Mutable keys allow to edit the carried values and the interpolation mode but not the actual
/// interpolator value as it would invalidate the internal structure of the [`Spline`]. If you
/// want to achieve this, you’re advised to use [`Spline::replace`].
pub struct KeyMut<'a, T, V> {
/// Carried value.
pub value: &'a mut V,
/// Interpolation mode to use for that key.
pub interpolation: &'a mut Interpolation<T, V>,
}
// Normalize a time ([0;1]) given two control points.
#[inline(always)]
pub(crate) fn normalize_time<T, V>(
t: T,
cp: &Key<T, V>,
cp1: &Key<T, V>
) -> T where T: Additive + Div<T, Output = T> + PartialEq {
assert!(cp1.t != cp.t, "overlapping keys");
(t - cp.t) / (cp1.t - cp.t)
}
// Find the lower control point corresponding to a given time.
fn search_lower_cp<T, V>(cps: &[Key<T, V>], t: T) -> Option<usize> where T: PartialOrd {
let mut i = 0;
let len = cps.len();
if len < 2 {
return None;
}
loop {
let cp = &cps[i];
let cp1 = &cps[i+1];
if t >= cp1.t {
if i >= len - 2 {
return None;
}
i += 1;
} else if t < cp.t {
if i == 0 {
return None;
}
i -= 1;
} else {
break; // found
}
}
Some(i)
}
| = self.0.len() {
None
} else {
Some(self.0.remove(index))
}
}
/// Update a | identifier_body |
spline.rs | //! Spline curves and operations.
#[cfg(feature = "serialization")] use serde_derive::{Deserialize, Serialize};
#[cfg(not(feature = "std"))] use alloc::vec::Vec;
#[cfg(feature = "std")] use std::cmp::Ordering;
#[cfg(feature = "std")] use std::ops::{Div, Mul};
#[cfg(not(feature = "std"))] use core::ops::{Div, Mul};
#[cfg(not(feature = "std"))] use core::cmp::Ordering;
use crate::interpolate::{Interpolate, Additive, One, Trigo};
use crate::interpolation::Interpolation;
use crate::key::Key;
/// Spline curve used to provide interpolation between control points (keys).
///
/// Splines are made out of control points ([`Key`]). When creating a [`Spline`] with
/// [`Spline::from_vec`] or [`Spline::from_iter`], the keys don’t have to be sorted (they are sorted
/// automatically by the sampling value).
///
/// You can sample from a spline with several functions:
///
/// - [`Spline::sample`]: allows you to sample from a spline. If not enough keys are available
/// for the required interpolation mode, you get `None`.
/// - [`Spline::clamped_sample`]: behaves like [`Spline::sample`] but will return either the first
/// or last key if out of bound; it will return `None` if not enough key.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serialization", derive(Deserialize, Serialize))]
pub struct Spline<T, V>(pub(crate) Vec<Key<T, V>>);
impl<T, V> Spline<T, V> {
/// Internal sort to ensure invariant of sorting keys is valid.
fn internal_sort(&mut self) where T: PartialOrd {
self.0.sort_by(|k0, k1| k0.t.partial_cmp(&k1.t).unwrap_or(Ordering::Less));
}
/// Create a new spline out of keys. The keys don’t have to be sorted even though it’s recommended
/// to provide ascending sorted ones (for performance purposes).
pub fn from_vec(keys: Vec<Key<T, V>>) -> Self where T: PartialOrd {
let mut spline = Spline(keys);
spline.internal_sort();
spline
}
/// Create a new spline by consuming an `Iterater<Item = Key<T>>`. They keys don’t have to be
/// sorted.
///
/// # Note on iterators
///
/// It’s valid to use any iterator that implements `Iterator<Item = Key<T>>`. However, you should
/// use [`Spline::from_vec`] if you are passing a [`Vec`].
pub fn from_iter<I>(iter: I) -> Self where I: Iterator<Item = Key<T, V>>, T: PartialOrd {
Self::from_vec(iter.collect())
}
/// Retrieve the keys of a spline.
pub fn keys(&self) -> &[Key<T, V>] {
&self.0
}
/// Number of keys.
#[inline(always)]
pub fn len(&self) -> usize {
self.0.len()
}
/// Check whether the spline has no key.
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Sample a spline at a given time, returning the interpolated value along with its associated
/// key.
///
/// The current implementation, based on immutability, cannot perform in constant time. This means
/// that sampling’s processing complexity is currently *O(log n)*. It’s possible to achieve *O(1)*
/// performance by using a slightly different spline type. If you are interested by this feature,
/// an implementation for a dedicated type is foreseen yet not started yet.
///
/// # Return
///
/// `None` if you try to sample a value at a time that has no key associated with. That can also
/// happen if you try to sample between two keys with a specific interpolation mode that makes the
/// sampling impossible. For instance, [`Interpolation::CatmullRom`] requires *four* keys. If
/// you’re near the beginning of the spline or its end, ensure you have enough keys around to make
/// the sampling.
pub fn sample_with_key(&self, t: T) -> Option<(V, &Key<T, V>, Option<&Key<T, V>>)>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
let keys = &self.0;
let i = search_lower_cp(keys, t)?;
let cp0 = &keys[i];
match cp0.interpolation {
Interpolation::Step(threshold) => {
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value = if nt < threshold { cp0.value } else { cp1.value };
Some((value, cp0, Some(cp1)))
}
Interpolation::Linear => {
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value = Interpolate::lerp(cp0.value, cp1.value, nt);
Some((value, cp0, Some(cp1)))
}
Interpolation::Cosine => {
let two_t = T::one() + T::one();
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let cos_nt = (T::one() - (nt * T::pi()).cos()) / two_t;
let value = Interpolate::lerp(cp0.value, cp1.value, cos_nt);
Some((value, cp0, Some(cp1)))
}
Interpolation::CatmullRom => {
// We need at least four points for Catmull Rom; ensure we have them, otherwise, return
// None.
if i == 0 || i >= keys.len() - 2 {
None
} else {
let cp1 = &keys[i + 1];
let cpm0 = &keys[i - 1];
let cpm1 = &keys[i + 2];
let nt = normalize_time(t, cp0, cp1);
let value = Interpolate::cubic_hermite((cpm0.value, cpm0.t), (cp0.value, cp0.t), (cp1.value, cp1.t), (cpm1.value, cpm1.t), nt);
Some((value, cp0, Some(cp1)))
}
}
Interpolation::Bezier(u) => {
// We need to check the next control point to see whether we want quadratic or cubic Bezier.
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value =
if let Interpolation::Bezier(v) = cp1.interpolation {
Interpolate::cubic_bezier(cp0.value, u, v, cp1.value, nt)
} else {
Interpolate::quadratic_bezier(cp0.value, u, cp1.value, nt)
};
Some((value, cp0, Some(cp1)))
}
Interpolation::StrokeBezier(input, output) => {
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value = Interpolate::cubic_bezier(cp0.value, input, output, cp1.value, nt);
Some((value, cp0, Some(cp1)))
}
Interpolation::__NonExhaustive => unreachable!(),
}
}
/// Sample a spline at a given time.
///
pub fn sample(&self, t: T) -> Option<V>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
self.sample_with_key(t).map(|(v, _, _)| v)
}
/// Sample a spline at a given time with clamping, returning the interpolated value along with its
/// associated key.
///
/// # Return
///
/// If you sample before the first key or after the last one, return the first key or the last
/// one, respectively. Otherwise, behave the same way as [`Spline::sample`].
///
/// # Error
///
/// This function returns [`None`] if you have no key.
pub fn clamped_sample_with_key(&self, t: T) -> Option<(V, &Key<T, V>, Option<&Key<T, V>>)>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
if self.0.is_empty() {
return None;
}
self.sample_with_key(t).or_else(move || {
let first = self.0.first().unwrap();
if t <= first.t {
let second = if self.0.len() >= 2 { Some(&self.0[1]) } else { None };
Some((first.value, &first, second))
} else {
let last = self.0.last().unwrap();
if t >= last.t {
Some((last.value, &last, None))
} else {
None
}
}
})
}
/// Sample a spline at a given time with clamping.
pub fn clamped_sample(&self, t: T) -> Option<V>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
self.clamped_sample_with_key(t).map(|(v, _, _)| v)
}
/// Add a key into the spline.
pub fn add(&mut self, key: Key<T, V>) where T: PartialOrd {
self.0.push(key);
self.internal_sort();
}
/// Remove a key from the spline.
pub fn remove(&mut self, index: usize) -> Option<Key<T, V>> {
if index >= self.0.len() {
None
} else {
Some(self.0.remove(index))
}
}
/// Update a key and return the key already present.
///
/// The key is updated — if present — with the provided function.
///
/// # Notes
///
/// That function makes sense only if you want to change the interpolator (i.e. [`Key::t`]) of
/// your key. If you just want to change the interpolation mode or the carried value, consider
/// using the [`Spline::get_mut`] method instead as it will be way faster.
pub fn replace<F>(
&mut self,
index: usize,
f: F
) -> Option<Key<T, V>>
where
F: FnOnce(&Key<T, V>) -> Key<T, V>,
T: PartialOrd
{
let key = self.remove(index)?;
self.add(f(&key));
Some(key)
}
/// Get a key at a given index.
pub fn get(&self, index: usize) -> Option<&Key<T, V>> {
self.0.get(index)
}
/// Mutably get a key at a given index.
pub fn get_mut(&mut self, index: usize) -> Option<KeyMut<T, V>> {
self.0.get_mut(index).map(|key| KeyMut {
value: &mut key.value,
interpolation: &mut key.interpolation
})
}
}
/// A mutable [`Key`].
///
/// Mutable keys allow to edit the carried values and the interpolation mode but not the actual
/// interpolator value as it would invalidate the internal structure of the [`Spline`]. If you
/// want to achieve this, you’re advised to use [`Spline::replace`].
pub struct KeyMut<'a, T, V> {
/ | ried value.
pub value: &'a mut V,
/// Interpolation mode to use for that key.
pub interpolation: &'a mut Interpolation<T, V>,
}
// Normalize a time ([0;1]) given two control points.
#[inline(always)]
pub(crate) fn normalize_time<T, V>(
t: T,
cp: &Key<T, V>,
cp1: &Key<T, V>
) -> T where T: Additive + Div<T, Output = T> + PartialEq {
assert!(cp1.t != cp.t, "overlapping keys");
(t - cp.t) / (cp1.t - cp.t)
}
// Find the lower control point corresponding to a given time.
fn search_lower_cp<T, V>(cps: &[Key<T, V>], t: T) -> Option<usize> where T: PartialOrd {
let mut i = 0;
let len = cps.len();
if len < 2 {
return None;
}
loop {
let cp = &cps[i];
let cp1 = &cps[i+1];
if t >= cp1.t {
if i >= len - 2 {
return None;
}
i += 1;
} else if t < cp.t {
if i == 0 {
return None;
}
i -= 1;
} else {
break; // found
}
}
Some(i)
}
| // Car | identifier_name |
spline.rs | //! Spline curves and operations.
#[cfg(feature = "serialization")] use serde_derive::{Deserialize, Serialize};
#[cfg(not(feature = "std"))] use alloc::vec::Vec;
#[cfg(feature = "std")] use std::cmp::Ordering;
#[cfg(feature = "std")] use std::ops::{Div, Mul};
#[cfg(not(feature = "std"))] use core::ops::{Div, Mul};
#[cfg(not(feature = "std"))] use core::cmp::Ordering;
use crate::interpolate::{Interpolate, Additive, One, Trigo};
use crate::interpolation::Interpolation;
use crate::key::Key;
/// Spline curve used to provide interpolation between control points (keys).
///
/// Splines are made out of control points ([`Key`]). When creating a [`Spline`] with
/// [`Spline::from_vec`] or [`Spline::from_iter`], the keys don’t have to be sorted (they are sorted
/// automatically by the sampling value).
///
/// You can sample from a spline with several functions:
///
/// - [`Spline::sample`]: allows you to sample from a spline. If not enough keys are available
/// for the required interpolation mode, you get `None`.
/// - [`Spline::clamped_sample`]: behaves like [`Spline::sample`] but will return either the first
/// or last key if out of bound; it will return `None` if not enough key.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serialization", derive(Deserialize, Serialize))]
pub struct Spline<T, V>(pub(crate) Vec<Key<T, V>>);
impl<T, V> Spline<T, V> {
/// Internal sort to ensure invariant of sorting keys is valid.
fn internal_sort(&mut self) where T: PartialOrd {
self.0.sort_by(|k0, k1| k0.t.partial_cmp(&k1.t).unwrap_or(Ordering::Less));
}
/// Create a new spline out of keys. The keys don’t have to be sorted even though it’s recommended
/// to provide ascending sorted ones (for performance purposes).
pub fn from_vec(keys: Vec<Key<T, V>>) -> Self where T: PartialOrd {
let mut spline = Spline(keys);
spline.internal_sort();
spline
}
/// Create a new spline by consuming an `Iterater<Item = Key<T>>`. They keys don’t have to be
/// sorted.
///
/// # Note on iterators
///
/// It’s valid to use any iterator that implements `Iterator<Item = Key<T>>`. However, you should
/// use [`Spline::from_vec`] if you are passing a [`Vec`].
pub fn from_iter<I>(iter: I) -> Self where I: Iterator<Item = Key<T, V>>, T: PartialOrd {
Self::from_vec(iter.collect())
}
/// Retrieve the keys of a spline.
pub fn keys(&self) -> &[Key<T, V>] {
&self.0
}
/// Number of keys.
#[inline(always)]
pub fn len(&self) -> usize {
self.0.len()
}
/// Check whether the spline has no key.
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Sample a spline at a given time, returning the interpolated value along with its associated
/// key.
///
/// The current implementation, based on immutability, cannot perform in constant time. This means
/// that sampling’s processing complexity is currently *O(log n)*. It’s possible to achieve *O(1)*
/// performance by using a slightly different spline type. If you are interested by this feature,
/// an implementation for a dedicated type is foreseen yet not started yet.
///
/// # Return
///
/// `None` if you try to sample a value at a time that has no key associated with. That can also
/// happen if you try to sample between two keys with a specific interpolation mode that makes the
/// sampling impossible. For instance, [`Interpolation::CatmullRom`] requires *four* keys. If
/// you’re near the beginning of the spline or its end, ensure you have enough keys around to make
/// the sampling.
pub fn sample_with_key(&self, t: T) -> Option<(V, &Key<T, V>, Option<&Key<T, V>>)>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
let keys = &self.0;
let i = search_lower_cp(keys, t)?;
let cp0 = &keys[i];
match cp0.interpolation {
Interpolation::Step(threshold) => {
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value = if nt < threshold { cp0.value } else { cp1.value };
Some((value, cp0, Some(cp1)))
} | Interpolation::Linear => {
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value = Interpolate::lerp(cp0.value, cp1.value, nt);
Some((value, cp0, Some(cp1)))
}
Interpolation::Cosine => {
let two_t = T::one() + T::one();
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let cos_nt = (T::one() - (nt * T::pi()).cos()) / two_t;
let value = Interpolate::lerp(cp0.value, cp1.value, cos_nt);
Some((value, cp0, Some(cp1)))
}
Interpolation::CatmullRom => {
// We need at least four points for Catmull Rom; ensure we have them, otherwise, return
// None.
if i == 0 || i >= keys.len() - 2 {
None
} else {
let cp1 = &keys[i + 1];
let cpm0 = &keys[i - 1];
let cpm1 = &keys[i + 2];
let nt = normalize_time(t, cp0, cp1);
let value = Interpolate::cubic_hermite((cpm0.value, cpm0.t), (cp0.value, cp0.t), (cp1.value, cp1.t), (cpm1.value, cpm1.t), nt);
Some((value, cp0, Some(cp1)))
}
}
Interpolation::Bezier(u) => {
// We need to check the next control point to see whether we want quadratic or cubic Bezier.
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value =
if let Interpolation::Bezier(v) = cp1.interpolation {
Interpolate::cubic_bezier(cp0.value, u, v, cp1.value, nt)
} else {
Interpolate::quadratic_bezier(cp0.value, u, cp1.value, nt)
};
Some((value, cp0, Some(cp1)))
}
Interpolation::StrokeBezier(input, output) => {
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value = Interpolate::cubic_bezier(cp0.value, input, output, cp1.value, nt);
Some((value, cp0, Some(cp1)))
}
Interpolation::__NonExhaustive => unreachable!(),
}
}
/// Sample a spline at a given time.
///
pub fn sample(&self, t: T) -> Option<V>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
self.sample_with_key(t).map(|(v, _, _)| v)
}
/// Sample a spline at a given time with clamping, returning the interpolated value along with its
/// associated key.
///
/// # Return
///
/// If you sample before the first key or after the last one, return the first key or the last
/// one, respectively. Otherwise, behave the same way as [`Spline::sample`].
///
/// # Error
///
/// This function returns [`None`] if you have no key.
pub fn clamped_sample_with_key(&self, t: T) -> Option<(V, &Key<T, V>, Option<&Key<T, V>>)>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
if self.0.is_empty() {
return None;
}
self.sample_with_key(t).or_else(move || {
let first = self.0.first().unwrap();
if t <= first.t {
let second = if self.0.len() >= 2 { Some(&self.0[1]) } else { None };
Some((first.value, &first, second))
} else {
let last = self.0.last().unwrap();
if t >= last.t {
Some((last.value, &last, None))
} else {
None
}
}
})
}
/// Sample a spline at a given time with clamping.
pub fn clamped_sample(&self, t: T) -> Option<V>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
self.clamped_sample_with_key(t).map(|(v, _, _)| v)
}
/// Add a key into the spline.
pub fn add(&mut self, key: Key<T, V>) where T: PartialOrd {
self.0.push(key);
self.internal_sort();
}
/// Remove a key from the spline.
pub fn remove(&mut self, index: usize) -> Option<Key<T, V>> {
if index >= self.0.len() {
None
} else {
Some(self.0.remove(index))
}
}
/// Update a key and return the key already present.
///
/// The key is updated — if present — with the provided function.
///
/// # Notes
///
/// That function makes sense only if you want to change the interpolator (i.e. [`Key::t`]) of
/// your key. If you just want to change the interpolation mode or the carried value, consider
/// using the [`Spline::get_mut`] method instead as it will be way faster.
pub fn replace<F>(
&mut self,
index: usize,
f: F
) -> Option<Key<T, V>>
where
F: FnOnce(&Key<T, V>) -> Key<T, V>,
T: PartialOrd
{
let key = self.remove(index)?;
self.add(f(&key));
Some(key)
}
/// Get a key at a given index.
pub fn get(&self, index: usize) -> Option<&Key<T, V>> {
self.0.get(index)
}
/// Mutably get a key at a given index.
pub fn get_mut(&mut self, index: usize) -> Option<KeyMut<T, V>> {
self.0.get_mut(index).map(|key| KeyMut {
value: &mut key.value,
interpolation: &mut key.interpolation
})
}
}
/// A mutable [`Key`].
///
/// Mutable keys allow to edit the carried values and the interpolation mode but not the actual
/// interpolator value as it would invalidate the internal structure of the [`Spline`]. If you
/// want to achieve this, you’re advised to use [`Spline::replace`].
pub struct KeyMut<'a, T, V> {
/// Carried value.
pub value: &'a mut V,
/// Interpolation mode to use for that key.
pub interpolation: &'a mut Interpolation<T, V>,
}
// Normalize a time ([0;1]) given two control points.
#[inline(always)]
pub(crate) fn normalize_time<T, V>(
t: T,
cp: &Key<T, V>,
cp1: &Key<T, V>
) -> T where T: Additive + Div<T, Output = T> + PartialEq {
assert!(cp1.t != cp.t, "overlapping keys");
(t - cp.t) / (cp1.t - cp.t)
}
// Find the lower control point corresponding to a given time.
fn search_lower_cp<T, V>(cps: &[Key<T, V>], t: T) -> Option<usize> where T: PartialOrd {
let mut i = 0;
let len = cps.len();
if len < 2 {
return None;
}
loop {
let cp = &cps[i];
let cp1 = &cps[i+1];
if t >= cp1.t {
if i >= len - 2 {
return None;
}
i += 1;
} else if t < cp.t {
if i == 0 {
return None;
}
i -= 1;
} else {
break; // found
}
}
Some(i)
} | random_line_split | |
spline.rs | //! Spline curves and operations.
#[cfg(feature = "serialization")] use serde_derive::{Deserialize, Serialize};
#[cfg(not(feature = "std"))] use alloc::vec::Vec;
#[cfg(feature = "std")] use std::cmp::Ordering;
#[cfg(feature = "std")] use std::ops::{Div, Mul};
#[cfg(not(feature = "std"))] use core::ops::{Div, Mul};
#[cfg(not(feature = "std"))] use core::cmp::Ordering;
use crate::interpolate::{Interpolate, Additive, One, Trigo};
use crate::interpolation::Interpolation;
use crate::key::Key;
/// Spline curve used to provide interpolation between control points (keys).
///
/// Splines are made out of control points ([`Key`]). When creating a [`Spline`] with
/// [`Spline::from_vec`] or [`Spline::from_iter`], the keys don’t have to be sorted (they are sorted
/// automatically by the sampling value).
///
/// You can sample from a spline with several functions:
///
/// - [`Spline::sample`]: allows you to sample from a spline. If not enough keys are available
/// for the required interpolation mode, you get `None`.
/// - [`Spline::clamped_sample`]: behaves like [`Spline::sample`] but will return either the first
/// or last key if out of bound; it will return `None` if not enough key.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serialization", derive(Deserialize, Serialize))]
pub struct Spline<T, V>(pub(crate) Vec<Key<T, V>>);
impl<T, V> Spline<T, V> {
/// Internal sort to ensure invariant of sorting keys is valid.
fn internal_sort(&mut self) where T: PartialOrd {
self.0.sort_by(|k0, k1| k0.t.partial_cmp(&k1.t).unwrap_or(Ordering::Less));
}
/// Create a new spline out of keys. The keys don’t have to be sorted even though it’s recommended
/// to provide ascending sorted ones (for performance purposes).
pub fn from_vec(keys: Vec<Key<T, V>>) -> Self where T: PartialOrd {
let mut spline = Spline(keys);
spline.internal_sort();
spline
}
/// Create a new spline by consuming an `Iterater<Item = Key<T>>`. They keys don’t have to be
/// sorted.
///
/// # Note on iterators
///
/// It’s valid to use any iterator that implements `Iterator<Item = Key<T>>`. However, you should
/// use [`Spline::from_vec`] if you are passing a [`Vec`].
pub fn from_iter<I>(iter: I) -> Self where I: Iterator<Item = Key<T, V>>, T: PartialOrd {
Self::from_vec(iter.collect())
}
/// Retrieve the keys of a spline.
pub fn keys(&self) -> &[Key<T, V>] {
&self.0
}
/// Number of keys.
#[inline(always)]
pub fn len(&self) -> usize {
self.0.len()
}
/// Check whether the spline has no key.
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Sample a spline at a given time, returning the interpolated value along with its associated
/// key.
///
/// The current implementation, based on immutability, cannot perform in constant time. This means
/// that sampling’s processing complexity is currently *O(log n)*. It’s possible to achieve *O(1)*
/// performance by using a slightly different spline type. If you are interested by this feature,
/// an implementation for a dedicated type is foreseen yet not started yet.
///
/// # Return
///
/// `None` if you try to sample a value at a time that has no key associated with. That can also
/// happen if you try to sample between two keys with a specific interpolation mode that makes the
/// sampling impossible. For instance, [`Interpolation::CatmullRom`] requires *four* keys. If
/// you’re near the beginning of the spline or its end, ensure you have enough keys around to make
/// the sampling.
pub fn sample_with_key(&self, t: T) -> Option<(V, &Key<T, V>, Option<&Key<T, V>>)>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
let keys = &self.0;
let i = search_lower_cp(keys, t)?;
let cp0 = &keys[i];
match cp0.interpolation {
Interpolation::Step(threshold) => {
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value = if nt < threshold { cp0.value } else { cp1.value };
Some((value, cp0, Some(cp1)))
}
Interpolation::Linear => {
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value = Interpolate::lerp(cp0.value, cp1.value, nt);
Some((value, cp0, Some(cp1)))
}
Interpolation::Cosine => {
let two_t = T::one() + T::one();
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let cos_nt = (T::one() - (nt * T::pi()).cos()) / two_t;
let value = Interpolate::lerp(cp0.value, cp1.value, cos_nt);
Some((value, cp0, Some(cp1)))
}
Interpolation::CatmullRom => {
// We need at least four points for Catmull Rom; ensure we have them, otherwise, return
// None.
if i == 0 || i >= keys.len() - 2 {
None
} else {
let cp1 = &keys[i + 1];
let cpm0 = &keys[i - 1];
let cpm1 = &keys[i + 2];
let nt = normalize_time(t, cp0, cp1);
let value = Interpolate::cubic_hermite((cpm0.value, cpm0.t), (cp0.value, cp0.t), (cp1.value, cp1.t), (cpm1.value, cpm1.t), nt);
Some((value, cp0, Some(cp1)))
}
}
Interpolation::Bezier(u) => {
// We need to check the next control point to see whether we want quadratic or cubic Bezier.
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value =
if let Interpolation::Bezier(v) = cp1.interpolation {
Interpolate::cubic_bezier(cp0.value, u, v, cp1.value, nt)
} else {
Interpolate::quadratic_bezier(cp0.value, u, cp1.value, nt)
};
Some((value, cp0, Some(cp1)))
}
Interpolation::StrokeBezier(input, output) => {
let cp1 = &keys[i + 1];
let nt = normalize_time(t, cp0, cp1);
let value = Interpolate::cubic_bezier(cp0.value, input, output, cp1.value, nt);
Some((value, cp0, Some(cp1)))
}
Interpolation::__NonExhaustive => unreachable!(),
}
}
/// Sample a spline at a given time.
///
pub fn sample(&self, t: T) -> Option<V>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
self.sample_with_key(t).map(|(v, _, _)| v)
}
/// Sample a spline at a given time with clamping, returning the interpolated value along with its
/// associated key.
///
/// # Return
///
/// If you sample before the first key or after the last one, return the first key or the last
/// one, respectively. Otherwise, behave the same way as [`Spline::sample`].
///
/// # Error
///
/// This function returns [`None`] if you have no key.
pub fn clamped_sample_with_key(&self, t: T) -> Option<(V, &Key<T, V>, Option<&Key<T, V>>)>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
if self.0.is_empty() {
return None;
}
self.sample_with_key(t).or_else(move || {
let first = self.0.first().unwrap();
if t <= first.t {
let second = if self.0.len() >= 2 { Some(&self.0[1]) } else { None };
Some((first.value, &first, second))
} else {
let last = self.0.last().unwrap();
if t >= last.t {
Some | None
}
}
})
}
/// Sample a spline at a given time with clamping.
pub fn clamped_sample(&self, t: T) -> Option<V>
where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
V: Interpolate<T> {
self.clamped_sample_with_key(t).map(|(v, _, _)| v)
}
/// Add a key into the spline.
pub fn add(&mut self, key: Key<T, V>) where T: PartialOrd {
self.0.push(key);
self.internal_sort();
}
/// Remove a key from the spline.
pub fn remove(&mut self, index: usize) -> Option<Key<T, V>> {
if index >= self.0.len() {
None
} else {
Some(self.0.remove(index))
}
}
/// Update a key and return the key already present.
///
/// The key is updated — if present — with the provided function.
///
/// # Notes
///
/// That function makes sense only if you want to change the interpolator (i.e. [`Key::t`]) of
/// your key. If you just want to change the interpolation mode or the carried value, consider
/// using the [`Spline::get_mut`] method instead as it will be way faster.
pub fn replace<F>(
&mut self,
index: usize,
f: F
) -> Option<Key<T, V>>
where
F: FnOnce(&Key<T, V>) -> Key<T, V>,
T: PartialOrd
{
let key = self.remove(index)?;
self.add(f(&key));
Some(key)
}
/// Get a key at a given index.
pub fn get(&self, index: usize) -> Option<&Key<T, V>> {
self.0.get(index)
}
/// Mutably get a key at a given index.
pub fn get_mut(&mut self, index: usize) -> Option<KeyMut<T, V>> {
self.0.get_mut(index).map(|key| KeyMut {
value: &mut key.value,
interpolation: &mut key.interpolation
})
}
}
/// A mutable [`Key`].
///
/// Mutable keys allow to edit the carried values and the interpolation mode but not the actual
/// interpolator value as it would invalidate the internal structure of the [`Spline`]. If you
/// want to achieve this, you’re advised to use [`Spline::replace`].
pub struct KeyMut<'a, T, V> {
/// Carried value.
pub value: &'a mut V,
/// Interpolation mode to use for that key.
pub interpolation: &'a mut Interpolation<T, V>,
}
// Normalize a time ([0;1]) given two control points.
#[inline(always)]
pub(crate) fn normalize_time<T, V>(
t: T,
cp: &Key<T, V>,
cp1: &Key<T, V>
) -> T where T: Additive + Div<T, Output = T> + PartialEq {
assert!(cp1.t != cp.t, "overlapping keys");
(t - cp.t) / (cp1.t - cp.t)
}
// Find the lower control point corresponding to a given time.
fn search_lower_cp<T, V>(cps: &[Key<T, V>], t: T) -> Option<usize> where T: PartialOrd {
let mut i = 0;
let len = cps.len();
if len < 2 {
return None;
}
loop {
let cp = &cps[i];
let cp1 = &cps[i+1];
if t >= cp1.t {
if i >= len - 2 {
return None;
}
i += 1;
} else if t < cp.t {
if i == 0 {
return None;
}
i -= 1;
} else {
break; // found
}
}
Some(i)
}
| ((last.value, &last, None))
} else {
| conditional_block |
data_import.py | #!/usr/local/bin/python3
import functools
import glob
import os
import sys
from datetime import datetime
from collections import defaultdict
import csv
import json
import dexag_client
import oneinch_client
import paraswap_client
import totle_client
import exchange_utils
from v2_compare_prices import canonicalize_and_sort_splits
CSV_DATA_DIR = f"{os.path.dirname(os.path.abspath(__file__))}/outputs"
# don't lru_cache() a generator, the second time it will not produce any data
def csv_row_gen(file, only_splits=False, only_non_splits=False, only_totle_splits=False, only_totle_non_splits=False):
# print(f"csv_row_gen doing {file}, only_splits={only_splits}, only_non_splits={only_non_splits}) ...")
with open(file, newline='') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=None)
for row in reader:
splits = canonicalize_and_sort_splits(row.get('splits'))
totle_splits = canonicalize_and_sort_splits(row.get('totle_splits'))
if only_splits and len(splits) < 2: continue
if only_totle_splits and len(totle_splits) < 2: continue
if only_non_splits and len(splits) > 1: continue
if only_totle_non_splits and len(totle_splits) > 1: continue
id, time, action = row['id'], row['time'], row['action'] # datetime.fromisoformat(row['time']).isoformat(' ', 'seconds')
trade_size, token = float(row['trade_size']), row['token']
exchange, exchange_price = row['exchange'], float(row['exchange_price'])
totle_used, totle_price, pct_savings = row['totle_used'], float(row['totle_price']), float(row['pct_savings']),
# Some older CSVs have the non-splittable dexs in the ex_prices column
ex_prices = exchange_utils.canonical_and_splittable(eval(row.get('ex_prices') or '{}'))
if pct_savings < -1.0:
print(f"{pct_savings} vs {exchange} buying {token} for {trade_size} ETH using {totle_used} {totle_splits} id={id}")
yield time, action, trade_size, token, exchange, exchange_price, totle_used, totle_price, pct_savings, splits, ex_prices
@functools.lru_cache()
def parse_csv_files(csv_files, **kwargs):
"""Returns 2 dicts containing pct savings and prices/split data both having the form
token: { trade_size: {exchange: [sample, sample, ...], ...}
kwargs have these defaults: only_splits=False, only_non_splits=False
"""
per_token_savings = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
slip_price_diff_splits = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for file in csv_files:
per_file_base_prices = {}
for _, _, trade_size, token, exchange, exchange_price, _, totle_price, pct_savings, splits, _ in csv_row_gen(file, **kwargs):
if not per_file_base_prices.get(token): # this assumes prices recorded from lowest to highest for a token
per_file_base_prices[token] = totle_price # should be same for all aggs, but is slightly different sometimes
slip = (totle_price / per_file_base_prices[token]) - 1.0 # should be 0 for the lowest trade_size
# i.e. slip = (totle_price - per_file_base_prices[token]) / per_file_base_prices[token]
slip = 0.0 if slip < 0.0 and slip > -0.00001 else slip # get rid of -0.0000
price_diff = (totle_price - exchange_price) / exchange_price
slip_price_diff_splits[token][trade_size][exchange].append((slip, price_diff, splits))
per_token_savings[token][trade_size][exchange].append(pct_savings)
return per_token_savings, slip_price_diff_splits
@functools.lru_cache()
def read_slippage_csvs(csv_files=None):
"""Returns a dict of price_slip_cost data points, i.e. {token: {trade_size: {exchange: [ (psc), (psc) ] }}}"""
csv_files = csv_files or glob.glob(f'{CSV_DATA_DIR}/*buy_slippage.csv')
tok_ts_ex_pscs = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for file in csv_files:
print(f"reading {file} ...")
f_exchange, f_token, *_ = os.path.basename(file).split('_')
with open(file, newline='') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=None)
# time,action,trade_size,token,exchange,exchange_price,slippage,cost
for row in reader:
# time = datetime.fromisoformat(row['time']).isoformat(' ', 'seconds')
trade_size = float(row['trade_size'])
tok_ts_ex_pscs[f_token][trade_size][f_exchange].append( (float(row['exchange_price']), float(row['slippage']), float(row['cost'])) )
return tok_ts_ex_pscs # TODO: don't return defaultdicts, users should get key errors
# generator
def pct_savings_gen(per_pair_savings):
"""Generates a sequence of (pair, trade_size, agg/exchange, [pct_savings]) for all leaves in the given dict"""
for pair, ts_ex_savings in sorted(per_pair_savings.items()):
for trade_size, ex_savings in ts_ex_savings.items():
for exchange, pct_savings in ex_savings.items():
yield pair, trade_size, exchange, pct_savings
########################################################################################################################
# JSON file aggregation functions
DEX_AG = dexag_client.name()
ONE_INCH = oneinch_client.name()
PARASWAP = paraswap_client.name()
TOTLE_EX = totle_client.name()
AGG_NAMES = [DEX_AG, ONE_INCH, PARASWAP]
JSON_DATA_DIR = f"{os.path.dirname(os.path.abspath(__file__))}/order_splitting_data"
@functools.lru_cache()
def get_all_splits_by_agg(files=None):
"""Returns an aggregated dict of split data, i.e. token: {trade_size: {agg: [{dex: pct, dex: pct}, {...}, ...]}}"""
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_splits_by_agg.json')
tok_ts_splits_by_agg = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for f in files:
for token, ts_splits_by_agg in json.load(open(f)).items():
for ts, agg_splits in ts_splits_by_agg.items():
for agg, split in agg_splits.items():
tok_ts_splits_by_agg[token][ts][agg].append(split)
return dict(sorted(tok_ts_splits_by_agg.items()))
@functools.lru_cache()
def get_all_dexs_with_pair(files=None):
"""Returns an aggregated dict of DEXs used in splits, i.e. token: {trade_size: [dex, dex, ...]}"""
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_dexs_with_pair.json')
tok_ts_dexs_with_pair = defaultdict(lambda: defaultdict(list))
for f in files:
for token, ts_dexs_with_pair in json.load(open(f)).items():
for ts, dexs in ts_dexs_with_pair.items():
tok_ts_dexs_with_pair[token][ts] = list(set(tok_ts_dexs_with_pair[token][ts] + dexs))
return dict(sorted(tok_ts_dexs_with_pair.items()))
@functools.lru_cache()
def get_all_agg_prices(files=None):
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_agg_prices.json')
tok_ts_agg_prices = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for f in files:
for token, ts_agg_prices in json.load(open(f)).items():
for ts, agg_prices in ts_agg_prices.items():
for agg, price in agg_prices.items():
tok_ts_agg_prices[token][ts][agg].append(price)
return dict(sorted(tok_ts_agg_prices.items()))
@functools.lru_cache()
def get_all_dex_prices(files=None):
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_dex_prices.json')
tok_ts_dex_prices = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for f in files:
|
return dict(sorted(tok_ts_dex_prices.items()))
# generator
def token_ts_agg_split_gen(tok_ts_splits_by_agg):
"""Generates a sequence of (token, trade_size, agg, split) for all leaves in the given dict"""
for token, ts_splits_by_agg in tok_ts_splits_by_agg.items():
for trade_size, agg_splits in ts_splits_by_agg.items():
for agg, splits in agg_splits.items():
for split in splits:
yield token, trade_size, agg, split
def sorted_unique_trade_sizes(tok_ts_splits_by_agg):
all_trade_sizes = set(trade_size for token, trade_size, agg, split in token_ts_agg_split_gen(tok_ts_splits_by_agg))
return list(map(str, sorted(map(float, all_trade_sizes))))
def tokens_split_pct(tok_ts_splits_by_agg, only_token=None, only_agg=None):
"""Returns a dict of token: {trade_size: split_pct}"""
result = defaultdict(dict)
n_samples, n_splits = defaultdict(lambda: defaultdict(int)), defaultdict(lambda: defaultdict(int))
for token, trade_size, agg, split in token_ts_agg_split_gen(tok_ts_splits_by_agg):
if only_token and token != only_token: continue
if only_agg and agg != only_agg: continue
n_samples[token][trade_size] += 1
if len(split) > 1: n_splits[token][trade_size] += 1
# if len(split) > 1: print(f"{token} {trade_size}: {split}")
result[token][trade_size] = (100.0 * n_splits[token][trade_size]) / n_samples[token][trade_size]
return result
| for token, ts_agg_prices in json.load(open(f)).items():
for ts, agg_prices in ts_agg_prices.items():
# test for agg_name keys because Totle's JSON structure is different from aggs
if any(map(lambda k: k in AGG_NAMES, agg_prices.keys())):
# agg dex_prices files look like this:
# "0.1": {
# "DEX.AG": {
# "Uniswap": 0.003936408446252657,
# "Bancor": 0.003993840558066265
# },
# "Paraswap": { ... }
for agg, prices in agg_prices.items():
tok_ts_dex_prices[token][ts][agg].append(prices)
else:
# Totle's dex_prices file looks like this:
# "0.1": {
# "Ether Delta": 0.00735650292064385,
# "Bancor": 0.003993865645004445,
# "Uniswap": 0.003936433172436365
# },
# "0.5": { ... }
# insert Totle as the agg_name in the aggregated data structure
tok_ts_dex_prices[token][ts][TOTLE_EX].append(agg_prices) | conditional_block |
data_import.py | #!/usr/local/bin/python3
import functools
import glob
import os
import sys
from datetime import datetime
from collections import defaultdict
import csv
import json
import dexag_client
import oneinch_client
import paraswap_client
import totle_client
import exchange_utils
from v2_compare_prices import canonicalize_and_sort_splits
CSV_DATA_DIR = f"{os.path.dirname(os.path.abspath(__file__))}/outputs"
# don't lru_cache() a generator, the second time it will not produce any data
def csv_row_gen(file, only_splits=False, only_non_splits=False, only_totle_splits=False, only_totle_non_splits=False):
# print(f"csv_row_gen doing {file}, only_splits={only_splits}, only_non_splits={only_non_splits}) ...")
with open(file, newline='') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=None)
for row in reader:
splits = canonicalize_and_sort_splits(row.get('splits'))
totle_splits = canonicalize_and_sort_splits(row.get('totle_splits'))
if only_splits and len(splits) < 2: continue
if only_totle_splits and len(totle_splits) < 2: continue
if only_non_splits and len(splits) > 1: continue
if only_totle_non_splits and len(totle_splits) > 1: continue
id, time, action = row['id'], row['time'], row['action'] # datetime.fromisoformat(row['time']).isoformat(' ', 'seconds')
trade_size, token = float(row['trade_size']), row['token']
exchange, exchange_price = row['exchange'], float(row['exchange_price'])
totle_used, totle_price, pct_savings = row['totle_used'], float(row['totle_price']), float(row['pct_savings']),
# Some older CSVs have the non-splittable dexs in the ex_prices column
ex_prices = exchange_utils.canonical_and_splittable(eval(row.get('ex_prices') or '{}'))
if pct_savings < -1.0:
print(f"{pct_savings} vs {exchange} buying {token} for {trade_size} ETH using {totle_used} {totle_splits} id={id}")
yield time, action, trade_size, token, exchange, exchange_price, totle_used, totle_price, pct_savings, splits, ex_prices
@functools.lru_cache()
def parse_csv_files(csv_files, **kwargs):
"""Returns 2 dicts containing pct savings and prices/split data both having the form
token: { trade_size: {exchange: [sample, sample, ...], ...}
kwargs have these defaults: only_splits=False, only_non_splits=False
"""
per_token_savings = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
slip_price_diff_splits = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for file in csv_files:
per_file_base_prices = {}
for _, _, trade_size, token, exchange, exchange_price, _, totle_price, pct_savings, splits, _ in csv_row_gen(file, **kwargs):
if not per_file_base_prices.get(token): # this assumes prices recorded from lowest to highest for a token
per_file_base_prices[token] = totle_price # should be same for all aggs, but is slightly different sometimes
slip = (totle_price / per_file_base_prices[token]) - 1.0 # should be 0 for the lowest trade_size
# i.e. slip = (totle_price - per_file_base_prices[token]) / per_file_base_prices[token]
slip = 0.0 if slip < 0.0 and slip > -0.00001 else slip # get rid of -0.0000
price_diff = (totle_price - exchange_price) / exchange_price
slip_price_diff_splits[token][trade_size][exchange].append((slip, price_diff, splits))
per_token_savings[token][trade_size][exchange].append(pct_savings)
return per_token_savings, slip_price_diff_splits
@functools.lru_cache()
def read_slippage_csvs(csv_files=None):
"""Returns a dict of price_slip_cost data points, i.e. {token: {trade_size: {exchange: [ (psc), (psc) ] }}}"""
csv_files = csv_files or glob.glob(f'{CSV_DATA_DIR}/*buy_slippage.csv')
tok_ts_ex_pscs = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for file in csv_files:
print(f"reading {file} ...")
f_exchange, f_token, *_ = os.path.basename(file).split('_')
with open(file, newline='') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=None)
# time,action,trade_size,token,exchange,exchange_price,slippage,cost
for row in reader:
# time = datetime.fromisoformat(row['time']).isoformat(' ', 'seconds')
trade_size = float(row['trade_size'])
tok_ts_ex_pscs[f_token][trade_size][f_exchange].append( (float(row['exchange_price']), float(row['slippage']), float(row['cost'])) )
return tok_ts_ex_pscs # TODO: don't return defaultdicts, users should get key errors
# generator
def pct_savings_gen(per_pair_savings):
"""Generates a sequence of (pair, trade_size, agg/exchange, [pct_savings]) for all leaves in the given dict"""
for pair, ts_ex_savings in sorted(per_pair_savings.items()):
for trade_size, ex_savings in ts_ex_savings.items():
for exchange, pct_savings in ex_savings.items():
yield pair, trade_size, exchange, pct_savings
########################################################################################################################
# JSON file aggregation functions
DEX_AG = dexag_client.name()
ONE_INCH = oneinch_client.name()
PARASWAP = paraswap_client.name()
TOTLE_EX = totle_client.name()
AGG_NAMES = [DEX_AG, ONE_INCH, PARASWAP]
JSON_DATA_DIR = f"{os.path.dirname(os.path.abspath(__file__))}/order_splitting_data"
@functools.lru_cache()
def get_all_splits_by_agg(files=None):
"""Returns an aggregated dict of split data, i.e. token: {trade_size: {agg: [{dex: pct, dex: pct}, {...}, ...]}}"""
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_splits_by_agg.json')
tok_ts_splits_by_agg = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for f in files:
for token, ts_splits_by_agg in json.load(open(f)).items():
for ts, agg_splits in ts_splits_by_agg.items():
for agg, split in agg_splits.items():
tok_ts_splits_by_agg[token][ts][agg].append(split)
return dict(sorted(tok_ts_splits_by_agg.items()))
@functools.lru_cache()
def get_all_dexs_with_pair(files=None):
|
@functools.lru_cache()
def get_all_agg_prices(files=None):
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_agg_prices.json')
tok_ts_agg_prices = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for f in files:
for token, ts_agg_prices in json.load(open(f)).items():
for ts, agg_prices in ts_agg_prices.items():
for agg, price in agg_prices.items():
tok_ts_agg_prices[token][ts][agg].append(price)
return dict(sorted(tok_ts_agg_prices.items()))
@functools.lru_cache()
def get_all_dex_prices(files=None):
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_dex_prices.json')
tok_ts_dex_prices = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for f in files:
for token, ts_agg_prices in json.load(open(f)).items():
for ts, agg_prices in ts_agg_prices.items():
# test for agg_name keys because Totle's JSON structure is different from aggs
if any(map(lambda k: k in AGG_NAMES, agg_prices.keys())):
# agg dex_prices files look like this:
# "0.1": {
# "DEX.AG": {
# "Uniswap": 0.003936408446252657,
# "Bancor": 0.003993840558066265
# },
# "Paraswap": { ... }
for agg, prices in agg_prices.items():
tok_ts_dex_prices[token][ts][agg].append(prices)
else:
# Totle's dex_prices file looks like this:
# "0.1": {
# "Ether Delta": 0.00735650292064385,
# "Bancor": 0.003993865645004445,
# "Uniswap": 0.003936433172436365
# },
# "0.5": { ... }
# insert Totle as the agg_name in the aggregated data structure
tok_ts_dex_prices[token][ts][TOTLE_EX].append(agg_prices)
return dict(sorted(tok_ts_dex_prices.items()))
# generator
def token_ts_agg_split_gen(tok_ts_splits_by_agg):
"""Generates a sequence of (token, trade_size, agg, split) for all leaves in the given dict"""
for token, ts_splits_by_agg in tok_ts_splits_by_agg.items():
for trade_size, agg_splits in ts_splits_by_agg.items():
for agg, splits in agg_splits.items():
for split in splits:
yield token, trade_size, agg, split
def sorted_unique_trade_sizes(tok_ts_splits_by_agg):
all_trade_sizes = set(trade_size for token, trade_size, agg, split in token_ts_agg_split_gen(tok_ts_splits_by_agg))
return list(map(str, sorted(map(float, all_trade_sizes))))
def tokens_split_pct(tok_ts_splits_by_agg, only_token=None, only_agg=None):
"""Returns a dict of token: {trade_size: split_pct}"""
result = defaultdict(dict)
n_samples, n_splits = defaultdict(lambda: defaultdict(int)), defaultdict(lambda: defaultdict(int))
for token, trade_size, agg, split in token_ts_agg_split_gen(tok_ts_splits_by_agg):
if only_token and token != only_token: continue
if only_agg and agg != only_agg: continue
n_samples[token][trade_size] += 1
if len(split) > 1: n_splits[token][trade_size] += 1
# if len(split) > 1: print(f"{token} {trade_size}: {split}")
result[token][trade_size] = (100.0 * n_splits[token][trade_size]) / n_samples[token][trade_size]
return result
| """Returns an aggregated dict of DEXs used in splits, i.e. token: {trade_size: [dex, dex, ...]}"""
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_dexs_with_pair.json')
tok_ts_dexs_with_pair = defaultdict(lambda: defaultdict(list))
for f in files:
for token, ts_dexs_with_pair in json.load(open(f)).items():
for ts, dexs in ts_dexs_with_pair.items():
tok_ts_dexs_with_pair[token][ts] = list(set(tok_ts_dexs_with_pair[token][ts] + dexs))
return dict(sorted(tok_ts_dexs_with_pair.items())) | identifier_body |
data_import.py | #!/usr/local/bin/python3
import functools
import glob
import os
import sys
from datetime import datetime
from collections import defaultdict
import csv
import json
import dexag_client
import oneinch_client
import paraswap_client
import totle_client
import exchange_utils
from v2_compare_prices import canonicalize_and_sort_splits
CSV_DATA_DIR = f"{os.path.dirname(os.path.abspath(__file__))}/outputs"
# don't lru_cache() a generator, the second time it will not produce any data
def csv_row_gen(file, only_splits=False, only_non_splits=False, only_totle_splits=False, only_totle_non_splits=False):
# print(f"csv_row_gen doing {file}, only_splits={only_splits}, only_non_splits={only_non_splits}) ...")
with open(file, newline='') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=None)
for row in reader:
splits = canonicalize_and_sort_splits(row.get('splits'))
totle_splits = canonicalize_and_sort_splits(row.get('totle_splits'))
if only_splits and len(splits) < 2: continue
if only_totle_splits and len(totle_splits) < 2: continue
if only_non_splits and len(splits) > 1: continue
if only_totle_non_splits and len(totle_splits) > 1: continue
id, time, action = row['id'], row['time'], row['action'] # datetime.fromisoformat(row['time']).isoformat(' ', 'seconds')
trade_size, token = float(row['trade_size']), row['token']
exchange, exchange_price = row['exchange'], float(row['exchange_price'])
totle_used, totle_price, pct_savings = row['totle_used'], float(row['totle_price']), float(row['pct_savings']),
# Some older CSVs have the non-splittable dexs in the ex_prices column
ex_prices = exchange_utils.canonical_and_splittable(eval(row.get('ex_prices') or '{}'))
if pct_savings < -1.0:
print(f"{pct_savings} vs {exchange} buying {token} for {trade_size} ETH using {totle_used} {totle_splits} id={id}")
yield time, action, trade_size, token, exchange, exchange_price, totle_used, totle_price, pct_savings, splits, ex_prices
@functools.lru_cache()
def parse_csv_files(csv_files, **kwargs):
"""Returns 2 dicts containing pct savings and prices/split data both having the form
token: { trade_size: {exchange: [sample, sample, ...], ...}
kwargs have these defaults: only_splits=False, only_non_splits=False
"""
per_token_savings = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
slip_price_diff_splits = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for file in csv_files:
per_file_base_prices = {}
for _, _, trade_size, token, exchange, exchange_price, _, totle_price, pct_savings, splits, _ in csv_row_gen(file, **kwargs):
if not per_file_base_prices.get(token): # this assumes prices recorded from lowest to highest for a token
per_file_base_prices[token] = totle_price # should be same for all aggs, but is slightly different sometimes
slip = (totle_price / per_file_base_prices[token]) - 1.0 # should be 0 for the lowest trade_size
# i.e. slip = (totle_price - per_file_base_prices[token]) / per_file_base_prices[token]
slip = 0.0 if slip < 0.0 and slip > -0.00001 else slip # get rid of -0.0000
price_diff = (totle_price - exchange_price) / exchange_price
slip_price_diff_splits[token][trade_size][exchange].append((slip, price_diff, splits))
per_token_savings[token][trade_size][exchange].append(pct_savings)
return per_token_savings, slip_price_diff_splits
@functools.lru_cache()
def read_slippage_csvs(csv_files=None):
"""Returns a dict of price_slip_cost data points, i.e. {token: {trade_size: {exchange: [ (psc), (psc) ] }}}"""
csv_files = csv_files or glob.glob(f'{CSV_DATA_DIR}/*buy_slippage.csv')
| for file in csv_files:
print(f"reading {file} ...")
f_exchange, f_token, *_ = os.path.basename(file).split('_')
with open(file, newline='') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=None)
# time,action,trade_size,token,exchange,exchange_price,slippage,cost
for row in reader:
# time = datetime.fromisoformat(row['time']).isoformat(' ', 'seconds')
trade_size = float(row['trade_size'])
tok_ts_ex_pscs[f_token][trade_size][f_exchange].append( (float(row['exchange_price']), float(row['slippage']), float(row['cost'])) )
return tok_ts_ex_pscs # TODO: don't return defaultdicts, users should get key errors
# generator
def pct_savings_gen(per_pair_savings):
"""Generates a sequence of (pair, trade_size, agg/exchange, [pct_savings]) for all leaves in the given dict"""
for pair, ts_ex_savings in sorted(per_pair_savings.items()):
for trade_size, ex_savings in ts_ex_savings.items():
for exchange, pct_savings in ex_savings.items():
yield pair, trade_size, exchange, pct_savings
########################################################################################################################
# JSON file aggregation functions
DEX_AG = dexag_client.name()
ONE_INCH = oneinch_client.name()
PARASWAP = paraswap_client.name()
TOTLE_EX = totle_client.name()
AGG_NAMES = [DEX_AG, ONE_INCH, PARASWAP]
JSON_DATA_DIR = f"{os.path.dirname(os.path.abspath(__file__))}/order_splitting_data"
@functools.lru_cache()
def get_all_splits_by_agg(files=None):
"""Returns an aggregated dict of split data, i.e. token: {trade_size: {agg: [{dex: pct, dex: pct}, {...}, ...]}}"""
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_splits_by_agg.json')
tok_ts_splits_by_agg = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for f in files:
for token, ts_splits_by_agg in json.load(open(f)).items():
for ts, agg_splits in ts_splits_by_agg.items():
for agg, split in agg_splits.items():
tok_ts_splits_by_agg[token][ts][agg].append(split)
return dict(sorted(tok_ts_splits_by_agg.items()))
@functools.lru_cache()
def get_all_dexs_with_pair(files=None):
"""Returns an aggregated dict of DEXs used in splits, i.e. token: {trade_size: [dex, dex, ...]}"""
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_dexs_with_pair.json')
tok_ts_dexs_with_pair = defaultdict(lambda: defaultdict(list))
for f in files:
for token, ts_dexs_with_pair in json.load(open(f)).items():
for ts, dexs in ts_dexs_with_pair.items():
tok_ts_dexs_with_pair[token][ts] = list(set(tok_ts_dexs_with_pair[token][ts] + dexs))
return dict(sorted(tok_ts_dexs_with_pair.items()))
@functools.lru_cache()
def get_all_agg_prices(files=None):
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_agg_prices.json')
tok_ts_agg_prices = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for f in files:
for token, ts_agg_prices in json.load(open(f)).items():
for ts, agg_prices in ts_agg_prices.items():
for agg, price in agg_prices.items():
tok_ts_agg_prices[token][ts][agg].append(price)
return dict(sorted(tok_ts_agg_prices.items()))
@functools.lru_cache()
def get_all_dex_prices(files=None):
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_dex_prices.json')
tok_ts_dex_prices = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for f in files:
for token, ts_agg_prices in json.load(open(f)).items():
for ts, agg_prices in ts_agg_prices.items():
# test for agg_name keys because Totle's JSON structure is different from aggs
if any(map(lambda k: k in AGG_NAMES, agg_prices.keys())):
# agg dex_prices files look like this:
# "0.1": {
# "DEX.AG": {
# "Uniswap": 0.003936408446252657,
# "Bancor": 0.003993840558066265
# },
# "Paraswap": { ... }
for agg, prices in agg_prices.items():
tok_ts_dex_prices[token][ts][agg].append(prices)
else:
# Totle's dex_prices file looks like this:
# "0.1": {
# "Ether Delta": 0.00735650292064385,
# "Bancor": 0.003993865645004445,
# "Uniswap": 0.003936433172436365
# },
# "0.5": { ... }
# insert Totle as the agg_name in the aggregated data structure
tok_ts_dex_prices[token][ts][TOTLE_EX].append(agg_prices)
return dict(sorted(tok_ts_dex_prices.items()))
# generator
def token_ts_agg_split_gen(tok_ts_splits_by_agg):
"""Generates a sequence of (token, trade_size, agg, split) for all leaves in the given dict"""
for token, ts_splits_by_agg in tok_ts_splits_by_agg.items():
for trade_size, agg_splits in ts_splits_by_agg.items():
for agg, splits in agg_splits.items():
for split in splits:
yield token, trade_size, agg, split
def sorted_unique_trade_sizes(tok_ts_splits_by_agg):
all_trade_sizes = set(trade_size for token, trade_size, agg, split in token_ts_agg_split_gen(tok_ts_splits_by_agg))
return list(map(str, sorted(map(float, all_trade_sizes))))
def tokens_split_pct(tok_ts_splits_by_agg, only_token=None, only_agg=None):
"""Returns a dict of token: {trade_size: split_pct}"""
result = defaultdict(dict)
n_samples, n_splits = defaultdict(lambda: defaultdict(int)), defaultdict(lambda: defaultdict(int))
for token, trade_size, agg, split in token_ts_agg_split_gen(tok_ts_splits_by_agg):
if only_token and token != only_token: continue
if only_agg and agg != only_agg: continue
n_samples[token][trade_size] += 1
if len(split) > 1: n_splits[token][trade_size] += 1
# if len(split) > 1: print(f"{token} {trade_size}: {split}")
result[token][trade_size] = (100.0 * n_splits[token][trade_size]) / n_samples[token][trade_size]
return result | tok_ts_ex_pscs = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
| random_line_split |
data_import.py | #!/usr/local/bin/python3
import functools
import glob
import os
import sys
from datetime import datetime
from collections import defaultdict
import csv
import json
import dexag_client
import oneinch_client
import paraswap_client
import totle_client
import exchange_utils
from v2_compare_prices import canonicalize_and_sort_splits
CSV_DATA_DIR = f"{os.path.dirname(os.path.abspath(__file__))}/outputs"
# don't lru_cache() a generator, the second time it will not produce any data
def csv_row_gen(file, only_splits=False, only_non_splits=False, only_totle_splits=False, only_totle_non_splits=False):
# print(f"csv_row_gen doing {file}, only_splits={only_splits}, only_non_splits={only_non_splits}) ...")
with open(file, newline='') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=None)
for row in reader:
splits = canonicalize_and_sort_splits(row.get('splits'))
totle_splits = canonicalize_and_sort_splits(row.get('totle_splits'))
if only_splits and len(splits) < 2: continue
if only_totle_splits and len(totle_splits) < 2: continue
if only_non_splits and len(splits) > 1: continue
if only_totle_non_splits and len(totle_splits) > 1: continue
id, time, action = row['id'], row['time'], row['action'] # datetime.fromisoformat(row['time']).isoformat(' ', 'seconds')
trade_size, token = float(row['trade_size']), row['token']
exchange, exchange_price = row['exchange'], float(row['exchange_price'])
totle_used, totle_price, pct_savings = row['totle_used'], float(row['totle_price']), float(row['pct_savings']),
# Some older CSVs have the non-splittable dexs in the ex_prices column
ex_prices = exchange_utils.canonical_and_splittable(eval(row.get('ex_prices') or '{}'))
if pct_savings < -1.0:
print(f"{pct_savings} vs {exchange} buying {token} for {trade_size} ETH using {totle_used} {totle_splits} id={id}")
yield time, action, trade_size, token, exchange, exchange_price, totle_used, totle_price, pct_savings, splits, ex_prices
@functools.lru_cache()
def parse_csv_files(csv_files, **kwargs):
"""Returns 2 dicts containing pct savings and prices/split data both having the form
token: { trade_size: {exchange: [sample, sample, ...], ...}
kwargs have these defaults: only_splits=False, only_non_splits=False
"""
per_token_savings = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
slip_price_diff_splits = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for file in csv_files:
per_file_base_prices = {}
for _, _, trade_size, token, exchange, exchange_price, _, totle_price, pct_savings, splits, _ in csv_row_gen(file, **kwargs):
if not per_file_base_prices.get(token): # this assumes prices recorded from lowest to highest for a token
per_file_base_prices[token] = totle_price # should be same for all aggs, but is slightly different sometimes
slip = (totle_price / per_file_base_prices[token]) - 1.0 # should be 0 for the lowest trade_size
# i.e. slip = (totle_price - per_file_base_prices[token]) / per_file_base_prices[token]
slip = 0.0 if slip < 0.0 and slip > -0.00001 else slip # get rid of -0.0000
price_diff = (totle_price - exchange_price) / exchange_price
slip_price_diff_splits[token][trade_size][exchange].append((slip, price_diff, splits))
per_token_savings[token][trade_size][exchange].append(pct_savings)
return per_token_savings, slip_price_diff_splits
@functools.lru_cache()
def read_slippage_csvs(csv_files=None):
"""Returns a dict of price_slip_cost data points, i.e. {token: {trade_size: {exchange: [ (psc), (psc) ] }}}"""
csv_files = csv_files or glob.glob(f'{CSV_DATA_DIR}/*buy_slippage.csv')
tok_ts_ex_pscs = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for file in csv_files:
print(f"reading {file} ...")
f_exchange, f_token, *_ = os.path.basename(file).split('_')
with open(file, newline='') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=None)
# time,action,trade_size,token,exchange,exchange_price,slippage,cost
for row in reader:
# time = datetime.fromisoformat(row['time']).isoformat(' ', 'seconds')
trade_size = float(row['trade_size'])
tok_ts_ex_pscs[f_token][trade_size][f_exchange].append( (float(row['exchange_price']), float(row['slippage']), float(row['cost'])) )
return tok_ts_ex_pscs # TODO: don't return defaultdicts, users should get key errors
# generator
def pct_savings_gen(per_pair_savings):
"""Generates a sequence of (pair, trade_size, agg/exchange, [pct_savings]) for all leaves in the given dict"""
for pair, ts_ex_savings in sorted(per_pair_savings.items()):
for trade_size, ex_savings in ts_ex_savings.items():
for exchange, pct_savings in ex_savings.items():
yield pair, trade_size, exchange, pct_savings
########################################################################################################################
# JSON file aggregation functions
DEX_AG = dexag_client.name()
ONE_INCH = oneinch_client.name()
PARASWAP = paraswap_client.name()
TOTLE_EX = totle_client.name()
AGG_NAMES = [DEX_AG, ONE_INCH, PARASWAP]
JSON_DATA_DIR = f"{os.path.dirname(os.path.abspath(__file__))}/order_splitting_data"
@functools.lru_cache()
def | (files=None):
"""Returns an aggregated dict of split data, i.e. token: {trade_size: {agg: [{dex: pct, dex: pct}, {...}, ...]}}"""
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_splits_by_agg.json')
tok_ts_splits_by_agg = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for f in files:
for token, ts_splits_by_agg in json.load(open(f)).items():
for ts, agg_splits in ts_splits_by_agg.items():
for agg, split in agg_splits.items():
tok_ts_splits_by_agg[token][ts][agg].append(split)
return dict(sorted(tok_ts_splits_by_agg.items()))
@functools.lru_cache()
def get_all_dexs_with_pair(files=None):
"""Returns an aggregated dict of DEXs used in splits, i.e. token: {trade_size: [dex, dex, ...]}"""
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_dexs_with_pair.json')
tok_ts_dexs_with_pair = defaultdict(lambda: defaultdict(list))
for f in files:
for token, ts_dexs_with_pair in json.load(open(f)).items():
for ts, dexs in ts_dexs_with_pair.items():
tok_ts_dexs_with_pair[token][ts] = list(set(tok_ts_dexs_with_pair[token][ts] + dexs))
return dict(sorted(tok_ts_dexs_with_pair.items()))
@functools.lru_cache()
def get_all_agg_prices(files=None):
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_agg_prices.json')
tok_ts_agg_prices = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for f in files:
for token, ts_agg_prices in json.load(open(f)).items():
for ts, agg_prices in ts_agg_prices.items():
for agg, price in agg_prices.items():
tok_ts_agg_prices[token][ts][agg].append(price)
return dict(sorted(tok_ts_agg_prices.items()))
@functools.lru_cache()
def get_all_dex_prices(files=None):
files = files or glob.glob(f'{JSON_DATA_DIR}/2019*ts_dex_prices.json')
tok_ts_dex_prices = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for f in files:
for token, ts_agg_prices in json.load(open(f)).items():
for ts, agg_prices in ts_agg_prices.items():
# test for agg_name keys because Totle's JSON structure is different from aggs
if any(map(lambda k: k in AGG_NAMES, agg_prices.keys())):
# agg dex_prices files look like this:
# "0.1": {
# "DEX.AG": {
# "Uniswap": 0.003936408446252657,
# "Bancor": 0.003993840558066265
# },
# "Paraswap": { ... }
for agg, prices in agg_prices.items():
tok_ts_dex_prices[token][ts][agg].append(prices)
else:
# Totle's dex_prices file looks like this:
# "0.1": {
# "Ether Delta": 0.00735650292064385,
# "Bancor": 0.003993865645004445,
# "Uniswap": 0.003936433172436365
# },
# "0.5": { ... }
# insert Totle as the agg_name in the aggregated data structure
tok_ts_dex_prices[token][ts][TOTLE_EX].append(agg_prices)
return dict(sorted(tok_ts_dex_prices.items()))
# generator
def token_ts_agg_split_gen(tok_ts_splits_by_agg):
"""Generates a sequence of (token, trade_size, agg, split) for all leaves in the given dict"""
for token, ts_splits_by_agg in tok_ts_splits_by_agg.items():
for trade_size, agg_splits in ts_splits_by_agg.items():
for agg, splits in agg_splits.items():
for split in splits:
yield token, trade_size, agg, split
def sorted_unique_trade_sizes(tok_ts_splits_by_agg):
all_trade_sizes = set(trade_size for token, trade_size, agg, split in token_ts_agg_split_gen(tok_ts_splits_by_agg))
return list(map(str, sorted(map(float, all_trade_sizes))))
def tokens_split_pct(tok_ts_splits_by_agg, only_token=None, only_agg=None):
"""Returns a dict of token: {trade_size: split_pct}"""
result = defaultdict(dict)
n_samples, n_splits = defaultdict(lambda: defaultdict(int)), defaultdict(lambda: defaultdict(int))
for token, trade_size, agg, split in token_ts_agg_split_gen(tok_ts_splits_by_agg):
if only_token and token != only_token: continue
if only_agg and agg != only_agg: continue
n_samples[token][trade_size] += 1
if len(split) > 1: n_splits[token][trade_size] += 1
# if len(split) > 1: print(f"{token} {trade_size}: {split}")
result[token][trade_size] = (100.0 * n_splits[token][trade_size]) / n_samples[token][trade_size]
return result
| get_all_splits_by_agg | identifier_name |
ourairports.rs | use serde::de::{self, Unexpected};
use serde::{Deserialize, Deserializer, Serialize};
/// Contains a record of a single airport.
#[derive(Deserialize, Serialize)]
pub struct Airport {
/// Internal OurAirports integer identifier for the airport.
/// This will stay persistent, even if the airport code changes.
id: String,
/// The text identifier used in the OurAirports URL.
/// This will be the ICAO code if available. Otherwise, it will be a local airport code (if no conflict), or if nothing else is available, an internally-generated code starting with the ISO2 country code, followed by a dash and a four-digit number.
ident: String,
/// The type of the airport.
/// Allowed values are "closed_airport", "heliport", "large_airport", "medium_airport", "seaplane_base", and "small_airport".
#[serde(rename = "type")]
airport_type: String,
/// The official airport name, including "Airport", "Airstrip", etc.
name: String,
/// The airport latitude in decimal degrees (positive for north).
latitude_deg: f64,
/// The airport longitude in decimal degrees (positive for east).
longitude_deg: f64,
/// The airport elevation MSL in feet (*not* metres). None if unavailable.
elevation_ft: Option<i32>,
/// The code for the continent where the airport is (primarily) located.
/// Allowed values are "AF" (Africa), "AN" (Antarctica), "AS" (Asia), "EU" (Europe), "NA" (North America), "OC" (Oceania), or "SA" (South America).
continent: String,
/// The two-character ISO 3166:1-alpha2 code for the country where the airport is (primarily) located.
/// A handful of unofficial, non-ISO codes are also in use, such as "XK" for Kosovo.
iso_country: String,
/// An alphanumeric code for the high-level administrative subdivision of a country where the airport is primarily located (e.g. province, governorate), prefixed by the ISO2 country code and a hyphen.
/// OurAirports uses ISO 3166:2 codes whenever possible, preferring higher administrative levels, but also includes some custom codes.
iso_region: String,
/// The primary municipality that the airport serves (when available).
/// Note that this is *not* necessarily the municipality where the airport is physically located.
municipality: String,
/// true if the airport currently has scheduled airline service; false otherwise.
#[serde(deserialize_with = "bool_from_str")]
scheduled_service: bool,
/// The code that an aviation GPS database (such as Jeppesen's or Garmin's) would normally use for the airport. This will always be the ICAO code if one exists.
/// Note that, unlike the `ident` column, this is *not* guaranteed to be globally unique.
gps_code: String,
/// The three-letter IATA code for the airport (if it has one).
iata_code: String,
/// The local country code for the airport, if different from the `gps_code` and `iata_code` fields (used mainly for US airports).
local_code: String,
/// URL of the airport's official home page on the web, if one exists.
home_link: String,
/// URL of the airport's page on Wikipedia, if one exists.
wikipedia_link: String,
/// Extra keywords/phrases to assist with search, as a Vec.
/// May include former names for the airport, alternate codes, names in other languages, nearby tourist destinations, etc.
#[serde(deserialize_with = "vec_string_from_string")]
keywords: Vec<String>,
}
/// Contains information about a single airport radio frequency
/// for voice communication (radio navigation aids appear in struct Navaids)
#[derive(Deserialize, Serialize)]
pub struct AirportFrequency {
/// Internal OurAirports integer identifier for the frequency.
/// This will stay persistent, even if the radio frequency or description changes.
id: String,
/// Internal integer foreign key matching the `id` column for the associated airport in Airports struct.
/// (`airport_ident` is a better alternative.)
airport_ref: String,
/// Externally-visible string foreign key matching the `ident` column for the associated airport in Airports.
airport_ident: String,
/// A code for the frequency type.
/// This isn't (currently) a controlled vocabulary, but probably will be soon.
/// Some common values are "TWR" (tower), "ATF" or "CTAF" (common traffic frequency), "GND" (ground control), "RMP" (ramp control), "ATIS" (automated weather), "RCO" (remote radio outlet), "ARR" (arrivals), "DEP" (departures), "UNICOM" (monitored ground station), and "RDO" (a flight-service station).
#[serde(rename = "type")]
frequency_type: String,
/// A description of the frequency, typically the way a pilot would open a call on it.
description: String,
/// Radio voice frequency in megahertz.
/// Note that the same frequency may appear multiple times for an airport, serving different functions.
frequency_mhz: String,
}
/// Contains information about a single landing surface
#[derive(Deserialize, Serialize)]
pub struct Runway {
/// Internal OurAirports integer identifier for the runway.
/// This will stay persistent, even if the runway numbering changes.
id: String,
/// Internal integer foreign key matching the id column for the associated airport in airports.csv. (`airport_ident` is a better alternative.)
airport_ref: String,
/// Externally-visible string foreign key matching the ident column for the associated airport in airports.csv.
airport_ident: String,
/// Length of the full runway surface (including displaced thresholds, overrun areas, etc) in feet.
length_ft: Option<u32>,
/// Width of the runway surface in feet.
width_ft: Option<u32>,
/// Code for the runway surface type.
/// This is not yet a controlled vocabulary, but probably will be soon.
/// Some common values include "ASP" (asphalt), "TURF" (turf), "CON" (concrete), "GRS" (grass), "GRE" (gravel), "WATER" (water), and "UNK" (unknown).
surface: String,
/// `true` if the surface is lighted at night. `false` otherwise.
#[serde(deserialize_with = "bool_from_str")]
lighted: bool,
/// `true` if the runway surface is currently closed, `false` otherwise.
#[serde(deserialize_with = "bool_from_str")]
closed: bool,
/// Identifier for the low-numbered end of the runway.
le_ident: String,
/// Latitude of the centre of the low-numbered end of the runway, in decimal degrees (positive is north), if available.
le_latitude_deg: Option<f64>,
/// Longitude of the centre of the low-numbered end of the runway, in decimal degrees (positive is east), if available.
le_longitude_deg: Option<f64>,
/// Elevation above MSL of the low-numbered end of the runway in feet.
le_elevation_ft: Option<i32>,
/// Heading of the low-numbered end of the runway in degrees true (*not* magnetic).
#[serde(rename = "le_heading_degT")]
le_heading_deg_true: Option<f64>,
/// Length of the displaced threshold (if any) for the low-numbered end of the runway, in feet.
le_displaced_threshold_ft: Option<i32>,
/// Identifier for the high-numbered end of the runway.
he_ident: String,
/// Latitude of the centre of the high-numbered end of the runway, in decimal degrees (positive is north), if available.
he_latitude_deg: Option<f64>,
/// Longitude of the centre of the high-numbered end of the runway, in decimal degrees (positive is east), if available.
he_longitude_deg: Option<f64>,
/// Elevation above MSL of the high-numbered end of the runway in feet.
he_elevation_ft: Option<i32>,
#[serde(rename = "he_heading_degT")]
/// Heading of the high-numbered end of the runway in degrees true (*not* magnetic).
he_heading_deg_true: Option<f64>,
/// Length of the displaced threshold (if any) for the high-numbered end of the runway, in feet.
he_displaced_threshold_ft: Option<i32>,
}
/// Represents a single radio navigation
#[derive(Deserialize, Serialize)]
pub struct Navaid {
/// Internal OurAirports integer identifier for the navaid.
/// This will stay persistent, even if the navaid identifier or frequency changes.
id: String,
/// This is a unique string identifier constructed from the navaid name and country, and used in the OurAirports URL.
filename: String,
/// The 1-3 character identifer that the navaid transmits.
ident: String,
/// The name of the navaid, excluding its type.
name: String,
/// The type of the navaid. Options are "DME", "NDB", "NDB-DME", "TACAN", "VOR", "VOR-DME", or "VORTAC".
/// See the [map legend](https://ourairports.com/help/data-dictionary.html#navaids) for more information about each type.
#[serde(rename = "type")]
navaid_type: String,
/// The frequency of the navaid in *kilohertz*.
/// If the Navaid operates on the VHF band (VOR, VOR-DME) or operates on the UHF band with a paired VHF frequency (DME, TACAN, VORTAC), then you need to divide this number by 1,000 to get the frequency in megahertz (115.3 MHz in this example).
/// For an NDB or NDB-DME, you can use this frequency directly.
frequency_khz: String,
/// The latitude of the navaid in decimal degrees (negative for south).
latitude_deg: Option<f64>,
/// The longitude of the navaid in decimal degrees (negative for west).
longitude_deg: Option<f64>,
/// The navaid's elevation MSL in feet (not metres).
elevation_ft: Option<i32>,
/// The two-character [ISO 3166:1-alpha2 code](https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes) for the country that operates the navaid.
/// A handful of unofficial, non-ISO codes are also in use, such as "XK" for [Kosovo](https://ourairports.com/countries/XK/).
iso_country: String,
/// The paired VHF frequency for the DME (or TACAN) in kilohertz.
/// Divide by 1,000 to get the paired VHF frequency in megahertz (e.g. 115.3 MHz).
dme_frequency_khz: String,
/// The DME channel (an alternative way of tuning distance-measuring equipment)
dme_channel: String,
/// The latitude of the associated DME in decimal degrees (negative for south). If missing, assume that the value is the same as `latitude_deg`.
dme_latitude_deg: Option<f64>,
/// The longitude of the associated DME in decimal degrees (negative for west). If missing, assume that the value is the same as `longitude_deg`.
dme_longitude_deg: Option<f64>,
/// The associated DME transmitters elevation MSL in feet. If missing, assume that it's the same value as `elevation_ft`.
dme_elevation_ft: Option<i32>,
/// The magnetic variation adjustment built into a VOR's, VOR-DME's, or TACAN's radials. Positive means east (added to the true direction), and negative means west (subtracted from the true direction).
/// This will not usually be the same as `magnetic_variation_deg` because the magnetic pole is constantly in motion.
slaved_variation_deg: Option<f64>,
/// The actual magnetic variation at the navaid's location. Positive means east (added to the true direction), and negative means west (subtracted from the true direction),
magnetic_variation_deg: Option<f64>,
/// The primary function of the navaid in the airspace system.
/// Options include "HI" (high-altitude airways, at or above flight level 180), "LO" (low-altitude airways), "BOTH" (high- and low-altitude airways), "TERM" (terminal-area navigation only), and "RNAV" (non-GPS area navigation).
#[serde(rename = "usageType")]
usage_type: String,
/// The power-output level of the navaid.
/// Options include "HIGH", "MEDIUM", "LOW", and "UNKNOWN".
power: String,
/// The OurAirports text identifier (usually the ICAO code) for an airport associated with the navaid.
/// Links to the `ident` column in airports.csv.
associated_airport: String,
}
/// Represents a country or country-like entity (e.g. Hong Kong)
#[derive(Deserialize, Serialize)]
pub struct Country {
/// Internal OurAirports integer identifier for the country.
/// This will stay persistent, even if the country name or code changes.
id: String,
/// The two-character [ISO 3166:1-alpha2 code](https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes) for the country that operates the navaid.
/// A handful of unofficial, non-ISO codes are also in use, such as "XK" for [Kosovo](https://ourairports.com/countries/XK/).
/// The `iso_country` field in airports.csv points into this field.
code: String,
/// The common **English**-language name for the country.
/// Other variations of the name may appear in the `keywords` field to assist with search.
name: String,
/// The code for the continent where the country is (primarily) located.
/// See the `continent` code in airports.csv for allowed values.
continent: String,
/// Link to the Wikipedia article about the country.
wikipedia_link: String,
/// An array of search keywords/phrases related to the country.
#[serde(deserialize_with = "vec_string_from_string")]
keywords: Vec<String>,
}
/// Represents a high-level administrative subdivision of a country
#[derive(Deserialize, Serialize)]
pub struct Region {
/// Internal OurAirports integer identifier for the region. This will stay persistent, even if the region code changes.
id: String,
/// `local_code` prefixed with the country code to make a globally-unique identifier.
code: String,
/// The local code for the administrative subdivision.
/// Whenever possible, these are official [ISO 3166:2](https://en.wikipedia.org/wiki/ISO_3166-2), at the highest level available, but in some cases OurAirports has to use unofficial codes.
/// There is also a pseudo code "U-A" for each country, which means that the airport has not yet been assigned to a region (or perhaps can't be, as in the case of a deep-sea oil platform).
local_code: String,
/// The common **English**-language name for the administrative subdivision.
/// In some cases, the name in local languages will appear in the `keywords` field assist search.
name: String,
/// A code for the continent to which the region belongs.
/// See the `continent` field in airports.csv for a list of codes.
continent: String,
/// The two-character [ISO 3166:1-alpha2 code](https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes) for the country containing the administrative subdivision.
/// A handful of unofficial, non-ISO codes are also in use, such as "XK" for [Kosovo](https://ourairports.com/countries/XK/).
iso_country: String,
/// A link to the Wikipedia article describing the subdivision.
wikipedia_link: String,
/// An array of keywords to assist with search. May include former names for the region, and/or the region name in other languages.
#[serde(deserialize_with = "vec_string_from_string")]
keywords: Vec<String>,
}
/// Converts a string to a boolean based on "yes" and "no"
fn bool_from_str<'de, D>(deserializer: D) -> Result<bool, D::Error>
where
D: Deserializer<'de>,
{
match String::deserialize(deserializer)?.to_lowercase().as_str() {
"yes" | "1"=> Ok(true),
"no" | "0" => Ok(false),
other => Err(de::Error::invalid_value(
Unexpected::Str(other),
&"Value must be yes or no",
)),
}
}
/// Transforms a comma-separated string to a vector.
fn | <'de, D>(deserializer: D) -> Result<Vec<String>, D::Error>
where
D: Deserializer<'de>,
{
let keywords = String::deserialize(deserializer)?;
match keywords.len() {
0 => Ok(vec![]),
_ => Ok(keywords.split(',').map(|s| s.trim().to_string()).collect()),
}
}
| vec_string_from_string | identifier_name |
ourairports.rs | use serde::de::{self, Unexpected};
use serde::{Deserialize, Deserializer, Serialize};
/// Contains a record of a single airport.
#[derive(Deserialize, Serialize)]
pub struct Airport {
/// Internal OurAirports integer identifier for the airport.
/// This will stay persistent, even if the airport code changes.
id: String,
/// The text identifier used in the OurAirports URL.
/// This will be the ICAO code if available. Otherwise, it will be a local airport code (if no conflict), or if nothing else is available, an internally-generated code starting with the ISO2 country code, followed by a dash and a four-digit number.
ident: String,
/// The type of the airport.
/// Allowed values are "closed_airport", "heliport", "large_airport", "medium_airport", "seaplane_base", and "small_airport".
#[serde(rename = "type")]
airport_type: String,
/// The official airport name, including "Airport", "Airstrip", etc.
name: String,
/// The airport latitude in decimal degrees (positive for north).
latitude_deg: f64,
/// The airport longitude in decimal degrees (positive for east).
longitude_deg: f64,
/// The airport elevation MSL in feet (*not* metres). None if unavailable.
elevation_ft: Option<i32>,
/// The code for the continent where the airport is (primarily) located.
/// Allowed values are "AF" (Africa), "AN" (Antarctica), "AS" (Asia), "EU" (Europe), "NA" (North America), "OC" (Oceania), or "SA" (South America).
continent: String,
/// The two-character ISO 3166:1-alpha2 code for the country where the airport is (primarily) located.
/// A handful of unofficial, non-ISO codes are also in use, such as "XK" for Kosovo.
iso_country: String,
/// An alphanumeric code for the high-level administrative subdivision of a country where the airport is primarily located (e.g. province, governorate), prefixed by the ISO2 country code and a hyphen.
/// OurAirports uses ISO 3166:2 codes whenever possible, preferring higher administrative levels, but also includes some custom codes.
iso_region: String,
/// The primary municipality that the airport serves (when available).
/// Note that this is *not* necessarily the municipality where the airport is physically located.
municipality: String,
/// true if the airport currently has scheduled airline service; false otherwise.
#[serde(deserialize_with = "bool_from_str")]
scheduled_service: bool,
/// The code that an aviation GPS database (such as Jeppesen's or Garmin's) would normally use for the airport. This will always be the ICAO code if one exists.
/// Note that, unlike the `ident` column, this is *not* guaranteed to be globally unique.
gps_code: String,
/// The three-letter IATA code for the airport (if it has one).
iata_code: String,
/// The local country code for the airport, if different from the `gps_code` and `iata_code` fields (used mainly for US airports).
local_code: String,
/// URL of the airport's official home page on the web, if one exists.
home_link: String,
/// URL of the airport's page on Wikipedia, if one exists.
wikipedia_link: String,
/// Extra keywords/phrases to assist with search, as a Vec.
/// May include former names for the airport, alternate codes, names in other languages, nearby tourist destinations, etc.
#[serde(deserialize_with = "vec_string_from_string")] | /// for voice communication (radio navigation aids appear in struct Navaids)
#[derive(Deserialize, Serialize)]
pub struct AirportFrequency {
/// Internal OurAirports integer identifier for the frequency.
/// This will stay persistent, even if the radio frequency or description changes.
id: String,
/// Internal integer foreign key matching the `id` column for the associated airport in Airports struct.
/// (`airport_ident` is a better alternative.)
airport_ref: String,
/// Externally-visible string foreign key matching the `ident` column for the associated airport in Airports.
airport_ident: String,
/// A code for the frequency type.
/// This isn't (currently) a controlled vocabulary, but probably will be soon.
/// Some common values are "TWR" (tower), "ATF" or "CTAF" (common traffic frequency), "GND" (ground control), "RMP" (ramp control), "ATIS" (automated weather), "RCO" (remote radio outlet), "ARR" (arrivals), "DEP" (departures), "UNICOM" (monitored ground station), and "RDO" (a flight-service station).
#[serde(rename = "type")]
frequency_type: String,
/// A description of the frequency, typically the way a pilot would open a call on it.
description: String,
/// Radio voice frequency in megahertz.
/// Note that the same frequency may appear multiple times for an airport, serving different functions.
frequency_mhz: String,
}
/// Contains information about a single landing surface
#[derive(Deserialize, Serialize)]
pub struct Runway {
/// Internal OurAirports integer identifier for the runway.
/// This will stay persistent, even if the runway numbering changes.
id: String,
/// Internal integer foreign key matching the id column for the associated airport in airports.csv. (`airport_ident` is a better alternative.)
airport_ref: String,
/// Externally-visible string foreign key matching the ident column for the associated airport in airports.csv.
airport_ident: String,
/// Length of the full runway surface (including displaced thresholds, overrun areas, etc) in feet.
length_ft: Option<u32>,
/// Width of the runway surface in feet.
width_ft: Option<u32>,
/// Code for the runway surface type.
/// This is not yet a controlled vocabulary, but probably will be soon.
/// Some common values include "ASP" (asphalt), "TURF" (turf), "CON" (concrete), "GRS" (grass), "GRE" (gravel), "WATER" (water), and "UNK" (unknown).
surface: String,
/// `true` if the surface is lighted at night. `false` otherwise.
#[serde(deserialize_with = "bool_from_str")]
lighted: bool,
/// `true` if the runway surface is currently closed, `false` otherwise.
#[serde(deserialize_with = "bool_from_str")]
closed: bool,
/// Identifier for the low-numbered end of the runway.
le_ident: String,
/// Latitude of the centre of the low-numbered end of the runway, in decimal degrees (positive is north), if available.
le_latitude_deg: Option<f64>,
/// Longitude of the centre of the low-numbered end of the runway, in decimal degrees (positive is east), if available.
le_longitude_deg: Option<f64>,
/// Elevation above MSL of the low-numbered end of the runway in feet.
le_elevation_ft: Option<i32>,
/// Heading of the low-numbered end of the runway in degrees true (*not* magnetic).
#[serde(rename = "le_heading_degT")]
le_heading_deg_true: Option<f64>,
/// Length of the displaced threshold (if any) for the low-numbered end of the runway, in feet.
le_displaced_threshold_ft: Option<i32>,
/// Identifier for the high-numbered end of the runway.
he_ident: String,
/// Latitude of the centre of the high-numbered end of the runway, in decimal degrees (positive is north), if available.
he_latitude_deg: Option<f64>,
/// Longitude of the centre of the high-numbered end of the runway, in decimal degrees (positive is east), if available.
he_longitude_deg: Option<f64>,
/// Elevation above MSL of the high-numbered end of the runway in feet.
he_elevation_ft: Option<i32>,
#[serde(rename = "he_heading_degT")]
/// Heading of the high-numbered end of the runway in degrees true (*not* magnetic).
he_heading_deg_true: Option<f64>,
/// Length of the displaced threshold (if any) for the high-numbered end of the runway, in feet.
he_displaced_threshold_ft: Option<i32>,
}
/// Represents a single radio navigation
#[derive(Deserialize, Serialize)]
pub struct Navaid {
/// Internal OurAirports integer identifier for the navaid.
/// This will stay persistent, even if the navaid identifier or frequency changes.
id: String,
/// This is a unique string identifier constructed from the navaid name and country, and used in the OurAirports URL.
filename: String,
/// The 1-3 character identifer that the navaid transmits.
ident: String,
/// The name of the navaid, excluding its type.
name: String,
/// The type of the navaid. Options are "DME", "NDB", "NDB-DME", "TACAN", "VOR", "VOR-DME", or "VORTAC".
/// See the [map legend](https://ourairports.com/help/data-dictionary.html#navaids) for more information about each type.
#[serde(rename = "type")]
navaid_type: String,
/// The frequency of the navaid in *kilohertz*.
/// If the Navaid operates on the VHF band (VOR, VOR-DME) or operates on the UHF band with a paired VHF frequency (DME, TACAN, VORTAC), then you need to divide this number by 1,000 to get the frequency in megahertz (115.3 MHz in this example).
/// For an NDB or NDB-DME, you can use this frequency directly.
frequency_khz: String,
/// The latitude of the navaid in decimal degrees (negative for south).
latitude_deg: Option<f64>,
/// The longitude of the navaid in decimal degrees (negative for west).
longitude_deg: Option<f64>,
/// The navaid's elevation MSL in feet (not metres).
elevation_ft: Option<i32>,
/// The two-character [ISO 3166:1-alpha2 code](https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes) for the country that operates the navaid.
/// A handful of unofficial, non-ISO codes are also in use, such as "XK" for [Kosovo](https://ourairports.com/countries/XK/).
iso_country: String,
/// The paired VHF frequency for the DME (or TACAN) in kilohertz.
/// Divide by 1,000 to get the paired VHF frequency in megahertz (e.g. 115.3 MHz).
dme_frequency_khz: String,
/// The DME channel (an alternative way of tuning distance-measuring equipment)
dme_channel: String,
/// The latitude of the associated DME in decimal degrees (negative for south). If missing, assume that the value is the same as `latitude_deg`.
dme_latitude_deg: Option<f64>,
/// The longitude of the associated DME in decimal degrees (negative for west). If missing, assume that the value is the same as `longitude_deg`.
dme_longitude_deg: Option<f64>,
/// The associated DME transmitters elevation MSL in feet. If missing, assume that it's the same value as `elevation_ft`.
dme_elevation_ft: Option<i32>,
/// The magnetic variation adjustment built into a VOR's, VOR-DME's, or TACAN's radials. Positive means east (added to the true direction), and negative means west (subtracted from the true direction).
/// This will not usually be the same as `magnetic_variation_deg` because the magnetic pole is constantly in motion.
slaved_variation_deg: Option<f64>,
/// The actual magnetic variation at the navaid's location. Positive means east (added to the true direction), and negative means west (subtracted from the true direction),
magnetic_variation_deg: Option<f64>,
/// The primary function of the navaid in the airspace system.
/// Options include "HI" (high-altitude airways, at or above flight level 180), "LO" (low-altitude airways), "BOTH" (high- and low-altitude airways), "TERM" (terminal-area navigation only), and "RNAV" (non-GPS area navigation).
#[serde(rename = "usageType")]
usage_type: String,
/// The power-output level of the navaid.
/// Options include "HIGH", "MEDIUM", "LOW", and "UNKNOWN".
power: String,
/// The OurAirports text identifier (usually the ICAO code) for an airport associated with the navaid.
/// Links to the `ident` column in airports.csv.
associated_airport: String,
}
/// Represents a country or country-like entity (e.g. Hong Kong)
#[derive(Deserialize, Serialize)]
pub struct Country {
/// Internal OurAirports integer identifier for the country.
/// This will stay persistent, even if the country name or code changes.
id: String,
/// The two-character [ISO 3166:1-alpha2 code](https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes) for the country that operates the navaid.
/// A handful of unofficial, non-ISO codes are also in use, such as "XK" for [Kosovo](https://ourairports.com/countries/XK/).
/// The `iso_country` field in airports.csv points into this field.
code: String,
/// The common **English**-language name for the country.
/// Other variations of the name may appear in the `keywords` field to assist with search.
name: String,
/// The code for the continent where the country is (primarily) located.
/// See the `continent` code in airports.csv for allowed values.
continent: String,
/// Link to the Wikipedia article about the country.
wikipedia_link: String,
/// An array of search keywords/phrases related to the country.
#[serde(deserialize_with = "vec_string_from_string")]
keywords: Vec<String>,
}
/// Represents a high-level administrative subdivision of a country
#[derive(Deserialize, Serialize)]
pub struct Region {
/// Internal OurAirports integer identifier for the region. This will stay persistent, even if the region code changes.
id: String,
/// `local_code` prefixed with the country code to make a globally-unique identifier.
code: String,
/// The local code for the administrative subdivision.
/// Whenever possible, these are official [ISO 3166:2](https://en.wikipedia.org/wiki/ISO_3166-2), at the highest level available, but in some cases OurAirports has to use unofficial codes.
/// There is also a pseudo code "U-A" for each country, which means that the airport has not yet been assigned to a region (or perhaps can't be, as in the case of a deep-sea oil platform).
local_code: String,
/// The common **English**-language name for the administrative subdivision.
/// In some cases, the name in local languages will appear in the `keywords` field assist search.
name: String,
/// A code for the continent to which the region belongs.
/// See the `continent` field in airports.csv for a list of codes.
continent: String,
/// The two-character [ISO 3166:1-alpha2 code](https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes) for the country containing the administrative subdivision.
/// A handful of unofficial, non-ISO codes are also in use, such as "XK" for [Kosovo](https://ourairports.com/countries/XK/).
iso_country: String,
/// A link to the Wikipedia article describing the subdivision.
wikipedia_link: String,
/// An array of keywords to assist with search. May include former names for the region, and/or the region name in other languages.
#[serde(deserialize_with = "vec_string_from_string")]
keywords: Vec<String>,
}
/// Converts a string to a boolean based on "yes" and "no"
fn bool_from_str<'de, D>(deserializer: D) -> Result<bool, D::Error>
where
D: Deserializer<'de>,
{
match String::deserialize(deserializer)?.to_lowercase().as_str() {
"yes" | "1"=> Ok(true),
"no" | "0" => Ok(false),
other => Err(de::Error::invalid_value(
Unexpected::Str(other),
&"Value must be yes or no",
)),
}
}
/// Transforms a comma-separated string to a vector.
fn vec_string_from_string<'de, D>(deserializer: D) -> Result<Vec<String>, D::Error>
where
D: Deserializer<'de>,
{
let keywords = String::deserialize(deserializer)?;
match keywords.len() {
0 => Ok(vec![]),
_ => Ok(keywords.split(',').map(|s| s.trim().to_string()).collect()),
}
} | keywords: Vec<String>,
}
/// Contains information about a single airport radio frequency | random_line_split |
num_format.rs | // This file is part of the uutils coreutils package.
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (vars) charf cninetyninehexfloatf decf floatf intf scif strf Cninety
//! handles creating printed output for numeric substitutions
// spell-checker:ignore (vars) charf decf floatf intf scif strf Cninety
use std::env;
use std::vec::Vec;
use crate::display::Quotable;
use crate::{show_error, show_warning};
use super::format_field::{FieldType, FormatField};
use super::formatter::{Base, FormatPrimitive, Formatter, InitialPrefix};
use super::formatters::cninetyninehexfloatf::CninetyNineHexFloatf;
use super::formatters::decf::Decf;
use super::formatters::floatf::Floatf;
use super::formatters::intf::Intf;
use super::formatters::scif::Scif;
pub fn warn_expected_numeric(pf_arg: &str) {
// important: keep println here not print
show_error!("{}: expected a numeric value", pf_arg.maybe_quote());
}
// when character constant arguments have excess characters
// issue a warning when POSIXLY_CORRECT is not set
fn warn_char_constant_ign(remaining_bytes: &[u8]) {
match env::var("POSIXLY_CORRECT") {
Ok(_) => {}
Err(e) => {
if let env::VarError::NotPresent = e {
show_warning!(
"{:?}: character(s) following character \
constant have been ignored",
remaining_bytes
);
}
}
}
}
// this function looks at the first few
// characters of an argument and returns a value if we can learn
// a value from that (e.g. no argument? return 0, char constant? ret value)
fn get_provided(str_in_opt: Option<&String>) -> Option<u8> {
const C_S_QUOTE: u8 = 39;
const C_D_QUOTE: u8 = 34;
match str_in_opt {
Some(str_in) => {
let mut byte_it = str_in.bytes();
if let Some(ch) = byte_it.next() {
match ch {
C_S_QUOTE | C_D_QUOTE => {
Some(match byte_it.next() {
Some(second_byte) => {
let mut ignored: Vec<u8> = Vec::new();
for cont in byte_it {
ignored.push(cont);
}
if !ignored.is_empty() {
warn_char_constant_ign(&ignored);
}
second_byte
}
// no byte after quote
None => {
let so_far = (ch as char).to_string();
warn_expected_numeric(&so_far);
0_u8
}
})
}
// first byte is not quote
_ => None, // no first byte
}
} else {
Some(0_u8)
}
}
None => Some(0),
}
}
// takes a string and returns
// a sign,
// a base,
// and an offset for index after all
// initial spacing, sign, base prefix, and leading zeroes
#[allow(clippy::cognitive_complexity)]
fn get_initial_prefix(str_in: &str, field_type: &FieldType) -> InitialPrefix {
let mut str_it = str_in.chars();
let mut ret = InitialPrefix {
radix_in: Base::Ten,
sign: 1,
offset: 0,
};
let mut top_char = str_it.next();
// skip spaces and ensure top_char is the first non-space char
// (or None if none exists)
while let Some(' ') = top_char {
ret.offset += 1;
top_char = str_it.next();
}
// parse sign
match top_char {
Some('+') => {
ret.offset += 1;
top_char = str_it.next();
}
Some('-') => {
ret.sign = -1;
ret.offset += 1;
top_char = str_it.next();
}
_ => {}
}
// we want to exit with offset being
// the index of the first non-zero
// digit before the decimal point or
// if there is none, the zero before the
// decimal point, or, if there is none,
// the decimal point.
// while we are determining the offset
// we will ensure as a convention
// the offset is always on the first character
// that we are yet unsure if it is the
// final offset. If the zero could be before
// a decimal point we don't move past the zero.
let mut is_hex = false;
if Some('0') == top_char {
if let Some(base) = str_it.next() {
// lead zeroes can only exist in
// octal and hex base
let mut do_clean_lead_zeroes = false;
match base {
'x' | 'X' => {
is_hex = true;
ret.offset += 2;
ret.radix_in = Base::Hex;
do_clean_lead_zeroes = true;
}
e @ '0'..='9' => {
ret.offset += 1;
if let FieldType::Intf = *field_type {
ret.radix_in = Base::Octal;
}
if e == '0' {
do_clean_lead_zeroes = true;
}
}
_ => {}
}
if do_clean_lead_zeroes {
let mut first = true;
for ch_zero in str_it {
// see notes on offset above:
// this is why the offset for octal and decimal numbers
// that reach this branch is 1 even though
// they have already eaten the characters '00'
// this is also why when hex encounters its
// first zero it does not move its offset
// forward because it does not know for sure
// that it's current offset (of that zero)
// is not the final offset,
// whereas at that point octal knows its
// current offset is not the final offset.
match ch_zero {
'0' => {
if !(is_hex && first) {
ret.offset += 1;
}
}
// if decimal, keep last zero if one exists
// (it's possible for last zero to
// not exist at this branch if we're in hex input)
'.' => break,
// other digit, etc.
_ => {
if !(is_hex && first) {
ret.offset += 1;
}
break;
}
}
if first {
first = false;
}
}
}
}
}
ret
}
// this is the function a Sub's print will delegate to
// if it is a numeric field, passing the field details
// and an iterator to the argument
pub fn | (field: &FormatField, in_str_opt: Option<&String>) -> Option<String> {
let field_char = field.field_char;
// num format mainly operates by further delegating to one of
// several Formatter structs depending on the field
// see formatter.rs for more details
// to do switch to static dispatch
let formatter: Box<dyn Formatter> = match *field.field_type {
FieldType::Intf => Box::new(Intf::new()),
FieldType::Floatf => Box::new(Floatf::new()),
FieldType::CninetyNineHexFloatf => Box::new(CninetyNineHexFloatf::new()),
FieldType::Scif => Box::new(Scif::new()),
FieldType::Decf => Box::new(Decf::new()),
_ => {
panic!("asked to do num format with non-num field type");
}
};
let prim_opt=
// if we can get an assumed value from looking at the first
// few characters, use that value to create the FormatPrimitive
if let Some(provided_num) = get_provided(in_str_opt) {
let mut tmp = FormatPrimitive::default();
match field_char {
'u' | 'i' | 'd' => {
tmp.pre_decimal = Some(
format!("{provided_num}"));
},
'x' | 'X' => {
tmp.pre_decimal = Some(
format!("{provided_num:x}"));
},
'o' => {
tmp.pre_decimal = Some(
format!("{provided_num:o}"));
},
'e' | 'E' | 'g' | 'G' => {
let as_str = format!("{provided_num}");
let initial_prefix = get_initial_prefix(
&as_str,
field.field_type
);
tmp=formatter.get_primitive(field, &initial_prefix, &as_str)
.expect("err during default provided num");
},
_ => {
tmp.pre_decimal = Some(
format!("{provided_num}"));
tmp.post_decimal = Some(String::from("0"));
}
}
Some(tmp)
} else {
// otherwise we'll interpret the argument as a number
// using the appropriate Formatter
let in_str = in_str_opt.expect(
"please send the devs this message:
\n get_provided is failing to ret as Some(0) on no str ");
// first get information about the beginning of the
// numeric argument that would be useful for
// any formatter (int or float)
let initial_prefix = get_initial_prefix(
in_str,
field.field_type
);
// then get the FormatPrimitive from the Formatter
formatter.get_primitive(field, &initial_prefix, in_str)
};
// if we have a formatPrimitive, print its results
// according to the field-char appropriate Formatter
prim_opt.map(|prim| formatter.primitive_to_str(&prim, field.clone()))
}
| num_format | identifier_name |
num_format.rs | // This file is part of the uutils coreutils package.
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (vars) charf cninetyninehexfloatf decf floatf intf scif strf Cninety
//! handles creating printed output for numeric substitutions
// spell-checker:ignore (vars) charf decf floatf intf scif strf Cninety
use std::env;
use std::vec::Vec;
use crate::display::Quotable;
use crate::{show_error, show_warning};
use super::format_field::{FieldType, FormatField};
use super::formatter::{Base, FormatPrimitive, Formatter, InitialPrefix};
use super::formatters::cninetyninehexfloatf::CninetyNineHexFloatf;
use super::formatters::decf::Decf;
use super::formatters::floatf::Floatf;
use super::formatters::intf::Intf;
use super::formatters::scif::Scif;
pub fn warn_expected_numeric(pf_arg: &str) |
// when character constant arguments have excess characters
// issue a warning when POSIXLY_CORRECT is not set
fn warn_char_constant_ign(remaining_bytes: &[u8]) {
match env::var("POSIXLY_CORRECT") {
Ok(_) => {}
Err(e) => {
if let env::VarError::NotPresent = e {
show_warning!(
"{:?}: character(s) following character \
constant have been ignored",
remaining_bytes
);
}
}
}
}
// this function looks at the first few
// characters of an argument and returns a value if we can learn
// a value from that (e.g. no argument? return 0, char constant? ret value)
fn get_provided(str_in_opt: Option<&String>) -> Option<u8> {
const C_S_QUOTE: u8 = 39;
const C_D_QUOTE: u8 = 34;
match str_in_opt {
Some(str_in) => {
let mut byte_it = str_in.bytes();
if let Some(ch) = byte_it.next() {
match ch {
C_S_QUOTE | C_D_QUOTE => {
Some(match byte_it.next() {
Some(second_byte) => {
let mut ignored: Vec<u8> = Vec::new();
for cont in byte_it {
ignored.push(cont);
}
if !ignored.is_empty() {
warn_char_constant_ign(&ignored);
}
second_byte
}
// no byte after quote
None => {
let so_far = (ch as char).to_string();
warn_expected_numeric(&so_far);
0_u8
}
})
}
// first byte is not quote
_ => None, // no first byte
}
} else {
Some(0_u8)
}
}
None => Some(0),
}
}
// takes a string and returns
// a sign,
// a base,
// and an offset for index after all
// initial spacing, sign, base prefix, and leading zeroes
#[allow(clippy::cognitive_complexity)]
fn get_initial_prefix(str_in: &str, field_type: &FieldType) -> InitialPrefix {
let mut str_it = str_in.chars();
let mut ret = InitialPrefix {
radix_in: Base::Ten,
sign: 1,
offset: 0,
};
let mut top_char = str_it.next();
// skip spaces and ensure top_char is the first non-space char
// (or None if none exists)
while let Some(' ') = top_char {
ret.offset += 1;
top_char = str_it.next();
}
// parse sign
match top_char {
Some('+') => {
ret.offset += 1;
top_char = str_it.next();
}
Some('-') => {
ret.sign = -1;
ret.offset += 1;
top_char = str_it.next();
}
_ => {}
}
// we want to exit with offset being
// the index of the first non-zero
// digit before the decimal point or
// if there is none, the zero before the
// decimal point, or, if there is none,
// the decimal point.
// while we are determining the offset
// we will ensure as a convention
// the offset is always on the first character
// that we are yet unsure if it is the
// final offset. If the zero could be before
// a decimal point we don't move past the zero.
let mut is_hex = false;
if Some('0') == top_char {
if let Some(base) = str_it.next() {
// lead zeroes can only exist in
// octal and hex base
let mut do_clean_lead_zeroes = false;
match base {
'x' | 'X' => {
is_hex = true;
ret.offset += 2;
ret.radix_in = Base::Hex;
do_clean_lead_zeroes = true;
}
e @ '0'..='9' => {
ret.offset += 1;
if let FieldType::Intf = *field_type {
ret.radix_in = Base::Octal;
}
if e == '0' {
do_clean_lead_zeroes = true;
}
}
_ => {}
}
if do_clean_lead_zeroes {
let mut first = true;
for ch_zero in str_it {
// see notes on offset above:
// this is why the offset for octal and decimal numbers
// that reach this branch is 1 even though
// they have already eaten the characters '00'
// this is also why when hex encounters its
// first zero it does not move its offset
// forward because it does not know for sure
// that it's current offset (of that zero)
// is not the final offset,
// whereas at that point octal knows its
// current offset is not the final offset.
match ch_zero {
'0' => {
if !(is_hex && first) {
ret.offset += 1;
}
}
// if decimal, keep last zero if one exists
// (it's possible for last zero to
// not exist at this branch if we're in hex input)
'.' => break,
// other digit, etc.
_ => {
if !(is_hex && first) {
ret.offset += 1;
}
break;
}
}
if first {
first = false;
}
}
}
}
}
ret
}
// this is the function a Sub's print will delegate to
// if it is a numeric field, passing the field details
// and an iterator to the argument
pub fn num_format(field: &FormatField, in_str_opt: Option<&String>) -> Option<String> {
let field_char = field.field_char;
// num format mainly operates by further delegating to one of
// several Formatter structs depending on the field
// see formatter.rs for more details
// to do switch to static dispatch
let formatter: Box<dyn Formatter> = match *field.field_type {
FieldType::Intf => Box::new(Intf::new()),
FieldType::Floatf => Box::new(Floatf::new()),
FieldType::CninetyNineHexFloatf => Box::new(CninetyNineHexFloatf::new()),
FieldType::Scif => Box::new(Scif::new()),
FieldType::Decf => Box::new(Decf::new()),
_ => {
panic!("asked to do num format with non-num field type");
}
};
let prim_opt=
// if we can get an assumed value from looking at the first
// few characters, use that value to create the FormatPrimitive
if let Some(provided_num) = get_provided(in_str_opt) {
let mut tmp = FormatPrimitive::default();
match field_char {
'u' | 'i' | 'd' => {
tmp.pre_decimal = Some(
format!("{provided_num}"));
},
'x' | 'X' => {
tmp.pre_decimal = Some(
format!("{provided_num:x}"));
},
'o' => {
tmp.pre_decimal = Some(
format!("{provided_num:o}"));
},
'e' | 'E' | 'g' | 'G' => {
let as_str = format!("{provided_num}");
let initial_prefix = get_initial_prefix(
&as_str,
field.field_type
);
tmp=formatter.get_primitive(field, &initial_prefix, &as_str)
.expect("err during default provided num");
},
_ => {
tmp.pre_decimal = Some(
format!("{provided_num}"));
tmp.post_decimal = Some(String::from("0"));
}
}
Some(tmp)
} else {
// otherwise we'll interpret the argument as a number
// using the appropriate Formatter
let in_str = in_str_opt.expect(
"please send the devs this message:
\n get_provided is failing to ret as Some(0) on no str ");
// first get information about the beginning of the
// numeric argument that would be useful for
// any formatter (int or float)
let initial_prefix = get_initial_prefix(
in_str,
field.field_type
);
// then get the FormatPrimitive from the Formatter
formatter.get_primitive(field, &initial_prefix, in_str)
};
// if we have a formatPrimitive, print its results
// according to the field-char appropriate Formatter
prim_opt.map(|prim| formatter.primitive_to_str(&prim, field.clone()))
}
| {
// important: keep println here not print
show_error!("{}: expected a numeric value", pf_arg.maybe_quote());
} | identifier_body |
num_format.rs | // This file is part of the uutils coreutils package.
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (vars) charf cninetyninehexfloatf decf floatf intf scif strf Cninety
//! handles creating printed output for numeric substitutions
// spell-checker:ignore (vars) charf decf floatf intf scif strf Cninety
use std::env;
use std::vec::Vec;
use crate::display::Quotable;
use crate::{show_error, show_warning};
use super::format_field::{FieldType, FormatField};
use super::formatter::{Base, FormatPrimitive, Formatter, InitialPrefix};
use super::formatters::cninetyninehexfloatf::CninetyNineHexFloatf;
use super::formatters::decf::Decf;
use super::formatters::floatf::Floatf;
use super::formatters::intf::Intf;
use super::formatters::scif::Scif;
pub fn warn_expected_numeric(pf_arg: &str) {
// important: keep println here not print
show_error!("{}: expected a numeric value", pf_arg.maybe_quote());
}
// when character constant arguments have excess characters
// issue a warning when POSIXLY_CORRECT is not set
fn warn_char_constant_ign(remaining_bytes: &[u8]) {
match env::var("POSIXLY_CORRECT") {
Ok(_) => {}
Err(e) => {
if let env::VarError::NotPresent = e {
show_warning!(
"{:?}: character(s) following character \
constant have been ignored",
remaining_bytes
);
}
}
}
}
// this function looks at the first few
// characters of an argument and returns a value if we can learn
// a value from that (e.g. no argument? return 0, char constant? ret value)
fn get_provided(str_in_opt: Option<&String>) -> Option<u8> {
const C_S_QUOTE: u8 = 39;
const C_D_QUOTE: u8 = 34;
match str_in_opt {
Some(str_in) => {
let mut byte_it = str_in.bytes();
if let Some(ch) = byte_it.next() {
match ch {
C_S_QUOTE | C_D_QUOTE => {
Some(match byte_it.next() {
Some(second_byte) => {
let mut ignored: Vec<u8> = Vec::new();
for cont in byte_it {
ignored.push(cont);
}
if !ignored.is_empty() {
warn_char_constant_ign(&ignored);
}
second_byte
}
// no byte after quote
None => {
let so_far = (ch as char).to_string();
warn_expected_numeric(&so_far);
0_u8
}
})
}
// first byte is not quote
_ => None, // no first byte
}
} else {
Some(0_u8)
}
}
None => Some(0),
}
}
// takes a string and returns
// a sign,
// a base,
// and an offset for index after all
// initial spacing, sign, base prefix, and leading zeroes
#[allow(clippy::cognitive_complexity)]
fn get_initial_prefix(str_in: &str, field_type: &FieldType) -> InitialPrefix {
let mut str_it = str_in.chars();
let mut ret = InitialPrefix {
radix_in: Base::Ten,
sign: 1,
offset: 0,
};
let mut top_char = str_it.next();
// skip spaces and ensure top_char is the first non-space char
// (or None if none exists)
while let Some(' ') = top_char {
ret.offset += 1;
top_char = str_it.next();
}
// parse sign
match top_char {
Some('+') => {
ret.offset += 1;
top_char = str_it.next();
}
Some('-') => {
ret.sign = -1; | ret.offset += 1;
top_char = str_it.next();
}
_ => {}
}
// we want to exit with offset being
// the index of the first non-zero
// digit before the decimal point or
// if there is none, the zero before the
// decimal point, or, if there is none,
// the decimal point.
// while we are determining the offset
// we will ensure as a convention
// the offset is always on the first character
// that we are yet unsure if it is the
// final offset. If the zero could be before
// a decimal point we don't move past the zero.
let mut is_hex = false;
if Some('0') == top_char {
if let Some(base) = str_it.next() {
// lead zeroes can only exist in
// octal and hex base
let mut do_clean_lead_zeroes = false;
match base {
'x' | 'X' => {
is_hex = true;
ret.offset += 2;
ret.radix_in = Base::Hex;
do_clean_lead_zeroes = true;
}
e @ '0'..='9' => {
ret.offset += 1;
if let FieldType::Intf = *field_type {
ret.radix_in = Base::Octal;
}
if e == '0' {
do_clean_lead_zeroes = true;
}
}
_ => {}
}
if do_clean_lead_zeroes {
let mut first = true;
for ch_zero in str_it {
// see notes on offset above:
// this is why the offset for octal and decimal numbers
// that reach this branch is 1 even though
// they have already eaten the characters '00'
// this is also why when hex encounters its
// first zero it does not move its offset
// forward because it does not know for sure
// that it's current offset (of that zero)
// is not the final offset,
// whereas at that point octal knows its
// current offset is not the final offset.
match ch_zero {
'0' => {
if !(is_hex && first) {
ret.offset += 1;
}
}
// if decimal, keep last zero if one exists
// (it's possible for last zero to
// not exist at this branch if we're in hex input)
'.' => break,
// other digit, etc.
_ => {
if !(is_hex && first) {
ret.offset += 1;
}
break;
}
}
if first {
first = false;
}
}
}
}
}
ret
}
// this is the function a Sub's print will delegate to
// if it is a numeric field, passing the field details
// and an iterator to the argument
pub fn num_format(field: &FormatField, in_str_opt: Option<&String>) -> Option<String> {
let field_char = field.field_char;
// num format mainly operates by further delegating to one of
// several Formatter structs depending on the field
// see formatter.rs for more details
// to do switch to static dispatch
let formatter: Box<dyn Formatter> = match *field.field_type {
FieldType::Intf => Box::new(Intf::new()),
FieldType::Floatf => Box::new(Floatf::new()),
FieldType::CninetyNineHexFloatf => Box::new(CninetyNineHexFloatf::new()),
FieldType::Scif => Box::new(Scif::new()),
FieldType::Decf => Box::new(Decf::new()),
_ => {
panic!("asked to do num format with non-num field type");
}
};
let prim_opt=
// if we can get an assumed value from looking at the first
// few characters, use that value to create the FormatPrimitive
if let Some(provided_num) = get_provided(in_str_opt) {
let mut tmp = FormatPrimitive::default();
match field_char {
'u' | 'i' | 'd' => {
tmp.pre_decimal = Some(
format!("{provided_num}"));
},
'x' | 'X' => {
tmp.pre_decimal = Some(
format!("{provided_num:x}"));
},
'o' => {
tmp.pre_decimal = Some(
format!("{provided_num:o}"));
},
'e' | 'E' | 'g' | 'G' => {
let as_str = format!("{provided_num}");
let initial_prefix = get_initial_prefix(
&as_str,
field.field_type
);
tmp=formatter.get_primitive(field, &initial_prefix, &as_str)
.expect("err during default provided num");
},
_ => {
tmp.pre_decimal = Some(
format!("{provided_num}"));
tmp.post_decimal = Some(String::from("0"));
}
}
Some(tmp)
} else {
// otherwise we'll interpret the argument as a number
// using the appropriate Formatter
let in_str = in_str_opt.expect(
"please send the devs this message:
\n get_provided is failing to ret as Some(0) on no str ");
// first get information about the beginning of the
// numeric argument that would be useful for
// any formatter (int or float)
let initial_prefix = get_initial_prefix(
in_str,
field.field_type
);
// then get the FormatPrimitive from the Formatter
formatter.get_primitive(field, &initial_prefix, in_str)
};
// if we have a formatPrimitive, print its results
// according to the field-char appropriate Formatter
prim_opt.map(|prim| formatter.primitive_to_str(&prim, field.clone()))
} | random_line_split | |
qaoaLibrary.py | #----------------------------------------------------------------------------#
# Title: A python helper library for QAOA-type optimization
# Author: Aniruddha Bapat
# Date: 05-28-2018
#
# Description: Here, I will maintain a library of frequently used functions
# in QAOA-type optimizations.
#----------------------------------------------------------------------------#
from ctypes import *
import numpy as np
import scipy.sparse as sp
from scipy.optimize import basinhopping, minimize
import itertools as it
from numpy import pi, sqrt
# Load c functions in qaoa
# I think this is OS-dependent. Create the appropriate shared c object
# for your OS (.so, .dll etc.) and load it into qc
qc = cdll.LoadLibrary('./lib2local-qaoa.so')
# States and Hamiltonian structs, ctyped into python
class state(Structure):
_fields_ = [('n', c_int),
('N',c_int),
('realcur',POINTER(c_double)),
('imagcur',POINTER(c_double)),
('realbuf',POINTER(c_double)),
('imagbuf',POINTER(c_double))]
class ham(Structure):
_fields_ = [('n',c_int),
('N',c_int),
('zzc',POINTER(c_double)),
('xc',POINTER(c_double)),
('zc',POINTER(c_double))]
# Bound class for basinhopping
class MaxTbound(object):
"""random displacement with bounds"""
def __init__(self, T, stepsize=0.5):
self.maxT= T
def __call__(self, **kwargs):
"""take a random step but ensure the new position is within the bounds"""
x = kwargs["x_new"]
return (sum(x)<self.maxT)
# Set input and return types
qc.expectH.restype = c_double
qc.overlap.resype = c_double
qc.energy.restype = c_double
qc.qaoa1energy.restype = c_double
# Initialize state and hamiltonian
def initialize(n):
psi = state()
H = ham()
qc.allocate(byref(psi),n)
qc.allocateH(byref(H),n)
return (psi, H)
# Generate Ham object from coefficients zzc, xc and zc (given as np arrays)
def hamGen(H, zzc, xc, zc):
n = len(xc)
H.zzc = (c_double * n**2)(*zzc.flatten().tolist())
H.xc = (c_double * n)(*xc.tolist())
H.zc = (c_double * n)(*zc.tolist())
# Function to compute Ham's k smallest eigenvalues and eigenvectors
def ground(H, Neigs):
n = H.n
energies= map(lambda i:qc.energy(i,byref(H)), range(1<<n))
Ham = np.diag(energies)
for i in range(1<<n):
for j in range(n):
Ham[i,i^(1<<j)] = H.xc[j]
return sp.linalg.eigsh(Ham, k=Neigs, which='SA')
# Inner product magnitude of two states given as complex np arrays
def overlap(psi1, psi2):
return np.abs(np.vdot(psi1,psi2))
# Convert C state object into numpy array of computational basis amplitudes
def c2pyState(psi):
n = psi.n
return np.array(psi.realcur[:1<<n]) + 1.0j*np.array(psi.imagcur[:1<<n])
# Overlap of psi with the Neigs smallest ground states of H
def gsOverlap(psi, H, Neigs):
val, vec = ground(H, Neigs)
final = c2pyState(psi)
retvalue = 0
for k in range(Neigs):
retvalue += overlap(final, vec[:,k])**2
return np.sqrt(retvalue)
#betagamma = all betas first, then all gammas
def expectQAOAp(psi, H, betagamma):
ppsi = pointer(psi)
pH = pointer(H)
qc.uniform(ppsi)
p = len(betagamma)/2
qc.evolveQAOA(ppsi, pH, (c_double * p)(*betagamma[:p]), (c_double * p)(*betagamma[p:]), p)
return qc.expectH(psi, pH)
# return squared overlap with the ground state vectors
def overlapQAOAp(psi, H, betagamma, vec):
ppsi = pointer(psi)
pH = pointer(H)
csp.uniform(ppsi)
p = len(betagamma)/2
csp.evolveQAOA(ppsi, pH, (c_double * p)(*betagamma[:p]), (c_double * p)(*betagamma[p:]), p)
sqoverlap=0
psivec = c2pyState(psi)
for i in len(vec.T):
sqoverlap += overlap(psivec, vec[:,i])**2
return sqoverlap
# Perform an inductive local optimization of QAOA angles. The code returns
# optimal angles and energy. Psi is now the result of the optimal evolution.
def optQAOA(psi, H, pmax, typeOfOpt='BFGS'):
ppsi = pointer(psi)
pH = pointer(H)
fOpt = lambda bg: expectQAOAp(psi, H, bg.tolist())
bg0 = 0.5*np.ones(2)
bgCur= bg0
for p in range(1,pmax+1):
bgNew = np.concatenate((bgCur[:p-1], [bgCur[p-1]], bgCur[p-1:], [bgCur[-1]]))
opt = minimize(fOpt, bg0 if p==1 else bgNew, method=typeOfOpt)
bgCur = opt.x
E = expectQAOAp(psi, H, bgCur.tolist())
#if E!=opt.fun: return "Error: Energy expectation does not match optimized value"
return (bgCur, E)
def optQAOAglobal(psi, H, p, Nit=10, Temp=1.0, Tmax=20):
ppsi = pointer(psi)
pH = pointer(H)
fOpt = lambda bg: expectQAOAp(psi, H, bg.tolist())
bg0 = 2.0*np.ones(2*p)
bounds = [(0,pi/2)]*p + [(0,np.inf)]*p
minKwargs = dict(method="L-BFGS-B", bounds=bounds)
stepCond = MaxTbound(Tmax)
opt = basinhopping(fOpt, bg0, niter=Nit, T=Temp, minimizer_kwargs=minKwargs, accept_test=stepCond)
bgOpt= opt.x
E = expectQAOAp(psi, H, bgOpt.tolist())
return (opt.x, E)
# Full search of angles, with resolution of M for every pi/2 angles
# beta range: [0,pi/2]
# Gamma range: [0, 3*pi]
def optQAOAgreedy(psi, H, pmax, typeOfOpt='BFGS'):
ppsi = pointer(psi)
pH = pointer(H)
bg0 = [0.5]*2
bgOpt = []
for p in range(pmax):
fOpt = lambda bg: expectQAOAp(psi, H, np.concatenate((bgOpt,bg)).tolist())
opt = minimize(fOpt, bg0, method=typeOfOpt)
bgOpt += opt.x.tolist()
E = expectQAOAp(psi, H, bgOpt)
return (np.array(bgOpt), E)
def optQAOAfull(psi, H, p, M=10, trunc=-np.inf):
S = 2 # scale factor gamma:beta
bMax = pi/2
gMax = S*bMax
indOpt= np.inf
Opt = np.inf
configs = it.product(*([np.linspace(0,bMax, M,endpoint=False)]*p + [np.linspace(0,gMax, S*M,endpoint=False)]*p))
for ind, i in enumerate(configs):
Ecur = expectQAOAp(psi, H, i)
if trunc!=np.inf and Ecur < trunc:
Opt = Ecur
indOpt = ind
break
indOpt= indOpt if Opt < Ecur else ind
Opt = min(Opt, Ecur)
configs = it.product(*([np.linspace(0,bMax, M,endpoint=False)]*p + [np.linspace(0,gMax, 6*M,endpoint=False)]*p))
return (list(configs)[indOpt], Opt)
# Prints useful information
def printOut(psi, val, vec, bgOpt, Eopt):
final = c2pyState(psi)
final2gs = 0
sumval = 0
Neigs = len(vec.T)
for k in range(Neigs):
sumval += overlap(final, vec[:,k])**2
final2gs=np.sqrt(sumval)
final2initial = 1./sqrt(psi.N)*overlap(final, np.ones(psi.N))
print "Optimal schedule:",
for i in range(len(bgOpt)):
print "%5.4f "%bgOpt[i],
print
#print "%5.4f %5.4f"%(Eopt, val[0])
print "Energy ratio: %7.6f, Overlap: %5.4f"%(np.abs(Eopt/(val[0])), final2gs)
print "Final to initial state overlap: %5.4f"%final2initial
# Symmetrize a state w.r.t to the global Z2 symmetry present in 2-local Hamiltonians
# Only physical when you are in the symmetric sector
def z2symmetrize(psi):
sym = psi + psi[::-1]
return sym/np.sqrt(overlap(sym,sym))
# Entanglement entropy
# Input: a state psi (as an np.arrar) in the computational basis, and the cut site k
def entent(psi, k):
N = len(psi)
n = len(format(N,'b'))-1 # effectively n = log2(N) for powers of two
m = n-k
prho = np.zeros((1<<k,1<<k),dtype=np.complex_) # partial density matrix
x = np.arange(1<<k)
for i in range(1<<m):
ppsi = psi[x*(1<<m)+i] # Partial state vector
prho += np.outer(ppsi,np.conj(ppsi))
val, vec = eigh(prho)
return sum(map(lambda x: -x*np.log(abs(x)+0.000000000001), val))
# The analytical formula for energy under QAOA1
# betagamma = beta first, then gamma
def expectQAOA1(H, betagamma):
return qc.qaoa1energy(byref(H), c_double(betagamma[0]), c_double(betagamma[1]))
##############################################################################
# ZZ + X + Z Hamiltonian coefficient generators
# Output format: a list of lists of the form ((Z, ZZ, ..), (X, XX, ..))
# So, for a 2 local Hamiltonian, expect ((Z, ZZ), (X,))
# The standard 2-local long-range model with power law interactions
def lr(n, alpha, J, B):
Ki = np.zeros(n)
Jij = np.array([[(J/(abs(i-j))**alpha if i!=j else 0) for i in range(n)] for j in range(n)])
Bi = B*np.ones(n)
return ((Ki, Jij), (Bi,))
# Read parameters and prepare coefficients
def readParams():
return
# Construct an unweighted MaxSat instance from a cnf file
# NOTE1: cnf Clauses must be 1-indexed.
# NOTE2: This method is pretty memory-inefficient for
# large clause sizes. Ideal for up to ~4Sat
def readCnf(path2file):
f=open(path2file,'r')
clauses = []
maxLength = 0
while True:
line = f.readline()
if not line: break
spline = line.split()
if spline[0]=='c':
continue
elif spline[0]=='p':
n = int(spline[2])
M = int(spline[3])
else:
for c in spline[:-1]:
if abs(int(c))>n or int(c)==0:
print("Error: variable indices must lie between 0 and %d"%n) | break;
maxLength = max(maxLength, len(spline)-1)
clauses.append(map(int,spline[:-1]))
if len(clauses)!=M: print("Error: Need %d clauses"%M)
f.close()
const = 0
Bi = np.zeros(n)
Jall = [np.zeros([n]*i) for i in range(1,maxLength+1)]
for i in range(M):
clause=map(lambda x: abs(x)-1,clauses[i])
signs=map(np.sign,clauses[i])
K = len(clause)
for k in range(1, K+1):
for inds in it.combinations(range(K), k):
Jall[k-1][inds] += (1./(1<<K))*np.prod([signs[x] for x in inds])
const += 1/(1<<K)
return (Jall, (Bi,)) | break;
if spline[-1]!='0':
print("Error: clause descriptions must have a terminal 0") | random_line_split |
qaoaLibrary.py | #----------------------------------------------------------------------------#
# Title: A python helper library for QAOA-type optimization
# Author: Aniruddha Bapat
# Date: 05-28-2018
#
# Description: Here, I will maintain a library of frequently used functions
# in QAOA-type optimizations.
#----------------------------------------------------------------------------#
from ctypes import *
import numpy as np
import scipy.sparse as sp
from scipy.optimize import basinhopping, minimize
import itertools as it
from numpy import pi, sqrt
# Load c functions in qaoa
# I think this is OS-dependent. Create the appropriate shared c object
# for your OS (.so, .dll etc.) and load it into qc
qc = cdll.LoadLibrary('./lib2local-qaoa.so')
# States and Hamiltonian structs, ctyped into python
class state(Structure):
_fields_ = [('n', c_int),
('N',c_int),
('realcur',POINTER(c_double)),
('imagcur',POINTER(c_double)),
('realbuf',POINTER(c_double)),
('imagbuf',POINTER(c_double))]
class ham(Structure):
_fields_ = [('n',c_int),
('N',c_int),
('zzc',POINTER(c_double)),
('xc',POINTER(c_double)),
('zc',POINTER(c_double))]
# Bound class for basinhopping
class MaxTbound(object):
"""random displacement with bounds"""
def __init__(self, T, stepsize=0.5):
self.maxT= T
def __call__(self, **kwargs):
"""take a random step but ensure the new position is within the bounds"""
x = kwargs["x_new"]
return (sum(x)<self.maxT)
# Set input and return types
qc.expectH.restype = c_double
qc.overlap.resype = c_double
qc.energy.restype = c_double
qc.qaoa1energy.restype = c_double
# Initialize state and hamiltonian
def | (n):
psi = state()
H = ham()
qc.allocate(byref(psi),n)
qc.allocateH(byref(H),n)
return (psi, H)
# Generate Ham object from coefficients zzc, xc and zc (given as np arrays)
def hamGen(H, zzc, xc, zc):
n = len(xc)
H.zzc = (c_double * n**2)(*zzc.flatten().tolist())
H.xc = (c_double * n)(*xc.tolist())
H.zc = (c_double * n)(*zc.tolist())
# Function to compute Ham's k smallest eigenvalues and eigenvectors
def ground(H, Neigs):
n = H.n
energies= map(lambda i:qc.energy(i,byref(H)), range(1<<n))
Ham = np.diag(energies)
for i in range(1<<n):
for j in range(n):
Ham[i,i^(1<<j)] = H.xc[j]
return sp.linalg.eigsh(Ham, k=Neigs, which='SA')
# Inner product magnitude of two states given as complex np arrays
def overlap(psi1, psi2):
return np.abs(np.vdot(psi1,psi2))
# Convert C state object into numpy array of computational basis amplitudes
def c2pyState(psi):
n = psi.n
return np.array(psi.realcur[:1<<n]) + 1.0j*np.array(psi.imagcur[:1<<n])
# Overlap of psi with the Neigs smallest ground states of H
def gsOverlap(psi, H, Neigs):
val, vec = ground(H, Neigs)
final = c2pyState(psi)
retvalue = 0
for k in range(Neigs):
retvalue += overlap(final, vec[:,k])**2
return np.sqrt(retvalue)
#betagamma = all betas first, then all gammas
def expectQAOAp(psi, H, betagamma):
ppsi = pointer(psi)
pH = pointer(H)
qc.uniform(ppsi)
p = len(betagamma)/2
qc.evolveQAOA(ppsi, pH, (c_double * p)(*betagamma[:p]), (c_double * p)(*betagamma[p:]), p)
return qc.expectH(psi, pH)
# return squared overlap with the ground state vectors
def overlapQAOAp(psi, H, betagamma, vec):
ppsi = pointer(psi)
pH = pointer(H)
csp.uniform(ppsi)
p = len(betagamma)/2
csp.evolveQAOA(ppsi, pH, (c_double * p)(*betagamma[:p]), (c_double * p)(*betagamma[p:]), p)
sqoverlap=0
psivec = c2pyState(psi)
for i in len(vec.T):
sqoverlap += overlap(psivec, vec[:,i])**2
return sqoverlap
# Perform an inductive local optimization of QAOA angles. The code returns
# optimal angles and energy. Psi is now the result of the optimal evolution.
def optQAOA(psi, H, pmax, typeOfOpt='BFGS'):
ppsi = pointer(psi)
pH = pointer(H)
fOpt = lambda bg: expectQAOAp(psi, H, bg.tolist())
bg0 = 0.5*np.ones(2)
bgCur= bg0
for p in range(1,pmax+1):
bgNew = np.concatenate((bgCur[:p-1], [bgCur[p-1]], bgCur[p-1:], [bgCur[-1]]))
opt = minimize(fOpt, bg0 if p==1 else bgNew, method=typeOfOpt)
bgCur = opt.x
E = expectQAOAp(psi, H, bgCur.tolist())
#if E!=opt.fun: return "Error: Energy expectation does not match optimized value"
return (bgCur, E)
def optQAOAglobal(psi, H, p, Nit=10, Temp=1.0, Tmax=20):
ppsi = pointer(psi)
pH = pointer(H)
fOpt = lambda bg: expectQAOAp(psi, H, bg.tolist())
bg0 = 2.0*np.ones(2*p)
bounds = [(0,pi/2)]*p + [(0,np.inf)]*p
minKwargs = dict(method="L-BFGS-B", bounds=bounds)
stepCond = MaxTbound(Tmax)
opt = basinhopping(fOpt, bg0, niter=Nit, T=Temp, minimizer_kwargs=minKwargs, accept_test=stepCond)
bgOpt= opt.x
E = expectQAOAp(psi, H, bgOpt.tolist())
return (opt.x, E)
# Full search of angles, with resolution of M for every pi/2 angles
# beta range: [0,pi/2]
# Gamma range: [0, 3*pi]
def optQAOAgreedy(psi, H, pmax, typeOfOpt='BFGS'):
ppsi = pointer(psi)
pH = pointer(H)
bg0 = [0.5]*2
bgOpt = []
for p in range(pmax):
fOpt = lambda bg: expectQAOAp(psi, H, np.concatenate((bgOpt,bg)).tolist())
opt = minimize(fOpt, bg0, method=typeOfOpt)
bgOpt += opt.x.tolist()
E = expectQAOAp(psi, H, bgOpt)
return (np.array(bgOpt), E)
def optQAOAfull(psi, H, p, M=10, trunc=-np.inf):
S = 2 # scale factor gamma:beta
bMax = pi/2
gMax = S*bMax
indOpt= np.inf
Opt = np.inf
configs = it.product(*([np.linspace(0,bMax, M,endpoint=False)]*p + [np.linspace(0,gMax, S*M,endpoint=False)]*p))
for ind, i in enumerate(configs):
Ecur = expectQAOAp(psi, H, i)
if trunc!=np.inf and Ecur < trunc:
Opt = Ecur
indOpt = ind
break
indOpt= indOpt if Opt < Ecur else ind
Opt = min(Opt, Ecur)
configs = it.product(*([np.linspace(0,bMax, M,endpoint=False)]*p + [np.linspace(0,gMax, 6*M,endpoint=False)]*p))
return (list(configs)[indOpt], Opt)
# Prints useful information
def printOut(psi, val, vec, bgOpt, Eopt):
final = c2pyState(psi)
final2gs = 0
sumval = 0
Neigs = len(vec.T)
for k in range(Neigs):
sumval += overlap(final, vec[:,k])**2
final2gs=np.sqrt(sumval)
final2initial = 1./sqrt(psi.N)*overlap(final, np.ones(psi.N))
print "Optimal schedule:",
for i in range(len(bgOpt)):
print "%5.4f "%bgOpt[i],
print
#print "%5.4f %5.4f"%(Eopt, val[0])
print "Energy ratio: %7.6f, Overlap: %5.4f"%(np.abs(Eopt/(val[0])), final2gs)
print "Final to initial state overlap: %5.4f"%final2initial
# Symmetrize a state w.r.t to the global Z2 symmetry present in 2-local Hamiltonians
# Only physical when you are in the symmetric sector
def z2symmetrize(psi):
sym = psi + psi[::-1]
return sym/np.sqrt(overlap(sym,sym))
# Entanglement entropy
# Input: a state psi (as an np.arrar) in the computational basis, and the cut site k
def entent(psi, k):
N = len(psi)
n = len(format(N,'b'))-1 # effectively n = log2(N) for powers of two
m = n-k
prho = np.zeros((1<<k,1<<k),dtype=np.complex_) # partial density matrix
x = np.arange(1<<k)
for i in range(1<<m):
ppsi = psi[x*(1<<m)+i] # Partial state vector
prho += np.outer(ppsi,np.conj(ppsi))
val, vec = eigh(prho)
return sum(map(lambda x: -x*np.log(abs(x)+0.000000000001), val))
# The analytical formula for energy under QAOA1
# betagamma = beta first, then gamma
def expectQAOA1(H, betagamma):
return qc.qaoa1energy(byref(H), c_double(betagamma[0]), c_double(betagamma[1]))
##############################################################################
# ZZ + X + Z Hamiltonian coefficient generators
# Output format: a list of lists of the form ((Z, ZZ, ..), (X, XX, ..))
# So, for a 2 local Hamiltonian, expect ((Z, ZZ), (X,))
# The standard 2-local long-range model with power law interactions
def lr(n, alpha, J, B):
Ki = np.zeros(n)
Jij = np.array([[(J/(abs(i-j))**alpha if i!=j else 0) for i in range(n)] for j in range(n)])
Bi = B*np.ones(n)
return ((Ki, Jij), (Bi,))
# Read parameters and prepare coefficients
def readParams():
return
# Construct an unweighted MaxSat instance from a cnf file
# NOTE1: cnf Clauses must be 1-indexed.
# NOTE2: This method is pretty memory-inefficient for
# large clause sizes. Ideal for up to ~4Sat
def readCnf(path2file):
f=open(path2file,'r')
clauses = []
maxLength = 0
while True:
line = f.readline()
if not line: break
spline = line.split()
if spline[0]=='c':
continue
elif spline[0]=='p':
n = int(spline[2])
M = int(spline[3])
else:
for c in spline[:-1]:
if abs(int(c))>n or int(c)==0:
print("Error: variable indices must lie between 0 and %d"%n)
break;
if spline[-1]!='0':
print("Error: clause descriptions must have a terminal 0")
break;
maxLength = max(maxLength, len(spline)-1)
clauses.append(map(int,spline[:-1]))
if len(clauses)!=M: print("Error: Need %d clauses"%M)
f.close()
const = 0
Bi = np.zeros(n)
Jall = [np.zeros([n]*i) for i in range(1,maxLength+1)]
for i in range(M):
clause=map(lambda x: abs(x)-1,clauses[i])
signs=map(np.sign,clauses[i])
K = len(clause)
for k in range(1, K+1):
for inds in it.combinations(range(K), k):
Jall[k-1][inds] += (1./(1<<K))*np.prod([signs[x] for x in inds])
const += 1/(1<<K)
return (Jall, (Bi,))
| initialize | identifier_name |
qaoaLibrary.py | #----------------------------------------------------------------------------#
# Title: A python helper library for QAOA-type optimization
# Author: Aniruddha Bapat
# Date: 05-28-2018
#
# Description: Here, I will maintain a library of frequently used functions
# in QAOA-type optimizations.
#----------------------------------------------------------------------------#
from ctypes import *
import numpy as np
import scipy.sparse as sp
from scipy.optimize import basinhopping, minimize
import itertools as it
from numpy import pi, sqrt
# Load c functions in qaoa
# I think this is OS-dependent. Create the appropriate shared c object
# for your OS (.so, .dll etc.) and load it into qc
qc = cdll.LoadLibrary('./lib2local-qaoa.so')
# States and Hamiltonian structs, ctyped into python
class state(Structure):
_fields_ = [('n', c_int),
('N',c_int),
('realcur',POINTER(c_double)),
('imagcur',POINTER(c_double)),
('realbuf',POINTER(c_double)),
('imagbuf',POINTER(c_double))]
class ham(Structure):
_fields_ = [('n',c_int),
('N',c_int),
('zzc',POINTER(c_double)),
('xc',POINTER(c_double)),
('zc',POINTER(c_double))]
# Bound class for basinhopping
class MaxTbound(object):
|
# Set input and return types
qc.expectH.restype = c_double
qc.overlap.resype = c_double
qc.energy.restype = c_double
qc.qaoa1energy.restype = c_double
# Initialize state and hamiltonian
def initialize(n):
psi = state()
H = ham()
qc.allocate(byref(psi),n)
qc.allocateH(byref(H),n)
return (psi, H)
# Generate Ham object from coefficients zzc, xc and zc (given as np arrays)
def hamGen(H, zzc, xc, zc):
n = len(xc)
H.zzc = (c_double * n**2)(*zzc.flatten().tolist())
H.xc = (c_double * n)(*xc.tolist())
H.zc = (c_double * n)(*zc.tolist())
# Function to compute Ham's k smallest eigenvalues and eigenvectors
def ground(H, Neigs):
n = H.n
energies= map(lambda i:qc.energy(i,byref(H)), range(1<<n))
Ham = np.diag(energies)
for i in range(1<<n):
for j in range(n):
Ham[i,i^(1<<j)] = H.xc[j]
return sp.linalg.eigsh(Ham, k=Neigs, which='SA')
# Inner product magnitude of two states given as complex np arrays
def overlap(psi1, psi2):
return np.abs(np.vdot(psi1,psi2))
# Convert C state object into numpy array of computational basis amplitudes
def c2pyState(psi):
n = psi.n
return np.array(psi.realcur[:1<<n]) + 1.0j*np.array(psi.imagcur[:1<<n])
# Overlap of psi with the Neigs smallest ground states of H
def gsOverlap(psi, H, Neigs):
val, vec = ground(H, Neigs)
final = c2pyState(psi)
retvalue = 0
for k in range(Neigs):
retvalue += overlap(final, vec[:,k])**2
return np.sqrt(retvalue)
#betagamma = all betas first, then all gammas
def expectQAOAp(psi, H, betagamma):
ppsi = pointer(psi)
pH = pointer(H)
qc.uniform(ppsi)
p = len(betagamma)/2
qc.evolveQAOA(ppsi, pH, (c_double * p)(*betagamma[:p]), (c_double * p)(*betagamma[p:]), p)
return qc.expectH(psi, pH)
# return squared overlap with the ground state vectors
def overlapQAOAp(psi, H, betagamma, vec):
ppsi = pointer(psi)
pH = pointer(H)
csp.uniform(ppsi)
p = len(betagamma)/2
csp.evolveQAOA(ppsi, pH, (c_double * p)(*betagamma[:p]), (c_double * p)(*betagamma[p:]), p)
sqoverlap=0
psivec = c2pyState(psi)
for i in len(vec.T):
sqoverlap += overlap(psivec, vec[:,i])**2
return sqoverlap
# Perform an inductive local optimization of QAOA angles. The code returns
# optimal angles and energy. Psi is now the result of the optimal evolution.
def optQAOA(psi, H, pmax, typeOfOpt='BFGS'):
ppsi = pointer(psi)
pH = pointer(H)
fOpt = lambda bg: expectQAOAp(psi, H, bg.tolist())
bg0 = 0.5*np.ones(2)
bgCur= bg0
for p in range(1,pmax+1):
bgNew = np.concatenate((bgCur[:p-1], [bgCur[p-1]], bgCur[p-1:], [bgCur[-1]]))
opt = minimize(fOpt, bg0 if p==1 else bgNew, method=typeOfOpt)
bgCur = opt.x
E = expectQAOAp(psi, H, bgCur.tolist())
#if E!=opt.fun: return "Error: Energy expectation does not match optimized value"
return (bgCur, E)
def optQAOAglobal(psi, H, p, Nit=10, Temp=1.0, Tmax=20):
ppsi = pointer(psi)
pH = pointer(H)
fOpt = lambda bg: expectQAOAp(psi, H, bg.tolist())
bg0 = 2.0*np.ones(2*p)
bounds = [(0,pi/2)]*p + [(0,np.inf)]*p
minKwargs = dict(method="L-BFGS-B", bounds=bounds)
stepCond = MaxTbound(Tmax)
opt = basinhopping(fOpt, bg0, niter=Nit, T=Temp, minimizer_kwargs=minKwargs, accept_test=stepCond)
bgOpt= opt.x
E = expectQAOAp(psi, H, bgOpt.tolist())
return (opt.x, E)
# Full search of angles, with resolution of M for every pi/2 angles
# beta range: [0,pi/2]
# Gamma range: [0, 3*pi]
def optQAOAgreedy(psi, H, pmax, typeOfOpt='BFGS'):
ppsi = pointer(psi)
pH = pointer(H)
bg0 = [0.5]*2
bgOpt = []
for p in range(pmax):
fOpt = lambda bg: expectQAOAp(psi, H, np.concatenate((bgOpt,bg)).tolist())
opt = minimize(fOpt, bg0, method=typeOfOpt)
bgOpt += opt.x.tolist()
E = expectQAOAp(psi, H, bgOpt)
return (np.array(bgOpt), E)
def optQAOAfull(psi, H, p, M=10, trunc=-np.inf):
S = 2 # scale factor gamma:beta
bMax = pi/2
gMax = S*bMax
indOpt= np.inf
Opt = np.inf
configs = it.product(*([np.linspace(0,bMax, M,endpoint=False)]*p + [np.linspace(0,gMax, S*M,endpoint=False)]*p))
for ind, i in enumerate(configs):
Ecur = expectQAOAp(psi, H, i)
if trunc!=np.inf and Ecur < trunc:
Opt = Ecur
indOpt = ind
break
indOpt= indOpt if Opt < Ecur else ind
Opt = min(Opt, Ecur)
configs = it.product(*([np.linspace(0,bMax, M,endpoint=False)]*p + [np.linspace(0,gMax, 6*M,endpoint=False)]*p))
return (list(configs)[indOpt], Opt)
# Prints useful information
def printOut(psi, val, vec, bgOpt, Eopt):
final = c2pyState(psi)
final2gs = 0
sumval = 0
Neigs = len(vec.T)
for k in range(Neigs):
sumval += overlap(final, vec[:,k])**2
final2gs=np.sqrt(sumval)
final2initial = 1./sqrt(psi.N)*overlap(final, np.ones(psi.N))
print "Optimal schedule:",
for i in range(len(bgOpt)):
print "%5.4f "%bgOpt[i],
print
#print "%5.4f %5.4f"%(Eopt, val[0])
print "Energy ratio: %7.6f, Overlap: %5.4f"%(np.abs(Eopt/(val[0])), final2gs)
print "Final to initial state overlap: %5.4f"%final2initial
# Symmetrize a state w.r.t to the global Z2 symmetry present in 2-local Hamiltonians
# Only physical when you are in the symmetric sector
def z2symmetrize(psi):
sym = psi + psi[::-1]
return sym/np.sqrt(overlap(sym,sym))
# Entanglement entropy
# Input: a state psi (as an np.arrar) in the computational basis, and the cut site k
def entent(psi, k):
N = len(psi)
n = len(format(N,'b'))-1 # effectively n = log2(N) for powers of two
m = n-k
prho = np.zeros((1<<k,1<<k),dtype=np.complex_) # partial density matrix
x = np.arange(1<<k)
for i in range(1<<m):
ppsi = psi[x*(1<<m)+i] # Partial state vector
prho += np.outer(ppsi,np.conj(ppsi))
val, vec = eigh(prho)
return sum(map(lambda x: -x*np.log(abs(x)+0.000000000001), val))
# The analytical formula for energy under QAOA1
# betagamma = beta first, then gamma
def expectQAOA1(H, betagamma):
return qc.qaoa1energy(byref(H), c_double(betagamma[0]), c_double(betagamma[1]))
##############################################################################
# ZZ + X + Z Hamiltonian coefficient generators
# Output format: a list of lists of the form ((Z, ZZ, ..), (X, XX, ..))
# So, for a 2 local Hamiltonian, expect ((Z, ZZ), (X,))
# The standard 2-local long-range model with power law interactions
def lr(n, alpha, J, B):
Ki = np.zeros(n)
Jij = np.array([[(J/(abs(i-j))**alpha if i!=j else 0) for i in range(n)] for j in range(n)])
Bi = B*np.ones(n)
return ((Ki, Jij), (Bi,))
# Read parameters and prepare coefficients
def readParams():
return
# Construct an unweighted MaxSat instance from a cnf file
# NOTE1: cnf Clauses must be 1-indexed.
# NOTE2: This method is pretty memory-inefficient for
# large clause sizes. Ideal for up to ~4Sat
def readCnf(path2file):
f=open(path2file,'r')
clauses = []
maxLength = 0
while True:
line = f.readline()
if not line: break
spline = line.split()
if spline[0]=='c':
continue
elif spline[0]=='p':
n = int(spline[2])
M = int(spline[3])
else:
for c in spline[:-1]:
if abs(int(c))>n or int(c)==0:
print("Error: variable indices must lie between 0 and %d"%n)
break;
if spline[-1]!='0':
print("Error: clause descriptions must have a terminal 0")
break;
maxLength = max(maxLength, len(spline)-1)
clauses.append(map(int,spline[:-1]))
if len(clauses)!=M: print("Error: Need %d clauses"%M)
f.close()
const = 0
Bi = np.zeros(n)
Jall = [np.zeros([n]*i) for i in range(1,maxLength+1)]
for i in range(M):
clause=map(lambda x: abs(x)-1,clauses[i])
signs=map(np.sign,clauses[i])
K = len(clause)
for k in range(1, K+1):
for inds in it.combinations(range(K), k):
Jall[k-1][inds] += (1./(1<<K))*np.prod([signs[x] for x in inds])
const += 1/(1<<K)
return (Jall, (Bi,))
| """random displacement with bounds"""
def __init__(self, T, stepsize=0.5):
self.maxT= T
def __call__(self, **kwargs):
"""take a random step but ensure the new position is within the bounds"""
x = kwargs["x_new"]
return (sum(x)<self.maxT) | identifier_body |
qaoaLibrary.py | #----------------------------------------------------------------------------#
# Title: A python helper library for QAOA-type optimization
# Author: Aniruddha Bapat
# Date: 05-28-2018
#
# Description: Here, I will maintain a library of frequently used functions
# in QAOA-type optimizations.
#----------------------------------------------------------------------------#
from ctypes import *
import numpy as np
import scipy.sparse as sp
from scipy.optimize import basinhopping, minimize
import itertools as it
from numpy import pi, sqrt
# Load c functions in qaoa
# I think this is OS-dependent. Create the appropriate shared c object
# for your OS (.so, .dll etc.) and load it into qc
qc = cdll.LoadLibrary('./lib2local-qaoa.so')
# States and Hamiltonian structs, ctyped into python
class state(Structure):
_fields_ = [('n', c_int),
('N',c_int),
('realcur',POINTER(c_double)),
('imagcur',POINTER(c_double)),
('realbuf',POINTER(c_double)),
('imagbuf',POINTER(c_double))]
class ham(Structure):
_fields_ = [('n',c_int),
('N',c_int),
('zzc',POINTER(c_double)),
('xc',POINTER(c_double)),
('zc',POINTER(c_double))]
# Bound class for basinhopping
class MaxTbound(object):
"""random displacement with bounds"""
def __init__(self, T, stepsize=0.5):
self.maxT= T
def __call__(self, **kwargs):
"""take a random step but ensure the new position is within the bounds"""
x = kwargs["x_new"]
return (sum(x)<self.maxT)
# Set input and return types
qc.expectH.restype = c_double
qc.overlap.resype = c_double
qc.energy.restype = c_double
qc.qaoa1energy.restype = c_double
# Initialize state and hamiltonian
def initialize(n):
psi = state()
H = ham()
qc.allocate(byref(psi),n)
qc.allocateH(byref(H),n)
return (psi, H)
# Generate Ham object from coefficients zzc, xc and zc (given as np arrays)
def hamGen(H, zzc, xc, zc):
n = len(xc)
H.zzc = (c_double * n**2)(*zzc.flatten().tolist())
H.xc = (c_double * n)(*xc.tolist())
H.zc = (c_double * n)(*zc.tolist())
# Function to compute Ham's k smallest eigenvalues and eigenvectors
def ground(H, Neigs):
n = H.n
energies= map(lambda i:qc.energy(i,byref(H)), range(1<<n))
Ham = np.diag(energies)
for i in range(1<<n):
for j in range(n):
Ham[i,i^(1<<j)] = H.xc[j]
return sp.linalg.eigsh(Ham, k=Neigs, which='SA')
# Inner product magnitude of two states given as complex np arrays
def overlap(psi1, psi2):
return np.abs(np.vdot(psi1,psi2))
# Convert C state object into numpy array of computational basis amplitudes
def c2pyState(psi):
n = psi.n
return np.array(psi.realcur[:1<<n]) + 1.0j*np.array(psi.imagcur[:1<<n])
# Overlap of psi with the Neigs smallest ground states of H
def gsOverlap(psi, H, Neigs):
val, vec = ground(H, Neigs)
final = c2pyState(psi)
retvalue = 0
for k in range(Neigs):
retvalue += overlap(final, vec[:,k])**2
return np.sqrt(retvalue)
#betagamma = all betas first, then all gammas
def expectQAOAp(psi, H, betagamma):
ppsi = pointer(psi)
pH = pointer(H)
qc.uniform(ppsi)
p = len(betagamma)/2
qc.evolveQAOA(ppsi, pH, (c_double * p)(*betagamma[:p]), (c_double * p)(*betagamma[p:]), p)
return qc.expectH(psi, pH)
# return squared overlap with the ground state vectors
def overlapQAOAp(psi, H, betagamma, vec):
ppsi = pointer(psi)
pH = pointer(H)
csp.uniform(ppsi)
p = len(betagamma)/2
csp.evolveQAOA(ppsi, pH, (c_double * p)(*betagamma[:p]), (c_double * p)(*betagamma[p:]), p)
sqoverlap=0
psivec = c2pyState(psi)
for i in len(vec.T):
sqoverlap += overlap(psivec, vec[:,i])**2
return sqoverlap
# Perform an inductive local optimization of QAOA angles. The code returns
# optimal angles and energy. Psi is now the result of the optimal evolution.
def optQAOA(psi, H, pmax, typeOfOpt='BFGS'):
ppsi = pointer(psi)
pH = pointer(H)
fOpt = lambda bg: expectQAOAp(psi, H, bg.tolist())
bg0 = 0.5*np.ones(2)
bgCur= bg0
for p in range(1,pmax+1):
bgNew = np.concatenate((bgCur[:p-1], [bgCur[p-1]], bgCur[p-1:], [bgCur[-1]]))
opt = minimize(fOpt, bg0 if p==1 else bgNew, method=typeOfOpt)
bgCur = opt.x
E = expectQAOAp(psi, H, bgCur.tolist())
#if E!=opt.fun: return "Error: Energy expectation does not match optimized value"
return (bgCur, E)
def optQAOAglobal(psi, H, p, Nit=10, Temp=1.0, Tmax=20):
ppsi = pointer(psi)
pH = pointer(H)
fOpt = lambda bg: expectQAOAp(psi, H, bg.tolist())
bg0 = 2.0*np.ones(2*p)
bounds = [(0,pi/2)]*p + [(0,np.inf)]*p
minKwargs = dict(method="L-BFGS-B", bounds=bounds)
stepCond = MaxTbound(Tmax)
opt = basinhopping(fOpt, bg0, niter=Nit, T=Temp, minimizer_kwargs=minKwargs, accept_test=stepCond)
bgOpt= opt.x
E = expectQAOAp(psi, H, bgOpt.tolist())
return (opt.x, E)
# Full search of angles, with resolution of M for every pi/2 angles
# beta range: [0,pi/2]
# Gamma range: [0, 3*pi]
def optQAOAgreedy(psi, H, pmax, typeOfOpt='BFGS'):
ppsi = pointer(psi)
pH = pointer(H)
bg0 = [0.5]*2
bgOpt = []
for p in range(pmax):
fOpt = lambda bg: expectQAOAp(psi, H, np.concatenate((bgOpt,bg)).tolist())
opt = minimize(fOpt, bg0, method=typeOfOpt)
bgOpt += opt.x.tolist()
E = expectQAOAp(psi, H, bgOpt)
return (np.array(bgOpt), E)
def optQAOAfull(psi, H, p, M=10, trunc=-np.inf):
S = 2 # scale factor gamma:beta
bMax = pi/2
gMax = S*bMax
indOpt= np.inf
Opt = np.inf
configs = it.product(*([np.linspace(0,bMax, M,endpoint=False)]*p + [np.linspace(0,gMax, S*M,endpoint=False)]*p))
for ind, i in enumerate(configs):
Ecur = expectQAOAp(psi, H, i)
if trunc!=np.inf and Ecur < trunc:
Opt = Ecur
indOpt = ind
break
indOpt= indOpt if Opt < Ecur else ind
Opt = min(Opt, Ecur)
configs = it.product(*([np.linspace(0,bMax, M,endpoint=False)]*p + [np.linspace(0,gMax, 6*M,endpoint=False)]*p))
return (list(configs)[indOpt], Opt)
# Prints useful information
def printOut(psi, val, vec, bgOpt, Eopt):
final = c2pyState(psi)
final2gs = 0
sumval = 0
Neigs = len(vec.T)
for k in range(Neigs):
sumval += overlap(final, vec[:,k])**2
final2gs=np.sqrt(sumval)
final2initial = 1./sqrt(psi.N)*overlap(final, np.ones(psi.N))
print "Optimal schedule:",
for i in range(len(bgOpt)):
print "%5.4f "%bgOpt[i],
print
#print "%5.4f %5.4f"%(Eopt, val[0])
print "Energy ratio: %7.6f, Overlap: %5.4f"%(np.abs(Eopt/(val[0])), final2gs)
print "Final to initial state overlap: %5.4f"%final2initial
# Symmetrize a state w.r.t to the global Z2 symmetry present in 2-local Hamiltonians
# Only physical when you are in the symmetric sector
def z2symmetrize(psi):
sym = psi + psi[::-1]
return sym/np.sqrt(overlap(sym,sym))
# Entanglement entropy
# Input: a state psi (as an np.arrar) in the computational basis, and the cut site k
def entent(psi, k):
N = len(psi)
n = len(format(N,'b'))-1 # effectively n = log2(N) for powers of two
m = n-k
prho = np.zeros((1<<k,1<<k),dtype=np.complex_) # partial density matrix
x = np.arange(1<<k)
for i in range(1<<m):
ppsi = psi[x*(1<<m)+i] # Partial state vector
prho += np.outer(ppsi,np.conj(ppsi))
val, vec = eigh(prho)
return sum(map(lambda x: -x*np.log(abs(x)+0.000000000001), val))
# The analytical formula for energy under QAOA1
# betagamma = beta first, then gamma
def expectQAOA1(H, betagamma):
return qc.qaoa1energy(byref(H), c_double(betagamma[0]), c_double(betagamma[1]))
##############################################################################
# ZZ + X + Z Hamiltonian coefficient generators
# Output format: a list of lists of the form ((Z, ZZ, ..), (X, XX, ..))
# So, for a 2 local Hamiltonian, expect ((Z, ZZ), (X,))
# The standard 2-local long-range model with power law interactions
def lr(n, alpha, J, B):
Ki = np.zeros(n)
Jij = np.array([[(J/(abs(i-j))**alpha if i!=j else 0) for i in range(n)] for j in range(n)])
Bi = B*np.ones(n)
return ((Ki, Jij), (Bi,))
# Read parameters and prepare coefficients
def readParams():
return
# Construct an unweighted MaxSat instance from a cnf file
# NOTE1: cnf Clauses must be 1-indexed.
# NOTE2: This method is pretty memory-inefficient for
# large clause sizes. Ideal for up to ~4Sat
def readCnf(path2file):
f=open(path2file,'r')
clauses = []
maxLength = 0
while True:
line = f.readline()
if not line: break
spline = line.split()
if spline[0]=='c':
continue
elif spline[0]=='p':
n = int(spline[2])
M = int(spline[3])
else:
for c in spline[:-1]:
if abs(int(c))>n or int(c)==0:
|
if spline[-1]!='0':
print("Error: clause descriptions must have a terminal 0")
break;
maxLength = max(maxLength, len(spline)-1)
clauses.append(map(int,spline[:-1]))
if len(clauses)!=M: print("Error: Need %d clauses"%M)
f.close()
const = 0
Bi = np.zeros(n)
Jall = [np.zeros([n]*i) for i in range(1,maxLength+1)]
for i in range(M):
clause=map(lambda x: abs(x)-1,clauses[i])
signs=map(np.sign,clauses[i])
K = len(clause)
for k in range(1, K+1):
for inds in it.combinations(range(K), k):
Jall[k-1][inds] += (1./(1<<K))*np.prod([signs[x] for x in inds])
const += 1/(1<<K)
return (Jall, (Bi,))
| print("Error: variable indices must lie between 0 and %d"%n)
break; | conditional_block |
publish.rs | //!
//! The Zargo package manager `publish` subcommand.
//!
use std::convert::TryFrom;
use std::path::PathBuf;
use std::str::FromStr;
use colored::Colorize;
use structopt::StructOpt;
use zksync::web3::types::H256;
use zksync_eth_signer::PrivateKeySigner;
use zksync_types::tx::PackedEthSignature;
use crate::error::Error;
use crate::executable::compiler::Compiler;
use crate::executable::virtual_machine::VirtualMachine;
use crate::http::downloader::Downloader;
use crate::http::Client as HttpClient;
use crate::network::Network;
use crate::project::data::input::Input as InputFile;
use crate::project::data::private_key::PrivateKey as PrivateKeyFile;
use crate::project::data::verifying_key::VerifyingKey as VerifyingKeyFile;
use crate::project::data::Directory as DataDirectory;
use crate::project::src::Directory as SourceDirectory;
use crate::project::target::bytecode::Bytecode as BytecodeFile;
use crate::project::target::deps::Directory as TargetDependenciesDirectory;
use crate::project::target::Directory as TargetDirectory;
///
/// The Zargo package manager `publish` subcommand.
///
#[derive(Debug, StructOpt)]
#[structopt(about = "Uploads the smart contract to the specified network")]
pub struct Command {
/// Prints more logs, if passed several times.
#[structopt(short = "v", long = "verbose", parse(from_occurrences))]
pub verbosity: usize,
/// Suppresses output, if set.
#[structopt(short = "q", long = "quiet")]
pub quiet: bool,
/// The path to the Zinc project manifest file.
#[structopt(
long = "manifest-path",
parse(from_os_str),
default_value = "./Zargo.toml"
)]
pub manifest_path: PathBuf,
/// Sets the contract instance name.
#[structopt(long = "instance")]
pub instance: String,
/// Sets the network name, where the contract must be published to.
#[structopt(long = "network", default_value = "localhost")]
pub network: String,
/// Sets the change-pubkey fee token.
#[structopt(long = "change-pubkey-fee-token", default_value = "ETH")]
pub change_pubkey_fee_token: String,
}
///
/// The publish data. Used for testing purposes.
///
pub struct Data {
/// The address of the published contract instance.
pub address: zksync_types::Address,
/// The account ID of the published contract instance.
pub account_id: zksync_types::AccountId,
}
impl Data {
///
/// A shortcut constructor.
///
pub fn new(address: zksync_types::Address, account_id: zksync_types::AccountId) -> Self {
Self {
address,
account_id,
}
}
}
impl Command {
///
/// A shortcut constructor.
///
pub fn new(
verbosity: usize,
quiet: bool,
manifest_path: PathBuf,
instance: String,
network: Option<String>,
change_pubkey_fee_token: Option<String>,
) -> Self {
Self {
verbosity,
quiet,
manifest_path,
instance,
network: network
.unwrap_or_else(|| Network::from(zksync::Network::Localhost).to_string()),
change_pubkey_fee_token: change_pubkey_fee_token.unwrap_or_else(|| "ETH".to_owned()),
}
}
///
/// Executes the command.
///
pub async fn execute(self) -> anyhow::Result<Data> {
let network = zksync::Network::from_str(self.network.as_str())
.map(Network::from)
.map_err(Error::NetworkInvalid)?;
let url = network
.try_into_url()
.map_err(Error::NetworkUnimplemented)?;
let http_client = HttpClient::new(url);
let manifest = zinc_project::Manifest::try_from(&self.manifest_path)?;
match manifest.project.r#type {
zinc_project::ProjectType::Contract => {}
_ => anyhow::bail!(Error::NotAContract),
}
let mut manifest_path = self.manifest_path;
if manifest_path.is_file() {
manifest_path.pop();
}
if let zinc_project::ProjectType::Contract = manifest.project.r#type {
if !PrivateKeyFile::exists_at(&manifest_path) {
PrivateKeyFile::default().write_to(&manifest_path)?;
}
}
let source_directory_path = SourceDirectory::path(&manifest_path);
let source =
zinc_project::Source::try_from_path(&source_directory_path, &manifest_path, true)?;
let project = zinc_project::Project::new(manifest.clone(), source);
DataDirectory::create(&manifest_path)?;
let data_directory_path = DataDirectory::path(&manifest_path);
let mut input_path = data_directory_path.clone();
input_path.push(format!(
"{}.{}",
zinc_const::file_name::INPUT,
zinc_const::extension::JSON,
));
let mut proving_key_path = data_directory_path.clone();
proving_key_path.push(zinc_const::file_name::PROVING_KEY);
let mut verifying_key_path = data_directory_path.clone();
verifying_key_path.push(zinc_const::file_name::VERIFYING_KEY.to_owned());
TargetDirectory::create(&manifest_path, true)?;
let target_directory_path = TargetDirectory::path(&manifest_path, true);
let mut binary_path = target_directory_path;
binary_path.push(format!(
"{}.{}",
zinc_const::file_name::BINARY,
zinc_const::extension::BINARY
));
TargetDependenciesDirectory::create(&manifest_path)?;
if let Some(dependencies) = manifest.dependencies {
let network = zksync::Network::from_str(self.network.as_str())
.map(Network::from)
.map_err(Error::NetworkInvalid)?;
let url = network
.try_into_url()
.map_err(Error::NetworkUnimplemented)?;
let http_client = HttpClient::new(url);
let mut downloader = Downloader::new(&http_client, &manifest_path);
downloader.download_dependency_list(dependencies).await?;
}
Compiler::build_release(
self.verbosity,
self.quiet,
manifest.project.name.as_str(),
&manifest.project.version,
&manifest_path,
false,
)?;
let bytecode = BytecodeFile::try_from_path(&binary_path, true)?;
let input = InputFile::try_from_path(&input_path)?;
let arguments = input
.inner
.as_object()
.ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))?
.get("arguments")
.cloned()
.ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))?
.as_object()
.ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))?
.get(zinc_const::contract::CONSTRUCTOR_IDENTIFIER)
.cloned()
.ok_or_else(|| {
Error::MissingInputSection(zinc_const::contract::CONSTRUCTOR_IDENTIFIER.to_owned())
})?;
if !verifying_key_path.exists() |
let verifying_key = VerifyingKeyFile::try_from(&verifying_key_path)?;
if !self.quiet {
eprintln!(
" {} the instance `{}` of `{} v{}` to network `{}`",
"Uploading".bright_green(),
self.instance,
manifest.project.name,
manifest.project.version,
network,
);
}
let response = http_client
.publish(
zinc_types::PublishRequestQuery::new(
manifest.project.name,
manifest.project.version,
self.instance,
self.change_pubkey_fee_token.clone(),
),
zinc_types::PublishRequestBody::new(
project,
bytecode.inner,
arguments,
verifying_key.inner,
),
)
.await?;
if !self.quiet {
eprintln!(
" {} {}",
"Address".bright_green(),
serde_json::to_string(&response.address)
.expect(zinc_const::panic::DATA_CONVERSION)
.replace("\"", "")
);
}
let private_key = PrivateKeyFile::try_from(&manifest_path)?;
let signer_private_key: H256 = private_key.inner.parse()?;
let signer_address = PackedEthSignature::address_from_private_key(&signer_private_key)?;
let wallet_credentials = zksync::WalletCredentials::from_eth_signer(
signer_address,
PrivateKeySigner::new(signer_private_key),
network.into(),
)
.await
.expect(zinc_const::panic::DATA_CONVERSION);
let wallet =
zksync::Wallet::new(zksync::RpcProvider::new(network.into()), wallet_credentials)
.await?;
let initial_transfer = crate::transaction::new_initial(
&wallet,
response.address,
self.change_pubkey_fee_token,
response.change_pubkey_fee,
)
.await?;
let address = response.address;
let response = http_client
.initialize(
zinc_types::InitializeRequestQuery::new(response.address),
zinc_types::InitializeRequestBody::new(initial_transfer),
)
.await?;
if !self.quiet {
eprintln!(" {} {}", "Account ID".bright_green(), response.account_id);
}
Ok(Data::new(address, response.account_id))
}
}
| {
VirtualMachine::setup_contract(
self.verbosity,
self.quiet,
&binary_path,
zinc_const::contract::CONSTRUCTOR_IDENTIFIER,
&proving_key_path,
&verifying_key_path,
)?;
} | conditional_block |
publish.rs | //!
//! The Zargo package manager `publish` subcommand.
//!
use std::convert::TryFrom;
use std::path::PathBuf;
use std::str::FromStr;
use colored::Colorize;
use structopt::StructOpt;
use zksync::web3::types::H256;
use zksync_eth_signer::PrivateKeySigner;
use zksync_types::tx::PackedEthSignature;
use crate::error::Error;
use crate::executable::compiler::Compiler;
use crate::executable::virtual_machine::VirtualMachine;
use crate::http::downloader::Downloader;
use crate::http::Client as HttpClient;
use crate::network::Network;
use crate::project::data::input::Input as InputFile;
use crate::project::data::private_key::PrivateKey as PrivateKeyFile;
use crate::project::data::verifying_key::VerifyingKey as VerifyingKeyFile;
use crate::project::data::Directory as DataDirectory;
use crate::project::src::Directory as SourceDirectory;
use crate::project::target::bytecode::Bytecode as BytecodeFile;
use crate::project::target::deps::Directory as TargetDependenciesDirectory;
use crate::project::target::Directory as TargetDirectory;
///
/// The Zargo package manager `publish` subcommand.
///
#[derive(Debug, StructOpt)]
#[structopt(about = "Uploads the smart contract to the specified network")]
pub struct Command {
/// Prints more logs, if passed several times.
#[structopt(short = "v", long = "verbose", parse(from_occurrences))]
pub verbosity: usize,
/// Suppresses output, if set.
#[structopt(short = "q", long = "quiet")]
pub quiet: bool,
/// The path to the Zinc project manifest file.
#[structopt(
long = "manifest-path",
parse(from_os_str),
default_value = "./Zargo.toml"
)]
pub manifest_path: PathBuf,
/// Sets the contract instance name.
#[structopt(long = "instance")]
pub instance: String,
/// Sets the network name, where the contract must be published to.
#[structopt(long = "network", default_value = "localhost")]
pub network: String,
/// Sets the change-pubkey fee token.
#[structopt(long = "change-pubkey-fee-token", default_value = "ETH")]
pub change_pubkey_fee_token: String,
}
///
/// The publish data. Used for testing purposes.
///
pub struct Data {
/// The address of the published contract instance.
pub address: zksync_types::Address,
/// The account ID of the published contract instance.
pub account_id: zksync_types::AccountId,
}
impl Data {
///
/// A shortcut constructor.
///
pub fn new(address: zksync_types::Address, account_id: zksync_types::AccountId) -> Self {
Self {
address,
account_id,
}
}
}
impl Command {
///
/// A shortcut constructor.
///
pub fn new(
verbosity: usize,
quiet: bool,
manifest_path: PathBuf,
instance: String,
network: Option<String>,
change_pubkey_fee_token: Option<String>,
) -> Self {
Self {
verbosity,
quiet,
manifest_path,
instance,
network: network
.unwrap_or_else(|| Network::from(zksync::Network::Localhost).to_string()),
change_pubkey_fee_token: change_pubkey_fee_token.unwrap_or_else(|| "ETH".to_owned()),
}
}
///
/// Executes the command.
///
pub async fn execute(self) -> anyhow::Result<Data> {
let network = zksync::Network::from_str(self.network.as_str())
.map(Network::from)
.map_err(Error::NetworkInvalid)?;
let url = network
.try_into_url()
.map_err(Error::NetworkUnimplemented)?;
let http_client = HttpClient::new(url);
let manifest = zinc_project::Manifest::try_from(&self.manifest_path)?;
match manifest.project.r#type {
zinc_project::ProjectType::Contract => {}
_ => anyhow::bail!(Error::NotAContract),
}
let mut manifest_path = self.manifest_path;
if manifest_path.is_file() {
manifest_path.pop();
}
if let zinc_project::ProjectType::Contract = manifest.project.r#type {
if !PrivateKeyFile::exists_at(&manifest_path) {
PrivateKeyFile::default().write_to(&manifest_path)?;
}
}
let source_directory_path = SourceDirectory::path(&manifest_path);
let source =
zinc_project::Source::try_from_path(&source_directory_path, &manifest_path, true)?;
let project = zinc_project::Project::new(manifest.clone(), source);
DataDirectory::create(&manifest_path)?;
let data_directory_path = DataDirectory::path(&manifest_path);
let mut input_path = data_directory_path.clone();
input_path.push(format!(
"{}.{}",
zinc_const::file_name::INPUT,
zinc_const::extension::JSON,
));
let mut proving_key_path = data_directory_path.clone();
proving_key_path.push(zinc_const::file_name::PROVING_KEY);
let mut verifying_key_path = data_directory_path.clone();
verifying_key_path.push(zinc_const::file_name::VERIFYING_KEY.to_owned());
TargetDirectory::create(&manifest_path, true)?;
let target_directory_path = TargetDirectory::path(&manifest_path, true);
let mut binary_path = target_directory_path;
binary_path.push(format!(
"{}.{}",
zinc_const::file_name::BINARY,
zinc_const::extension::BINARY
));
TargetDependenciesDirectory::create(&manifest_path)?;
if let Some(dependencies) = manifest.dependencies {
let network = zksync::Network::from_str(self.network.as_str())
.map(Network::from)
.map_err(Error::NetworkInvalid)?;
let url = network
.try_into_url()
.map_err(Error::NetworkUnimplemented)?;
let http_client = HttpClient::new(url);
let mut downloader = Downloader::new(&http_client, &manifest_path);
downloader.download_dependency_list(dependencies).await?;
}
Compiler::build_release(
self.verbosity,
self.quiet,
manifest.project.name.as_str(),
&manifest.project.version,
&manifest_path,
false,
)?;
let bytecode = BytecodeFile::try_from_path(&binary_path, true)?;
let input = InputFile::try_from_path(&input_path)?;
let arguments = input
.inner
.as_object()
.ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))?
.get("arguments")
.cloned()
.ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))?
.as_object()
.ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))?
.get(zinc_const::contract::CONSTRUCTOR_IDENTIFIER)
.cloned()
.ok_or_else(|| {
Error::MissingInputSection(zinc_const::contract::CONSTRUCTOR_IDENTIFIER.to_owned())
})?;
if !verifying_key_path.exists() {
VirtualMachine::setup_contract(
self.verbosity,
self.quiet,
&binary_path,
zinc_const::contract::CONSTRUCTOR_IDENTIFIER,
&proving_key_path,
&verifying_key_path,
)?;
}
let verifying_key = VerifyingKeyFile::try_from(&verifying_key_path)?;
if !self.quiet {
eprintln!(
" {} the instance `{}` of `{} v{}` to network `{}`",
"Uploading".bright_green(),
self.instance,
manifest.project.name,
manifest.project.version,
network,
);
}
let response = http_client
.publish(
zinc_types::PublishRequestQuery::new(
manifest.project.name,
manifest.project.version,
self.instance,
self.change_pubkey_fee_token.clone(),
),
zinc_types::PublishRequestBody::new(
project,
bytecode.inner,
arguments,
verifying_key.inner,
),
)
.await?;
if !self.quiet {
eprintln!(
" {} {}",
"Address".bright_green(),
serde_json::to_string(&response.address)
.expect(zinc_const::panic::DATA_CONVERSION)
.replace("\"", "")
);
}
let private_key = PrivateKeyFile::try_from(&manifest_path)?;
let signer_private_key: H256 = private_key.inner.parse()?;
let signer_address = PackedEthSignature::address_from_private_key(&signer_private_key)?;
let wallet_credentials = zksync::WalletCredentials::from_eth_signer(
signer_address,
PrivateKeySigner::new(signer_private_key),
network.into(),
)
.await
.expect(zinc_const::panic::DATA_CONVERSION); | let initial_transfer = crate::transaction::new_initial(
&wallet,
response.address,
self.change_pubkey_fee_token,
response.change_pubkey_fee,
)
.await?;
let address = response.address;
let response = http_client
.initialize(
zinc_types::InitializeRequestQuery::new(response.address),
zinc_types::InitializeRequestBody::new(initial_transfer),
)
.await?;
if !self.quiet {
eprintln!(" {} {}", "Account ID".bright_green(), response.account_id);
}
Ok(Data::new(address, response.account_id))
}
} | let wallet =
zksync::Wallet::new(zksync::RpcProvider::new(network.into()), wallet_credentials)
.await?;
| random_line_split |
publish.rs | //!
//! The Zargo package manager `publish` subcommand.
//!
use std::convert::TryFrom;
use std::path::PathBuf;
use std::str::FromStr;
use colored::Colorize;
use structopt::StructOpt;
use zksync::web3::types::H256;
use zksync_eth_signer::PrivateKeySigner;
use zksync_types::tx::PackedEthSignature;
use crate::error::Error;
use crate::executable::compiler::Compiler;
use crate::executable::virtual_machine::VirtualMachine;
use crate::http::downloader::Downloader;
use crate::http::Client as HttpClient;
use crate::network::Network;
use crate::project::data::input::Input as InputFile;
use crate::project::data::private_key::PrivateKey as PrivateKeyFile;
use crate::project::data::verifying_key::VerifyingKey as VerifyingKeyFile;
use crate::project::data::Directory as DataDirectory;
use crate::project::src::Directory as SourceDirectory;
use crate::project::target::bytecode::Bytecode as BytecodeFile;
use crate::project::target::deps::Directory as TargetDependenciesDirectory;
use crate::project::target::Directory as TargetDirectory;
///
/// The Zargo package manager `publish` subcommand.
///
#[derive(Debug, StructOpt)]
#[structopt(about = "Uploads the smart contract to the specified network")]
pub struct Command {
/// Prints more logs, if passed several times.
#[structopt(short = "v", long = "verbose", parse(from_occurrences))]
pub verbosity: usize,
/// Suppresses output, if set.
#[structopt(short = "q", long = "quiet")]
pub quiet: bool,
/// The path to the Zinc project manifest file.
#[structopt(
long = "manifest-path",
parse(from_os_str),
default_value = "./Zargo.toml"
)]
pub manifest_path: PathBuf,
/// Sets the contract instance name.
#[structopt(long = "instance")]
pub instance: String,
/// Sets the network name, where the contract must be published to.
#[structopt(long = "network", default_value = "localhost")]
pub network: String,
/// Sets the change-pubkey fee token.
#[structopt(long = "change-pubkey-fee-token", default_value = "ETH")]
pub change_pubkey_fee_token: String,
}
///
/// The publish data. Used for testing purposes.
///
pub struct Data {
/// The address of the published contract instance.
pub address: zksync_types::Address,
/// The account ID of the published contract instance.
pub account_id: zksync_types::AccountId,
}
impl Data {
///
/// A shortcut constructor.
///
pub fn new(address: zksync_types::Address, account_id: zksync_types::AccountId) -> Self |
}
impl Command {
///
/// A shortcut constructor.
///
pub fn new(
verbosity: usize,
quiet: bool,
manifest_path: PathBuf,
instance: String,
network: Option<String>,
change_pubkey_fee_token: Option<String>,
) -> Self {
Self {
verbosity,
quiet,
manifest_path,
instance,
network: network
.unwrap_or_else(|| Network::from(zksync::Network::Localhost).to_string()),
change_pubkey_fee_token: change_pubkey_fee_token.unwrap_or_else(|| "ETH".to_owned()),
}
}
///
/// Executes the command.
///
pub async fn execute(self) -> anyhow::Result<Data> {
let network = zksync::Network::from_str(self.network.as_str())
.map(Network::from)
.map_err(Error::NetworkInvalid)?;
let url = network
.try_into_url()
.map_err(Error::NetworkUnimplemented)?;
let http_client = HttpClient::new(url);
let manifest = zinc_project::Manifest::try_from(&self.manifest_path)?;
match manifest.project.r#type {
zinc_project::ProjectType::Contract => {}
_ => anyhow::bail!(Error::NotAContract),
}
let mut manifest_path = self.manifest_path;
if manifest_path.is_file() {
manifest_path.pop();
}
if let zinc_project::ProjectType::Contract = manifest.project.r#type {
if !PrivateKeyFile::exists_at(&manifest_path) {
PrivateKeyFile::default().write_to(&manifest_path)?;
}
}
let source_directory_path = SourceDirectory::path(&manifest_path);
let source =
zinc_project::Source::try_from_path(&source_directory_path, &manifest_path, true)?;
let project = zinc_project::Project::new(manifest.clone(), source);
DataDirectory::create(&manifest_path)?;
let data_directory_path = DataDirectory::path(&manifest_path);
let mut input_path = data_directory_path.clone();
input_path.push(format!(
"{}.{}",
zinc_const::file_name::INPUT,
zinc_const::extension::JSON,
));
let mut proving_key_path = data_directory_path.clone();
proving_key_path.push(zinc_const::file_name::PROVING_KEY);
let mut verifying_key_path = data_directory_path.clone();
verifying_key_path.push(zinc_const::file_name::VERIFYING_KEY.to_owned());
TargetDirectory::create(&manifest_path, true)?;
let target_directory_path = TargetDirectory::path(&manifest_path, true);
let mut binary_path = target_directory_path;
binary_path.push(format!(
"{}.{}",
zinc_const::file_name::BINARY,
zinc_const::extension::BINARY
));
TargetDependenciesDirectory::create(&manifest_path)?;
if let Some(dependencies) = manifest.dependencies {
let network = zksync::Network::from_str(self.network.as_str())
.map(Network::from)
.map_err(Error::NetworkInvalid)?;
let url = network
.try_into_url()
.map_err(Error::NetworkUnimplemented)?;
let http_client = HttpClient::new(url);
let mut downloader = Downloader::new(&http_client, &manifest_path);
downloader.download_dependency_list(dependencies).await?;
}
Compiler::build_release(
self.verbosity,
self.quiet,
manifest.project.name.as_str(),
&manifest.project.version,
&manifest_path,
false,
)?;
let bytecode = BytecodeFile::try_from_path(&binary_path, true)?;
let input = InputFile::try_from_path(&input_path)?;
let arguments = input
.inner
.as_object()
.ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))?
.get("arguments")
.cloned()
.ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))?
.as_object()
.ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))?
.get(zinc_const::contract::CONSTRUCTOR_IDENTIFIER)
.cloned()
.ok_or_else(|| {
Error::MissingInputSection(zinc_const::contract::CONSTRUCTOR_IDENTIFIER.to_owned())
})?;
if !verifying_key_path.exists() {
VirtualMachine::setup_contract(
self.verbosity,
self.quiet,
&binary_path,
zinc_const::contract::CONSTRUCTOR_IDENTIFIER,
&proving_key_path,
&verifying_key_path,
)?;
}
let verifying_key = VerifyingKeyFile::try_from(&verifying_key_path)?;
if !self.quiet {
eprintln!(
" {} the instance `{}` of `{} v{}` to network `{}`",
"Uploading".bright_green(),
self.instance,
manifest.project.name,
manifest.project.version,
network,
);
}
let response = http_client
.publish(
zinc_types::PublishRequestQuery::new(
manifest.project.name,
manifest.project.version,
self.instance,
self.change_pubkey_fee_token.clone(),
),
zinc_types::PublishRequestBody::new(
project,
bytecode.inner,
arguments,
verifying_key.inner,
),
)
.await?;
if !self.quiet {
eprintln!(
" {} {}",
"Address".bright_green(),
serde_json::to_string(&response.address)
.expect(zinc_const::panic::DATA_CONVERSION)
.replace("\"", "")
);
}
let private_key = PrivateKeyFile::try_from(&manifest_path)?;
let signer_private_key: H256 = private_key.inner.parse()?;
let signer_address = PackedEthSignature::address_from_private_key(&signer_private_key)?;
let wallet_credentials = zksync::WalletCredentials::from_eth_signer(
signer_address,
PrivateKeySigner::new(signer_private_key),
network.into(),
)
.await
.expect(zinc_const::panic::DATA_CONVERSION);
let wallet =
zksync::Wallet::new(zksync::RpcProvider::new(network.into()), wallet_credentials)
.await?;
let initial_transfer = crate::transaction::new_initial(
&wallet,
response.address,
self.change_pubkey_fee_token,
response.change_pubkey_fee,
)
.await?;
let address = response.address;
let response = http_client
.initialize(
zinc_types::InitializeRequestQuery::new(response.address),
zinc_types::InitializeRequestBody::new(initial_transfer),
)
.await?;
if !self.quiet {
eprintln!(" {} {}", "Account ID".bright_green(), response.account_id);
}
Ok(Data::new(address, response.account_id))
}
}
| {
Self {
address,
account_id,
}
} | identifier_body |
publish.rs | //!
//! The Zargo package manager `publish` subcommand.
//!
use std::convert::TryFrom;
use std::path::PathBuf;
use std::str::FromStr;
use colored::Colorize;
use structopt::StructOpt;
use zksync::web3::types::H256;
use zksync_eth_signer::PrivateKeySigner;
use zksync_types::tx::PackedEthSignature;
use crate::error::Error;
use crate::executable::compiler::Compiler;
use crate::executable::virtual_machine::VirtualMachine;
use crate::http::downloader::Downloader;
use crate::http::Client as HttpClient;
use crate::network::Network;
use crate::project::data::input::Input as InputFile;
use crate::project::data::private_key::PrivateKey as PrivateKeyFile;
use crate::project::data::verifying_key::VerifyingKey as VerifyingKeyFile;
use crate::project::data::Directory as DataDirectory;
use crate::project::src::Directory as SourceDirectory;
use crate::project::target::bytecode::Bytecode as BytecodeFile;
use crate::project::target::deps::Directory as TargetDependenciesDirectory;
use crate::project::target::Directory as TargetDirectory;
///
/// The Zargo package manager `publish` subcommand.
///
/// All fields are populated from command-line arguments by StructOpt.
///
#[derive(Debug, StructOpt)]
#[structopt(about = "Uploads the smart contract to the specified network")]
pub struct Command {
    /// Prints more logs, if passed several times.
    #[structopt(short = "v", long = "verbose", parse(from_occurrences))]
    pub verbosity: usize,
    /// Suppresses output, if set.
    #[structopt(short = "q", long = "quiet")]
    pub quiet: bool,
    /// The path to the Zinc project manifest file.
    #[structopt(
        long = "manifest-path",
        parse(from_os_str),
        default_value = "./Zargo.toml"
    )]
    pub manifest_path: PathBuf,
    /// Sets the contract instance name.
    #[structopt(long = "instance")]
    pub instance: String,
    /// Sets the network name, where the contract must be published to.
    #[structopt(long = "network", default_value = "localhost")]
    pub network: String,
    /// Sets the change-pubkey fee token.
    #[structopt(long = "change-pubkey-fee-token", default_value = "ETH")]
    pub change_pubkey_fee_token: String,
}
///
/// The publish data. Used for testing purposes.
///
pub struct | {
/// The address of the published contract instance.
pub address: zksync_types::Address,
/// The account ID of the published contract instance.
pub account_id: zksync_types::AccountId,
}
impl Data {
    ///
    /// A shortcut constructor for the publish result data.
    ///
    pub fn new(address: zksync_types::Address, account_id: zksync_types::AccountId) -> Self {
        Self { address, account_id }
    }
}
impl Command {
    ///
    /// A shortcut constructor.
    ///
    pub fn new(
        verbosity: usize,
        quiet: bool,
        manifest_path: PathBuf,
        instance: String,
        network: Option<String>,
        change_pubkey_fee_token: Option<String>,
    ) -> Self {
        Self {
            verbosity,
            quiet,
            manifest_path,
            instance,
            // Default to the localhost network when none is given.
            network: network
                .unwrap_or_else(|| Network::from(zksync::Network::Localhost).to_string()),
            // Default to paying the change-pubkey fee in ETH.
            change_pubkey_fee_token: change_pubkey_fee_token.unwrap_or_else(|| "ETH".to_owned()),
        }
    }

    ///
    /// Executes the command.
    ///
    /// Compiles the contract in release mode, generates the proving and
    /// verifying keys on the first run, uploads the project to the server,
    /// and initializes the published instance with an initial transfer.
    ///
    pub async fn execute(self) -> anyhow::Result<Data> {
        // Resolve the target network and the corresponding server URL.
        let network = zksync::Network::from_str(self.network.as_str())
            .map(Network::from)
            .map_err(Error::NetworkInvalid)?;
        let url = network
            .try_into_url()
            .map_err(Error::NetworkUnimplemented)?;
        let http_client = HttpClient::new(url);
        // Only contract projects can be published.
        let manifest = zinc_project::Manifest::try_from(&self.manifest_path)?;
        match manifest.project.r#type {
            zinc_project::ProjectType::Contract => {}
            _ => anyhow::bail!(Error::NotAContract),
        }
        // Normalize the manifest path to the project directory.
        let mut manifest_path = self.manifest_path;
        if manifest_path.is_file() {
            manifest_path.pop();
        }
        // Make sure a contract owner private key file exists.
        if let zinc_project::ProjectType::Contract = manifest.project.r#type {
            if !PrivateKeyFile::exists_at(&manifest_path) {
                PrivateKeyFile::default().write_to(&manifest_path)?;
            }
        }
        let source_directory_path = SourceDirectory::path(&manifest_path);
        let source =
            zinc_project::Source::try_from_path(&source_directory_path, &manifest_path, true)?;
        let project = zinc_project::Project::new(manifest.clone(), source);
        // Prepare the data directory paths (input, proving and verifying keys).
        DataDirectory::create(&manifest_path)?;
        let data_directory_path = DataDirectory::path(&manifest_path);
        let mut input_path = data_directory_path.clone();
        input_path.push(format!(
            "{}.{}",
            zinc_const::file_name::INPUT,
            zinc_const::extension::JSON,
        ));
        let mut proving_key_path = data_directory_path.clone();
        proving_key_path.push(zinc_const::file_name::PROVING_KEY);
        let mut verifying_key_path = data_directory_path.clone();
        verifying_key_path.push(zinc_const::file_name::VERIFYING_KEY.to_owned());
        TargetDirectory::create(&manifest_path, true)?;
        let target_directory_path = TargetDirectory::path(&manifest_path, true);
        let mut binary_path = target_directory_path;
        binary_path.push(format!(
            "{}.{}",
            zinc_const::file_name::BINARY,
            zinc_const::extension::BINARY
        ));
        // Download the project dependencies, if any are declared.
        TargetDependenciesDirectory::create(&manifest_path)?;
        if let Some(dependencies) = manifest.dependencies {
            let network = zksync::Network::from_str(self.network.as_str())
                .map(Network::from)
                .map_err(Error::NetworkInvalid)?;
            let url = network
                .try_into_url()
                .map_err(Error::NetworkUnimplemented)?;
            let http_client = HttpClient::new(url);
            let mut downloader = Downloader::new(&http_client, &manifest_path);
            downloader.download_dependency_list(dependencies).await?;
        }
        // Compile the project to bytecode in release mode.
        Compiler::build_release(
            self.verbosity,
            self.quiet,
            manifest.project.name.as_str(),
            &manifest.project.version,
            &manifest_path,
            false,
        )?;
        let bytecode = BytecodeFile::try_from_path(&binary_path, true)?;
        // Extract the constructor arguments from the input file.
        let input = InputFile::try_from_path(&input_path)?;
        let arguments = input
            .inner
            .as_object()
            .ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))?
            .get("arguments")
            .cloned()
            .ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))?
            .as_object()
            .ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))?
            .get(zinc_const::contract::CONSTRUCTOR_IDENTIFIER)
            .cloned()
            .ok_or_else(|| {
                Error::MissingInputSection(zinc_const::contract::CONSTRUCTOR_IDENTIFIER.to_owned())
            })?;
        // Generate the proving and verifying keys on the first run only.
        if !verifying_key_path.exists() {
            VirtualMachine::setup_contract(
                self.verbosity,
                self.quiet,
                &binary_path,
                zinc_const::contract::CONSTRUCTOR_IDENTIFIER,
                &proving_key_path,
                &verifying_key_path,
            )?;
        }
        let verifying_key = VerifyingKeyFile::try_from(&verifying_key_path)?;
        if !self.quiet {
            eprintln!(
                " {} the instance `{}` of `{} v{}` to network `{}`",
                "Uploading".bright_green(),
                self.instance,
                manifest.project.name,
                manifest.project.version,
                network,
            );
        }
        // Upload the project, bytecode, constructor arguments, and verifying key.
        let response = http_client
            .publish(
                zinc_types::PublishRequestQuery::new(
                    manifest.project.name,
                    manifest.project.version,
                    self.instance,
                    self.change_pubkey_fee_token.clone(),
                ),
                zinc_types::PublishRequestBody::new(
                    project,
                    bytecode.inner,
                    arguments,
                    verifying_key.inner,
                ),
            )
            .await?;
        if !self.quiet {
            eprintln!(
                " {} {}",
                "Address".bright_green(),
                serde_json::to_string(&response.address)
                    .expect(zinc_const::panic::DATA_CONVERSION)
                    .replace("\"", "")
            );
        }
        // Build a zkSync wallet from the contract owner's private key.
        let private_key = PrivateKeyFile::try_from(&manifest_path)?;
        let signer_private_key: H256 = private_key.inner.parse()?;
        let signer_address = PackedEthSignature::address_from_private_key(&signer_private_key)?;
        let wallet_credentials = zksync::WalletCredentials::from_eth_signer(
            signer_address,
            PrivateKeySigner::new(signer_private_key),
            network.into(),
        )
        .await
        .expect(zinc_const::panic::DATA_CONVERSION);
        let wallet =
            zksync::Wallet::new(zksync::RpcProvider::new(network.into()), wallet_credentials)
                .await?;
        // Send the initial transfer which covers the change-pubkey fee.
        let initial_transfer = crate::transaction::new_initial(
            &wallet,
            response.address,
            self.change_pubkey_fee_token,
            response.change_pubkey_fee,
        )
        .await?;
        let address = response.address;
        // Initialize the contract instance on the server.
        let response = http_client
            .initialize(
                zinc_types::InitializeRequestQuery::new(response.address),
                zinc_types::InitializeRequestBody::new(initial_transfer),
            )
            .await?;
        if !self.quiet {
            eprintln!(" {} {}", "Account ID".bright_green(), response.account_id);
        }
        Ok(Data::new(address, response.account_id))
    }
}
| Data | identifier_name |
process_release_data.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to download release files or upload new files in INCOMING folder as release files.
It can be run from the command-line.
Requires `githubrelease` package to be installed, installable with ``pip install githubrelease``.
Download SHA256 hashed files to DOWNLOAD folder::
python process_release_data.py download --hashalgo SHA256 --github-token 123123...123
Upload all hashes from INCOMING folder::
python process_release_data.py upload --github-token 123123...123
Show detailed help::
python process_release_data.py -h
"""
import os, sys
import logging
import github_release
from shutil import copyfile
# Column indices of a file-index row: [checksum, filename, filedate, local_filename].
COLUMN_CHECKSUM = 0
COLUMN_FILENAME = 1
COLUMN_FILEDATE = 2
COLUMN_LOCAL_FILENAME = 3
# Fallback modification date applied when an index row carries no date field.
DEFAULT_FILE_DATE_UTC_STRING="2020-01-01T12:00:00.0Z"
def get_hashcmd(hashalgo):
    """Return a function that computes the hex digest of a file's contents.

    :param hashalgo: hash algorithm name (MD5, SHA224, SHA256, SHA384, SHA512)
    :return: callable mapping a filename to its hex digest, or None if the
        algorithm name is not recognized.
    """
    import hashlib
    # Dispatch table replaces the original inconsistent if/elif chain.
    constructors = {
        "MD5": hashlib.md5,
        "SHA224": hashlib.sha224,
        "SHA256": hashlib.sha256,
        "SHA384": hashlib.sha384,
        "SHA512": hashlib.sha512,
    }
    constructor = constructors.get(hashalgo)
    if constructor is None:
        return None

    def hashcmd(filename):
        # Read in chunks so large files need not fit in memory, and close
        # the file handle deterministically (the original lambdas leaked it).
        digest = constructor()
        with open(filename, "rb") as f:
            for chunk in iter(lambda: f.read(65536), b""):
                digest.update(chunk)
        return digest.hexdigest()

    return hashcmd
class cd:
    """Context manager that temporarily switches the process working directory.

    On entry the current directory is remembered and the process moves into
    *newPath*; on exit the remembered directory is restored, whether or not
    the body raised.
    """

    def __init__(self, newPath):
        # Expand "~" so callers may pass home-relative paths.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember where we came from before switching.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Always restore the original working directory.
        os.chdir(self.savedPath)
def download_fileindex_csv(repo_name, download_dir, hashalgo, github_token=None):
    """Fetch the ``<hashalgo>.csv`` index asset into *download_dir*.

    Any stale local copy is removed first.  Raises ValueError when the asset
    cannot be downloaded; returns the path of the downloaded file.
    """
    if github_token:
        github_release._github_token_cli_arg = github_token
    csv_name = hashalgo + ".csv"
    fileindex_csv = os.path.join(download_dir, csv_name)
    # Drop any stale copy so a failed download cannot be mistaken for success.
    if os.path.isfile(fileindex_csv):
        os.remove(fileindex_csv)
    # gh_asset_download saves into the current working directory.
    with cd(download_dir):
        downloaded = github_release.gh_asset_download(repo_name, hashalgo, csv_name)
    if not downloaded:
        raise ValueError("Failed to download " + csv_name)
    return fileindex_csv
def read_fileindex_csv(hashalgo_csv):
    """Parse a ``<hashalgo>.csv`` file index.

    Each line is ``checksum;filename[;filedate]``.  A row without a date
    field is padded with one empty string so the date column always exists.

    :param hashalgo_csv: path of the CSV index file to read
    :return: list of field lists, one per line
    """
    fileindex = []
    # The index is written as UTF-8 bytes (see write_fileindex_csv), so read
    # it with an explicit encoding instead of the platform default, which
    # would mis-decode non-ASCII filenames on e.g. Windows.
    with open(hashalgo_csv, "r", encoding="utf-8") as f:
        for line in f:
            fields = line.rstrip().split(";")
            if len(fields) <= COLUMN_FILEDATE:
                fields.append("")  # if date is missing then add an empty field
            fileindex.append(fields)
    return fileindex
def write_fileindex_csv(hashalgo_csv, fileindex):
    """Write *fileindex* back to disk as ``;``-separated UTF-8 lines.

    Only the checksum, filename and (when present) filedate columns are
    persisted; any extra in-memory columns are dropped.
    """
    # Binary mode plus explicit UTF-8 keeps the output byte-identical across
    # platforms (no newline translation, no locale-dependent encoding).
    with open(hashalgo_csv, "wb") as f:
        for row in fileindex:
            # The slice keeps checksum and filename, plus the date if present.
            line = ";".join(row[COLUMN_CHECKSUM:COLUMN_FILEDATE + 1]) + "\n"
            f.write(line.encode("UTF-8"))
def write_fileindex_md(hashalgo_md, fileindex, repo_name, hashalgo, format=None, include_local_filename=False):
    """Write file index as markdown.

    format: list or table (defaults to list)
    include_local_filename: also emit the local (possibly checksum-suffixed)
        filename for each entry.

    Each entry links the original filename to its checksum-named release
    asset under the repository's ``releases/download/<hashalgo>/`` URL.
    """
    if format is None:
        format = "list"
    # Binary mode: all content is explicitly encoded to UTF-8 below.
    with open(hashalgo_md, "wb") as f:
        if format=="table":
            # Emit the table header; prepend the LocalFileName column on demand.
            header = []
            header.append("| FileName | FileDate | " + hashalgo + " |\n")
            header.append("|----------|----------|-------------|\n")
            if include_local_filename:
                header[0] = "| LocalFileName " + header[0]
                header[1] = "|---------------" + header[1]
            for header_line in header:
                f.write(bytes(header_line, "UTF-8"))
        for fileindex_item in fileindex:
            checksum = fileindex_item[COLUMN_CHECKSUM]
            filename = fileindex_item[COLUMN_FILENAME]
            # Older index rows may lack the date / local-name columns.
            filedate = fileindex_item[COLUMN_FILEDATE] if len(fileindex_item) > COLUMN_FILEDATE else ""
            local_filename = fileindex_item[COLUMN_LOCAL_FILENAME] if len(fileindex_item) > COLUMN_LOCAL_FILENAME else ""
            if format=="table":
                row = ""
                if include_local_filename:
                    row += "| " + local_filename + " "
                # The download link points at the checksum-named release asset.
                row += "| [" + filename + "](https://github.com/" + repo_name + "/releases/download/" + hashalgo + "/" + checksum + ") "
                row += "| " + filedate + " "
                row += "| " + checksum + " "
                f.write(bytes(row + "|\n", "UTF-8",))
            else:
                # List format: one bullet per file with detail sub-lines.
                f.write(bytes("- [" + filename + "](https://github.com/" + repo_name + "/releases/download/" + hashalgo + "/" + checksum + ")\n", "UTF-8",))
                if include_local_filename:
                    f.write(bytes(" - LocalFileName: " + local_filename + "\n", "UTF-8",))
                if filedate:
                    f.write(bytes(" - FileDate: " + filedate + "\n", "UTF-8",))
                f.write(bytes(" - " + hashalgo +": " + checksum + "\n", "UTF-8",))
def get_filedate(filepath):
    """Return the file's modification time as a timezone-aware UTC datetime.

    :param filepath: path of the file to inspect
    :return: aware ``datetime`` in UTC
    """
    import datetime
    # datetime.utcfromtimestamp() is deprecated since Python 3.12; passing an
    # explicit tz to fromtimestamp() yields the same aware UTC datetime.
    return datetime.datetime.fromtimestamp(os.path.getmtime(filepath), tz=datetime.timezone.utc)
def set_filedate(filepath, filedate):
    """Set the file's modification time to *filedate*, keeping its access time."""
    # os.utime needs both timestamps, so preserve the current access time.
    current_atime = os.stat(filepath).st_atime
    os.utime(filepath, (current_atime, filedate.timestamp()))
def date_to_utc_string(filedate):
    """Render an aware datetime as an ISO-8601 string (UTC offset included)."""
    iso_string = filedate.isoformat()
    return iso_string
def date_from_utc_string(filedate_utc_string):
    """Parse an ISO-8601 UTC string into an aware datetime.

    Only UTC inputs are accepted, spelled either with an explicit ``+00:00``
    offset or with the ``Z`` suffix; fractional seconds are required.
    """
    import datetime
    # Try the explicit-offset spelling first, then the "Z" suffix.
    formats = ("%Y-%m-%dT%H:%M:%S.%f+00:00", "%Y-%m-%dT%H:%M:%S.%fZ")
    last_error = None
    for fmt in formats:
        try:
            parsed = datetime.datetime.strptime(filedate_utc_string, fmt)
        except ValueError as err:
            last_error = err
            continue
        return parsed.replace(tzinfo=datetime.timezone.utc)
    raise last_error
def download(repo_name, root_dir, download_dir, hashalgo, github_token=None):
    """Download files associated with HASHALGO release into directory (root_dir)/(hashalgo).

    List of files is taken from (root_dir)/(hashalgo).csv. If multiple hashes associated with
    the same filename then the last entry will be used.

    :param repo_name: ``owner/repo`` GitHub repository name
    :param root_dir: directory holding the per-hashalgo asset cache
    :param download_dir: directory receiving the renamed, dated copies
    :param hashalgo: hash algorithm / release tag name
    :param github_token: optional GitHub personal access token
    """
    if github_token:
        github_release._github_token_cli_arg = github_token
    if not os.path.isdir(download_dir):
        os.mkdir(download_dir)
    # Cache directory holding the checksum-named release assets.
    hashalgo_dir = os.path.join(root_dir, hashalgo)
    if not os.path.isdir(hashalgo_dir):
        os.mkdir(hashalgo_dir)
    hashalgo_csv = download_fileindex_csv(
        repo_name, hashalgo_dir, hashalgo, github_token
    )
    fileindex = read_fileindex_csv(hashalgo_csv)
    logging.debug(hashalgo + ": downloading release assets")
    # Find out which filenames are present in multiple versions (need to give them unique names)
    filenames = [checksum_filename[1] for checksum_filename in fileindex]
    from collections import Counter
    # Sort based on filename and filedate
    fileindex.sort(key=lambda a: (a[COLUMN_FILENAME].casefold(), a[COLUMN_FILEDATE]))
    filenames_counter = Counter(filenames)
    # download saves files to current working directory, so we need to temporarily
    # change working dir to hashalgo_dir folder
    with cd(hashalgo_dir):
        fileindex_with_local_filename = []
        for fileindex_item in fileindex:
            checksum = fileindex_item[COLUMN_CHECKSUM]
            filename = fileindex_item[COLUMN_FILENAME]
            filedate = fileindex_item[COLUMN_FILEDATE] if len(fileindex_item) > COLUMN_FILEDATE else ""
            filepath = os.path.join(hashalgo_dir, checksum)
            # Assets are cached under their checksum; skip ones already on disk.
            if not os.path.isfile(filepath):
                if not github_release.gh_asset_download(repo_name, hashalgo, checksum):
                    logging.error(
                        hashalgo
                        + ": failed to download "
                        + filename
                        + " ("
                        + checksum
                        + ")"
                    )
                    continue
                logging.debug(
                    hashalgo + ": downloaded " + filename + " (" + checksum + ")"
                )
            # determine local filename
            if filenames_counter[filename] == 1:
                # unique filename
                local_filename = filename
            else:
                # multiple versions of the filename with different content
                # add checksum as suffix to distinguish them
                local_filename = filename + "." + checksum
            local_filepath = os.path.join(download_dir, local_filename)
            # set file name and date from index
            copyfile(filepath, local_filepath)
            set_filedate(local_filepath, date_from_utc_string(filedate if filedate else DEFAULT_FILE_DATE_UTC_STRING))
            # save local fileindex
            fileindex_with_local_filename.append([checksum, filename, filedate, local_filename])
    # Create new hashalgo.csv from existing and incoming files
    write_fileindex_csv(hashalgo_csv, fileindex)
    hashalgo_local_md = os.path.join(download_dir, hashalgo + "_local.md")
    write_fileindex_md(hashalgo_local_md, fileindex_with_local_filename, repo_name, hashalgo, include_local_filename=True)
def upload(repo_name, root_dir, incoming_dir, hashalgo, github_token=None):
"""Upload incoming files associated them with hashalgo release."""
if github_token:
github_release._github_token_cli_arg = github_token
hashcmd = get_hashcmd(hashalgo)
if not hashcmd:
raise ValueError('hashalgo "' + hashalgo + '" not found')
if not os.path.isdir(incoming_dir):
raise ValueError("Missing " + incoming_dir + " directory")
hashalgo_dir = os.path.join(root_dir, hashalgo)
if not os.path.isdir(hashalgo_dir):
os.mkdir(hashalgo_dir)
# Download information about current release
# Get current fileindex
try:
hashalgo_csv = download_fileindex_csv(
repo_name, hashalgo_dir, hashalgo, github_token
)
fileindex = read_fileindex_csv(hashalgo_csv)
except ValueError:
# New release
hashalgo_csv = os.path.join(hashalgo_dir, hashalgo + ".csv")
fileindex = []
# Get list of successfully uploaded assets (to avoid uploading them again)
# and delete partially uploaded ones.
uploaded_assets = (
github_release.get_assets(repo_name, hashalgo) if fileindex else []
)
uploaded_hashes = []
for asset in uploaded_assets:
if asset["state"] == "uploaded":
uploaded_hashes.append(asset["name"])
else:
# Remove asset partially uploaded
github_release.gh_asset_delete(repo_name, hashalgo, asset["name"])
# Update release information with incoming data
# Add incoming files to fileindex and hashalgo_dir
filenames = [
f
for f in os.listdir(incoming_dir)
if os.path.isfile(os.path.join(incoming_dir, f)) and not f.startswith(".")
]
for filename in filenames:
filepath = os.path.join(incoming_dir, filename)
checksum = hashcmd(filepath)
filedate = date_to_utc_string(get_filedate(filepath))
existingItems = [fileindex_item for fileindex_item in fileindex
if fileindex_item[COLUMN_CHECKSUM] == checksum and fileindex_item[COLUMN_FILENAME] == filename]
if not existingItems:
# new item
fileindex.append([checksum, filename, filedate])
# Make sure the hash-named file is present
hashfilepath = os.path.join(hashalgo_dir, checksum)
if not os.path.isfile(hashfilepath):
copyfile(filepath, hashfilepath)
# Create new hashalgo.csv from existing and incoming files | # Upload updated releaes info and new data files
# Create hashalgo release (in case it does not exist)
github_release.gh_release_create(repo_name, hashalgo, publish=True)
# Delete old hashalgo.csv and hashalgo.md
github_release.gh_asset_delete(repo_name, hashalgo, hashalgo + ".csv")
github_release.gh_asset_delete(repo_name, hashalgo, hashalgo + ".md")
# Upload new hashalgo.csv and hashalgo.md
github_release.gh_asset_upload(repo_name, hashalgo, hashalgo_csv)
github_release.gh_asset_upload(repo_name, hashalgo, hashalgo_md)
# Upload new data files
for fileindex_item in fileindex:
checksum = fileindex_item[COLUMN_CHECKSUM]
filename = fileindex_item[COLUMN_FILENAME]
if checksum in uploaded_hashes:
# already uploaded
continue
filepath = os.path.join(hashalgo_dir, checksum)
github_release.gh_asset_upload(repo_name, hashalgo, filepath)
# Copy md file content into release notes
with open(hashalgo_md, "r") as file:
release_notes = file.read()
if len(release_notes) > 125000:
note = "Since the release description is > 125000 characters, the corresponding markdown file is instead pushed into the repository."
release_notes = f"See [{hashalgo}.md](https://github.com/{repo_name}/blob/main/{hashalgo}/{hashalgo}.md)\n\n_{note}_"
logging.warning(f"{hashalgo}: {note}")
github_release.gh_release_edit(repo_name, hashalgo, body=release_notes)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Downloads release files or uploads new files in INCOMING folder as release assets."
)
parser.add_argument(
"operation",
help="operation to perform. Valid values: download, upload. Upload adds all files in INCOMING folder. Download gets all files in the .csv index to (hash-algo)-DOWNLOAD folder.",
)
parser.add_argument(
"--hash-algo",
help="hashing algorithm name. If not specified then SHA256 is used. Valid values: MD5, SHA256, SHA224, SHA384, SHA512.",
)
parser.add_argument(
"--github-token",
help="github personal access token. If not specified here then it must be set in GITHUB_TOKEN environment variable.",
)
parser.add_argument(
"--github-repo",
help="github repository (default: Slicer/SlicerTestingData)",
default="Slicer/SlicerTestingData",
)
args = parser.parse_args()
repo_name = args.github_repo
github_token = args.github_token
operation = args.operation
root_dir = os.path.dirname(os.path.realpath(__file__))
if operation == "download":
hashalgo = args.hash_algo if args.hash_algo else "SHA256"
download_dir = os.path.join(root_dir, hashalgo + "-DOWNLOAD")
download(repo_name, root_dir, download_dir, hashalgo, github_token)
elif operation == "upload":
incoming_dir = os.path.join(root_dir, "INCOMING")
hashalgos = [args.hash_algo] if args.hash_algo else ["SHA256"]
for hashalgo in hashalgos:
logging.info("Uploading " + hashalgo)
upload(repo_name, root_dir, incoming_dir, hashalgo, github_token)
else:
parser.print_help()
exit(1) | fileindex.sort(key=lambda a: (a[COLUMN_FILENAME].casefold(), a[COLUMN_FILEDATE]))
write_fileindex_csv(hashalgo_csv, fileindex)
hashalgo_md = os.path.join(root_dir, hashalgo_dir, hashalgo + ".md")
write_fileindex_md(hashalgo_md, fileindex, repo_name, hashalgo)
| random_line_split |
process_release_data.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to download release files or upload new files in INCOMING folder as release files.
It can be run from the command-line.
Requires `githubrelease` package to be installed, installable with ``pip install githubrelease``.
Download SHA256 hashed files to DOWNLOAD folder::
python process_release_data.py download --hashalgo SHA256 --github-token 123123...123
Upload all hashes from INCOMING folder::
python process_release_data.py upload --github-token 123123...123
Show detailed help::
python process_release_data.py -h
"""
import os, sys
import logging
import github_release
from shutil import copyfile
COLUMN_CHECKSUM = 0
COLUMN_FILENAME = 1
COLUMN_FILEDATE = 2
COLUMN_LOCAL_FILENAME = 3
DEFAULT_FILE_DATE_UTC_STRING="2020-01-01T12:00:00.0Z"
def get_hashcmd(hashalgo):
"""Get function that can compute hash for a filename"""
import hashlib
if hashalgo == "MD5":
return lambda filename: hashlib.md5(open(filename, "rb").read()).hexdigest()
elif hashalgo == "SHA224":
return lambda filename: hashlib.sha224(open(filename, "rb").read()).hexdigest()
if hashalgo == "SHA256":
return lambda filename: hashlib.sha256(open(filename, "rb").read()).hexdigest()
if hashalgo == "SHA384":
return lambda filename: hashlib.sha384(open(filename, "rb").read()).hexdigest()
if hashalgo == "SHA512":
return lambda filename: hashlib.sha512(open(filename, "rb").read()).hexdigest()
else:
return None
class | :
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def download_fileindex_csv(repo_name, download_dir, hashalgo, github_token=None):
if github_token:
github_release._github_token_cli_arg = github_token
fileindex_csv = os.path.join(download_dir, hashalgo + ".csv")
if os.path.isfile(fileindex_csv):
os.remove(fileindex_csv)
with cd(download_dir):
if not github_release.gh_asset_download(repo_name, hashalgo, hashalgo + ".csv"):
raise ValueError("Failed to download " + hashalgo + ".csv")
return fileindex_csv
def read_fileindex_csv(hashalgo_csv):
fileindex = []
with open(hashalgo_csv, "r") as f:
for line in f:
fields = line.rstrip().split(";")
if len(fields) <= COLUMN_FILEDATE:
fields.append("") # if date is missing then add an empty field
fileindex.append(fields)
return fileindex
def write_fileindex_csv(hashalgo_csv, fileindex):
with open(hashalgo_csv, "wb") as f:
for fileindex_item in fileindex:
fields = [fileindex_item[COLUMN_CHECKSUM], fileindex_item[COLUMN_FILENAME]]
if len(fileindex_item) > COLUMN_FILEDATE:
fields.append(fileindex_item[COLUMN_FILEDATE])
f.write(bytes(";".join(fields) + "\n", "UTF-8"))
def write_fileindex_md(hashalgo_md, fileindex, repo_name, hashalgo, format=None, include_local_filename=False):
"""Write file index as markdown.
format: list or table
"""
if format is None:
format = "list"
with open(hashalgo_md, "wb") as f:
if format=="table":
header = []
header.append("| FileName | FileDate | " + hashalgo + " |\n")
header.append("|----------|----------|-------------|\n")
if include_local_filename:
header[0] = "| LocalFileName " + header[0]
header[1] = "|---------------" + header[1]
for header_line in header:
f.write(bytes(header_line, "UTF-8"))
for fileindex_item in fileindex:
checksum = fileindex_item[COLUMN_CHECKSUM]
filename = fileindex_item[COLUMN_FILENAME]
filedate = fileindex_item[COLUMN_FILEDATE] if len(fileindex_item) > COLUMN_FILEDATE else ""
local_filename = fileindex_item[COLUMN_LOCAL_FILENAME] if len(fileindex_item) > COLUMN_LOCAL_FILENAME else ""
if format=="table":
row = ""
if include_local_filename:
row += "| " + local_filename + " "
row += "| [" + filename + "](https://github.com/" + repo_name + "/releases/download/" + hashalgo + "/" + checksum + ") "
row += "| " + filedate + " "
row += "| " + checksum + " "
f.write(bytes(row + "|\n", "UTF-8",))
else:
f.write(bytes("- [" + filename + "](https://github.com/" + repo_name + "/releases/download/" + hashalgo + "/" + checksum + ")\n", "UTF-8",))
if include_local_filename:
f.write(bytes(" - LocalFileName: " + local_filename + "\n", "UTF-8",))
if filedate:
f.write(bytes(" - FileDate: " + filedate + "\n", "UTF-8",))
f.write(bytes(" - " + hashalgo +": " + checksum + "\n", "UTF-8",))
def get_filedate(filepath):
# Return
import datetime
return datetime.datetime.utcfromtimestamp(os.path.getmtime(filepath)).replace(tzinfo=datetime.timezone.utc)
def set_filedate(filepath, filedate):
stat = os.stat(filepath)
atime = stat.st_atime
os.utime(filepath, (atime, filedate.timestamp()))
def date_to_utc_string(filedate):
"""Convert date object to string in UTC time zone"""
return filedate.isoformat()
def date_from_utc_string(filedate_utc_string):
"""Convert string in UTC time zone to date object"""
# We only accept date in UTC (indicated by +00:00 or Z suffix)
import datetime
try:
date_object = datetime.datetime.strptime(filedate_utc_string, "%Y-%m-%dT%H:%M:%S.%f+00:00").replace(tzinfo=datetime.timezone.utc)
except ValueError:
date_object = datetime.datetime.strptime(filedate_utc_string, "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=datetime.timezone.utc)
return date_object
def download(repo_name, root_dir, download_dir, hashalgo, github_token=None):
"""Download files associated with HASHALGO release into directory (root_dir)/(hashalgo).
List of files is taken from (root_dir)/(hashalgo).csv. If multiple hashes associated with
the same filename then the last entry will be used.
"""
if github_token:
github_release._github_token_cli_arg = github_token
if not os.path.isdir(download_dir):
os.mkdir(download_dir)
hashalgo_dir = os.path.join(root_dir, hashalgo)
if not os.path.isdir(hashalgo_dir):
os.mkdir(hashalgo_dir)
hashalgo_csv = download_fileindex_csv(
repo_name, hashalgo_dir, hashalgo, github_token
)
fileindex = read_fileindex_csv(hashalgo_csv)
logging.debug(hashalgo + ": downloading release assets")
# Find out which filenames are present in multiple versions (need to give them unique names)
filenames = [checksum_filename[1] for checksum_filename in fileindex]
from collections import Counter
# Sort based on filename and filedate
fileindex.sort(key=lambda a: (a[COLUMN_FILENAME].casefold(), a[COLUMN_FILEDATE]))
filenames_counter = Counter(filenames)
# download saves files to current working directory, so we need to temporarily
# change working dir to hashalgo_dir folder
with cd(hashalgo_dir):
fileindex_with_local_filename = []
for fileindex_item in fileindex:
checksum = fileindex_item[COLUMN_CHECKSUM]
filename = fileindex_item[COLUMN_FILENAME]
filedate = fileindex_item[COLUMN_FILEDATE] if len(fileindex_item) > COLUMN_FILEDATE else ""
filepath = os.path.join(hashalgo_dir, checksum)
if not os.path.isfile(filepath):
if not github_release.gh_asset_download(repo_name, hashalgo, checksum):
logging.error(
hashalgo
+ ": failed to download "
+ filename
+ " ("
+ checksum
+ ")"
)
continue
logging.debug(
hashalgo + ": downloaded " + filename + " (" + checksum + ")"
)
# determine local filename
if filenames_counter[filename] == 1:
# unique filename
local_filename = filename
else:
# multiple versions of the filename with different content
# add checksum as suffix to distinguish them
local_filename = filename + "." + checksum
local_filepath = os.path.join(download_dir, local_filename)
# set file name and date from index
copyfile(filepath, local_filepath)
set_filedate(local_filepath, date_from_utc_string(filedate if filedate else DEFAULT_FILE_DATE_UTC_STRING))
# save local fileindex
fileindex_with_local_filename.append([checksum, filename, filedate, local_filename])
# Create new hashalgo.csv from existing and incoming files
write_fileindex_csv(hashalgo_csv, fileindex)
hashalgo_local_md = os.path.join(download_dir, hashalgo + "_local.md")
write_fileindex_md(hashalgo_local_md, fileindex_with_local_filename, repo_name, hashalgo, include_local_filename=True)
def upload(repo_name, root_dir, incoming_dir, hashalgo, github_token=None):
    """Upload files from *incoming_dir*, associating them with the hashalgo release.

    The release's CSV index is downloaded (or created for a new release),
    extended with the incoming files, and re-uploaded together with the
    markdown index and any data files not yet present as release assets.
    """
    if github_token:
        github_release._github_token_cli_arg = github_token
    hashcmd = get_hashcmd(hashalgo)
    if not hashcmd:
        raise ValueError('hashalgo "' + hashalgo + '" not found')
    if not os.path.isdir(incoming_dir):
        raise ValueError("Missing " + incoming_dir + " directory")
    hashalgo_dir = os.path.join(root_dir, hashalgo)
    if not os.path.isdir(hashalgo_dir):
        os.mkdir(hashalgo_dir)
    # Download information about current release
    # Get current fileindex
    try:
        hashalgo_csv = download_fileindex_csv(
            repo_name, hashalgo_dir, hashalgo, github_token
        )
        fileindex = read_fileindex_csv(hashalgo_csv)
    except ValueError:
        # New release: no index asset exists yet, start from an empty one.
        hashalgo_csv = os.path.join(hashalgo_dir, hashalgo + ".csv")
        fileindex = []
    # Get list of successfully uploaded assets (to avoid uploading them again)
    # and delete partially uploaded ones.
    uploaded_assets = (
        github_release.get_assets(repo_name, hashalgo) if fileindex else []
    )
    uploaded_hashes = []
    for asset in uploaded_assets:
        if asset["state"] == "uploaded":
            uploaded_hashes.append(asset["name"])
        else:
            # Remove asset partially uploaded
            github_release.gh_asset_delete(repo_name, hashalgo, asset["name"])
    # Update release information with incoming data
    # Add incoming files to fileindex and hashalgo_dir (hidden files skipped)
    filenames = [
        f
        for f in os.listdir(incoming_dir)
        if os.path.isfile(os.path.join(incoming_dir, f)) and not f.startswith(".")
    ]
    for filename in filenames:
        filepath = os.path.join(incoming_dir, filename)
        checksum = hashcmd(filepath)
        filedate = date_to_utc_string(get_filedate(filepath))
        # Identical (checksum, filename) pairs are already indexed; skip them.
        existingItems = [fileindex_item for fileindex_item in fileindex
            if fileindex_item[COLUMN_CHECKSUM] == checksum and fileindex_item[COLUMN_FILENAME] == filename]
        if not existingItems:
            # new item
            fileindex.append([checksum, filename, filedate])
        # Make sure the hash-named file is present
        hashfilepath = os.path.join(hashalgo_dir, checksum)
        if not os.path.isfile(hashfilepath):
            copyfile(filepath, hashfilepath)
    # Create new hashalgo.csv from existing and incoming files
    fileindex.sort(key=lambda a: (a[COLUMN_FILENAME].casefold(), a[COLUMN_FILEDATE]))
    write_fileindex_csv(hashalgo_csv, fileindex)
    # NOTE(review): hashalgo_dir is already an absolute path, so the root_dir
    # argument below is ignored by os.path.join -- confirm this is intended.
    hashalgo_md = os.path.join(root_dir, hashalgo_dir, hashalgo + ".md")
    write_fileindex_md(hashalgo_md, fileindex, repo_name, hashalgo)
    # Upload updated release info and new data files
    # Create hashalgo release (in case it does not exist)
    github_release.gh_release_create(repo_name, hashalgo, publish=True)
    # Delete old hashalgo.csv and hashalgo.md
    github_release.gh_asset_delete(repo_name, hashalgo, hashalgo + ".csv")
    github_release.gh_asset_delete(repo_name, hashalgo, hashalgo + ".md")
    # Upload new hashalgo.csv and hashalgo.md
    github_release.gh_asset_upload(repo_name, hashalgo, hashalgo_csv)
    github_release.gh_asset_upload(repo_name, hashalgo, hashalgo_md)
    # Upload new data files
    for fileindex_item in fileindex:
        checksum = fileindex_item[COLUMN_CHECKSUM]
        filename = fileindex_item[COLUMN_FILENAME]
        if checksum in uploaded_hashes:
            # already uploaded
            continue
        filepath = os.path.join(hashalgo_dir, checksum)
        github_release.gh_asset_upload(repo_name, hashalgo, filepath)
    # Copy md file content into release notes
    with open(hashalgo_md, "r") as file:
        release_notes = file.read()
    if len(release_notes) > 125000:
        note = "Since the release description is > 125000 characters, the corresponding markdown file is instead pushed into the repository."
        release_notes = f"See [{hashalgo}.md](https://github.com/{repo_name}/blob/main/{hashalgo}/{hashalgo}.md)\n\n_{note}_"
        logging.warning(f"{hashalgo}: {note}")
    github_release.gh_release_edit(repo_name, hashalgo, body=release_notes)
if __name__ == "__main__":
    # Command-line entry point: parse arguments and dispatch to download/upload.
    import argparse
    parser = argparse.ArgumentParser(
        description="Downloads release files or uploads new files in INCOMING folder as release assets."
    )
    parser.add_argument(
        "operation",
        help="operation to perform. Valid values: download, upload. Upload adds all files in INCOMING folder. Download gets all files in the .csv index to (hash-algo)-DOWNLOAD folder.",
    )
    parser.add_argument(
        "--hash-algo",
        help="hashing algorithm name. If not specified then SHA256 is used. Valid values: MD5, SHA256, SHA224, SHA384, SHA512.",
    )
    parser.add_argument(
        "--github-token",
        help="github personal access token. If not specified here then it must be set in GITHUB_TOKEN environment variable.",
    )
    parser.add_argument(
        "--github-repo",
        help="github repository (default: Slicer/SlicerTestingData)",
        default="Slicer/SlicerTestingData",
    )
    args = parser.parse_args()
    repo_name = args.github_repo
    github_token = args.github_token
    operation = args.operation
    # All data directories live next to this script.
    root_dir = os.path.dirname(os.path.realpath(__file__))
    if operation == "download":
        hashalgo = args.hash_algo if args.hash_algo else "SHA256"
        download_dir = os.path.join(root_dir, hashalgo + "-DOWNLOAD")
        download(repo_name, root_dir, download_dir, hashalgo, github_token)
    elif operation == "upload":
        incoming_dir = os.path.join(root_dir, "INCOMING")
        hashalgos = [args.hash_algo] if args.hash_algo else ["SHA256"]
        for hashalgo in hashalgos:
            logging.info("Uploading " + hashalgo)
            upload(repo_name, root_dir, incoming_dir, hashalgo, github_token)
    else:
        # Unknown operation: show usage and exit with a failure status.
        parser.print_help()
        exit(1)
| cd | identifier_name |
process_release_data.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to download release files or upload new files in INCOMING folder as release files.
It can be run from the command-line.
Requires `githubrelease` package to be installed, installable with ``pip install githubrelease``.
Download SHA256 hashed files to DOWNLOAD folder::
python process_release_data.py download --hashalgo SHA256 --github-token 123123...123
Upload all hashes from INCOMING folder::
python process_release_data.py upload --github-token 123123...123
Show detailed help::
python process_release_data.py -h
"""
import os, sys
import logging
import github_release
from shutil import copyfile
COLUMN_CHECKSUM = 0
COLUMN_FILENAME = 1
COLUMN_FILEDATE = 2
COLUMN_LOCAL_FILENAME = 3
DEFAULT_FILE_DATE_UTC_STRING="2020-01-01T12:00:00.0Z"
def get_hashcmd(hashalgo):
"""Get function that can compute hash for a filename"""
import hashlib
if hashalgo == "MD5":
return lambda filename: hashlib.md5(open(filename, "rb").read()).hexdigest()
elif hashalgo == "SHA224":
return lambda filename: hashlib.sha224(open(filename, "rb").read()).hexdigest()
if hashalgo == "SHA256":
return lambda filename: hashlib.sha256(open(filename, "rb").read()).hexdigest()
if hashalgo == "SHA384":
return lambda filename: hashlib.sha384(open(filename, "rb").read()).hexdigest()
if hashalgo == "SHA512":
return lambda filename: hashlib.sha512(open(filename, "rb").read()).hexdigest()
else:
return None
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def download_fileindex_csv(repo_name, download_dir, hashalgo, github_token=None):
if github_token:
github_release._github_token_cli_arg = github_token
fileindex_csv = os.path.join(download_dir, hashalgo + ".csv")
if os.path.isfile(fileindex_csv):
os.remove(fileindex_csv)
with cd(download_dir):
if not github_release.gh_asset_download(repo_name, hashalgo, hashalgo + ".csv"):
raise ValueError("Failed to download " + hashalgo + ".csv")
return fileindex_csv
def read_fileindex_csv(hashalgo_csv):
fileindex = []
with open(hashalgo_csv, "r") as f:
for line in f:
fields = line.rstrip().split(";")
if len(fields) <= COLUMN_FILEDATE:
fields.append("") # if date is missing then add an empty field
fileindex.append(fields)
return fileindex
def write_fileindex_csv(hashalgo_csv, fileindex):
with open(hashalgo_csv, "wb") as f:
for fileindex_item in fileindex:
fields = [fileindex_item[COLUMN_CHECKSUM], fileindex_item[COLUMN_FILENAME]]
if len(fileindex_item) > COLUMN_FILEDATE:
fields.append(fileindex_item[COLUMN_FILEDATE])
f.write(bytes(";".join(fields) + "\n", "UTF-8"))
def write_fileindex_md(hashalgo_md, fileindex, repo_name, hashalgo, format=None, include_local_filename=False):
"""Write file index as markdown.
format: list or table
"""
if format is None:
format = "list"
with open(hashalgo_md, "wb") as f:
if format=="table":
header = []
header.append("| FileName | FileDate | " + hashalgo + " |\n")
header.append("|----------|----------|-------------|\n")
if include_local_filename:
header[0] = "| LocalFileName " + header[0]
header[1] = "|---------------" + header[1]
for header_line in header:
f.write(bytes(header_line, "UTF-8"))
for fileindex_item in fileindex:
checksum = fileindex_item[COLUMN_CHECKSUM]
filename = fileindex_item[COLUMN_FILENAME]
filedate = fileindex_item[COLUMN_FILEDATE] if len(fileindex_item) > COLUMN_FILEDATE else ""
local_filename = fileindex_item[COLUMN_LOCAL_FILENAME] if len(fileindex_item) > COLUMN_LOCAL_FILENAME else ""
if format=="table":
row = ""
if include_local_filename:
row += "| " + local_filename + " "
row += "| [" + filename + "](https://github.com/" + repo_name + "/releases/download/" + hashalgo + "/" + checksum + ") "
row += "| " + filedate + " "
row += "| " + checksum + " "
f.write(bytes(row + "|\n", "UTF-8",))
else:
f.write(bytes("- [" + filename + "](https://github.com/" + repo_name + "/releases/download/" + hashalgo + "/" + checksum + ")\n", "UTF-8",))
if include_local_filename:
f.write(bytes(" - LocalFileName: " + local_filename + "\n", "UTF-8",))
if filedate:
f.write(bytes(" - FileDate: " + filedate + "\n", "UTF-8",))
f.write(bytes(" - " + hashalgo +": " + checksum + "\n", "UTF-8",))
def get_filedate(filepath):
# Return
import datetime
return datetime.datetime.utcfromtimestamp(os.path.getmtime(filepath)).replace(tzinfo=datetime.timezone.utc)
def set_filedate(filepath, filedate):
stat = os.stat(filepath)
atime = stat.st_atime
os.utime(filepath, (atime, filedate.timestamp()))
def date_to_utc_string(filedate):
"""Convert date object to string in UTC time zone"""
return filedate.isoformat()
def date_from_utc_string(filedate_utc_string):
"""Convert string in UTC time zone to date object"""
# We only accept date in UTC (indicated by +00:00 or Z suffix)
import datetime
try:
date_object = datetime.datetime.strptime(filedate_utc_string, "%Y-%m-%dT%H:%M:%S.%f+00:00").replace(tzinfo=datetime.timezone.utc)
except ValueError:
date_object = datetime.datetime.strptime(filedate_utc_string, "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=datetime.timezone.utc)
return date_object
def download(repo_name, root_dir, download_dir, hashalgo, github_token=None):
|
def upload(repo_name, root_dir, incoming_dir, hashalgo, github_token=None):
"""Upload incoming files associated them with hashalgo release."""
if github_token:
github_release._github_token_cli_arg = github_token
hashcmd = get_hashcmd(hashalgo)
if not hashcmd:
raise ValueError('hashalgo "' + hashalgo + '" not found')
if not os.path.isdir(incoming_dir):
raise ValueError("Missing " + incoming_dir + " directory")
hashalgo_dir = os.path.join(root_dir, hashalgo)
if not os.path.isdir(hashalgo_dir):
os.mkdir(hashalgo_dir)
# Download information about current release
# Get current fileindex
try:
hashalgo_csv = download_fileindex_csv(
repo_name, hashalgo_dir, hashalgo, github_token
)
fileindex = read_fileindex_csv(hashalgo_csv)
except ValueError:
# New release
hashalgo_csv = os.path.join(hashalgo_dir, hashalgo + ".csv")
fileindex = []
# Get list of successfully uploaded assets (to avoid uploading them again)
# and delete partially uploaded ones.
uploaded_assets = (
github_release.get_assets(repo_name, hashalgo) if fileindex else []
)
uploaded_hashes = []
for asset in uploaded_assets:
if asset["state"] == "uploaded":
uploaded_hashes.append(asset["name"])
else:
# Remove asset partially uploaded
github_release.gh_asset_delete(repo_name, hashalgo, asset["name"])
# Update release information with incoming data
# Add incoming files to fileindex and hashalgo_dir
filenames = [
f
for f in os.listdir(incoming_dir)
if os.path.isfile(os.path.join(incoming_dir, f)) and not f.startswith(".")
]
for filename in filenames:
filepath = os.path.join(incoming_dir, filename)
checksum = hashcmd(filepath)
filedate = date_to_utc_string(get_filedate(filepath))
existingItems = [fileindex_item for fileindex_item in fileindex
if fileindex_item[COLUMN_CHECKSUM] == checksum and fileindex_item[COLUMN_FILENAME] == filename]
if not existingItems:
# new item
fileindex.append([checksum, filename, filedate])
# Make sure the hash-named file is present
hashfilepath = os.path.join(hashalgo_dir, checksum)
if not os.path.isfile(hashfilepath):
copyfile(filepath, hashfilepath)
# Create new hashalgo.csv from existing and incoming files
fileindex.sort(key=lambda a: (a[COLUMN_FILENAME].casefold(), a[COLUMN_FILEDATE]))
write_fileindex_csv(hashalgo_csv, fileindex)
hashalgo_md = os.path.join(root_dir, hashalgo_dir, hashalgo + ".md")
write_fileindex_md(hashalgo_md, fileindex, repo_name, hashalgo)
# Upload updated releaes info and new data files
# Create hashalgo release (in case it does not exist)
github_release.gh_release_create(repo_name, hashalgo, publish=True)
# Delete old hashalgo.csv and hashalgo.md
github_release.gh_asset_delete(repo_name, hashalgo, hashalgo + ".csv")
github_release.gh_asset_delete(repo_name, hashalgo, hashalgo + ".md")
# Upload new hashalgo.csv and hashalgo.md
github_release.gh_asset_upload(repo_name, hashalgo, hashalgo_csv)
github_release.gh_asset_upload(repo_name, hashalgo, hashalgo_md)
# Upload new data files
for fileindex_item in fileindex:
checksum = fileindex_item[COLUMN_CHECKSUM]
filename = fileindex_item[COLUMN_FILENAME]
if checksum in uploaded_hashes:
# already uploaded
continue
filepath = os.path.join(hashalgo_dir, checksum)
github_release.gh_asset_upload(repo_name, hashalgo, filepath)
# Copy md file content into release notes
with open(hashalgo_md, "r") as file:
release_notes = file.read()
if len(release_notes) > 125000:
note = "Since the release description is > 125000 characters, the corresponding markdown file is instead pushed into the repository."
release_notes = f"See [{hashalgo}.md](https://github.com/{repo_name}/blob/main/{hashalgo}/{hashalgo}.md)\n\n_{note}_"
logging.warning(f"{hashalgo}: {note}")
github_release.gh_release_edit(repo_name, hashalgo, body=release_notes)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Downloads release files or uploads new files in INCOMING folder as release assets."
)
parser.add_argument(
"operation",
help="operation to perform. Valid values: download, upload. Upload adds all files in INCOMING folder. Download gets all files in the .csv index to (hash-algo)-DOWNLOAD folder.",
)
parser.add_argument(
"--hash-algo",
help="hashing algorithm name. If not specified then SHA256 is used. Valid values: MD5, SHA256, SHA224, SHA384, SHA512.",
)
parser.add_argument(
"--github-token",
help="github personal access token. If not specified here then it must be set in GITHUB_TOKEN environment variable.",
)
parser.add_argument(
"--github-repo",
help="github repository (default: Slicer/SlicerTestingData)",
default="Slicer/SlicerTestingData",
)
args = parser.parse_args()
repo_name = args.github_repo
github_token = args.github_token
operation = args.operation
root_dir = os.path.dirname(os.path.realpath(__file__))
if operation == "download":
hashalgo = args.hash_algo if args.hash_algo else "SHA256"
download_dir = os.path.join(root_dir, hashalgo + "-DOWNLOAD")
download(repo_name, root_dir, download_dir, hashalgo, github_token)
elif operation == "upload":
incoming_dir = os.path.join(root_dir, "INCOMING")
hashalgos = [args.hash_algo] if args.hash_algo else ["SHA256"]
for hashalgo in hashalgos:
logging.info("Uploading " + hashalgo)
upload(repo_name, root_dir, incoming_dir, hashalgo, github_token)
else:
parser.print_help()
exit(1)
| """Download files associated with HASHALGO release into directory (root_dir)/(hashalgo).
List of files is taken from (root_dir)/(hashalgo).csv. If multiple hashes associated with
the same filename then the last entry will be used.
"""
if github_token:
github_release._github_token_cli_arg = github_token
if not os.path.isdir(download_dir):
os.mkdir(download_dir)
hashalgo_dir = os.path.join(root_dir, hashalgo)
if not os.path.isdir(hashalgo_dir):
os.mkdir(hashalgo_dir)
hashalgo_csv = download_fileindex_csv(
repo_name, hashalgo_dir, hashalgo, github_token
)
fileindex = read_fileindex_csv(hashalgo_csv)
logging.debug(hashalgo + ": downloading release assets")
# Find out which filenames are present in multiple versions (need to give them unique names)
filenames = [checksum_filename[1] for checksum_filename in fileindex]
from collections import Counter
# Sort based on filename and filedate
fileindex.sort(key=lambda a: (a[COLUMN_FILENAME].casefold(), a[COLUMN_FILEDATE]))
filenames_counter = Counter(filenames)
# download saves files to current working directory, so we need to temporarily
# change working dir to hashalgo_dir folder
with cd(hashalgo_dir):
fileindex_with_local_filename = []
for fileindex_item in fileindex:
checksum = fileindex_item[COLUMN_CHECKSUM]
filename = fileindex_item[COLUMN_FILENAME]
filedate = fileindex_item[COLUMN_FILEDATE] if len(fileindex_item) > COLUMN_FILEDATE else ""
filepath = os.path.join(hashalgo_dir, checksum)
if not os.path.isfile(filepath):
if not github_release.gh_asset_download(repo_name, hashalgo, checksum):
logging.error(
hashalgo
+ ": failed to download "
+ filename
+ " ("
+ checksum
+ ")"
)
continue
logging.debug(
hashalgo + ": downloaded " + filename + " (" + checksum + ")"
)
# determine local filename
if filenames_counter[filename] == 1:
# unique filename
local_filename = filename
else:
# multiple versions of the filename with different content
# add checksum as suffix to distinguish them
local_filename = filename + "." + checksum
local_filepath = os.path.join(download_dir, local_filename)
# set file name and date from index
copyfile(filepath, local_filepath)
set_filedate(local_filepath, date_from_utc_string(filedate if filedate else DEFAULT_FILE_DATE_UTC_STRING))
# save local fileindex
fileindex_with_local_filename.append([checksum, filename, filedate, local_filename])
# Create new hashalgo.csv from existing and incoming files
write_fileindex_csv(hashalgo_csv, fileindex)
hashalgo_local_md = os.path.join(download_dir, hashalgo + "_local.md")
write_fileindex_md(hashalgo_local_md, fileindex_with_local_filename, repo_name, hashalgo, include_local_filename=True) | identifier_body |
process_release_data.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to download release files or upload new files in INCOMING folder as release files.
It can be run from the command-line.
Requires `githubrelease` package to be installed, installable with ``pip install githubrelease``.
Download SHA256 hashed files to DOWNLOAD folder::
python process_release_data.py download --hashalgo SHA256 --github-token 123123...123
Upload all hashes from INCOMING folder::
python process_release_data.py upload --github-token 123123...123
Show detailed help::
python process_release_data.py -h
"""
import os, sys
import logging
import github_release
from shutil import copyfile
COLUMN_CHECKSUM = 0
COLUMN_FILENAME = 1
COLUMN_FILEDATE = 2
COLUMN_LOCAL_FILENAME = 3
DEFAULT_FILE_DATE_UTC_STRING="2020-01-01T12:00:00.0Z"
def get_hashcmd(hashalgo):
"""Get function that can compute hash for a filename"""
import hashlib
if hashalgo == "MD5":
return lambda filename: hashlib.md5(open(filename, "rb").read()).hexdigest()
elif hashalgo == "SHA224":
return lambda filename: hashlib.sha224(open(filename, "rb").read()).hexdigest()
if hashalgo == "SHA256":
return lambda filename: hashlib.sha256(open(filename, "rb").read()).hexdigest()
if hashalgo == "SHA384":
return lambda filename: hashlib.sha384(open(filename, "rb").read()).hexdigest()
if hashalgo == "SHA512":
return lambda filename: hashlib.sha512(open(filename, "rb").read()).hexdigest()
else:
return None
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def download_fileindex_csv(repo_name, download_dir, hashalgo, github_token=None):
if github_token:
github_release._github_token_cli_arg = github_token
fileindex_csv = os.path.join(download_dir, hashalgo + ".csv")
if os.path.isfile(fileindex_csv):
os.remove(fileindex_csv)
with cd(download_dir):
if not github_release.gh_asset_download(repo_name, hashalgo, hashalgo + ".csv"):
raise ValueError("Failed to download " + hashalgo + ".csv")
return fileindex_csv
def read_fileindex_csv(hashalgo_csv):
fileindex = []
with open(hashalgo_csv, "r") as f:
for line in f:
|
return fileindex
def write_fileindex_csv(hashalgo_csv, fileindex):
with open(hashalgo_csv, "wb") as f:
for fileindex_item in fileindex:
fields = [fileindex_item[COLUMN_CHECKSUM], fileindex_item[COLUMN_FILENAME]]
if len(fileindex_item) > COLUMN_FILEDATE:
fields.append(fileindex_item[COLUMN_FILEDATE])
f.write(bytes(";".join(fields) + "\n", "UTF-8"))
def write_fileindex_md(hashalgo_md, fileindex, repo_name, hashalgo, format=None, include_local_filename=False):
"""Write file index as markdown.
format: list or table
"""
if format is None:
format = "list"
with open(hashalgo_md, "wb") as f:
if format=="table":
header = []
header.append("| FileName | FileDate | " + hashalgo + " |\n")
header.append("|----------|----------|-------------|\n")
if include_local_filename:
header[0] = "| LocalFileName " + header[0]
header[1] = "|---------------" + header[1]
for header_line in header:
f.write(bytes(header_line, "UTF-8"))
for fileindex_item in fileindex:
checksum = fileindex_item[COLUMN_CHECKSUM]
filename = fileindex_item[COLUMN_FILENAME]
filedate = fileindex_item[COLUMN_FILEDATE] if len(fileindex_item) > COLUMN_FILEDATE else ""
local_filename = fileindex_item[COLUMN_LOCAL_FILENAME] if len(fileindex_item) > COLUMN_LOCAL_FILENAME else ""
if format=="table":
row = ""
if include_local_filename:
row += "| " + local_filename + " "
row += "| [" + filename + "](https://github.com/" + repo_name + "/releases/download/" + hashalgo + "/" + checksum + ") "
row += "| " + filedate + " "
row += "| " + checksum + " "
f.write(bytes(row + "|\n", "UTF-8",))
else:
f.write(bytes("- [" + filename + "](https://github.com/" + repo_name + "/releases/download/" + hashalgo + "/" + checksum + ")\n", "UTF-8",))
if include_local_filename:
f.write(bytes(" - LocalFileName: " + local_filename + "\n", "UTF-8",))
if filedate:
f.write(bytes(" - FileDate: " + filedate + "\n", "UTF-8",))
f.write(bytes(" - " + hashalgo +": " + checksum + "\n", "UTF-8",))
def get_filedate(filepath):
# Return
import datetime
return datetime.datetime.utcfromtimestamp(os.path.getmtime(filepath)).replace(tzinfo=datetime.timezone.utc)
def set_filedate(filepath, filedate):
stat = os.stat(filepath)
atime = stat.st_atime
os.utime(filepath, (atime, filedate.timestamp()))
def date_to_utc_string(filedate):
"""Convert date object to string in UTC time zone"""
return filedate.isoformat()
def date_from_utc_string(filedate_utc_string):
"""Convert string in UTC time zone to date object"""
# We only accept date in UTC (indicated by +00:00 or Z suffix)
import datetime
try:
date_object = datetime.datetime.strptime(filedate_utc_string, "%Y-%m-%dT%H:%M:%S.%f+00:00").replace(tzinfo=datetime.timezone.utc)
except ValueError:
date_object = datetime.datetime.strptime(filedate_utc_string, "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=datetime.timezone.utc)
return date_object
def download(repo_name, root_dir, download_dir, hashalgo, github_token=None):
"""Download files associated with HASHALGO release into directory (root_dir)/(hashalgo).
List of files is taken from (root_dir)/(hashalgo).csv. If multiple hashes associated with
the same filename then the last entry will be used.
"""
if github_token:
github_release._github_token_cli_arg = github_token
if not os.path.isdir(download_dir):
os.mkdir(download_dir)
hashalgo_dir = os.path.join(root_dir, hashalgo)
if not os.path.isdir(hashalgo_dir):
os.mkdir(hashalgo_dir)
hashalgo_csv = download_fileindex_csv(
repo_name, hashalgo_dir, hashalgo, github_token
)
fileindex = read_fileindex_csv(hashalgo_csv)
logging.debug(hashalgo + ": downloading release assets")
# Find out which filenames are present in multiple versions (need to give them unique names)
filenames = [checksum_filename[1] for checksum_filename in fileindex]
from collections import Counter
# Sort based on filename and filedate
fileindex.sort(key=lambda a: (a[COLUMN_FILENAME].casefold(), a[COLUMN_FILEDATE]))
filenames_counter = Counter(filenames)
# download saves files to current working directory, so we need to temporarily
# change working dir to hashalgo_dir folder
with cd(hashalgo_dir):
fileindex_with_local_filename = []
for fileindex_item in fileindex:
checksum = fileindex_item[COLUMN_CHECKSUM]
filename = fileindex_item[COLUMN_FILENAME]
filedate = fileindex_item[COLUMN_FILEDATE] if len(fileindex_item) > COLUMN_FILEDATE else ""
filepath = os.path.join(hashalgo_dir, checksum)
if not os.path.isfile(filepath):
if not github_release.gh_asset_download(repo_name, hashalgo, checksum):
logging.error(
hashalgo
+ ": failed to download "
+ filename
+ " ("
+ checksum
+ ")"
)
continue
logging.debug(
hashalgo + ": downloaded " + filename + " (" + checksum + ")"
)
# determine local filename
if filenames_counter[filename] == 1:
# unique filename
local_filename = filename
else:
# multiple versions of the filename with different content
# add checksum as suffix to distinguish them
local_filename = filename + "." + checksum
local_filepath = os.path.join(download_dir, local_filename)
# set file name and date from index
copyfile(filepath, local_filepath)
set_filedate(local_filepath, date_from_utc_string(filedate if filedate else DEFAULT_FILE_DATE_UTC_STRING))
# save local fileindex
fileindex_with_local_filename.append([checksum, filename, filedate, local_filename])
# Create new hashalgo.csv from existing and incoming files
write_fileindex_csv(hashalgo_csv, fileindex)
hashalgo_local_md = os.path.join(download_dir, hashalgo + "_local.md")
write_fileindex_md(hashalgo_local_md, fileindex_with_local_filename, repo_name, hashalgo, include_local_filename=True)
def upload(repo_name, root_dir, incoming_dir, hashalgo, github_token=None):
"""Upload incoming files associated them with hashalgo release."""
if github_token:
github_release._github_token_cli_arg = github_token
hashcmd = get_hashcmd(hashalgo)
if not hashcmd:
raise ValueError('hashalgo "' + hashalgo + '" not found')
if not os.path.isdir(incoming_dir):
raise ValueError("Missing " + incoming_dir + " directory")
hashalgo_dir = os.path.join(root_dir, hashalgo)
if not os.path.isdir(hashalgo_dir):
os.mkdir(hashalgo_dir)
# Download information about current release
# Get current fileindex
try:
hashalgo_csv = download_fileindex_csv(
repo_name, hashalgo_dir, hashalgo, github_token
)
fileindex = read_fileindex_csv(hashalgo_csv)
except ValueError:
# New release
hashalgo_csv = os.path.join(hashalgo_dir, hashalgo + ".csv")
fileindex = []
# Get list of successfully uploaded assets (to avoid uploading them again)
# and delete partially uploaded ones.
uploaded_assets = (
github_release.get_assets(repo_name, hashalgo) if fileindex else []
)
uploaded_hashes = []
for asset in uploaded_assets:
if asset["state"] == "uploaded":
uploaded_hashes.append(asset["name"])
else:
# Remove asset partially uploaded
github_release.gh_asset_delete(repo_name, hashalgo, asset["name"])
# Update release information with incoming data
# Add incoming files to fileindex and hashalgo_dir
filenames = [
f
for f in os.listdir(incoming_dir)
if os.path.isfile(os.path.join(incoming_dir, f)) and not f.startswith(".")
]
for filename in filenames:
filepath = os.path.join(incoming_dir, filename)
checksum = hashcmd(filepath)
filedate = date_to_utc_string(get_filedate(filepath))
existingItems = [fileindex_item for fileindex_item in fileindex
if fileindex_item[COLUMN_CHECKSUM] == checksum and fileindex_item[COLUMN_FILENAME] == filename]
if not existingItems:
# new item
fileindex.append([checksum, filename, filedate])
# Make sure the hash-named file is present
hashfilepath = os.path.join(hashalgo_dir, checksum)
if not os.path.isfile(hashfilepath):
copyfile(filepath, hashfilepath)
# Create new hashalgo.csv from existing and incoming files
fileindex.sort(key=lambda a: (a[COLUMN_FILENAME].casefold(), a[COLUMN_FILEDATE]))
write_fileindex_csv(hashalgo_csv, fileindex)
hashalgo_md = os.path.join(root_dir, hashalgo_dir, hashalgo + ".md")
write_fileindex_md(hashalgo_md, fileindex, repo_name, hashalgo)
# Upload updated releaes info and new data files
# Create hashalgo release (in case it does not exist)
github_release.gh_release_create(repo_name, hashalgo, publish=True)
# Delete old hashalgo.csv and hashalgo.md
github_release.gh_asset_delete(repo_name, hashalgo, hashalgo + ".csv")
github_release.gh_asset_delete(repo_name, hashalgo, hashalgo + ".md")
# Upload new hashalgo.csv and hashalgo.md
github_release.gh_asset_upload(repo_name, hashalgo, hashalgo_csv)
github_release.gh_asset_upload(repo_name, hashalgo, hashalgo_md)
# Upload new data files
for fileindex_item in fileindex:
checksum = fileindex_item[COLUMN_CHECKSUM]
filename = fileindex_item[COLUMN_FILENAME]
if checksum in uploaded_hashes:
# already uploaded
continue
filepath = os.path.join(hashalgo_dir, checksum)
github_release.gh_asset_upload(repo_name, hashalgo, filepath)
# Copy md file content into release notes
with open(hashalgo_md, "r") as file:
release_notes = file.read()
if len(release_notes) > 125000:
note = "Since the release description is > 125000 characters, the corresponding markdown file is instead pushed into the repository."
release_notes = f"See [{hashalgo}.md](https://github.com/{repo_name}/blob/main/{hashalgo}/{hashalgo}.md)\n\n_{note}_"
logging.warning(f"{hashalgo}: {note}")
github_release.gh_release_edit(repo_name, hashalgo, body=release_notes)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Downloads release files or uploads new files in INCOMING folder as release assets."
)
parser.add_argument(
"operation",
help="operation to perform. Valid values: download, upload. Upload adds all files in INCOMING folder. Download gets all files in the .csv index to (hash-algo)-DOWNLOAD folder.",
)
parser.add_argument(
"--hash-algo",
help="hashing algorithm name. If not specified then SHA256 is used. Valid values: MD5, SHA256, SHA224, SHA384, SHA512.",
)
parser.add_argument(
"--github-token",
help="github personal access token. If not specified here then it must be set in GITHUB_TOKEN environment variable.",
)
parser.add_argument(
"--github-repo",
help="github repository (default: Slicer/SlicerTestingData)",
default="Slicer/SlicerTestingData",
)
args = parser.parse_args()
repo_name = args.github_repo
github_token = args.github_token
operation = args.operation
root_dir = os.path.dirname(os.path.realpath(__file__))
if operation == "download":
hashalgo = args.hash_algo if args.hash_algo else "SHA256"
download_dir = os.path.join(root_dir, hashalgo + "-DOWNLOAD")
download(repo_name, root_dir, download_dir, hashalgo, github_token)
elif operation == "upload":
incoming_dir = os.path.join(root_dir, "INCOMING")
hashalgos = [args.hash_algo] if args.hash_algo else ["SHA256"]
for hashalgo in hashalgos:
logging.info("Uploading " + hashalgo)
upload(repo_name, root_dir, incoming_dir, hashalgo, github_token)
else:
parser.print_help()
exit(1)
| fields = line.rstrip().split(";")
if len(fields) <= COLUMN_FILEDATE:
fields.append("") # if date is missing then add an empty field
fileindex.append(fields) | conditional_block |
model.py | """
Most of the codes are borrowed from
https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/model_pytorch.py
"""
import copy
import json
import math
import re
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
############## activation functions ##############
##################################################
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
def swish(x):
return x * torch.sigmoid(x)
ACT_FNS = {
'relu': nn.ReLU,
'swish': swish,
'gelu': gelu
}
############## the main model ####################
##################################################
class LMModel(nn.Module):
""" Transformer with language model head only """
def __init__(self, cfg, n_vocab, n_special, n_ctx):
super(LMModel, self).__init__()
n_indexs = n_vocab + n_special + n_ctx
self.transformer = TransformerModel(cfg, n_indexs, n_ctx)
self.lm_head = LMHead(self.transformer, cfg, n_vocab)
def forward(self, x, pre_sts=None, last_only=False):
"""
:param x: the input sequence
:param pre_sts: to prevent recalculation. the prefix sequence's hidden states and Q,V saved in previous turn.
:return: logits of vocab words
"""
sts = self.transformer(x, pre_sts)
lm_logits = self.lm_head(sts[-1][0], last_only)
return lm_logits, sts
############## components ########################
##################################################
class LMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model, cfg, n_vocab):
super(LMHead, self).__init__()
self.n_embd = cfg.n_embd
embed_shape = model.embed.weight.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model.embed.weight # Tied weights
self.n_decoding_vocab = n_vocab + 2 # and SOS, EOS
def forward(self, h, last_only=False):
if last_only:
h = h[:, -1:, :]
lm_logits = self.decoder(h)
return lm_logits[:, :, :self.n_decoding_vocab]
class TransformerModel(nn.Module):
""" Transformer model """
def __init__(self, cfg, n_indexs, n_ctx):
super(TransformerModel, self).__init__()
self.embed = nn.Embedding(n_indexs, cfg.n_embd)
self.drop = nn.Dropout(cfg.embd_pdrop)
block = Block(n_ctx, cfg, scale=True)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(cfg.n_layer)])
nn.init.normal_(self.embed.weight, std=0.02)
def forward(self, x, pre_sts=None):
# for training mode
|
class Block(nn.Module):
def __init__(self, n_ctx, cfg, scale=False):
super(Block, self).__init__()
nx = cfg.n_embd
self.attn = Attention(nx, n_ctx, cfg, scale)
self.ln_1 = LayerNorm(nx)
self.mlp = MLP(4 * nx, cfg)
self.ln_2 = LayerNorm(nx)
def forward(self, x, pre_h=None, pre_key=None, pre_value=None):
l_needed = x.shape[1] if pre_h is None else x.shape[1] - pre_h.shape[1]
x_needed = x[:, -l_needed:, :]
a, key, value = self.attn(x_needed, pre_key, pre_value)
n = self.ln_1(x_needed + a)
m = self.mlp(n)
h = self.ln_2(n + m)
if pre_h is not None:
h = torch.cat([pre_h, h], dim=1)
return h, key, value
class LayerNorm(nn.Module):
"Construct a layernorm module in the OpenAI style (epsilon inside the square root)."
def __init__(self, n_state, e=1e-5):
super(LayerNorm, self).__init__()
self.g = nn.Parameter(torch.ones(n_state))
self.b = nn.Parameter(torch.zeros(n_state))
self.e = e
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.e)
return self.g * x + self.b
class Conv1D(nn.Module):
def __init__(self, nf, rf, nx):
super(Conv1D, self).__init__()
self.rf = rf
self.nf = nf
if rf == 1: # faster 1x1 conv
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.w = Parameter(w)
self.b = Parameter(torch.zeros(nf))
else: # was used to train LM
raise NotImplementedError
def forward(self, x):
if self.rf == 1:
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.b, x.view(-1, x.size(-1)), self.w)
x = x.view(*size_out)
else:
raise NotImplementedError
return x
class Attention(nn.Module):
def __init__(self, nx, n_ctx, cfg, scale=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % cfg.n_head == 0
self.register_buffer('b', torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = cfg.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, 1, nx)
self.c_proj = Conv1D(n_state, 1, nx)
self.attn_dropout = nn.Dropout(cfg.attn_pdrop)
self.resid_dropout = nn.Dropout(cfg.resid_pdrop)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
# w = w * self.b + -1e9 * (1 - self.b) # TF implem method: mask_attn_weights
# XD: self.b may be larger than w, so we need to crop it
b = self.b[:, :, (w.size(-1)-w.size(-2)):w.size(-1), :w.size(-1)]
w = w * b + -1e9 * (1 - b)
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x, pre_key=None, pre_value=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
# prevent recalculation
if pre_key is not None and pre_value is not None:
key = torch.cat([pre_key, key], dim=-1)
value = torch.cat([pre_value, value], dim=-2)
a = self._attn(query, key, value)
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
return a, key, value
class MLP(nn.Module):
def __init__(self, n_state, cfg): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = cfg.n_embd
self.c_fc = Conv1D(n_state, 1, nx)
self.c_proj = Conv1D(nx, 1, n_state)
self.act = ACT_FNS[cfg.afn]
self.dropout = nn.Dropout(cfg.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
############## pretrained model loader ###########
##################################################
def load_openai_pretrained_model(model, cfg, n_special, dir):
"""
load the pretrained OPENAI transformer language model parameters
:param model: Transformer model
:param cfg:
:param n_special: the number of special tokens
:param dir:
:return:
"""
n_ctx = cfg.n_ctx
n_embd = cfg.n_embd
n_transfer = cfg.n_layer
# Load weights from TF model
print("Loading weights...")
names = json.load(open(dir + 'parameters_names.json'))
shapes = json.load(open(dir + 'params_shapes.json'))
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(dir + 'params_{}.npy'.format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
if n_ctx > 0:
init_params[0] = init_params[0][:n_ctx]
if n_special > 0:
init_params[0] = np.concatenate(
[init_params[1],
(np.random.randn(n_special, n_embd) * 0.02).astype(np.float32),
init_params[0]
], 0)
else:
init_params[0] = np.concatenate(
[init_params[1],
init_params[0]
], 0)
del init_params[1]
if n_transfer == -1:
n_transfer = 0
else:
n_transfer = 1 + n_transfer * 12
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.embed.weight.shape, init_params[0].shape)
raise
model.embed.weight.data = torch.from_numpy(init_params[0])
for name, ip in zip(names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == ip.shape
except AssertionError as e:
e.args += (pointer.shape, ip.shape)
raise
pointer.data = torch.from_numpy(ip)
| if pre_sts is None:
e = self.drop(self.embed(x))
# Add the position information to the input embeddings
h = e.sum(dim=2)
# for eval mode
else:
# get newly added words' embeddings
prev_len = pre_sts[0].size(1)
e_new = self.drop(self.embed(x[:, prev_len:, :]))
h_new = e_new.sum(dim=2)
h = torch.cat([pre_sts[0], h_new], dim=1)
# record the output hidden states of each layer
sts = []
sts.append(h)
for i, block in enumerate(self.h):
# pre_h the prefix sequence's hidden states of the current layer
pre_h, pre_k, pre_v = pre_sts[i+1] if pre_sts is not None else (None, None, None)
h, k, v = block(h, pre_h, pre_k, pre_v)
sts.append((h, k, v))
return sts | identifier_body |
model.py | """
Most of the codes are borrowed from
https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/model_pytorch.py
"""
import copy
import json
import math
import re
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
############## activation functions ##############
##################################################
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
def swish(x):
return x * torch.sigmoid(x)
ACT_FNS = {
'relu': nn.ReLU,
'swish': swish,
'gelu': gelu
}
############## the main model ####################
##################################################
class LMModel(nn.Module):
""" Transformer with language model head only """
def __init__(self, cfg, n_vocab, n_special, n_ctx):
super(LMModel, self).__init__()
n_indexs = n_vocab + n_special + n_ctx
self.transformer = TransformerModel(cfg, n_indexs, n_ctx)
self.lm_head = LMHead(self.transformer, cfg, n_vocab)
def forward(self, x, pre_sts=None, last_only=False):
"""
:param x: the input sequence
:param pre_sts: to prevent recalculation. the prefix sequence's hidden states and Q,V saved in previous turn.
:return: logits of vocab words
"""
sts = self.transformer(x, pre_sts)
lm_logits = self.lm_head(sts[-1][0], last_only)
return lm_logits, sts
############## components ########################
##################################################
class LMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model, cfg, n_vocab):
super(LMHead, self).__init__()
self.n_embd = cfg.n_embd
embed_shape = model.embed.weight.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model.embed.weight # Tied weights
self.n_decoding_vocab = n_vocab + 2 # and SOS, EOS
def forward(self, h, last_only=False):
if last_only:
h = h[:, -1:, :]
lm_logits = self.decoder(h)
return lm_logits[:, :, :self.n_decoding_vocab]
class TransformerModel(nn.Module):
""" Transformer model """
def __init__(self, cfg, n_indexs, n_ctx):
super(TransformerModel, self).__init__()
self.embed = nn.Embedding(n_indexs, cfg.n_embd)
self.drop = nn.Dropout(cfg.embd_pdrop)
block = Block(n_ctx, cfg, scale=True)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(cfg.n_layer)])
nn.init.normal_(self.embed.weight, std=0.02)
def forward(self, x, pre_sts=None):
# for training mode
if pre_sts is None:
e = self.drop(self.embed(x))
# Add the position information to the input embeddings
h = e.sum(dim=2)
# for eval mode
else:
# get newly added words' embeddings
prev_len = pre_sts[0].size(1)
e_new = self.drop(self.embed(x[:, prev_len:, :]))
h_new = e_new.sum(dim=2)
h = torch.cat([pre_sts[0], h_new], dim=1)
# record the output hidden states of each layer
sts = []
sts.append(h)
for i, block in enumerate(self.h):
# pre_h the prefix sequence's hidden states of the current layer
pre_h, pre_k, pre_v = pre_sts[i+1] if pre_sts is not None else (None, None, None)
h, k, v = block(h, pre_h, pre_k, pre_v)
sts.append((h, k, v))
return sts
class Block(nn.Module):
def __init__(self, n_ctx, cfg, scale=False):
super(Block, self).__init__()
nx = cfg.n_embd
self.attn = Attention(nx, n_ctx, cfg, scale)
self.ln_1 = LayerNorm(nx)
self.mlp = MLP(4 * nx, cfg)
self.ln_2 = LayerNorm(nx)
def forward(self, x, pre_h=None, pre_key=None, pre_value=None):
l_needed = x.shape[1] if pre_h is None else x.shape[1] - pre_h.shape[1]
x_needed = x[:, -l_needed:, :]
a, key, value = self.attn(x_needed, pre_key, pre_value)
n = self.ln_1(x_needed + a)
m = self.mlp(n)
h = self.ln_2(n + m)
if pre_h is not None:
h = torch.cat([pre_h, h], dim=1)
return h, key, value
class LayerNorm(nn.Module):
"Construct a layernorm module in the OpenAI style (epsilon inside the square root)."
def __init__(self, n_state, e=1e-5):
super(LayerNorm, self).__init__()
self.g = nn.Parameter(torch.ones(n_state))
self.b = nn.Parameter(torch.zeros(n_state))
self.e = e
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.e)
return self.g * x + self.b
class Conv1D(nn.Module):
def __init__(self, nf, rf, nx):
super(Conv1D, self).__init__()
self.rf = rf
self.nf = nf
if rf == 1: # faster 1x1 conv
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.w = Parameter(w)
self.b = Parameter(torch.zeros(nf))
else: # was used to train LM
raise NotImplementedError
def forward(self, x): | size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.b, x.view(-1, x.size(-1)), self.w)
x = x.view(*size_out)
else:
raise NotImplementedError
return x
class Attention(nn.Module):
def __init__(self, nx, n_ctx, cfg, scale=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % cfg.n_head == 0
self.register_buffer('b', torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = cfg.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, 1, nx)
self.c_proj = Conv1D(n_state, 1, nx)
self.attn_dropout = nn.Dropout(cfg.attn_pdrop)
self.resid_dropout = nn.Dropout(cfg.resid_pdrop)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
# w = w * self.b + -1e9 * (1 - self.b) # TF implem method: mask_attn_weights
# XD: self.b may be larger than w, so we need to crop it
b = self.b[:, :, (w.size(-1)-w.size(-2)):w.size(-1), :w.size(-1)]
w = w * b + -1e9 * (1 - b)
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x, pre_key=None, pre_value=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
# prevent recalculation
if pre_key is not None and pre_value is not None:
key = torch.cat([pre_key, key], dim=-1)
value = torch.cat([pre_value, value], dim=-2)
a = self._attn(query, key, value)
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
return a, key, value
class MLP(nn.Module):
def __init__(self, n_state, cfg): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = cfg.n_embd
self.c_fc = Conv1D(n_state, 1, nx)
self.c_proj = Conv1D(nx, 1, n_state)
self.act = ACT_FNS[cfg.afn]
self.dropout = nn.Dropout(cfg.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
############## pretrained model loader ###########
##################################################
def load_openai_pretrained_model(model, cfg, n_special, dir):
"""
load the pretrained OPENAI transformer language model parameters
:param model: Transformer model
:param cfg:
:param n_special: the number of special tokens
:param dir:
:return:
"""
n_ctx = cfg.n_ctx
n_embd = cfg.n_embd
n_transfer = cfg.n_layer
# Load weights from TF model
print("Loading weights...")
names = json.load(open(dir + 'parameters_names.json'))
shapes = json.load(open(dir + 'params_shapes.json'))
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(dir + 'params_{}.npy'.format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
if n_ctx > 0:
init_params[0] = init_params[0][:n_ctx]
if n_special > 0:
init_params[0] = np.concatenate(
[init_params[1],
(np.random.randn(n_special, n_embd) * 0.02).astype(np.float32),
init_params[0]
], 0)
else:
init_params[0] = np.concatenate(
[init_params[1],
init_params[0]
], 0)
del init_params[1]
if n_transfer == -1:
n_transfer = 0
else:
n_transfer = 1 + n_transfer * 12
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.embed.weight.shape, init_params[0].shape)
raise
model.embed.weight.data = torch.from_numpy(init_params[0])
for name, ip in zip(names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == ip.shape
except AssertionError as e:
e.args += (pointer.shape, ip.shape)
raise
pointer.data = torch.from_numpy(ip) | if self.rf == 1: | random_line_split |
model.py | """
Most of the codes are borrowed from
https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/model_pytorch.py
"""
import copy
import json
import math
import re
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
############## activation functions ##############
##################################################
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
def swish(x):
return x * torch.sigmoid(x)
ACT_FNS = {
'relu': nn.ReLU,
'swish': swish,
'gelu': gelu
}
############## the main model ####################
##################################################
class LMModel(nn.Module):
""" Transformer with language model head only """
def __init__(self, cfg, n_vocab, n_special, n_ctx):
super(LMModel, self).__init__()
n_indexs = n_vocab + n_special + n_ctx
self.transformer = TransformerModel(cfg, n_indexs, n_ctx)
self.lm_head = LMHead(self.transformer, cfg, n_vocab)
def forward(self, x, pre_sts=None, last_only=False):
"""
:param x: the input sequence
:param pre_sts: to prevent recalculation. the prefix sequence's hidden states and Q,V saved in previous turn.
:return: logits of vocab words
"""
sts = self.transformer(x, pre_sts)
lm_logits = self.lm_head(sts[-1][0], last_only)
return lm_logits, sts
############## components ########################
##################################################
class LMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model, cfg, n_vocab):
super(LMHead, self).__init__()
self.n_embd = cfg.n_embd
embed_shape = model.embed.weight.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model.embed.weight # Tied weights
self.n_decoding_vocab = n_vocab + 2 # and SOS, EOS
def forward(self, h, last_only=False):
if last_only:
h = h[:, -1:, :]
lm_logits = self.decoder(h)
return lm_logits[:, :, :self.n_decoding_vocab]
class TransformerModel(nn.Module):
""" Transformer model """
def __init__(self, cfg, n_indexs, n_ctx):
super(TransformerModel, self).__init__()
self.embed = nn.Embedding(n_indexs, cfg.n_embd)
self.drop = nn.Dropout(cfg.embd_pdrop)
block = Block(n_ctx, cfg, scale=True)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(cfg.n_layer)])
nn.init.normal_(self.embed.weight, std=0.02)
def forward(self, x, pre_sts=None):
# for training mode
if pre_sts is None:
e = self.drop(self.embed(x))
# Add the position information to the input embeddings
h = e.sum(dim=2)
# for eval mode
else:
# get newly added words' embeddings
prev_len = pre_sts[0].size(1)
e_new = self.drop(self.embed(x[:, prev_len:, :]))
h_new = e_new.sum(dim=2)
h = torch.cat([pre_sts[0], h_new], dim=1)
# record the output hidden states of each layer
sts = []
sts.append(h)
for i, block in enumerate(self.h):
# pre_h the prefix sequence's hidden states of the current layer
pre_h, pre_k, pre_v = pre_sts[i+1] if pre_sts is not None else (None, None, None)
h, k, v = block(h, pre_h, pre_k, pre_v)
sts.append((h, k, v))
return sts
class Block(nn.Module):
def __init__(self, n_ctx, cfg, scale=False):
super(Block, self).__init__()
nx = cfg.n_embd
self.attn = Attention(nx, n_ctx, cfg, scale)
self.ln_1 = LayerNorm(nx)
self.mlp = MLP(4 * nx, cfg)
self.ln_2 = LayerNorm(nx)
def forward(self, x, pre_h=None, pre_key=None, pre_value=None):
l_needed = x.shape[1] if pre_h is None else x.shape[1] - pre_h.shape[1]
x_needed = x[:, -l_needed:, :]
a, key, value = self.attn(x_needed, pre_key, pre_value)
n = self.ln_1(x_needed + a)
m = self.mlp(n)
h = self.ln_2(n + m)
if pre_h is not None:
h = torch.cat([pre_h, h], dim=1)
return h, key, value
class LayerNorm(nn.Module):
"Construct a layernorm module in the OpenAI style (epsilon inside the square root)."
def __init__(self, n_state, e=1e-5):
super(LayerNorm, self).__init__()
self.g = nn.Parameter(torch.ones(n_state))
self.b = nn.Parameter(torch.zeros(n_state))
self.e = e
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.e)
return self.g * x + self.b
class Conv1D(nn.Module):
def __init__(self, nf, rf, nx):
super(Conv1D, self).__init__()
self.rf = rf
self.nf = nf
if rf == 1: # faster 1x1 conv
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.w = Parameter(w)
self.b = Parameter(torch.zeros(nf))
else: # was used to train LM
raise NotImplementedError
def forward(self, x):
if self.rf == 1:
|
else:
raise NotImplementedError
return x
class Attention(nn.Module):
def __init__(self, nx, n_ctx, cfg, scale=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % cfg.n_head == 0
self.register_buffer('b', torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = cfg.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, 1, nx)
self.c_proj = Conv1D(n_state, 1, nx)
self.attn_dropout = nn.Dropout(cfg.attn_pdrop)
self.resid_dropout = nn.Dropout(cfg.resid_pdrop)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
# w = w * self.b + -1e9 * (1 - self.b) # TF implem method: mask_attn_weights
# XD: self.b may be larger than w, so we need to crop it
b = self.b[:, :, (w.size(-1)-w.size(-2)):w.size(-1), :w.size(-1)]
w = w * b + -1e9 * (1 - b)
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x, pre_key=None, pre_value=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
# prevent recalculation
if pre_key is not None and pre_value is not None:
key = torch.cat([pre_key, key], dim=-1)
value = torch.cat([pre_value, value], dim=-2)
a = self._attn(query, key, value)
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
return a, key, value
class MLP(nn.Module):
def __init__(self, n_state, cfg): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = cfg.n_embd
self.c_fc = Conv1D(n_state, 1, nx)
self.c_proj = Conv1D(nx, 1, n_state)
self.act = ACT_FNS[cfg.afn]
self.dropout = nn.Dropout(cfg.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
############## pretrained model loader ###########
##################################################
def load_openai_pretrained_model(model, cfg, n_special, dir):
"""
load the pretrained OPENAI transformer language model parameters
:param model: Transformer model
:param cfg:
:param n_special: the number of special tokens
:param dir:
:return:
"""
n_ctx = cfg.n_ctx
n_embd = cfg.n_embd
n_transfer = cfg.n_layer
# Load weights from TF model
print("Loading weights...")
names = json.load(open(dir + 'parameters_names.json'))
shapes = json.load(open(dir + 'params_shapes.json'))
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(dir + 'params_{}.npy'.format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
if n_ctx > 0:
init_params[0] = init_params[0][:n_ctx]
if n_special > 0:
init_params[0] = np.concatenate(
[init_params[1],
(np.random.randn(n_special, n_embd) * 0.02).astype(np.float32),
init_params[0]
], 0)
else:
init_params[0] = np.concatenate(
[init_params[1],
init_params[0]
], 0)
del init_params[1]
if n_transfer == -1:
n_transfer = 0
else:
n_transfer = 1 + n_transfer * 12
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.embed.weight.shape, init_params[0].shape)
raise
model.embed.weight.data = torch.from_numpy(init_params[0])
for name, ip in zip(names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == ip.shape
except AssertionError as e:
e.args += (pointer.shape, ip.shape)
raise
pointer.data = torch.from_numpy(ip)
| size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.b, x.view(-1, x.size(-1)), self.w)
x = x.view(*size_out) | conditional_block |
model.py | """
Most of the codes are borrowed from
https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/model_pytorch.py
"""
import copy
import json
import math
import re
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
############## activation functions ##############
##################################################
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
def swish(x):
return x * torch.sigmoid(x)
ACT_FNS = {
'relu': nn.ReLU,
'swish': swish,
'gelu': gelu
}
############## the main model ####################
##################################################
class LMModel(nn.Module):
""" Transformer with language model head only """
def __init__(self, cfg, n_vocab, n_special, n_ctx):
super(LMModel, self).__init__()
n_indexs = n_vocab + n_special + n_ctx
self.transformer = TransformerModel(cfg, n_indexs, n_ctx)
self.lm_head = LMHead(self.transformer, cfg, n_vocab)
def forward(self, x, pre_sts=None, last_only=False):
"""
:param x: the input sequence
:param pre_sts: to prevent recalculation. the prefix sequence's hidden states and Q,V saved in previous turn.
:return: logits of vocab words
"""
sts = self.transformer(x, pre_sts)
lm_logits = self.lm_head(sts[-1][0], last_only)
return lm_logits, sts
############## components ########################
##################################################
class LMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model, cfg, n_vocab):
super(LMHead, self).__init__()
self.n_embd = cfg.n_embd
embed_shape = model.embed.weight.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model.embed.weight # Tied weights
self.n_decoding_vocab = n_vocab + 2 # and SOS, EOS
def forward(self, h, last_only=False):
if last_only:
h = h[:, -1:, :]
lm_logits = self.decoder(h)
return lm_logits[:, :, :self.n_decoding_vocab]
class TransformerModel(nn.Module):
""" Transformer model """
def __init__(self, cfg, n_indexs, n_ctx):
super(TransformerModel, self).__init__()
self.embed = nn.Embedding(n_indexs, cfg.n_embd)
self.drop = nn.Dropout(cfg.embd_pdrop)
block = Block(n_ctx, cfg, scale=True)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(cfg.n_layer)])
nn.init.normal_(self.embed.weight, std=0.02)
def forward(self, x, pre_sts=None):
# for training mode
if pre_sts is None:
e = self.drop(self.embed(x))
# Add the position information to the input embeddings
h = e.sum(dim=2)
# for eval mode
else:
# get newly added words' embeddings
prev_len = pre_sts[0].size(1)
e_new = self.drop(self.embed(x[:, prev_len:, :]))
h_new = e_new.sum(dim=2)
h = torch.cat([pre_sts[0], h_new], dim=1)
# record the output hidden states of each layer
sts = []
sts.append(h)
for i, block in enumerate(self.h):
# pre_h the prefix sequence's hidden states of the current layer
pre_h, pre_k, pre_v = pre_sts[i+1] if pre_sts is not None else (None, None, None)
h, k, v = block(h, pre_h, pre_k, pre_v)
sts.append((h, k, v))
return sts
class Block(nn.Module):
def __init__(self, n_ctx, cfg, scale=False):
super(Block, self).__init__()
nx = cfg.n_embd
self.attn = Attention(nx, n_ctx, cfg, scale)
self.ln_1 = LayerNorm(nx)
self.mlp = MLP(4 * nx, cfg)
self.ln_2 = LayerNorm(nx)
def forward(self, x, pre_h=None, pre_key=None, pre_value=None):
l_needed = x.shape[1] if pre_h is None else x.shape[1] - pre_h.shape[1]
x_needed = x[:, -l_needed:, :]
a, key, value = self.attn(x_needed, pre_key, pre_value)
n = self.ln_1(x_needed + a)
m = self.mlp(n)
h = self.ln_2(n + m)
if pre_h is not None:
h = torch.cat([pre_h, h], dim=1)
return h, key, value
class LayerNorm(nn.Module):
"Construct a layernorm module in the OpenAI style (epsilon inside the square root)."
def __init__(self, n_state, e=1e-5):
super(LayerNorm, self).__init__()
self.g = nn.Parameter(torch.ones(n_state))
self.b = nn.Parameter(torch.zeros(n_state))
self.e = e
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.e)
return self.g * x + self.b
class Conv1D(nn.Module):
def __init__(self, nf, rf, nx):
super(Conv1D, self).__init__()
self.rf = rf
self.nf = nf
if rf == 1: # faster 1x1 conv
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.w = Parameter(w)
self.b = Parameter(torch.zeros(nf))
else: # was used to train LM
raise NotImplementedError
def forward(self, x):
if self.rf == 1:
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.b, x.view(-1, x.size(-1)), self.w)
x = x.view(*size_out)
else:
raise NotImplementedError
return x
class Attention(nn.Module):
def __init__(self, nx, n_ctx, cfg, scale=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % cfg.n_head == 0
self.register_buffer('b', torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = cfg.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, 1, nx)
self.c_proj = Conv1D(n_state, 1, nx)
self.attn_dropout = nn.Dropout(cfg.attn_pdrop)
self.resid_dropout = nn.Dropout(cfg.resid_pdrop)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
# w = w * self.b + -1e9 * (1 - self.b) # TF implem method: mask_attn_weights
# XD: self.b may be larger than w, so we need to crop it
b = self.b[:, :, (w.size(-1)-w.size(-2)):w.size(-1), :w.size(-1)]
w = w * b + -1e9 * (1 - b)
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def | (self, x, pre_key=None, pre_value=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
# prevent recalculation
if pre_key is not None and pre_value is not None:
key = torch.cat([pre_key, key], dim=-1)
value = torch.cat([pre_value, value], dim=-2)
a = self._attn(query, key, value)
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
return a, key, value
class MLP(nn.Module):
def __init__(self, n_state, cfg): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = cfg.n_embd
self.c_fc = Conv1D(n_state, 1, nx)
self.c_proj = Conv1D(nx, 1, n_state)
self.act = ACT_FNS[cfg.afn]
self.dropout = nn.Dropout(cfg.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
############## pretrained model loader ###########
##################################################
def load_openai_pretrained_model(model, cfg, n_special, dir):
"""
load the pretrained OPENAI transformer language model parameters
:param model: Transformer model
:param cfg:
:param n_special: the number of special tokens
:param dir:
:return:
"""
n_ctx = cfg.n_ctx
n_embd = cfg.n_embd
n_transfer = cfg.n_layer
# Load weights from TF model
print("Loading weights...")
names = json.load(open(dir + 'parameters_names.json'))
shapes = json.load(open(dir + 'params_shapes.json'))
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(dir + 'params_{}.npy'.format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
if n_ctx > 0:
init_params[0] = init_params[0][:n_ctx]
if n_special > 0:
init_params[0] = np.concatenate(
[init_params[1],
(np.random.randn(n_special, n_embd) * 0.02).astype(np.float32),
init_params[0]
], 0)
else:
init_params[0] = np.concatenate(
[init_params[1],
init_params[0]
], 0)
del init_params[1]
if n_transfer == -1:
n_transfer = 0
else:
n_transfer = 1 + n_transfer * 12
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.embed.weight.shape, init_params[0].shape)
raise
model.embed.weight.data = torch.from_numpy(init_params[0])
for name, ip in zip(names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == ip.shape
except AssertionError as e:
e.args += (pointer.shape, ip.shape)
raise
pointer.data = torch.from_numpy(ip)
| forward | identifier_name |
manager.rs | use std::rc::Rc;
use std::cell::RefCell;
use std::path::{Path,PathBuf};
use std::fs;
use std::collections::HashMap;
use std::io::Write;
use Realm;
use Result;
use Systemd;
use RealmSymlinks;
use NetworkConfig;
use util::*;
const REALMS_BASE_PATH: &str = "/realms";
pub struct RealmManager {
/// Map from realm name -> realm
realm_map: HashMap<String, Realm>,
/// Sorted for 'list'
realm_list: Vec<Realm>,
/// track status of 'current' and 'default' symlinks
symlinks: Rc<RefCell<RealmSymlinks>>,
/// finds free ip addresses to use
network: Rc<RefCell<NetworkConfig>>,
/// interface to systemd
systemd: Systemd,
}
impl RealmManager {
fn new() -> Result<RealmManager> |
fn create_network_config() -> Result<Rc<RefCell<NetworkConfig>>> {
let mut network = NetworkConfig::new();
network.add_bridge("clear", "172.17.0.0/24")?;
Ok(Rc::new(RefCell::new(network)))
}
pub fn load() -> Result<RealmManager> {
let mut manager = RealmManager::new()?;
manager.symlinks.borrow_mut().load_symlinks()?;
if ! PathBuf::from(REALMS_BASE_PATH).exists() {
bail!("realms base directory {} does not exist", REALMS_BASE_PATH);
}
for dent in fs::read_dir(REALMS_BASE_PATH)? {
let path = dent?.path();
manager.process_realm_path(&path)
.map_err(|e| format_err!("error processing entry {} in realm base dir: {}", path.display(), e))?;
}
manager.realm_list.sort_unstable();
Ok(manager)
}
///
/// Process `path` as an entry from the base realms directory and
/// if `path` is a directory, and directory name has prefix "realm-"
/// extract chars after prefix as realm name and add a new `Realm`
/// instance
///
fn process_realm_path(&mut self, path: &Path) -> Result<()> {
let meta = path.symlink_metadata()?;
if !meta.is_dir() {
return Ok(())
}
let fname = path_filename(path);
if !fname.starts_with("realm-") {
return Ok(())
}
let (_, realm_name) = fname.split_at(6);
if !is_valid_realm_name(realm_name) {
warn!("ignoring directory in realm storage which has invalid realm name: {}", realm_name);
return Ok(())
}
let rootfs = path.join("rootfs");
if !rootfs.exists() {
warn!("realm directory {} does not have a rootfs, ignoring", path.display());
return Ok(())
}
match Realm::new(realm_name, self.symlinks.clone(), self.network.clone()) {
Ok(realm) => { self.add_realm_entry(realm);} ,
Err(e) => warn!("Ignoring '{}': {}", realm_name, e),
};
Ok(())
}
fn add_realm_entry(&mut self, realm: Realm) -> &Realm {
self.realm_map.insert(realm.name().to_owned(), realm.clone());
self.realm_list.push(realm.clone());
self.realm_map.get(realm.name()).expect("cannot find realm we just added to map")
}
fn remove_realm_entry(&mut self, name: &str) -> Result<()> {
self.realm_map.remove(name);
let list = self.realm_list.clone();
let mut have_default = false;
self.realm_list.clear();
for realm in list {
if realm.name() != name {
if realm.is_default() {
have_default = true;
}
self.realm_list.push(realm);
}
}
if !have_default && !self.realm_list.is_empty() {
self.symlinks.borrow_mut().set_default_symlink(self.realm_list[0].name())?;
}
Ok(())
}
pub fn current_realm_name(&self) -> Option<String> {
self.symlinks.borrow().current()
}
pub fn default_realm_name(&self) -> Option<String> {
self.symlinks.borrow().default()
}
///
/// Execute shell in a realm. If `realm_name` is `None` then exec
/// shell in current realm, otherwise look up realm by name.
///
/// If `root_shell` is true, open a root shell, otherwise open
/// a user (uid = 1000) shell.
///
pub fn launch_shell(&self, realm_name: Option<&str>, root_shell: bool) -> Result<()> {
let run_shell = |realm: &Realm| {
info!("opening shell in realm '{}'", realm.name());
realm.exec_shell(root_shell)?;
info!("exiting shell in realm '{}'", realm.name());
Ok(())
};
if let Some(name) = realm_name {
self.with_named_realm(name, true, run_shell)
} else {
self.with_current_realm(run_shell)
}
}
pub fn launch_terminal(&self, name: Option<&str>) -> Result<()> {
let run_terminal = |realm: &Realm| {
info!("opening terminal in realm '{}'", realm.name());
let title_arg = format!("Realm: {}", realm.name());
realm.run(&["/usr/bin/gnome-terminal".to_owned(), "--title".to_owned(), title_arg], true)
};
if let Some(name) = name {
self.with_named_realm(name, true, run_terminal)
} else {
self.with_current_realm(run_terminal)
}
}
pub fn run_in_realm(&self, realm_name: Option<&str>, args: &[String], use_launcher: bool) -> Result<()> {
if let Some(name) = realm_name {
self.with_named_realm(name, true, |realm| realm.run(args, use_launcher))
} else {
self.with_current_realm(|realm| realm.run(args, use_launcher))
}
}
fn with_current_realm<F: Fn(&Realm)->Result<()>>(&self, f: F) -> Result<()> {
match self.symlinks.borrow().current() {
Some(ref name) => {
self.with_named_realm(name, false, f)?;
},
None => {
warn!("No current realm instance to run command in");
}
}
Ok(())
}
fn with_named_realm<F: Fn(&Realm)->Result<()>>(&self, name: &str, want_start: bool, f: F) -> Result<()> {
match self.realm(name) {
Some(realm) => {
if want_start && !realm.is_running()? {
info!("realm '{}' is not running, starting it.", realm.name());
self.start_realm(realm)?;
}
f(realm)
},
None => bail!("no realm with name '{}' exists", name),
}
}
pub fn list(&self) -> Result<()> {
let mut out = ColoredOutput::new();
self.print_realm_header(&mut out);
for realm in &self.realm_list {
self.print_realm(realm, &mut out)?;
}
Ok(())
}
fn print_realm_header(&self, out: &mut ColoredOutput) {
out.write(" REALMS ").bold("bold").write(": current, ").bright("colored")
.write(": running, (default) starts on boot\n").write(" ------\n\n");
}
fn print_realm(&self, realm: &Realm, out: &mut ColoredOutput) -> Result<()> {
let name = format!("{:12}", realm.name());
if realm.is_current() {
out.write(" > ").bold(&name);
} else if realm.is_running()? {
out.write(" ").bright(&name);
} else {
out.write(" ").dim(&name);
}
if realm.is_default() {
out.write(" (default)");
}
out.write("\n");
Ok(())
}
pub fn start_default(&mut self) -> Result<()> {
let default = self.symlinks.borrow().default();
if let Some(ref realm_name) = default {
self.start_named_realm(realm_name)?;
return Ok(());
}
bail!("No default realm to start");
}
pub fn start_named_realm(&mut self, realm_name: &str) -> Result<()> {
info!("starting realm '{}'", realm_name);
self.with_named_realm(realm_name, false, |realm| self.start_realm(realm))
}
fn start_realm(&self, realm: &Realm) -> Result<()> {
let mut symlinks = self.symlinks.borrow_mut();
let no_current_realm = symlinks.current().is_none();
// no realm is current, so make this realm the current one
// service file for realm will also start desktopd, so this symlink
// must be created before launching realm.
if no_current_realm {
symlinks.set_current_symlink(Some(realm.name()))?;
}
if let Err(e) = realm.start() {
if no_current_realm {
// oops realm failed to start, need to reset symlink we changed
symlinks.set_current_symlink(None)?;
}
return Err(e);
}
Ok(())
}
pub fn stop_realm(&mut self, name: &str) -> Result<()> {
match self.realm_map.get(name) {
Some(realm) => {
realm.stop()?;
self.set_current_if_none()?;
},
None => {
warn!("Cannot stop '{}'. Realm does not exist", name);
return Ok(())
},
};
Ok(())
}
fn set_current_if_none(&self) -> Result<()> {
let mut symlinks = self.symlinks.borrow_mut();
if symlinks.current().is_some() {
return Ok(());
}
if let Some(ref name) = self.find_running_realm_name()? {
symlinks.set_current_symlink(Some(name))?;
self.systemd.restart_desktopd()?;
} else {
self.systemd.stop_desktopd()?;
}
Ok(())
}
fn find_running_realm_name(&self) -> Result<Option<String>> {
for realm in self.realm_map.values() {
if realm.is_running()? {
return Ok(Some(realm.name().to_string()));
}
}
Ok(None)
}
pub fn set_current_by_name(&self, realm_name: &str) -> Result<()> {
self.with_named_realm(realm_name, false, |realm| realm.set_current())
}
pub fn set_default_by_name(&self, realm_name: &str) -> Result<()> {
self.with_named_realm(realm_name, false, |realm| realm.set_default())
}
pub fn realm_name_exists(&self, name: &str) -> bool {
self.realm_map.contains_key(name)
}
pub fn realm(&self, name: &str) -> Option<&Realm> {
self.realm_map.get(name)
}
pub fn new_realm(&mut self, name: &str) -> Result<&Realm> {
if !is_valid_realm_name(name) {
bail!("'{}' is not a valid realm name. Only letters, numbers and dash '-' symbol allowed in name. First character must be a letter", name);
} else if self.realm_name_exists(name) {
bail!("A realm with name '{}' already exists", name);
}
let realm = Realm::new(name, self.symlinks.clone(), self.network.clone())?;
match realm.create_realm_directory() {
Ok(()) => Ok(self.add_realm_entry(realm)),
Err(e) => {
fs::remove_dir_all(realm.base_path())?;
Err(e)
},
}
}
pub fn remove_realm(&mut self, realm_name: &str, confirm: bool, save_home: bool) -> Result<()> {
self.with_named_realm(realm_name, false, |realm| {
if realm.base_path().join(".realmlock").exists() {
warn!("Realm '{}' has .realmlock file in base directory to protect it from deletion.", realm.name());
warn!("Remove this file from {} before running 'realms remove {}' if you really want to delete it", realm.base_path().display(), realm.name());
return Ok(());
}
let mut save_home = save_home;
if confirm {
if !RealmManager::confirm_delete(realm.name(), &mut save_home)? {
return Ok(());
}
}
realm.delete_realm(save_home)?;
self.set_current_if_none()
})?;
self.remove_realm_entry(realm_name)?;
Ok(())
}
fn confirm_delete(realm_name: &str, save_home: &mut bool) -> Result<bool> {
let you_sure = RealmManager::prompt_user(&format!("Are you sure you want to remove realm '{}'?", realm_name), false)?;
if !you_sure {
info!("Ok, not removing");
return Ok(false);
}
println!("\nThe home directory for this realm can be saved in /realms/removed/home-{}\n", realm_name);
*save_home = RealmManager::prompt_user("Would you like to save the home directory?", true)?;
Ok(true)
}
fn prompt_user(prompt: &str, default_y: bool) -> Result<bool> {
let yn = if default_y { "(Y/n)" } else { "(y/N)" };
use std::io::{stdin,stdout};
print!("{} {} : ", prompt, yn);
stdout().flush()?;
let mut line = String::new();
stdin().read_line(&mut line)?;
let yes = match line.trim().chars().next() {
Some(c) => c == 'Y' || c == 'y',
None => default_y,
};
Ok(yes)
}
pub fn base_appimg_update(&self) -> Result<()> {
info!("Entering root shell on base appimg");
self.systemd.base_image_update_shell()
}
}
| {
let network = RealmManager::create_network_config()?;
Ok(RealmManager {
realm_map: HashMap::new(),
realm_list: Vec::new(),
symlinks: Rc::new(RefCell::new(RealmSymlinks::new())),
network: network.clone(),
systemd: Systemd::new(network),
})
} | identifier_body |
manager.rs | use std::rc::Rc;
use std::cell::RefCell;
use std::path::{Path,PathBuf};
use std::fs;
use std::collections::HashMap;
use std::io::Write;
use Realm;
use Result;
use Systemd;
use RealmSymlinks;
use NetworkConfig;
use util::*;
const REALMS_BASE_PATH: &str = "/realms";
pub struct RealmManager {
/// Map from realm name -> realm
realm_map: HashMap<String, Realm>,
/// Sorted for 'list'
realm_list: Vec<Realm>,
/// track status of 'current' and 'default' symlinks
symlinks: Rc<RefCell<RealmSymlinks>>,
/// finds free ip addresses to use
network: Rc<RefCell<NetworkConfig>>,
/// interface to systemd
systemd: Systemd,
}
impl RealmManager {
fn new() -> Result<RealmManager> {
let network = RealmManager::create_network_config()?;
Ok(RealmManager {
realm_map: HashMap::new(),
realm_list: Vec::new(),
symlinks: Rc::new(RefCell::new(RealmSymlinks::new())),
network: network.clone(),
systemd: Systemd::new(network),
})
}
fn create_network_config() -> Result<Rc<RefCell<NetworkConfig>>> {
let mut network = NetworkConfig::new();
network.add_bridge("clear", "172.17.0.0/24")?;
Ok(Rc::new(RefCell::new(network)))
}
pub fn load() -> Result<RealmManager> {
let mut manager = RealmManager::new()?;
manager.symlinks.borrow_mut().load_symlinks()?;
if ! PathBuf::from(REALMS_BASE_PATH).exists() {
bail!("realms base directory {} does not exist", REALMS_BASE_PATH);
}
for dent in fs::read_dir(REALMS_BASE_PATH)? {
let path = dent?.path();
manager.process_realm_path(&path)
.map_err(|e| format_err!("error processing entry {} in realm base dir: {}", path.display(), e))?;
}
manager.realm_list.sort_unstable();
Ok(manager)
}
///
/// Process `path` as an entry from the base realms directory and
/// if `path` is a directory, and directory name has prefix "realm-"
/// extract chars after prefix as realm name and add a new `Realm`
/// instance
///
fn process_realm_path(&mut self, path: &Path) -> Result<()> {
let meta = path.symlink_metadata()?;
if !meta.is_dir() {
return Ok(())
}
let fname = path_filename(path);
if !fname.starts_with("realm-") {
return Ok(())
}
let (_, realm_name) = fname.split_at(6);
if !is_valid_realm_name(realm_name) {
warn!("ignoring directory in realm storage which has invalid realm name: {}", realm_name);
return Ok(())
}
let rootfs = path.join("rootfs");
if !rootfs.exists() {
warn!("realm directory {} does not have a rootfs, ignoring", path.display());
return Ok(())
}
match Realm::new(realm_name, self.symlinks.clone(), self.network.clone()) {
Ok(realm) => { self.add_realm_entry(realm);} ,
Err(e) => warn!("Ignoring '{}': {}", realm_name, e),
};
Ok(())
}
fn add_realm_entry(&mut self, realm: Realm) -> &Realm {
self.realm_map.insert(realm.name().to_owned(), realm.clone());
self.realm_list.push(realm.clone());
self.realm_map.get(realm.name()).expect("cannot find realm we just added to map")
}
fn remove_realm_entry(&mut self, name: &str) -> Result<()> {
self.realm_map.remove(name);
let list = self.realm_list.clone();
let mut have_default = false;
self.realm_list.clear();
for realm in list {
if realm.name() != name {
if realm.is_default() {
have_default = true;
}
self.realm_list.push(realm);
}
}
if !have_default && !self.realm_list.is_empty() {
self.symlinks.borrow_mut().set_default_symlink(self.realm_list[0].name())?;
}
Ok(())
}
pub fn current_realm_name(&self) -> Option<String> {
self.symlinks.borrow().current()
}
pub fn default_realm_name(&self) -> Option<String> {
self.symlinks.borrow().default()
}
///
/// Execute shell in a realm. If `realm_name` is `None` then exec
/// shell in current realm, otherwise look up realm by name.
///
/// If `root_shell` is true, open a root shell, otherwise open
/// a user (uid = 1000) shell.
///
pub fn launch_shell(&self, realm_name: Option<&str>, root_shell: bool) -> Result<()> {
let run_shell = |realm: &Realm| {
info!("opening shell in realm '{}'", realm.name());
realm.exec_shell(root_shell)?;
info!("exiting shell in realm '{}'", realm.name());
Ok(())
};
if let Some(name) = realm_name {
self.with_named_realm(name, true, run_shell)
} else {
self.with_current_realm(run_shell)
}
}
pub fn launch_terminal(&self, name: Option<&str>) -> Result<()> {
let run_terminal = |realm: &Realm| {
info!("opening terminal in realm '{}'", realm.name());
let title_arg = format!("Realm: {}", realm.name());
realm.run(&["/usr/bin/gnome-terminal".to_owned(), "--title".to_owned(), title_arg], true)
};
if let Some(name) = name {
self.with_named_realm(name, true, run_terminal)
} else {
self.with_current_realm(run_terminal)
}
}
pub fn run_in_realm(&self, realm_name: Option<&str>, args: &[String], use_launcher: bool) -> Result<()> {
if let Some(name) = realm_name {
self.with_named_realm(name, true, |realm| realm.run(args, use_launcher))
} else {
self.with_current_realm(|realm| realm.run(args, use_launcher))
}
}
fn with_current_realm<F: Fn(&Realm)->Result<()>>(&self, f: F) -> Result<()> {
match self.symlinks.borrow().current() {
Some(ref name) => {
self.with_named_realm(name, false, f)?;
},
None => {
warn!("No current realm instance to run command in");
}
}
Ok(())
}
fn with_named_realm<F: Fn(&Realm)->Result<()>>(&self, name: &str, want_start: bool, f: F) -> Result<()> {
match self.realm(name) {
Some(realm) => {
if want_start && !realm.is_running()? {
info!("realm '{}' is not running, starting it.", realm.name());
self.start_realm(realm)?;
}
f(realm)
},
None => bail!("no realm with name '{}' exists", name),
}
}
pub fn list(&self) -> Result<()> {
let mut out = ColoredOutput::new();
self.print_realm_header(&mut out);
for realm in &self.realm_list {
self.print_realm(realm, &mut out)?;
}
Ok(())
}
fn print_realm_header(&self, out: &mut ColoredOutput) {
out.write(" REALMS ").bold("bold").write(": current, ").bright("colored")
.write(": running, (default) starts on boot\n").write(" ------\n\n");
}
fn print_realm(&self, realm: &Realm, out: &mut ColoredOutput) -> Result<()> {
let name = format!("{:12}", realm.name());
if realm.is_current() {
out.write(" > ").bold(&name);
} else if realm.is_running()? {
out.write(" ").bright(&name);
} else {
out.write(" ").dim(&name);
}
if realm.is_default() {
out.write(" (default)");
}
out.write("\n");
Ok(())
}
pub fn start_default(&mut self) -> Result<()> {
let default = self.symlinks.borrow().default();
if let Some(ref realm_name) = default {
self.start_named_realm(realm_name)?;
return Ok(());
}
bail!("No default realm to start");
}
pub fn start_named_realm(&mut self, realm_name: &str) -> Result<()> {
info!("starting realm '{}'", realm_name);
self.with_named_realm(realm_name, false, |realm| self.start_realm(realm))
}
fn start_realm(&self, realm: &Realm) -> Result<()> {
let mut symlinks = self.symlinks.borrow_mut();
let no_current_realm = symlinks.current().is_none();
// no realm is current, so make this realm the current one
// service file for realm will also start desktopd, so this symlink
// must be created before launching realm.
if no_current_realm {
symlinks.set_current_symlink(Some(realm.name()))?;
}
if let Err(e) = realm.start() {
if no_current_realm {
// oops realm failed to start, need to reset symlink we changed
symlinks.set_current_symlink(None)?;
}
return Err(e);
}
Ok(())
}
pub fn stop_realm(&mut self, name: &str) -> Result<()> {
match self.realm_map.get(name) {
Some(realm) => {
realm.stop()?;
self.set_current_if_none()?;
},
None => {
warn!("Cannot stop '{}'. Realm does not exist", name);
return Ok(())
},
};
Ok(())
}
fn set_current_if_none(&self) -> Result<()> {
let mut symlinks = self.symlinks.borrow_mut();
if symlinks.current().is_some() {
return Ok(());
}
if let Some(ref name) = self.find_running_realm_name()? {
symlinks.set_current_symlink(Some(name))?;
self.systemd.restart_desktopd()?;
} else {
self.systemd.stop_desktopd()?;
}
Ok(())
}
fn find_running_realm_name(&self) -> Result<Option<String>> {
for realm in self.realm_map.values() {
if realm.is_running()? {
return Ok(Some(realm.name().to_string()));
}
}
Ok(None)
}
pub fn set_current_by_name(&self, realm_name: &str) -> Result<()> {
self.with_named_realm(realm_name, false, |realm| realm.set_current())
} |
pub fn set_default_by_name(&self, realm_name: &str) -> Result<()> {
self.with_named_realm(realm_name, false, |realm| realm.set_default())
}
pub fn realm_name_exists(&self, name: &str) -> bool {
self.realm_map.contains_key(name)
}
pub fn realm(&self, name: &str) -> Option<&Realm> {
self.realm_map.get(name)
}
pub fn new_realm(&mut self, name: &str) -> Result<&Realm> {
if !is_valid_realm_name(name) {
bail!("'{}' is not a valid realm name. Only letters, numbers and dash '-' symbol allowed in name. First character must be a letter", name);
} else if self.realm_name_exists(name) {
bail!("A realm with name '{}' already exists", name);
}
let realm = Realm::new(name, self.symlinks.clone(), self.network.clone())?;
match realm.create_realm_directory() {
Ok(()) => Ok(self.add_realm_entry(realm)),
Err(e) => {
fs::remove_dir_all(realm.base_path())?;
Err(e)
},
}
}
pub fn remove_realm(&mut self, realm_name: &str, confirm: bool, save_home: bool) -> Result<()> {
self.with_named_realm(realm_name, false, |realm| {
if realm.base_path().join(".realmlock").exists() {
warn!("Realm '{}' has .realmlock file in base directory to protect it from deletion.", realm.name());
warn!("Remove this file from {} before running 'realms remove {}' if you really want to delete it", realm.base_path().display(), realm.name());
return Ok(());
}
let mut save_home = save_home;
if confirm {
if !RealmManager::confirm_delete(realm.name(), &mut save_home)? {
return Ok(());
}
}
realm.delete_realm(save_home)?;
self.set_current_if_none()
})?;
self.remove_realm_entry(realm_name)?;
Ok(())
}
fn confirm_delete(realm_name: &str, save_home: &mut bool) -> Result<bool> {
let you_sure = RealmManager::prompt_user(&format!("Are you sure you want to remove realm '{}'?", realm_name), false)?;
if !you_sure {
info!("Ok, not removing");
return Ok(false);
}
println!("\nThe home directory for this realm can be saved in /realms/removed/home-{}\n", realm_name);
*save_home = RealmManager::prompt_user("Would you like to save the home directory?", true)?;
Ok(true)
}
fn prompt_user(prompt: &str, default_y: bool) -> Result<bool> {
let yn = if default_y { "(Y/n)" } else { "(y/N)" };
use std::io::{stdin,stdout};
print!("{} {} : ", prompt, yn);
stdout().flush()?;
let mut line = String::new();
stdin().read_line(&mut line)?;
let yes = match line.trim().chars().next() {
Some(c) => c == 'Y' || c == 'y',
None => default_y,
};
Ok(yes)
}
pub fn base_appimg_update(&self) -> Result<()> {
info!("Entering root shell on base appimg");
self.systemd.base_image_update_shell()
}
} | random_line_split | |
manager.rs | use std::rc::Rc;
use std::cell::RefCell;
use std::path::{Path,PathBuf};
use std::fs;
use std::collections::HashMap;
use std::io::Write;
use Realm;
use Result;
use Systemd;
use RealmSymlinks;
use NetworkConfig;
use util::*;
const REALMS_BASE_PATH: &str = "/realms";
pub struct RealmManager {
/// Map from realm name -> realm
realm_map: HashMap<String, Realm>,
/// Sorted for 'list'
realm_list: Vec<Realm>,
/// track status of 'current' and 'default' symlinks
symlinks: Rc<RefCell<RealmSymlinks>>,
/// finds free ip addresses to use
network: Rc<RefCell<NetworkConfig>>,
/// interface to systemd
systemd: Systemd,
}
impl RealmManager {
fn new() -> Result<RealmManager> {
let network = RealmManager::create_network_config()?;
Ok(RealmManager {
realm_map: HashMap::new(),
realm_list: Vec::new(),
symlinks: Rc::new(RefCell::new(RealmSymlinks::new())),
network: network.clone(),
systemd: Systemd::new(network),
})
}
fn create_network_config() -> Result<Rc<RefCell<NetworkConfig>>> {
let mut network = NetworkConfig::new();
network.add_bridge("clear", "172.17.0.0/24")?;
Ok(Rc::new(RefCell::new(network)))
}
pub fn load() -> Result<RealmManager> {
let mut manager = RealmManager::new()?;
manager.symlinks.borrow_mut().load_symlinks()?;
if ! PathBuf::from(REALMS_BASE_PATH).exists() {
bail!("realms base directory {} does not exist", REALMS_BASE_PATH);
}
for dent in fs::read_dir(REALMS_BASE_PATH)? {
let path = dent?.path();
manager.process_realm_path(&path)
.map_err(|e| format_err!("error processing entry {} in realm base dir: {}", path.display(), e))?;
}
manager.realm_list.sort_unstable();
Ok(manager)
}
///
/// Process `path` as an entry from the base realms directory and
/// if `path` is a directory, and directory name has prefix "realm-"
/// extract chars after prefix as realm name and add a new `Realm`
/// instance
///
fn process_realm_path(&mut self, path: &Path) -> Result<()> {
let meta = path.symlink_metadata()?;
if !meta.is_dir() {
return Ok(())
}
let fname = path_filename(path);
if !fname.starts_with("realm-") {
return Ok(())
}
let (_, realm_name) = fname.split_at(6);
if !is_valid_realm_name(realm_name) {
warn!("ignoring directory in realm storage which has invalid realm name: {}", realm_name);
return Ok(())
}
let rootfs = path.join("rootfs");
if !rootfs.exists() {
warn!("realm directory {} does not have a rootfs, ignoring", path.display());
return Ok(())
}
match Realm::new(realm_name, self.symlinks.clone(), self.network.clone()) {
Ok(realm) => { self.add_realm_entry(realm);} ,
Err(e) => warn!("Ignoring '{}': {}", realm_name, e),
};
Ok(())
}
fn | (&mut self, realm: Realm) -> &Realm {
self.realm_map.insert(realm.name().to_owned(), realm.clone());
self.realm_list.push(realm.clone());
self.realm_map.get(realm.name()).expect("cannot find realm we just added to map")
}
fn remove_realm_entry(&mut self, name: &str) -> Result<()> {
self.realm_map.remove(name);
let list = self.realm_list.clone();
let mut have_default = false;
self.realm_list.clear();
for realm in list {
if realm.name() != name {
if realm.is_default() {
have_default = true;
}
self.realm_list.push(realm);
}
}
if !have_default && !self.realm_list.is_empty() {
self.symlinks.borrow_mut().set_default_symlink(self.realm_list[0].name())?;
}
Ok(())
}
pub fn current_realm_name(&self) -> Option<String> {
self.symlinks.borrow().current()
}
pub fn default_realm_name(&self) -> Option<String> {
self.symlinks.borrow().default()
}
///
/// Execute shell in a realm. If `realm_name` is `None` then exec
/// shell in current realm, otherwise look up realm by name.
///
/// If `root_shell` is true, open a root shell, otherwise open
/// a user (uid = 1000) shell.
///
pub fn launch_shell(&self, realm_name: Option<&str>, root_shell: bool) -> Result<()> {
let run_shell = |realm: &Realm| {
info!("opening shell in realm '{}'", realm.name());
realm.exec_shell(root_shell)?;
info!("exiting shell in realm '{}'", realm.name());
Ok(())
};
if let Some(name) = realm_name {
self.with_named_realm(name, true, run_shell)
} else {
self.with_current_realm(run_shell)
}
}
pub fn launch_terminal(&self, name: Option<&str>) -> Result<()> {
let run_terminal = |realm: &Realm| {
info!("opening terminal in realm '{}'", realm.name());
let title_arg = format!("Realm: {}", realm.name());
realm.run(&["/usr/bin/gnome-terminal".to_owned(), "--title".to_owned(), title_arg], true)
};
if let Some(name) = name {
self.with_named_realm(name, true, run_terminal)
} else {
self.with_current_realm(run_terminal)
}
}
pub fn run_in_realm(&self, realm_name: Option<&str>, args: &[String], use_launcher: bool) -> Result<()> {
if let Some(name) = realm_name {
self.with_named_realm(name, true, |realm| realm.run(args, use_launcher))
} else {
self.with_current_realm(|realm| realm.run(args, use_launcher))
}
}
fn with_current_realm<F: Fn(&Realm)->Result<()>>(&self, f: F) -> Result<()> {
match self.symlinks.borrow().current() {
Some(ref name) => {
self.with_named_realm(name, false, f)?;
},
None => {
warn!("No current realm instance to run command in");
}
}
Ok(())
}
fn with_named_realm<F: Fn(&Realm)->Result<()>>(&self, name: &str, want_start: bool, f: F) -> Result<()> {
match self.realm(name) {
Some(realm) => {
if want_start && !realm.is_running()? {
info!("realm '{}' is not running, starting it.", realm.name());
self.start_realm(realm)?;
}
f(realm)
},
None => bail!("no realm with name '{}' exists", name),
}
}
pub fn list(&self) -> Result<()> {
let mut out = ColoredOutput::new();
self.print_realm_header(&mut out);
for realm in &self.realm_list {
self.print_realm(realm, &mut out)?;
}
Ok(())
}
fn print_realm_header(&self, out: &mut ColoredOutput) {
out.write(" REALMS ").bold("bold").write(": current, ").bright("colored")
.write(": running, (default) starts on boot\n").write(" ------\n\n");
}
fn print_realm(&self, realm: &Realm, out: &mut ColoredOutput) -> Result<()> {
let name = format!("{:12}", realm.name());
if realm.is_current() {
out.write(" > ").bold(&name);
} else if realm.is_running()? {
out.write(" ").bright(&name);
} else {
out.write(" ").dim(&name);
}
if realm.is_default() {
out.write(" (default)");
}
out.write("\n");
Ok(())
}
pub fn start_default(&mut self) -> Result<()> {
let default = self.symlinks.borrow().default();
if let Some(ref realm_name) = default {
self.start_named_realm(realm_name)?;
return Ok(());
}
bail!("No default realm to start");
}
pub fn start_named_realm(&mut self, realm_name: &str) -> Result<()> {
info!("starting realm '{}'", realm_name);
self.with_named_realm(realm_name, false, |realm| self.start_realm(realm))
}
fn start_realm(&self, realm: &Realm) -> Result<()> {
let mut symlinks = self.symlinks.borrow_mut();
let no_current_realm = symlinks.current().is_none();
// no realm is current, so make this realm the current one
// service file for realm will also start desktopd, so this symlink
// must be created before launching realm.
if no_current_realm {
symlinks.set_current_symlink(Some(realm.name()))?;
}
if let Err(e) = realm.start() {
if no_current_realm {
// oops realm failed to start, need to reset symlink we changed
symlinks.set_current_symlink(None)?;
}
return Err(e);
}
Ok(())
}
pub fn stop_realm(&mut self, name: &str) -> Result<()> {
match self.realm_map.get(name) {
Some(realm) => {
realm.stop()?;
self.set_current_if_none()?;
},
None => {
warn!("Cannot stop '{}'. Realm does not exist", name);
return Ok(())
},
};
Ok(())
}
fn set_current_if_none(&self) -> Result<()> {
let mut symlinks = self.symlinks.borrow_mut();
if symlinks.current().is_some() {
return Ok(());
}
if let Some(ref name) = self.find_running_realm_name()? {
symlinks.set_current_symlink(Some(name))?;
self.systemd.restart_desktopd()?;
} else {
self.systemd.stop_desktopd()?;
}
Ok(())
}
fn find_running_realm_name(&self) -> Result<Option<String>> {
for realm in self.realm_map.values() {
if realm.is_running()? {
return Ok(Some(realm.name().to_string()));
}
}
Ok(None)
}
pub fn set_current_by_name(&self, realm_name: &str) -> Result<()> {
self.with_named_realm(realm_name, false, |realm| realm.set_current())
}
pub fn set_default_by_name(&self, realm_name: &str) -> Result<()> {
self.with_named_realm(realm_name, false, |realm| realm.set_default())
}
pub fn realm_name_exists(&self, name: &str) -> bool {
self.realm_map.contains_key(name)
}
pub fn realm(&self, name: &str) -> Option<&Realm> {
self.realm_map.get(name)
}
pub fn new_realm(&mut self, name: &str) -> Result<&Realm> {
if !is_valid_realm_name(name) {
bail!("'{}' is not a valid realm name. Only letters, numbers and dash '-' symbol allowed in name. First character must be a letter", name);
} else if self.realm_name_exists(name) {
bail!("A realm with name '{}' already exists", name);
}
let realm = Realm::new(name, self.symlinks.clone(), self.network.clone())?;
match realm.create_realm_directory() {
Ok(()) => Ok(self.add_realm_entry(realm)),
Err(e) => {
fs::remove_dir_all(realm.base_path())?;
Err(e)
},
}
}
pub fn remove_realm(&mut self, realm_name: &str, confirm: bool, save_home: bool) -> Result<()> {
self.with_named_realm(realm_name, false, |realm| {
if realm.base_path().join(".realmlock").exists() {
warn!("Realm '{}' has .realmlock file in base directory to protect it from deletion.", realm.name());
warn!("Remove this file from {} before running 'realms remove {}' if you really want to delete it", realm.base_path().display(), realm.name());
return Ok(());
}
let mut save_home = save_home;
if confirm {
if !RealmManager::confirm_delete(realm.name(), &mut save_home)? {
return Ok(());
}
}
realm.delete_realm(save_home)?;
self.set_current_if_none()
})?;
self.remove_realm_entry(realm_name)?;
Ok(())
}
fn confirm_delete(realm_name: &str, save_home: &mut bool) -> Result<bool> {
let you_sure = RealmManager::prompt_user(&format!("Are you sure you want to remove realm '{}'?", realm_name), false)?;
if !you_sure {
info!("Ok, not removing");
return Ok(false);
}
println!("\nThe home directory for this realm can be saved in /realms/removed/home-{}\n", realm_name);
*save_home = RealmManager::prompt_user("Would you like to save the home directory?", true)?;
Ok(true)
}
fn prompt_user(prompt: &str, default_y: bool) -> Result<bool> {
let yn = if default_y { "(Y/n)" } else { "(y/N)" };
use std::io::{stdin,stdout};
print!("{} {} : ", prompt, yn);
stdout().flush()?;
let mut line = String::new();
stdin().read_line(&mut line)?;
let yes = match line.trim().chars().next() {
Some(c) => c == 'Y' || c == 'y',
None => default_y,
};
Ok(yes)
}
pub fn base_appimg_update(&self) -> Result<()> {
info!("Entering root shell on base appimg");
self.systemd.base_image_update_shell()
}
}
| add_realm_entry | identifier_name |
Multiple_PhotometryConstruction.py | import os
import re
import numpy as np
import pandas as pd
import pymysql
import mysql.connector
from mysql.connector import errorcode
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, Float, Date, PrimaryKeyConstraint, VARCHAR, insert
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from datetime import date
import csv
import matplotlib.pyplot as plt
from sqlalchemy.sql.sqltypes import DECIMAL
from tdt import read_block, epoc_filter, download_demo_data
#Photometry data location
datapath = "C:\\Users\\carte\\Dropbox\\Carter Local\\PostDocStuff\\RISDT\\PTone Fiber\\Photometry Data"
#Check to see which subjects have photometry data
folders = os.listdir(datapath)
def get_time(s465,s405):
timex = []
if len(s465) == 1:
timex.append(np.linspace(1,len(s465[0].data), len(s465[0].data))/s465[0].fs)
else:
for i in range(0, len(s465)):
timex.append(np.linspace(1,len(s465[i].data), len(s465[i].data))/s465[i].fs)
return timex
def evaluate_rawdata(time,Subjects_465,Subjects_405, subjs):
trim = []
note = []
if len(Subjects_465) == 1:
fig1 = plt.figure()
plt.plot(time[0],Subjects_465[0].data, color='green', label = 'Gcamp6f')
plt.plot(time[0],Subjects_405[0].data, color='blueviolet', label = 'ISOS')
plt.ylabel('mV')
plt.xlabel('Seconds')
plt.title('Subject:' + subjs[0])
plt.show()
while True:
try:
proceed = str(input("Does everything look okay?"))
if proceed == 'y':
trim.append(100)
note.append('All good')
break
elif proceed == 'n':
trim.append(int(input("Where should the trim begin?")))
note.append(str(input("Anything to record about these data")))
break
except ValueError:
print("Please enter y or n")
continue
else:
for i in range(0, len(Subjects_465)):
fig1 = plt.figure()
plt.plot(time[i],Subjects_465[i].data, color='green', label = 'Gcamp6f')
plt.plot(time[i],Subjects_405[i].data, color='blueviolet', label = 'ISOS')
plt.ylabel('mV')
plt.xlabel('Seconds')
plt.title('Subject:'+ subjs[i])
plt.show()
while True:
try:
proceed = str(input("Does everything look okay?"))
if proceed == 'y':
trim.append(100)
note.append('All good')
break
elif proceed == 'n':
trim.append(int(input("Where should the trim begin?")))
note.append(str(input("Anything to record about these data")))
break
except ValueError:
print("Please enter y or n")
continue
return trim, note
def trim_and_process(trim,end,timex,s465,s405):
# trim data
trimmed_s465 = []
trimmed_s405 = []
trimmed_time = []
dFF = []
std_dFF = []
baseline = []
y_all = []
y_df = []
s_ind = []
e_ind = []
if len(s465) == 1:
s_ind.append(np.where(timex[0] > trim[0])[0][0])
e_ind.append(np.where(timex[0] > end)[0][0])
trimmed_time.append(timex[0][s_ind[0]:e_ind[0]])
trimmed_s465.append(s465[0].data[s_ind[0]:e_ind[0]])
trimmed_s405.append(s405[0].data[s_ind[0]:e_ind[0]])
#process data
baseline.append(np.polyfit(np.array(trimmed_s405[0]), np.array(trimmed_s465[0]), 1))
y_all.append(np.multiply(baseline[0][0], np.array(trimmed_s405[0])) + baseline[0][1])
y_df.append(np.array(trimmed_s465[0]) - y_all[0])
dFF.append(np.multiply(100,np.divide(y_df[0],y_all[0])))
std_dFF.append(np.std(dFF[0]))
else:
for i in range(0, len(s465)):
s_ind.append(np.where(timex[i] > trim[i])[0][0])
e_ind.append(np.where(timex[i] > end)[0][0])
trimmed_time.append(timex[i][s_ind[i]:e_ind[i]])
trimmed_s465.append(s465[i].data[s_ind[i]:e_ind[i]])
trimmed_s405.append(s405[i].data[s_ind[i]:e_ind[i]])
#process data
baseline.append(np.polyfit(np.array(trimmed_s405[i]), np.array(trimmed_s465[i]), 1))
y_all.append(np.multiply(baseline[i][0], np.array(trimmed_s405[i])) + baseline[i][1])
y_df.append(np.array(trimmed_s465[i]) - y_all[i])
dFF.append(np.multiply(100,np.divide(y_df[i],y_all[i])))
print("you can ignore that warning: when comparing polyfit to" +
" the equivalent in matlab, which gives no errors it yielded the same answer")
return [trimmed_time, dFF, std_dFF]
def send_fiber(data,subjects,session,notes):
md= MetaData(engine)
test_conn = engine.connect()
for s in range(0,len(notes)):
if not engine.dialect.has_table(test_conn,'photodata'):
photodata = Table('photodata', md,
Column('idx', Integer, primary_key=True, nullable=False, autoincrement=True),
Column('Subject',Integer),
Column('Session',String(length=10)),
Column('Note', String(length=100)),
Column('TimeX', DECIMAL(19,10)),
Column('dFF', DECIMAL(19,10)))
md.create_all(engine)
#pull table as a class
Base = automap_base()
Base.prepare(engine,reflect=True)
photodata = Base.classes.photodata
#create data dictionary
list_session = [session]*len(data[0][s])
list_subject = [subjects[s]]*len(data[0][s])
list_notes = [notes[s]]*len(data[0][s])
print("reorganizing data for push to sql")
data_list_dicts = []
for i in range(0, len(data[0][s])):
data_list_dicts.append(dict(Subject=list_subject[i], Session=list_session[i],
Note=list_notes[i], TimeX=data[0][s][i].tolist(), dFF=data[1][s][i].tolist()))
print("pushing fiber data to SQL")
#start session
SQLsession = Session(engine)
SQLsession.bulk_insert_mappings(photodata,data_list_dicts)
SQLsession.flush()
SQLsession.commit()
print("fiber data pushed to SQL db for subject:"+subjects[s])
SQLsession.close()
def sort_events(s,subjects,all_events):
event_list_dict = []
for i in range(0,len(subjects)):
if not all_events[i]:
print("empty events")
else:
for e in all_events[i]:
subject_list = [int(subjects[i])]*len(e.onset)
session_list = [s]*len(e.onset)
name_list = [e.name]*len(e.onset)
onset_list = e.onset.tolist()
offset_list = e.offset.tolist()
for j in range(0,len(onset_list)):
event_list_dict.append(dict(Subject = subject_list[j], Session = session_list[j],
Name = name_list[j], Onset = onset_list[j], Offset = offset_list[j]))
return event_list_dict
def send_events(event_list_dict):
md= MetaData(engine)
test_conn = engine.connect()
if not engine.dialect.has_table(test_conn,'eventdata'):
eventdata = Table('eventdata', md,
Column('idx', Integer, primary_key=True, nullable=False, autoincrement=True),
Column('Subject',Integer),
Column('Session',String(length=10)),
Column('Name', String(length=100)),
Column('Onset', DECIMAL(19,10)),
Column('Offset', DECIMAL(19,10)))
md.create_all(engine)
#pull table as a class
Base = automap_base()
Base.prepare(engine,reflect=True)
eventdata = Base.classes.eventdata
print("pushing event data to sql")
#start session
SQLsession = Session(engine)
SQLsession.bulk_insert_mappings(eventdata,event_list_dict)
SQLsession.flush()
SQLsession.commit()
print("event data pushed to SQL db for all subject")
SQLsession.close()
def pullfiberdata(s,f,import_tdt):
Subjects_465 = []
Subjects_405 = []
if '&' in f:
subjs = f.split()
subjs.remove('&')
else:
subjs = [f]
if len(subjs) == 1:
test465A = import_tdt.streams['_465A'].data[1000]
test465C = import_tdt.streams['_465C'].data[1000]
if test465A > test465C:
Subjects_465.append(import_tdt.streams['_465A'])
Subjects_405.append(import_tdt.streams['_405A'])
else:
Subjects_465.append(import_tdt.streams['_465C'])
Subjects_405.append(import_tdt.streams['_405C'])
else:
Subjects_465.append(import_tdt.streams['_465A'])
Subjects_405.append(import_tdt.streams['_405A'])
Subjects_465.append(import_tdt.streams['_465C'])
Subjects_405.append(import_tdt.streams['_405C'])
time = get_time(Subjects_465,Subjects_405)
trim, note = evaluate_rawdata(time, Subjects_465, Subjects_405, subjs)
processed_data = trim_and_process(trim,import_tdt.info.duration.seconds-1, time, Subjects_465, Subjects_405)
send_fiber(processed_data,subjs,s,note)
def | (s,f,import_tdt):
eventsA = []
eventsB = []
if '&' in f:
subjs = f.split()
subjs.remove('&')
else:
subjs = [f]
if len(subjs) == 1:
test465A = import_tdt.streams['_465A'].data[1000]
test465C = import_tdt.streams['_465C'].data[1000]
if test465A > test465C:
eventsA.append(import_tdt.epocs.PC0_)
eventsA.append(import_tdt.epocs.PC2_)
eventsA.append(import_tdt.epocs.PC4_)
eventsA.append(import_tdt.epocs.PC6_)
else:
eventsB.append(import_tdt.epocs.PC1_)
eventsB.append(import_tdt.epocs.PC3_)
eventsB.append(import_tdt.epocs.PC5_)
eventsB.append(import_tdt.epocs.PC7_)
else:
eventsA.append(import_tdt.epocs.PC0_)
eventsA.append(import_tdt.epocs.PC2_)
eventsA.append(import_tdt.epocs.PC4_)
eventsA.append(import_tdt.epocs.PC6_)
eventsB.append(import_tdt.epocs.PC1_)
eventsB.append(import_tdt.epocs.PC3_)
eventsB.append(import_tdt.epocs.PC5_)
eventsB.append(import_tdt.epocs.PC7_)
all_events = [eventsA,eventsB]
event_list_dict = sort_events(s,subjs,all_events)
send_events(event_list_dict)
#Update data
for f in folders:
obtained_datapath = os.path.join(datapath,f)
sessions = os.listdir(obtained_datapath)
for s in sessions:
final_datapath = os.path.join(obtained_datapath,s)
import_tdt = read_block(final_datapath)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root", pw="", db = "fiberptone"))
pullfiberdata(s,f,import_tdt)
pulleventdata(s,f,import_tdt)
| pulleventdata | identifier_name |
Multiple_PhotometryConstruction.py | import os
import re
import numpy as np
import pandas as pd
import pymysql
import mysql.connector
from mysql.connector import errorcode
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, Float, Date, PrimaryKeyConstraint, VARCHAR, insert
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from datetime import date
import csv
import matplotlib.pyplot as plt
from sqlalchemy.sql.sqltypes import DECIMAL
from tdt import read_block, epoc_filter, download_demo_data
#Photometry data location
datapath = "C:\\Users\\carte\\Dropbox\\Carter Local\\PostDocStuff\\RISDT\\PTone Fiber\\Photometry Data"
#Check to see which subjects have photometry data
folders = os.listdir(datapath)
def get_time(s465,s405):
|
def evaluate_rawdata(time,Subjects_465,Subjects_405, subjs):
trim = []
note = []
if len(Subjects_465) == 1:
fig1 = plt.figure()
plt.plot(time[0],Subjects_465[0].data, color='green', label = 'Gcamp6f')
plt.plot(time[0],Subjects_405[0].data, color='blueviolet', label = 'ISOS')
plt.ylabel('mV')
plt.xlabel('Seconds')
plt.title('Subject:' + subjs[0])
plt.show()
while True:
try:
proceed = str(input("Does everything look okay?"))
if proceed == 'y':
trim.append(100)
note.append('All good')
break
elif proceed == 'n':
trim.append(int(input("Where should the trim begin?")))
note.append(str(input("Anything to record about these data")))
break
except ValueError:
print("Please enter y or n")
continue
else:
for i in range(0, len(Subjects_465)):
fig1 = plt.figure()
plt.plot(time[i],Subjects_465[i].data, color='green', label = 'Gcamp6f')
plt.plot(time[i],Subjects_405[i].data, color='blueviolet', label = 'ISOS')
plt.ylabel('mV')
plt.xlabel('Seconds')
plt.title('Subject:'+ subjs[i])
plt.show()
while True:
try:
proceed = str(input("Does everything look okay?"))
if proceed == 'y':
trim.append(100)
note.append('All good')
break
elif proceed == 'n':
trim.append(int(input("Where should the trim begin?")))
note.append(str(input("Anything to record about these data")))
break
except ValueError:
print("Please enter y or n")
continue
return trim, note
def trim_and_process(trim,end,timex,s465,s405):
# trim data
trimmed_s465 = []
trimmed_s405 = []
trimmed_time = []
dFF = []
std_dFF = []
baseline = []
y_all = []
y_df = []
s_ind = []
e_ind = []
if len(s465) == 1:
s_ind.append(np.where(timex[0] > trim[0])[0][0])
e_ind.append(np.where(timex[0] > end)[0][0])
trimmed_time.append(timex[0][s_ind[0]:e_ind[0]])
trimmed_s465.append(s465[0].data[s_ind[0]:e_ind[0]])
trimmed_s405.append(s405[0].data[s_ind[0]:e_ind[0]])
#process data
baseline.append(np.polyfit(np.array(trimmed_s405[0]), np.array(trimmed_s465[0]), 1))
y_all.append(np.multiply(baseline[0][0], np.array(trimmed_s405[0])) + baseline[0][1])
y_df.append(np.array(trimmed_s465[0]) - y_all[0])
dFF.append(np.multiply(100,np.divide(y_df[0],y_all[0])))
std_dFF.append(np.std(dFF[0]))
else:
for i in range(0, len(s465)):
s_ind.append(np.where(timex[i] > trim[i])[0][0])
e_ind.append(np.where(timex[i] > end)[0][0])
trimmed_time.append(timex[i][s_ind[i]:e_ind[i]])
trimmed_s465.append(s465[i].data[s_ind[i]:e_ind[i]])
trimmed_s405.append(s405[i].data[s_ind[i]:e_ind[i]])
#process data
baseline.append(np.polyfit(np.array(trimmed_s405[i]), np.array(trimmed_s465[i]), 1))
y_all.append(np.multiply(baseline[i][0], np.array(trimmed_s405[i])) + baseline[i][1])
y_df.append(np.array(trimmed_s465[i]) - y_all[i])
dFF.append(np.multiply(100,np.divide(y_df[i],y_all[i])))
print("you can ignore that warning: when comparing polyfit to" +
" the equivalent in matlab, which gives no errors it yielded the same answer")
return [trimmed_time, dFF, std_dFF]
def send_fiber(data,subjects,session,notes):
md= MetaData(engine)
test_conn = engine.connect()
for s in range(0,len(notes)):
if not engine.dialect.has_table(test_conn,'photodata'):
photodata = Table('photodata', md,
Column('idx', Integer, primary_key=True, nullable=False, autoincrement=True),
Column('Subject',Integer),
Column('Session',String(length=10)),
Column('Note', String(length=100)),
Column('TimeX', DECIMAL(19,10)),
Column('dFF', DECIMAL(19,10)))
md.create_all(engine)
#pull table as a class
Base = automap_base()
Base.prepare(engine,reflect=True)
photodata = Base.classes.photodata
#create data dictionary
list_session = [session]*len(data[0][s])
list_subject = [subjects[s]]*len(data[0][s])
list_notes = [notes[s]]*len(data[0][s])
print("reorganizing data for push to sql")
data_list_dicts = []
for i in range(0, len(data[0][s])):
data_list_dicts.append(dict(Subject=list_subject[i], Session=list_session[i],
Note=list_notes[i], TimeX=data[0][s][i].tolist(), dFF=data[1][s][i].tolist()))
print("pushing fiber data to SQL")
#start session
SQLsession = Session(engine)
SQLsession.bulk_insert_mappings(photodata,data_list_dicts)
SQLsession.flush()
SQLsession.commit()
print("fiber data pushed to SQL db for subject:"+subjects[s])
SQLsession.close()
def sort_events(s,subjects,all_events):
event_list_dict = []
for i in range(0,len(subjects)):
if not all_events[i]:
print("empty events")
else:
for e in all_events[i]:
subject_list = [int(subjects[i])]*len(e.onset)
session_list = [s]*len(e.onset)
name_list = [e.name]*len(e.onset)
onset_list = e.onset.tolist()
offset_list = e.offset.tolist()
for j in range(0,len(onset_list)):
event_list_dict.append(dict(Subject = subject_list[j], Session = session_list[j],
Name = name_list[j], Onset = onset_list[j], Offset = offset_list[j]))
return event_list_dict
def send_events(event_list_dict):
md= MetaData(engine)
test_conn = engine.connect()
if not engine.dialect.has_table(test_conn,'eventdata'):
eventdata = Table('eventdata', md,
Column('idx', Integer, primary_key=True, nullable=False, autoincrement=True),
Column('Subject',Integer),
Column('Session',String(length=10)),
Column('Name', String(length=100)),
Column('Onset', DECIMAL(19,10)),
Column('Offset', DECIMAL(19,10)))
md.create_all(engine)
#pull table as a class
Base = automap_base()
Base.prepare(engine,reflect=True)
eventdata = Base.classes.eventdata
print("pushing event data to sql")
#start session
SQLsession = Session(engine)
SQLsession.bulk_insert_mappings(eventdata,event_list_dict)
SQLsession.flush()
SQLsession.commit()
print("event data pushed to SQL db for all subject")
SQLsession.close()
def pullfiberdata(s,f,import_tdt):
Subjects_465 = []
Subjects_405 = []
if '&' in f:
subjs = f.split()
subjs.remove('&')
else:
subjs = [f]
if len(subjs) == 1:
test465A = import_tdt.streams['_465A'].data[1000]
test465C = import_tdt.streams['_465C'].data[1000]
if test465A > test465C:
Subjects_465.append(import_tdt.streams['_465A'])
Subjects_405.append(import_tdt.streams['_405A'])
else:
Subjects_465.append(import_tdt.streams['_465C'])
Subjects_405.append(import_tdt.streams['_405C'])
else:
Subjects_465.append(import_tdt.streams['_465A'])
Subjects_405.append(import_tdt.streams['_405A'])
Subjects_465.append(import_tdt.streams['_465C'])
Subjects_405.append(import_tdt.streams['_405C'])
time = get_time(Subjects_465,Subjects_405)
trim, note = evaluate_rawdata(time, Subjects_465, Subjects_405, subjs)
processed_data = trim_and_process(trim,import_tdt.info.duration.seconds-1, time, Subjects_465, Subjects_405)
send_fiber(processed_data,subjs,s,note)
def pulleventdata(s,f,import_tdt):
eventsA = []
eventsB = []
if '&' in f:
subjs = f.split()
subjs.remove('&')
else:
subjs = [f]
if len(subjs) == 1:
test465A = import_tdt.streams['_465A'].data[1000]
test465C = import_tdt.streams['_465C'].data[1000]
if test465A > test465C:
eventsA.append(import_tdt.epocs.PC0_)
eventsA.append(import_tdt.epocs.PC2_)
eventsA.append(import_tdt.epocs.PC4_)
eventsA.append(import_tdt.epocs.PC6_)
else:
eventsB.append(import_tdt.epocs.PC1_)
eventsB.append(import_tdt.epocs.PC3_)
eventsB.append(import_tdt.epocs.PC5_)
eventsB.append(import_tdt.epocs.PC7_)
else:
eventsA.append(import_tdt.epocs.PC0_)
eventsA.append(import_tdt.epocs.PC2_)
eventsA.append(import_tdt.epocs.PC4_)
eventsA.append(import_tdt.epocs.PC6_)
eventsB.append(import_tdt.epocs.PC1_)
eventsB.append(import_tdt.epocs.PC3_)
eventsB.append(import_tdt.epocs.PC5_)
eventsB.append(import_tdt.epocs.PC7_)
all_events = [eventsA,eventsB]
event_list_dict = sort_events(s,subjs,all_events)
send_events(event_list_dict)
#Update data
for f in folders:
obtained_datapath = os.path.join(datapath,f)
sessions = os.listdir(obtained_datapath)
for s in sessions:
final_datapath = os.path.join(obtained_datapath,s)
import_tdt = read_block(final_datapath)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root", pw="", db = "fiberptone"))
pullfiberdata(s,f,import_tdt)
pulleventdata(s,f,import_tdt)
| timex = []
if len(s465) == 1:
timex.append(np.linspace(1,len(s465[0].data), len(s465[0].data))/s465[0].fs)
else:
for i in range(0, len(s465)):
timex.append(np.linspace(1,len(s465[i].data), len(s465[i].data))/s465[i].fs)
return timex | identifier_body |
Multiple_PhotometryConstruction.py | import os
import re
import numpy as np
import pandas as pd
import pymysql
import mysql.connector
from mysql.connector import errorcode
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, Float, Date, PrimaryKeyConstraint, VARCHAR, insert
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from datetime import date
import csv
import matplotlib.pyplot as plt
from sqlalchemy.sql.sqltypes import DECIMAL
from tdt import read_block, epoc_filter, download_demo_data
#Photometry data location
datapath = "C:\\Users\\carte\\Dropbox\\Carter Local\\PostDocStuff\\RISDT\\PTone Fiber\\Photometry Data"
#Check to see which subjects have photometry data
folders = os.listdir(datapath)
def get_time(s465,s405):
timex = []
if len(s465) == 1:
timex.append(np.linspace(1,len(s465[0].data), len(s465[0].data))/s465[0].fs)
else:
for i in range(0, len(s465)):
timex.append(np.linspace(1,len(s465[i].data), len(s465[i].data))/s465[i].fs)
return timex
def evaluate_rawdata(time,Subjects_465,Subjects_405, subjs):
trim = []
note = []
if len(Subjects_465) == 1:
fig1 = plt.figure()
plt.plot(time[0],Subjects_465[0].data, color='green', label = 'Gcamp6f')
plt.plot(time[0],Subjects_405[0].data, color='blueviolet', label = 'ISOS')
plt.ylabel('mV')
plt.xlabel('Seconds')
plt.title('Subject:' + subjs[0])
plt.show()
while True:
try:
proceed = str(input("Does everything look okay?"))
if proceed == 'y':
trim.append(100)
note.append('All good')
break
elif proceed == 'n':
trim.append(int(input("Where should the trim begin?")))
note.append(str(input("Anything to record about these data")))
break
except ValueError:
print("Please enter y or n")
continue
else:
for i in range(0, len(Subjects_465)):
fig1 = plt.figure()
plt.plot(time[i],Subjects_465[i].data, color='green', label = 'Gcamp6f')
plt.plot(time[i],Subjects_405[i].data, color='blueviolet', label = 'ISOS')
plt.ylabel('mV')
plt.xlabel('Seconds')
plt.title('Subject:'+ subjs[i])
plt.show()
while True:
try:
proceed = str(input("Does everything look okay?"))
if proceed == 'y':
trim.append(100)
note.append('All good')
break
elif proceed == 'n':
trim.append(int(input("Where should the trim begin?")))
note.append(str(input("Anything to record about these data")))
break
except ValueError:
print("Please enter y or n")
continue
return trim, note
def trim_and_process(trim,end,timex,s465,s405):
# trim data
trimmed_s465 = []
trimmed_s405 = []
trimmed_time = []
dFF = []
std_dFF = []
baseline = []
y_all = []
y_df = []
s_ind = []
e_ind = []
if len(s465) == 1:
s_ind.append(np.where(timex[0] > trim[0])[0][0])
e_ind.append(np.where(timex[0] > end)[0][0])
trimmed_time.append(timex[0][s_ind[0]:e_ind[0]])
trimmed_s465.append(s465[0].data[s_ind[0]:e_ind[0]])
trimmed_s405.append(s405[0].data[s_ind[0]:e_ind[0]])
| y_all.append(np.multiply(baseline[0][0], np.array(trimmed_s405[0])) + baseline[0][1])
y_df.append(np.array(trimmed_s465[0]) - y_all[0])
dFF.append(np.multiply(100,np.divide(y_df[0],y_all[0])))
std_dFF.append(np.std(dFF[0]))
else:
for i in range(0, len(s465)):
s_ind.append(np.where(timex[i] > trim[i])[0][0])
e_ind.append(np.where(timex[i] > end)[0][0])
trimmed_time.append(timex[i][s_ind[i]:e_ind[i]])
trimmed_s465.append(s465[i].data[s_ind[i]:e_ind[i]])
trimmed_s405.append(s405[i].data[s_ind[i]:e_ind[i]])
#process data
baseline.append(np.polyfit(np.array(trimmed_s405[i]), np.array(trimmed_s465[i]), 1))
y_all.append(np.multiply(baseline[i][0], np.array(trimmed_s405[i])) + baseline[i][1])
y_df.append(np.array(trimmed_s465[i]) - y_all[i])
dFF.append(np.multiply(100,np.divide(y_df[i],y_all[i])))
print("you can ignore that warning: when comparing polyfit to" +
" the equivalent in matlab, which gives no errors it yielded the same answer")
return [trimmed_time, dFF, std_dFF]
def send_fiber(data,subjects,session,notes):
md= MetaData(engine)
test_conn = engine.connect()
for s in range(0,len(notes)):
if not engine.dialect.has_table(test_conn,'photodata'):
photodata = Table('photodata', md,
Column('idx', Integer, primary_key=True, nullable=False, autoincrement=True),
Column('Subject',Integer),
Column('Session',String(length=10)),
Column('Note', String(length=100)),
Column('TimeX', DECIMAL(19,10)),
Column('dFF', DECIMAL(19,10)))
md.create_all(engine)
#pull table as a class
Base = automap_base()
Base.prepare(engine,reflect=True)
photodata = Base.classes.photodata
#create data dictionary
list_session = [session]*len(data[0][s])
list_subject = [subjects[s]]*len(data[0][s])
list_notes = [notes[s]]*len(data[0][s])
print("reorganizing data for push to sql")
data_list_dicts = []
for i in range(0, len(data[0][s])):
data_list_dicts.append(dict(Subject=list_subject[i], Session=list_session[i],
Note=list_notes[i], TimeX=data[0][s][i].tolist(), dFF=data[1][s][i].tolist()))
print("pushing fiber data to SQL")
#start session
SQLsession = Session(engine)
SQLsession.bulk_insert_mappings(photodata,data_list_dicts)
SQLsession.flush()
SQLsession.commit()
print("fiber data pushed to SQL db for subject:"+subjects[s])
SQLsession.close()
def sort_events(s,subjects,all_events):
event_list_dict = []
for i in range(0,len(subjects)):
if not all_events[i]:
print("empty events")
else:
for e in all_events[i]:
subject_list = [int(subjects[i])]*len(e.onset)
session_list = [s]*len(e.onset)
name_list = [e.name]*len(e.onset)
onset_list = e.onset.tolist()
offset_list = e.offset.tolist()
for j in range(0,len(onset_list)):
event_list_dict.append(dict(Subject = subject_list[j], Session = session_list[j],
Name = name_list[j], Onset = onset_list[j], Offset = offset_list[j]))
return event_list_dict
def send_events(event_list_dict):
md= MetaData(engine)
test_conn = engine.connect()
if not engine.dialect.has_table(test_conn,'eventdata'):
eventdata = Table('eventdata', md,
Column('idx', Integer, primary_key=True, nullable=False, autoincrement=True),
Column('Subject',Integer),
Column('Session',String(length=10)),
Column('Name', String(length=100)),
Column('Onset', DECIMAL(19,10)),
Column('Offset', DECIMAL(19,10)))
md.create_all(engine)
#pull table as a class
Base = automap_base()
Base.prepare(engine,reflect=True)
eventdata = Base.classes.eventdata
print("pushing event data to sql")
#start session
SQLsession = Session(engine)
SQLsession.bulk_insert_mappings(eventdata,event_list_dict)
SQLsession.flush()
SQLsession.commit()
print("event data pushed to SQL db for all subject")
SQLsession.close()
def pullfiberdata(s,f,import_tdt):
Subjects_465 = []
Subjects_405 = []
if '&' in f:
subjs = f.split()
subjs.remove('&')
else:
subjs = [f]
if len(subjs) == 1:
test465A = import_tdt.streams['_465A'].data[1000]
test465C = import_tdt.streams['_465C'].data[1000]
if test465A > test465C:
Subjects_465.append(import_tdt.streams['_465A'])
Subjects_405.append(import_tdt.streams['_405A'])
else:
Subjects_465.append(import_tdt.streams['_465C'])
Subjects_405.append(import_tdt.streams['_405C'])
else:
Subjects_465.append(import_tdt.streams['_465A'])
Subjects_405.append(import_tdt.streams['_405A'])
Subjects_465.append(import_tdt.streams['_465C'])
Subjects_405.append(import_tdt.streams['_405C'])
time = get_time(Subjects_465,Subjects_405)
trim, note = evaluate_rawdata(time, Subjects_465, Subjects_405, subjs)
processed_data = trim_and_process(trim,import_tdt.info.duration.seconds-1, time, Subjects_465, Subjects_405)
send_fiber(processed_data,subjs,s,note)
def pulleventdata(s,f,import_tdt):
eventsA = []
eventsB = []
if '&' in f:
subjs = f.split()
subjs.remove('&')
else:
subjs = [f]
if len(subjs) == 1:
test465A = import_tdt.streams['_465A'].data[1000]
test465C = import_tdt.streams['_465C'].data[1000]
if test465A > test465C:
eventsA.append(import_tdt.epocs.PC0_)
eventsA.append(import_tdt.epocs.PC2_)
eventsA.append(import_tdt.epocs.PC4_)
eventsA.append(import_tdt.epocs.PC6_)
else:
eventsB.append(import_tdt.epocs.PC1_)
eventsB.append(import_tdt.epocs.PC3_)
eventsB.append(import_tdt.epocs.PC5_)
eventsB.append(import_tdt.epocs.PC7_)
else:
eventsA.append(import_tdt.epocs.PC0_)
eventsA.append(import_tdt.epocs.PC2_)
eventsA.append(import_tdt.epocs.PC4_)
eventsA.append(import_tdt.epocs.PC6_)
eventsB.append(import_tdt.epocs.PC1_)
eventsB.append(import_tdt.epocs.PC3_)
eventsB.append(import_tdt.epocs.PC5_)
eventsB.append(import_tdt.epocs.PC7_)
all_events = [eventsA,eventsB]
event_list_dict = sort_events(s,subjs,all_events)
send_events(event_list_dict)
#Update data
for f in folders:
obtained_datapath = os.path.join(datapath,f)
sessions = os.listdir(obtained_datapath)
for s in sessions:
final_datapath = os.path.join(obtained_datapath,s)
import_tdt = read_block(final_datapath)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root", pw="", db = "fiberptone"))
pullfiberdata(s,f,import_tdt)
pulleventdata(s,f,import_tdt) | #process data
baseline.append(np.polyfit(np.array(trimmed_s405[0]), np.array(trimmed_s465[0]), 1)) | random_line_split |
Multiple_PhotometryConstruction.py | import os
import re
import numpy as np
import pandas as pd
import pymysql
import mysql.connector
from mysql.connector import errorcode
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, Float, Date, PrimaryKeyConstraint, VARCHAR, insert
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from datetime import date
import csv
import matplotlib.pyplot as plt
from sqlalchemy.sql.sqltypes import DECIMAL
from tdt import read_block, epoc_filter, download_demo_data
#Photometry data location
datapath = "C:\\Users\\carte\\Dropbox\\Carter Local\\PostDocStuff\\RISDT\\PTone Fiber\\Photometry Data"
#Check to see which subjects have photometry data
folders = os.listdir(datapath)
def get_time(s465,s405):
timex = []
if len(s465) == 1:
timex.append(np.linspace(1,len(s465[0].data), len(s465[0].data))/s465[0].fs)
else:
for i in range(0, len(s465)):
timex.append(np.linspace(1,len(s465[i].data), len(s465[i].data))/s465[i].fs)
return timex
def evaluate_rawdata(time,Subjects_465,Subjects_405, subjs):
trim = []
note = []
if len(Subjects_465) == 1:
fig1 = plt.figure()
plt.plot(time[0],Subjects_465[0].data, color='green', label = 'Gcamp6f')
plt.plot(time[0],Subjects_405[0].data, color='blueviolet', label = 'ISOS')
plt.ylabel('mV')
plt.xlabel('Seconds')
plt.title('Subject:' + subjs[0])
plt.show()
while True:
try:
proceed = str(input("Does everything look okay?"))
if proceed == 'y':
trim.append(100)
note.append('All good')
break
elif proceed == 'n':
trim.append(int(input("Where should the trim begin?")))
note.append(str(input("Anything to record about these data")))
break
except ValueError:
print("Please enter y or n")
continue
else:
for i in range(0, len(Subjects_465)):
fig1 = plt.figure()
plt.plot(time[i],Subjects_465[i].data, color='green', label = 'Gcamp6f')
plt.plot(time[i],Subjects_405[i].data, color='blueviolet', label = 'ISOS')
plt.ylabel('mV')
plt.xlabel('Seconds')
plt.title('Subject:'+ subjs[i])
plt.show()
while True:
try:
proceed = str(input("Does everything look okay?"))
if proceed == 'y':
trim.append(100)
note.append('All good')
break
elif proceed == 'n':
trim.append(int(input("Where should the trim begin?")))
note.append(str(input("Anything to record about these data")))
break
except ValueError:
print("Please enter y or n")
continue
return trim, note
def trim_and_process(trim,end,timex,s465,s405):
# trim data
trimmed_s465 = []
trimmed_s405 = []
trimmed_time = []
dFF = []
std_dFF = []
baseline = []
y_all = []
y_df = []
s_ind = []
e_ind = []
if len(s465) == 1:
s_ind.append(np.where(timex[0] > trim[0])[0][0])
e_ind.append(np.where(timex[0] > end)[0][0])
trimmed_time.append(timex[0][s_ind[0]:e_ind[0]])
trimmed_s465.append(s465[0].data[s_ind[0]:e_ind[0]])
trimmed_s405.append(s405[0].data[s_ind[0]:e_ind[0]])
#process data
baseline.append(np.polyfit(np.array(trimmed_s405[0]), np.array(trimmed_s465[0]), 1))
y_all.append(np.multiply(baseline[0][0], np.array(trimmed_s405[0])) + baseline[0][1])
y_df.append(np.array(trimmed_s465[0]) - y_all[0])
dFF.append(np.multiply(100,np.divide(y_df[0],y_all[0])))
std_dFF.append(np.std(dFF[0]))
else:
for i in range(0, len(s465)):
s_ind.append(np.where(timex[i] > trim[i])[0][0])
e_ind.append(np.where(timex[i] > end)[0][0])
trimmed_time.append(timex[i][s_ind[i]:e_ind[i]])
trimmed_s465.append(s465[i].data[s_ind[i]:e_ind[i]])
trimmed_s405.append(s405[i].data[s_ind[i]:e_ind[i]])
#process data
baseline.append(np.polyfit(np.array(trimmed_s405[i]), np.array(trimmed_s465[i]), 1))
y_all.append(np.multiply(baseline[i][0], np.array(trimmed_s405[i])) + baseline[i][1])
y_df.append(np.array(trimmed_s465[i]) - y_all[i])
dFF.append(np.multiply(100,np.divide(y_df[i],y_all[i])))
print("you can ignore that warning: when comparing polyfit to" +
" the equivalent in matlab, which gives no errors it yielded the same answer")
return [trimmed_time, dFF, std_dFF]
def send_fiber(data,subjects,session,notes):
md= MetaData(engine)
test_conn = engine.connect()
for s in range(0,len(notes)):
if not engine.dialect.has_table(test_conn,'photodata'):
photodata = Table('photodata', md,
Column('idx', Integer, primary_key=True, nullable=False, autoincrement=True),
Column('Subject',Integer),
Column('Session',String(length=10)),
Column('Note', String(length=100)),
Column('TimeX', DECIMAL(19,10)),
Column('dFF', DECIMAL(19,10)))
md.create_all(engine)
#pull table as a class
Base = automap_base()
Base.prepare(engine,reflect=True)
photodata = Base.classes.photodata
#create data dictionary
list_session = [session]*len(data[0][s])
list_subject = [subjects[s]]*len(data[0][s])
list_notes = [notes[s]]*len(data[0][s])
print("reorganizing data for push to sql")
data_list_dicts = []
for i in range(0, len(data[0][s])):
data_list_dicts.append(dict(Subject=list_subject[i], Session=list_session[i],
Note=list_notes[i], TimeX=data[0][s][i].tolist(), dFF=data[1][s][i].tolist()))
print("pushing fiber data to SQL")
#start session
SQLsession = Session(engine)
SQLsession.bulk_insert_mappings(photodata,data_list_dicts)
SQLsession.flush()
SQLsession.commit()
print("fiber data pushed to SQL db for subject:"+subjects[s])
SQLsession.close()
def sort_events(s,subjects,all_events):
event_list_dict = []
for i in range(0,len(subjects)):
if not all_events[i]:
print("empty events")
else:
|
return event_list_dict
def send_events(event_list_dict):
md= MetaData(engine)
test_conn = engine.connect()
if not engine.dialect.has_table(test_conn,'eventdata'):
eventdata = Table('eventdata', md,
Column('idx', Integer, primary_key=True, nullable=False, autoincrement=True),
Column('Subject',Integer),
Column('Session',String(length=10)),
Column('Name', String(length=100)),
Column('Onset', DECIMAL(19,10)),
Column('Offset', DECIMAL(19,10)))
md.create_all(engine)
#pull table as a class
Base = automap_base()
Base.prepare(engine,reflect=True)
eventdata = Base.classes.eventdata
print("pushing event data to sql")
#start session
SQLsession = Session(engine)
SQLsession.bulk_insert_mappings(eventdata,event_list_dict)
SQLsession.flush()
SQLsession.commit()
print("event data pushed to SQL db for all subject")
SQLsession.close()
def pullfiberdata(s,f,import_tdt):
Subjects_465 = []
Subjects_405 = []
if '&' in f:
subjs = f.split()
subjs.remove('&')
else:
subjs = [f]
if len(subjs) == 1:
test465A = import_tdt.streams['_465A'].data[1000]
test465C = import_tdt.streams['_465C'].data[1000]
if test465A > test465C:
Subjects_465.append(import_tdt.streams['_465A'])
Subjects_405.append(import_tdt.streams['_405A'])
else:
Subjects_465.append(import_tdt.streams['_465C'])
Subjects_405.append(import_tdt.streams['_405C'])
else:
Subjects_465.append(import_tdt.streams['_465A'])
Subjects_405.append(import_tdt.streams['_405A'])
Subjects_465.append(import_tdt.streams['_465C'])
Subjects_405.append(import_tdt.streams['_405C'])
time = get_time(Subjects_465,Subjects_405)
trim, note = evaluate_rawdata(time, Subjects_465, Subjects_405, subjs)
processed_data = trim_and_process(trim,import_tdt.info.duration.seconds-1, time, Subjects_465, Subjects_405)
send_fiber(processed_data,subjs,s,note)
def pulleventdata(s,f,import_tdt):
eventsA = []
eventsB = []
if '&' in f:
subjs = f.split()
subjs.remove('&')
else:
subjs = [f]
if len(subjs) == 1:
test465A = import_tdt.streams['_465A'].data[1000]
test465C = import_tdt.streams['_465C'].data[1000]
if test465A > test465C:
eventsA.append(import_tdt.epocs.PC0_)
eventsA.append(import_tdt.epocs.PC2_)
eventsA.append(import_tdt.epocs.PC4_)
eventsA.append(import_tdt.epocs.PC6_)
else:
eventsB.append(import_tdt.epocs.PC1_)
eventsB.append(import_tdt.epocs.PC3_)
eventsB.append(import_tdt.epocs.PC5_)
eventsB.append(import_tdt.epocs.PC7_)
else:
eventsA.append(import_tdt.epocs.PC0_)
eventsA.append(import_tdt.epocs.PC2_)
eventsA.append(import_tdt.epocs.PC4_)
eventsA.append(import_tdt.epocs.PC6_)
eventsB.append(import_tdt.epocs.PC1_)
eventsB.append(import_tdt.epocs.PC3_)
eventsB.append(import_tdt.epocs.PC5_)
eventsB.append(import_tdt.epocs.PC7_)
all_events = [eventsA,eventsB]
event_list_dict = sort_events(s,subjs,all_events)
send_events(event_list_dict)
#Update data
for f in folders:
obtained_datapath = os.path.join(datapath,f)
sessions = os.listdir(obtained_datapath)
for s in sessions:
final_datapath = os.path.join(obtained_datapath,s)
import_tdt = read_block(final_datapath)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root", pw="", db = "fiberptone"))
pullfiberdata(s,f,import_tdt)
pulleventdata(s,f,import_tdt)
| for e in all_events[i]:
subject_list = [int(subjects[i])]*len(e.onset)
session_list = [s]*len(e.onset)
name_list = [e.name]*len(e.onset)
onset_list = e.onset.tolist()
offset_list = e.offset.tolist()
for j in range(0,len(onset_list)):
event_list_dict.append(dict(Subject = subject_list[j], Session = session_list[j],
Name = name_list[j], Onset = onset_list[j], Offset = offset_list[j])) | conditional_block |
apps.go | package commands
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"os"
"os/exec"
"strconv"
"strings"
"time"
"github.com/olekukonko/tablewriter"
"github.com/section/sectionctl/api"
)
// AppsCmd manages apps on Section
type AppsCmd struct {
List AppsListCmd `cmd help:"List apps on Section." default:"1"`
Info AppsInfoCmd `cmd help:"Show detailed app information on Section."`
Create AppsCreateCmd `cmd help:"Create new app on Section."`
Delete AppsDeleteCmd `cmd help:"Delete an existing app on Section."`
Init AppsInitCmd `cmd help:"Initialize your project for deployment."`
Stacks AppsStacksCmd `cmd help:"See the available stacks to create new apps with."`
}
// AppsListCmd handles listing apps running on Section
type AppsListCmd struct {
AccountID int `short:"a" help:"Account ID to find apps under"`
}
// NewTable returns a table with sectionctl standard formatting
func NewTable(out io.Writer) (t *tablewriter.Table) {
t = tablewriter.NewWriter(out)
t.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
t.SetCenterSeparator("|")
t.SetAlignment(tablewriter.ALIGN_LEFT)
return t
}
// Run executes the command
func (c *AppsListCmd) | () (err error) {
var aids []int
if c.AccountID == 0 {
s := NewSpinner("Looking up accounts")
s.Start()
as, err := api.Accounts()
if err != nil {
return fmt.Errorf("unable to look up accounts: %w", err)
}
for _, a := range as {
aids = append(aids, a.ID)
}
s.Stop()
} else {
aids = append(aids, c.AccountID)
}
s := NewSpinner("Looking up apps")
s.Start()
apps := make(map[int][]api.App)
for _, id := range aids {
as, err := api.Applications(id)
if err != nil {
return fmt.Errorf("unable to look up applications: %w", err)
}
apps[id] = as
}
s.Stop()
table := NewTable(os.Stdout)
table.SetHeader([]string{"Account ID", "App ID", "App Name"})
for id, as := range apps {
for _, a := range as {
r := []string{strconv.Itoa(id), strconv.Itoa(a.ID), a.ApplicationName}
table.Append(r)
}
}
table.Render()
return err
}
// AppsInfoCmd shows detailed information on an app running on Section
type AppsInfoCmd struct {
AccountID int `required short:"a"`
AppID int `required short:"i"`
}
// Run executes the command
func (c *AppsInfoCmd) Run() (err error) {
s := NewSpinner("Looking up app info")
s.Start()
app, err := api.Application(c.AccountID, c.AppID)
s.Stop()
if err != nil {
return err
}
fmt.Printf("🌎🌏🌍\n")
fmt.Printf("App Name: %s\n", app.ApplicationName)
fmt.Printf("App ID: %d\n", app.ID)
fmt.Printf("Environment count: %d\n", len(app.Environments))
for i, env := range app.Environments {
fmt.Printf("\n-----------------\n\n")
fmt.Printf("Environment #%d: %s (ID:%d)\n\n", i+1, env.EnvironmentName, env.ID)
fmt.Printf("💬 Domains (%d total)\n", len(env.Domains))
for _, dom := range env.Domains {
fmt.Println()
table := NewTable(os.Stdout)
table.SetHeader([]string{"Attribute", "Value"})
table.SetAutoMergeCells(true)
r := [][]string{
[]string{"Domain name", dom.Name},
[]string{"Zone name", dom.ZoneName},
[]string{"CNAME", dom.CNAME},
[]string{"Mode", dom.Mode},
}
table.AppendBulk(r)
table.Render()
}
fmt.Println()
mod := "modules"
if len(env.Stack) == 1 {
mod = "module"
}
fmt.Printf("🥞 Stack (%d %s total)\n", len(env.Stack), mod)
fmt.Println()
table := NewTable(os.Stdout)
table.SetHeader([]string{"Name", "Image"})
table.SetAutoMergeCells(true)
for _, p := range env.Stack {
r := []string{p.Name, p.Image}
table.Append(r)
}
table.Render()
}
fmt.Println()
return err
}
// AppsCreateCmd handles creating apps on Section
type AppsCreateCmd struct {
AccountID int `required short:"a" help:"ID of account to create the app under"`
Hostname string `required short:"d" help:"FQDN the app can be accessed at"`
Origin string `required short:"o" help:"URL to fetch the origin"`
StackName string `required short:"s" help:"Name of stack to deploy"`
}
// Run executes the command
func (c *AppsCreateCmd) Run() (err error) {
s := NewSpinner(fmt.Sprintf("Creating new app %s", c.Hostname))
s.Start()
api.Timeout = 120 * time.Second // this specific request can take a long time
r, err := api.ApplicationCreate(c.AccountID, c.Hostname, c.Origin, c.StackName)
s.Stop()
if err != nil {
if err == api.ErrStatusForbidden {
stacks, herr := api.Stacks()
if herr != nil {
return fmt.Errorf("unable to query stacks: %w", herr)
}
for _, s := range stacks {
if s.Name == c.StackName {
return err
}
}
return fmt.Errorf("bad request: unable to find stack %s", c.StackName)
}
return err
}
fmt.Printf("\nSuccess: created app '%s' with id '%d'\n", r.ApplicationName, r.ID)
return err
}
// AppsDeleteCmd handles deleting apps on Section
type AppsDeleteCmd struct {
AccountID int `required short:"a" help:"ID of account the app belongs to"`
AppID int `required short:"i" help:"ID of the app to delete"`
}
// Run executes the command
func (c *AppsDeleteCmd) Run() (err error) {
s := NewSpinner(fmt.Sprintf("Deleting app with id '%d'", c.AppID))
s.Start()
api.Timeout = 120 * time.Second // this specific request can take a long time
_, err = api.ApplicationDelete(c.AccountID, c.AppID)
s.Stop()
if err != nil {
return err
}
fmt.Printf("\nSuccess: deleted app with id '%d'\n", c.AppID)
return err
}
// AppsInitCmd creates and validates server.conf and package.json to prepare an app for deployment
type AppsInitCmd struct {
StackName string `optional default:"nodejs-basic" short:"s" help:"Name of stack to deploy. Default is nodejs-basic"`
Force bool `optional short:"f" help:"Resets deployment specific files to their default configuration"`
}
func (c *AppsInitCmd) buildServerConf() []byte {
return []byte(
`location / {
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $host;
include /etc/nginx/section.module/node.conf;
}
location ~ "/next-proxy-hop/" {
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $host;
proxy_pass http://next-hop;
}`)
}
// Run executes the command
func (c *AppsInitCmd) Run() (err error) {
var stdout bytes.Buffer
var stderr bytes.Buffer
switch c.StackName {
case "nodejs-basic":
err := c.InitializeNodeBasicApp(stdout, stderr)
if err != nil {
return fmt.Errorf("[ERROR]: init completed with error %x", err)
}
default:
log.Printf("[ERROR]: Stack name %s does not have an initialization defined\n", c.StackName)
}
return err
}
// Create package.json
func (c *AppsInitCmd) CreatePkgJSON(stdout, stderr bytes.Buffer) (err error) {
cmd := exec.Command("npm", "init", "-y")
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err = cmd.Run()
return err
}
// InitializeNodeBasicApp initializes a basic node app.
func (c *AppsInitCmd) InitializeNodeBasicApp(stdout, stderr bytes.Buffer) (err error) {
if c.Force {
log.Println("[INFO] Removing old versions of server.conf and package.json")
err1 := os.Remove("package.json")
err2 := os.Remove("server.conf")
if err1 != nil || err2 != nil {
log.Println("[ERROR] unable to remove files, perhaps they do not exist?")
} else {
log.Println("[DEBUG] Files successfully removed")
}
}
log.Println("[DEBUG] Checking to see if server.conf exists")
checkServConf, err := os.Open("server.conf")
if err != nil {
log.Println("[WARN] server.conf does not exist. Creating server.conf")
f, err := os.Create("server.conf")
if err != nil {
return fmt.Errorf("error in creating a file: server.conf %w", err)
}
b := c.buildServerConf()
f.Write(b)
defer f.Close()
} else {
log.Println("[INFO] Validating server.conf")
fileinfo, err := checkServConf.Stat()
if err != nil {
return fmt.Errorf("error in finding stat of server.conf %w", err)
}
buf := make([]byte, fileinfo.Size())
_, err = checkServConf.Read(buf)
if err != nil {
return fmt.Errorf("error in size stat of server.conf %w", err)
}
fStr := string(buf)
if !strings.Contains(fStr, "location / {") {
log.Println("[WARN] default location unspecified. Edit or delete server.conf and rerun this command")
}
}
defer checkServConf.Close()
log.Println("[DEBUG] Checking to see if package.json exists")
checkPkgJSON, err := os.Open("package.json")
if err != nil {
log.Println("[WARN] package.json does not exist. Creating package.json")
err := c.CreatePkgJSON(stdout, stderr)
if err != nil {
return fmt.Errorf("there was an error creating package.json. Is node installed? %w", err)
}
log.Println("[INFO] package.json created")
}
defer checkPkgJSON.Close()
validPkgJSON, err := os.OpenFile("package.json", os.O_RDWR, 0777)
if err != nil {
return fmt.Errorf("failed to open package.json %w", err)
}
defer validPkgJSON.Close()
log.Println("[INFO] Validating package.json")
buf, err := os.ReadFile("package.json")
if err != nil {
return fmt.Errorf("failed to read package.json %w", err)
}
fStr := string(buf)
if len(fStr) == 0 {
err := os.Remove("package.json")
if err != nil {
log.Println("[ERROR] unable to remove empty package.json")
}
log.Println("[WARN] package.json is empty. Creating package.json")
err = c.CreatePkgJSON(stdout, stderr)
if err != nil {
return fmt.Errorf("there was an error creating package.json. Is node installed? %w", err)
}
log.Println("[INFO] package.json created from empty file")
buf, err = os.ReadFile("package.json")
if err != nil {
return fmt.Errorf("failed to read package.json %w", err)
}
fStr = string(buf)
}
jsonMap := make(map[string]interface{})
err = json.Unmarshal(buf, &jsonMap)
if err != nil {
return fmt.Errorf("package.json is not valid JSON %w", err)
}
lv := jsonMap["scripts"]
jsonToStrMap, ok := lv.(map[string]interface{})
if !ok {
return fmt.Errorf("json unable to be read as map[string]interface %w", err)
}
_, ok = jsonToStrMap["start"]
if !ok {
jsonToStrMap["start"] = "node YOUR_SERVER_HERE.js"
jsonMap["scripts"] = jsonToStrMap
err = os.Truncate("package.json", 0)
if err != nil {
return fmt.Errorf("failed to empty package.json %w", err)
}
set, err := json.MarshalIndent(jsonMap, "", " ")
if err != nil {
log.Println("[ERROR] unable to add start script placeholder")
}
_, err = validPkgJSON.Write(set)
if err != nil {
log.Println("[ERROR] unable to add start script placeholder")
}
}
if strings.Contains(fStr, `YOUR_SERVER_HERE.js`) {
log.Println("[ERROR] start script is required. Please edit the placeholder in package.json")
}
return err
}
// AppsStacksCmd lists available stacks to create new apps with
type AppsStacksCmd struct{}
// Run executes the command
func (c *AppsStacksCmd) Run() (err error) {
s := NewSpinner("Looking up stacks")
s.Start()
k, err := api.Stacks()
s.Stop()
if err != nil {
return fmt.Errorf("unable to look up stacks: %w", err)
}
table := NewTable(os.Stdout)
table.SetHeader([]string{"Name", "Label", "Description", "Type"})
for _, s := range k {
r := []string{s.Name, s.Label, s.Description, s.Type}
table.Append(r)
}
table.Render()
return err
}
| Run | identifier_name |
apps.go | package commands
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"os"
"os/exec"
"strconv"
"strings"
"time"
"github.com/olekukonko/tablewriter"
"github.com/section/sectionctl/api"
)
// AppsCmd manages apps on Section
type AppsCmd struct {
List AppsListCmd `cmd help:"List apps on Section." default:"1"`
Info AppsInfoCmd `cmd help:"Show detailed app information on Section."`
Create AppsCreateCmd `cmd help:"Create new app on Section."`
Delete AppsDeleteCmd `cmd help:"Delete an existing app on Section."`
Init AppsInitCmd `cmd help:"Initialize your project for deployment."`
Stacks AppsStacksCmd `cmd help:"See the available stacks to create new apps with."`
}
// AppsListCmd handles listing apps running on Section
type AppsListCmd struct {
AccountID int `short:"a" help:"Account ID to find apps under"`
}
// NewTable returns a table with sectionctl standard formatting
func NewTable(out io.Writer) (t *tablewriter.Table) {
t = tablewriter.NewWriter(out)
t.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
t.SetCenterSeparator("|")
t.SetAlignment(tablewriter.ALIGN_LEFT)
return t
}
// Run executes the command
func (c *AppsListCmd) Run() (err error) {
var aids []int
if c.AccountID == 0 {
s := NewSpinner("Looking up accounts")
s.Start()
as, err := api.Accounts()
if err != nil {
return fmt.Errorf("unable to look up accounts: %w", err)
}
for _, a := range as {
aids = append(aids, a.ID)
}
s.Stop()
} else {
aids = append(aids, c.AccountID)
}
s := NewSpinner("Looking up apps")
s.Start()
apps := make(map[int][]api.App)
for _, id := range aids {
as, err := api.Applications(id)
if err != nil {
return fmt.Errorf("unable to look up applications: %w", err)
}
apps[id] = as
}
s.Stop()
table := NewTable(os.Stdout)
table.SetHeader([]string{"Account ID", "App ID", "App Name"})
for id, as := range apps {
for _, a := range as {
r := []string{strconv.Itoa(id), strconv.Itoa(a.ID), a.ApplicationName}
table.Append(r)
}
}
table.Render()
return err
}
// AppsInfoCmd shows detailed information on an app running on Section
type AppsInfoCmd struct {
AccountID int `required short:"a"`
AppID int `required short:"i"`
}
// Run executes the command
func (c *AppsInfoCmd) Run() (err error) {
s := NewSpinner("Looking up app info")
s.Start()
app, err := api.Application(c.AccountID, c.AppID)
s.Stop()
if err != nil {
return err
}
fmt.Printf("🌎🌏🌍\n")
fmt.Printf("App Name: %s\n", app.ApplicationName)
fmt.Printf("App ID: %d\n", app.ID)
fmt.Printf("Environment count: %d\n", len(app.Environments))
for i, env := range app.Environments {
fmt.Printf("\n-----------------\n\n")
fmt.Printf("Environment #%d: %s (ID:%d)\n\n", i+1, env.EnvironmentName, env.ID)
fmt.Printf("💬 Domains (%d total)\n", len(env.Domains))
for _, dom := range env.Domains {
fmt.Println()
table := NewTable(os.Stdout)
table.SetHeader([]string{"Attribute", "Value"})
table.SetAutoMergeCells(true)
r := [][]string{
[]string{"Domain name", dom.Name},
[]string{"Zone name", dom.ZoneName},
[]string{"CNAME", dom.CNAME},
[]string{"Mode", dom.Mode},
}
table.AppendBulk(r)
table.Render()
}
fmt.Println()
mod := "modules"
if len(env.Stack) == 1 {
mod = "module"
}
fmt.Printf("🥞 Stack (%d %s total)\n", len(env.Stack), mod)
fmt.Println()
table := NewTable(os.Stdout)
table.SetHeader([]string{"Name", "Image"})
table.SetAutoMergeCells(true)
for _, p := range env.Stack {
r := []str | ()
}
fmt.Println()
return err
}
// AppsCreateCmd handles creating apps on Section
type AppsCreateCmd struct {
AccountID int `required short:"a" help:"ID of account to create the app under"`
Hostname string `required short:"d" help:"FQDN the app can be accessed at"`
Origin string `required short:"o" help:"URL to fetch the origin"`
StackName string `required short:"s" help:"Name of stack to deploy"`
}
// Run executes the command
func (c *AppsCreateCmd) Run() (err error) {
s := NewSpinner(fmt.Sprintf("Creating new app %s", c.Hostname))
s.Start()
api.Timeout = 120 * time.Second // this specific request can take a long time
r, err := api.ApplicationCreate(c.AccountID, c.Hostname, c.Origin, c.StackName)
s.Stop()
if err != nil {
if err == api.ErrStatusForbidden {
stacks, herr := api.Stacks()
if herr != nil {
return fmt.Errorf("unable to query stacks: %w", herr)
}
for _, s := range stacks {
if s.Name == c.StackName {
return err
}
}
return fmt.Errorf("bad request: unable to find stack %s", c.StackName)
}
return err
}
fmt.Printf("\nSuccess: created app '%s' with id '%d'\n", r.ApplicationName, r.ID)
return err
}
// AppsDeleteCmd handles deleting apps on Section
type AppsDeleteCmd struct {
AccountID int `required short:"a" help:"ID of account the app belongs to"`
AppID int `required short:"i" help:"ID of the app to delete"`
}
// Run executes the command
func (c *AppsDeleteCmd) Run() (err error) {
s := NewSpinner(fmt.Sprintf("Deleting app with id '%d'", c.AppID))
s.Start()
api.Timeout = 120 * time.Second // this specific request can take a long time
_, err = api.ApplicationDelete(c.AccountID, c.AppID)
s.Stop()
if err != nil {
return err
}
fmt.Printf("\nSuccess: deleted app with id '%d'\n", c.AppID)
return err
}
// AppsInitCmd creates and validates server.conf and package.json to prepare an app for deployment
type AppsInitCmd struct {
StackName string `optional default:"nodejs-basic" short:"s" help:"Name of stack to deploy. Default is nodejs-basic"`
Force bool `optional short:"f" help:"Resets deployment specific files to their default configuration"`
}
func (c *AppsInitCmd) buildServerConf() []byte {
return []byte(
`location / {
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $host;
include /etc/nginx/section.module/node.conf;
}
location ~ "/next-proxy-hop/" {
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $host;
proxy_pass http://next-hop;
}`)
}
// Run executes the command
func (c *AppsInitCmd) Run() (err error) {
var stdout bytes.Buffer
var stderr bytes.Buffer
switch c.StackName {
case "nodejs-basic":
err := c.InitializeNodeBasicApp(stdout, stderr)
if err != nil {
return fmt.Errorf("[ERROR]: init completed with error %x", err)
}
default:
log.Printf("[ERROR]: Stack name %s does not have an initialization defined\n", c.StackName)
}
return err
}
// Create package.json
func (c *AppsInitCmd) CreatePkgJSON(stdout, stderr bytes.Buffer) (err error) {
cmd := exec.Command("npm", "init", "-y")
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err = cmd.Run()
return err
}
// InitializeNodeBasicApp initializes a basic node app.
func (c *AppsInitCmd) InitializeNodeBasicApp(stdout, stderr bytes.Buffer) (err error) {
if c.Force {
log.Println("[INFO] Removing old versions of server.conf and package.json")
err1 := os.Remove("package.json")
err2 := os.Remove("server.conf")
if err1 != nil || err2 != nil {
log.Println("[ERROR] unable to remove files, perhaps they do not exist?")
} else {
log.Println("[DEBUG] Files successfully removed")
}
}
log.Println("[DEBUG] Checking to see if server.conf exists")
checkServConf, err := os.Open("server.conf")
if err != nil {
log.Println("[WARN] server.conf does not exist. Creating server.conf")
f, err := os.Create("server.conf")
if err != nil {
return fmt.Errorf("error in creating a file: server.conf %w", err)
}
b := c.buildServerConf()
f.Write(b)
defer f.Close()
} else {
log.Println("[INFO] Validating server.conf")
fileinfo, err := checkServConf.Stat()
if err != nil {
return fmt.Errorf("error in finding stat of server.conf %w", err)
}
buf := make([]byte, fileinfo.Size())
_, err = checkServConf.Read(buf)
if err != nil {
return fmt.Errorf("error in size stat of server.conf %w", err)
}
fStr := string(buf)
if !strings.Contains(fStr, "location / {") {
log.Println("[WARN] default location unspecified. Edit or delete server.conf and rerun this command")
}
}
defer checkServConf.Close()
log.Println("[DEBUG] Checking to see if package.json exists")
checkPkgJSON, err := os.Open("package.json")
if err != nil {
log.Println("[WARN] package.json does not exist. Creating package.json")
err := c.CreatePkgJSON(stdout, stderr)
if err != nil {
return fmt.Errorf("there was an error creating package.json. Is node installed? %w", err)
}
log.Println("[INFO] package.json created")
}
defer checkPkgJSON.Close()
validPkgJSON, err := os.OpenFile("package.json", os.O_RDWR, 0777)
if err != nil {
return fmt.Errorf("failed to open package.json %w", err)
}
defer validPkgJSON.Close()
log.Println("[INFO] Validating package.json")
buf, err := os.ReadFile("package.json")
if err != nil {
return fmt.Errorf("failed to read package.json %w", err)
}
fStr := string(buf)
if len(fStr) == 0 {
err := os.Remove("package.json")
if err != nil {
log.Println("[ERROR] unable to remove empty package.json")
}
log.Println("[WARN] package.json is empty. Creating package.json")
err = c.CreatePkgJSON(stdout, stderr)
if err != nil {
return fmt.Errorf("there was an error creating package.json. Is node installed? %w", err)
}
log.Println("[INFO] package.json created from empty file")
buf, err = os.ReadFile("package.json")
if err != nil {
return fmt.Errorf("failed to read package.json %w", err)
}
fStr = string(buf)
}
jsonMap := make(map[string]interface{})
err = json.Unmarshal(buf, &jsonMap)
if err != nil {
return fmt.Errorf("package.json is not valid JSON %w", err)
}
lv := jsonMap["scripts"]
jsonToStrMap, ok := lv.(map[string]interface{})
if !ok {
return fmt.Errorf("json unable to be read as map[string]interface %w", err)
}
_, ok = jsonToStrMap["start"]
if !ok {
jsonToStrMap["start"] = "node YOUR_SERVER_HERE.js"
jsonMap["scripts"] = jsonToStrMap
err = os.Truncate("package.json", 0)
if err != nil {
return fmt.Errorf("failed to empty package.json %w", err)
}
set, err := json.MarshalIndent(jsonMap, "", " ")
if err != nil {
log.Println("[ERROR] unable to add start script placeholder")
}
_, err = validPkgJSON.Write(set)
if err != nil {
log.Println("[ERROR] unable to add start script placeholder")
}
}
if strings.Contains(fStr, `YOUR_SERVER_HERE.js`) {
log.Println("[ERROR] start script is required. Please edit the placeholder in package.json")
}
return err
}
// AppsStacksCmd lists available stacks to create new apps with
type AppsStacksCmd struct{}
// Run executes the command
func (c *AppsStacksCmd) Run() (err error) {
s := NewSpinner("Looking up stacks")
s.Start()
k, err := api.Stacks()
s.Stop()
if err != nil {
return fmt.Errorf("unable to look up stacks: %w", err)
}
table := NewTable(os.Stdout)
table.SetHeader([]string{"Name", "Label", "Description", "Type"})
for _, s := range k {
r := []string{s.Name, s.Label, s.Description, s.Type}
table.Append(r)
}
table.Render()
return err
}
| ing{p.Name, p.Image}
table.Append(r)
}
table.Render | conditional_block |
apps.go | package commands
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"os"
"os/exec"
"strconv"
"strings"
"time"
"github.com/olekukonko/tablewriter"
"github.com/section/sectionctl/api"
)
// AppsCmd manages apps on Section
type AppsCmd struct {
List AppsListCmd `cmd help:"List apps on Section." default:"1"`
Info AppsInfoCmd `cmd help:"Show detailed app information on Section."`
Create AppsCreateCmd `cmd help:"Create new app on Section."`
Delete AppsDeleteCmd `cmd help:"Delete an existing app on Section."`
Init AppsInitCmd `cmd help:"Initialize your project for deployment."`
Stacks AppsStacksCmd `cmd help:"See the available stacks to create new apps with."`
}
// AppsListCmd handles listing apps running on Section
type AppsListCmd struct {
AccountID int `short:"a" help:"Account ID to find apps under"`
}
// NewTable returns a table with sectionctl standard formatting
func NewTable(out io.Writer) (t *tablewriter.Table) |
// Run executes the command
func (c *AppsListCmd) Run() (err error) {
var aids []int
if c.AccountID == 0 {
s := NewSpinner("Looking up accounts")
s.Start()
as, err := api.Accounts()
if err != nil {
return fmt.Errorf("unable to look up accounts: %w", err)
}
for _, a := range as {
aids = append(aids, a.ID)
}
s.Stop()
} else {
aids = append(aids, c.AccountID)
}
s := NewSpinner("Looking up apps")
s.Start()
apps := make(map[int][]api.App)
for _, id := range aids {
as, err := api.Applications(id)
if err != nil {
return fmt.Errorf("unable to look up applications: %w", err)
}
apps[id] = as
}
s.Stop()
table := NewTable(os.Stdout)
table.SetHeader([]string{"Account ID", "App ID", "App Name"})
for id, as := range apps {
for _, a := range as {
r := []string{strconv.Itoa(id), strconv.Itoa(a.ID), a.ApplicationName}
table.Append(r)
}
}
table.Render()
return err
}
// AppsInfoCmd shows detailed information on an app running on Section
type AppsInfoCmd struct {
AccountID int `required short:"a"`
AppID int `required short:"i"`
}
// Run executes the command
func (c *AppsInfoCmd) Run() (err error) {
s := NewSpinner("Looking up app info")
s.Start()
app, err := api.Application(c.AccountID, c.AppID)
s.Stop()
if err != nil {
return err
}
fmt.Printf("🌎🌏🌍\n")
fmt.Printf("App Name: %s\n", app.ApplicationName)
fmt.Printf("App ID: %d\n", app.ID)
fmt.Printf("Environment count: %d\n", len(app.Environments))
for i, env := range app.Environments {
fmt.Printf("\n-----------------\n\n")
fmt.Printf("Environment #%d: %s (ID:%d)\n\n", i+1, env.EnvironmentName, env.ID)
fmt.Printf("💬 Domains (%d total)\n", len(env.Domains))
for _, dom := range env.Domains {
fmt.Println()
table := NewTable(os.Stdout)
table.SetHeader([]string{"Attribute", "Value"})
table.SetAutoMergeCells(true)
r := [][]string{
[]string{"Domain name", dom.Name},
[]string{"Zone name", dom.ZoneName},
[]string{"CNAME", dom.CNAME},
[]string{"Mode", dom.Mode},
}
table.AppendBulk(r)
table.Render()
}
fmt.Println()
mod := "modules"
if len(env.Stack) == 1 {
mod = "module"
}
fmt.Printf("🥞 Stack (%d %s total)\n", len(env.Stack), mod)
fmt.Println()
table := NewTable(os.Stdout)
table.SetHeader([]string{"Name", "Image"})
table.SetAutoMergeCells(true)
for _, p := range env.Stack {
r := []string{p.Name, p.Image}
table.Append(r)
}
table.Render()
}
fmt.Println()
return err
}
// AppsCreateCmd handles creating apps on Section
type AppsCreateCmd struct {
AccountID int `required short:"a" help:"ID of account to create the app under"`
Hostname string `required short:"d" help:"FQDN the app can be accessed at"`
Origin string `required short:"o" help:"URL to fetch the origin"`
StackName string `required short:"s" help:"Name of stack to deploy"`
}
// Run executes the command
func (c *AppsCreateCmd) Run() (err error) {
s := NewSpinner(fmt.Sprintf("Creating new app %s", c.Hostname))
s.Start()
api.Timeout = 120 * time.Second // this specific request can take a long time
r, err := api.ApplicationCreate(c.AccountID, c.Hostname, c.Origin, c.StackName)
s.Stop()
if err != nil {
if err == api.ErrStatusForbidden {
stacks, herr := api.Stacks()
if herr != nil {
return fmt.Errorf("unable to query stacks: %w", herr)
}
for _, s := range stacks {
if s.Name == c.StackName {
return err
}
}
return fmt.Errorf("bad request: unable to find stack %s", c.StackName)
}
return err
}
fmt.Printf("\nSuccess: created app '%s' with id '%d'\n", r.ApplicationName, r.ID)
return err
}
// AppsDeleteCmd handles deleting apps on Section
type AppsDeleteCmd struct {
AccountID int `required short:"a" help:"ID of account the app belongs to"`
AppID int `required short:"i" help:"ID of the app to delete"`
}
// Run executes the command
func (c *AppsDeleteCmd) Run() (err error) {
s := NewSpinner(fmt.Sprintf("Deleting app with id '%d'", c.AppID))
s.Start()
api.Timeout = 120 * time.Second // this specific request can take a long time
_, err = api.ApplicationDelete(c.AccountID, c.AppID)
s.Stop()
if err != nil {
return err
}
fmt.Printf("\nSuccess: deleted app with id '%d'\n", c.AppID)
return err
}
// AppsInitCmd creates and validates server.conf and package.json to prepare an app for deployment
type AppsInitCmd struct {
StackName string `optional default:"nodejs-basic" short:"s" help:"Name of stack to deploy. Default is nodejs-basic"`
Force bool `optional short:"f" help:"Resets deployment specific files to their default configuration"`
}
func (c *AppsInitCmd) buildServerConf() []byte {
return []byte(
`location / {
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $host;
include /etc/nginx/section.module/node.conf;
}
location ~ "/next-proxy-hop/" {
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $host;
proxy_pass http://next-hop;
}`)
}
// Run executes the command
func (c *AppsInitCmd) Run() (err error) {
var stdout bytes.Buffer
var stderr bytes.Buffer
switch c.StackName {
case "nodejs-basic":
err := c.InitializeNodeBasicApp(stdout, stderr)
if err != nil {
return fmt.Errorf("[ERROR]: init completed with error %x", err)
}
default:
log.Printf("[ERROR]: Stack name %s does not have an initialization defined\n", c.StackName)
}
return err
}
// Create package.json
func (c *AppsInitCmd) CreatePkgJSON(stdout, stderr bytes.Buffer) (err error) {
cmd := exec.Command("npm", "init", "-y")
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err = cmd.Run()
return err
}
// InitializeNodeBasicApp initializes a basic node app.
func (c *AppsInitCmd) InitializeNodeBasicApp(stdout, stderr bytes.Buffer) (err error) {
if c.Force {
log.Println("[INFO] Removing old versions of server.conf and package.json")
err1 := os.Remove("package.json")
err2 := os.Remove("server.conf")
if err1 != nil || err2 != nil {
log.Println("[ERROR] unable to remove files, perhaps they do not exist?")
} else {
log.Println("[DEBUG] Files successfully removed")
}
}
log.Println("[DEBUG] Checking to see if server.conf exists")
checkServConf, err := os.Open("server.conf")
if err != nil {
log.Println("[WARN] server.conf does not exist. Creating server.conf")
f, err := os.Create("server.conf")
if err != nil {
return fmt.Errorf("error in creating a file: server.conf %w", err)
}
b := c.buildServerConf()
f.Write(b)
defer f.Close()
} else {
log.Println("[INFO] Validating server.conf")
fileinfo, err := checkServConf.Stat()
if err != nil {
return fmt.Errorf("error in finding stat of server.conf %w", err)
}
buf := make([]byte, fileinfo.Size())
_, err = checkServConf.Read(buf)
if err != nil {
return fmt.Errorf("error in size stat of server.conf %w", err)
}
fStr := string(buf)
if !strings.Contains(fStr, "location / {") {
log.Println("[WARN] default location unspecified. Edit or delete server.conf and rerun this command")
}
}
defer checkServConf.Close()
log.Println("[DEBUG] Checking to see if package.json exists")
checkPkgJSON, err := os.Open("package.json")
if err != nil {
log.Println("[WARN] package.json does not exist. Creating package.json")
err := c.CreatePkgJSON(stdout, stderr)
if err != nil {
return fmt.Errorf("there was an error creating package.json. Is node installed? %w", err)
}
log.Println("[INFO] package.json created")
}
defer checkPkgJSON.Close()
validPkgJSON, err := os.OpenFile("package.json", os.O_RDWR, 0777)
if err != nil {
return fmt.Errorf("failed to open package.json %w", err)
}
defer validPkgJSON.Close()
log.Println("[INFO] Validating package.json")
buf, err := os.ReadFile("package.json")
if err != nil {
return fmt.Errorf("failed to read package.json %w", err)
}
fStr := string(buf)
if len(fStr) == 0 {
err := os.Remove("package.json")
if err != nil {
log.Println("[ERROR] unable to remove empty package.json")
}
log.Println("[WARN] package.json is empty. Creating package.json")
err = c.CreatePkgJSON(stdout, stderr)
if err != nil {
return fmt.Errorf("there was an error creating package.json. Is node installed? %w", err)
}
log.Println("[INFO] package.json created from empty file")
buf, err = os.ReadFile("package.json")
if err != nil {
return fmt.Errorf("failed to read package.json %w", err)
}
fStr = string(buf)
}
jsonMap := make(map[string]interface{})
err = json.Unmarshal(buf, &jsonMap)
if err != nil {
return fmt.Errorf("package.json is not valid JSON %w", err)
}
lv := jsonMap["scripts"]
jsonToStrMap, ok := lv.(map[string]interface{})
if !ok {
return fmt.Errorf("json unable to be read as map[string]interface %w", err)
}
_, ok = jsonToStrMap["start"]
if !ok {
jsonToStrMap["start"] = "node YOUR_SERVER_HERE.js"
jsonMap["scripts"] = jsonToStrMap
err = os.Truncate("package.json", 0)
if err != nil {
return fmt.Errorf("failed to empty package.json %w", err)
}
set, err := json.MarshalIndent(jsonMap, "", " ")
if err != nil {
log.Println("[ERROR] unable to add start script placeholder")
}
_, err = validPkgJSON.Write(set)
if err != nil {
log.Println("[ERROR] unable to add start script placeholder")
}
}
if strings.Contains(fStr, `YOUR_SERVER_HERE.js`) {
log.Println("[ERROR] start script is required. Please edit the placeholder in package.json")
}
return err
}
// AppsStacksCmd lists available stacks to create new apps with
type AppsStacksCmd struct{}
// Run executes the command
func (c *AppsStacksCmd) Run() (err error) {
s := NewSpinner("Looking up stacks")
s.Start()
k, err := api.Stacks()
s.Stop()
if err != nil {
return fmt.Errorf("unable to look up stacks: %w", err)
}
table := NewTable(os.Stdout)
table.SetHeader([]string{"Name", "Label", "Description", "Type"})
for _, s := range k {
r := []string{s.Name, s.Label, s.Description, s.Type}
table.Append(r)
}
table.Render()
return err
}
| {
t = tablewriter.NewWriter(out)
t.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
t.SetCenterSeparator("|")
t.SetAlignment(tablewriter.ALIGN_LEFT)
return t
} | identifier_body |
apps.go | package commands
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"os"
"os/exec"
"strconv"
"strings"
"time"
"github.com/olekukonko/tablewriter"
"github.com/section/sectionctl/api"
)
// AppsCmd manages apps on Section
type AppsCmd struct {
List AppsListCmd `cmd help:"List apps on Section." default:"1"`
Info AppsInfoCmd `cmd help:"Show detailed app information on Section."`
Create AppsCreateCmd `cmd help:"Create new app on Section."`
Delete AppsDeleteCmd `cmd help:"Delete an existing app on Section."`
Init AppsInitCmd `cmd help:"Initialize your project for deployment."`
Stacks AppsStacksCmd `cmd help:"See the available stacks to create new apps with."`
}
// AppsListCmd handles listing apps running on Section
type AppsListCmd struct {
AccountID int `short:"a" help:"Account ID to find apps under"`
}
// NewTable returns a table with sectionctl standard formatting
func NewTable(out io.Writer) (t *tablewriter.Table) {
t = tablewriter.NewWriter(out)
t.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
t.SetCenterSeparator("|")
t.SetAlignment(tablewriter.ALIGN_LEFT)
return t
}
// Run executes the command
func (c *AppsListCmd) Run() (err error) {
var aids []int
if c.AccountID == 0 {
s := NewSpinner("Looking up accounts")
s.Start()
as, err := api.Accounts()
if err != nil {
return fmt.Errorf("unable to look up accounts: %w", err)
}
for _, a := range as {
aids = append(aids, a.ID)
}
s.Stop()
} else {
aids = append(aids, c.AccountID)
}
s := NewSpinner("Looking up apps")
s.Start()
apps := make(map[int][]api.App)
for _, id := range aids {
as, err := api.Applications(id)
if err != nil {
return fmt.Errorf("unable to look up applications: %w", err)
}
apps[id] = as
}
s.Stop()
table := NewTable(os.Stdout)
table.SetHeader([]string{"Account ID", "App ID", "App Name"})
for id, as := range apps {
for _, a := range as {
r := []string{strconv.Itoa(id), strconv.Itoa(a.ID), a.ApplicationName}
table.Append(r)
}
}
table.Render()
return err
}
// AppsInfoCmd shows detailed information on an app running on Section
type AppsInfoCmd struct {
AccountID int `required short:"a"`
AppID int `required short:"i"`
}
// Run executes the command
func (c *AppsInfoCmd) Run() (err error) {
s := NewSpinner("Looking up app info")
s.Start()
app, err := api.Application(c.AccountID, c.AppID)
s.Stop()
if err != nil {
return err
}
fmt.Printf("🌎🌏🌍\n")
fmt.Printf("App Name: %s\n", app.ApplicationName)
fmt.Printf("App ID: %d\n", app.ID)
fmt.Printf("Environment count: %d\n", len(app.Environments))
for i, env := range app.Environments {
fmt.Printf("\n-----------------\n\n")
fmt.Printf("Environment #%d: %s (ID:%d)\n\n", i+1, env.EnvironmentName, env.ID)
fmt.Printf("💬 Domains (%d total)\n", len(env.Domains))
for _, dom := range env.Domains {
fmt.Println()
table := NewTable(os.Stdout)
table.SetHeader([]string{"Attribute", "Value"})
table.SetAutoMergeCells(true)
r := [][]string{
[]string{"Domain name", dom.Name},
[]string{"Zone name", dom.ZoneName},
[]string{"CNAME", dom.CNAME},
[]string{"Mode", dom.Mode},
}
table.AppendBulk(r)
table.Render()
}
fmt.Println()
mod := "modules"
if len(env.Stack) == 1 {
mod = "module"
}
fmt.Printf("🥞 Stack (%d %s total)\n", len(env.Stack), mod)
fmt.Println()
table := NewTable(os.Stdout)
table.SetHeader([]string{"Name", "Image"})
table.SetAutoMergeCells(true)
for _, p := range env.Stack {
r := []string{p.Name, p.Image}
table.Append(r)
}
table.Render()
}
fmt.Println()
return err
}
// AppsCreateCmd handles creating apps on Section
type AppsCreateCmd struct {
AccountID int `required short:"a" help:"ID of account to create the app under"`
Hostname string `required short:"d" help:"FQDN the app can be accessed at"`
Origin string `required short:"o" help:"URL to fetch the origin"`
StackName string `required short:"s" help:"Name of stack to deploy"`
}
// Run executes the command
func (c *AppsCreateCmd) Run() (err error) {
s := NewSpinner(fmt.Sprintf("Creating new app %s", c.Hostname))
s.Start() | s.Stop()
if err != nil {
if err == api.ErrStatusForbidden {
stacks, herr := api.Stacks()
if herr != nil {
return fmt.Errorf("unable to query stacks: %w", herr)
}
for _, s := range stacks {
if s.Name == c.StackName {
return err
}
}
return fmt.Errorf("bad request: unable to find stack %s", c.StackName)
}
return err
}
fmt.Printf("\nSuccess: created app '%s' with id '%d'\n", r.ApplicationName, r.ID)
return err
}
// AppsDeleteCmd handles deleting apps on Section
type AppsDeleteCmd struct {
AccountID int `required short:"a" help:"ID of account the app belongs to"`
AppID int `required short:"i" help:"ID of the app to delete"`
}
// Run executes the command
func (c *AppsDeleteCmd) Run() (err error) {
s := NewSpinner(fmt.Sprintf("Deleting app with id '%d'", c.AppID))
s.Start()
api.Timeout = 120 * time.Second // this specific request can take a long time
_, err = api.ApplicationDelete(c.AccountID, c.AppID)
s.Stop()
if err != nil {
return err
}
fmt.Printf("\nSuccess: deleted app with id '%d'\n", c.AppID)
return err
}
// AppsInitCmd creates and validates server.conf and package.json to prepare an app for deployment
type AppsInitCmd struct {
StackName string `optional default:"nodejs-basic" short:"s" help:"Name of stack to deploy. Default is nodejs-basic"`
Force bool `optional short:"f" help:"Resets deployment specific files to their default configuration"`
}
func (c *AppsInitCmd) buildServerConf() []byte {
return []byte(
`location / {
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $host;
include /etc/nginx/section.module/node.conf;
}
location ~ "/next-proxy-hop/" {
proxy_set_header X-Forwarded-For $http_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $host;
proxy_pass http://next-hop;
}`)
}
// Run executes the command
func (c *AppsInitCmd) Run() (err error) {
var stdout bytes.Buffer
var stderr bytes.Buffer
switch c.StackName {
case "nodejs-basic":
err := c.InitializeNodeBasicApp(stdout, stderr)
if err != nil {
return fmt.Errorf("[ERROR]: init completed with error %x", err)
}
default:
log.Printf("[ERROR]: Stack name %s does not have an initialization defined\n", c.StackName)
}
return err
}
// Create package.json
func (c *AppsInitCmd) CreatePkgJSON(stdout, stderr bytes.Buffer) (err error) {
cmd := exec.Command("npm", "init", "-y")
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err = cmd.Run()
return err
}
// InitializeNodeBasicApp initializes a basic node app.
func (c *AppsInitCmd) InitializeNodeBasicApp(stdout, stderr bytes.Buffer) (err error) {
if c.Force {
log.Println("[INFO] Removing old versions of server.conf and package.json")
err1 := os.Remove("package.json")
err2 := os.Remove("server.conf")
if err1 != nil || err2 != nil {
log.Println("[ERROR] unable to remove files, perhaps they do not exist?")
} else {
log.Println("[DEBUG] Files successfully removed")
}
}
log.Println("[DEBUG] Checking to see if server.conf exists")
checkServConf, err := os.Open("server.conf")
if err != nil {
log.Println("[WARN] server.conf does not exist. Creating server.conf")
f, err := os.Create("server.conf")
if err != nil {
return fmt.Errorf("error in creating a file: server.conf %w", err)
}
b := c.buildServerConf()
f.Write(b)
defer f.Close()
} else {
log.Println("[INFO] Validating server.conf")
fileinfo, err := checkServConf.Stat()
if err != nil {
return fmt.Errorf("error in finding stat of server.conf %w", err)
}
buf := make([]byte, fileinfo.Size())
_, err = checkServConf.Read(buf)
if err != nil {
return fmt.Errorf("error in size stat of server.conf %w", err)
}
fStr := string(buf)
if !strings.Contains(fStr, "location / {") {
log.Println("[WARN] default location unspecified. Edit or delete server.conf and rerun this command")
}
}
defer checkServConf.Close()
log.Println("[DEBUG] Checking to see if package.json exists")
checkPkgJSON, err := os.Open("package.json")
if err != nil {
log.Println("[WARN] package.json does not exist. Creating package.json")
err := c.CreatePkgJSON(stdout, stderr)
if err != nil {
return fmt.Errorf("there was an error creating package.json. Is node installed? %w", err)
}
log.Println("[INFO] package.json created")
}
defer checkPkgJSON.Close()
validPkgJSON, err := os.OpenFile("package.json", os.O_RDWR, 0777)
if err != nil {
return fmt.Errorf("failed to open package.json %w", err)
}
defer validPkgJSON.Close()
log.Println("[INFO] Validating package.json")
buf, err := os.ReadFile("package.json")
if err != nil {
return fmt.Errorf("failed to read package.json %w", err)
}
fStr := string(buf)
if len(fStr) == 0 {
err := os.Remove("package.json")
if err != nil {
log.Println("[ERROR] unable to remove empty package.json")
}
log.Println("[WARN] package.json is empty. Creating package.json")
err = c.CreatePkgJSON(stdout, stderr)
if err != nil {
return fmt.Errorf("there was an error creating package.json. Is node installed? %w", err)
}
log.Println("[INFO] package.json created from empty file")
buf, err = os.ReadFile("package.json")
if err != nil {
return fmt.Errorf("failed to read package.json %w", err)
}
fStr = string(buf)
}
jsonMap := make(map[string]interface{})
err = json.Unmarshal(buf, &jsonMap)
if err != nil {
return fmt.Errorf("package.json is not valid JSON %w", err)
}
lv := jsonMap["scripts"]
jsonToStrMap, ok := lv.(map[string]interface{})
if !ok {
return fmt.Errorf("json unable to be read as map[string]interface %w", err)
}
_, ok = jsonToStrMap["start"]
if !ok {
jsonToStrMap["start"] = "node YOUR_SERVER_HERE.js"
jsonMap["scripts"] = jsonToStrMap
err = os.Truncate("package.json", 0)
if err != nil {
return fmt.Errorf("failed to empty package.json %w", err)
}
set, err := json.MarshalIndent(jsonMap, "", " ")
if err != nil {
log.Println("[ERROR] unable to add start script placeholder")
}
_, err = validPkgJSON.Write(set)
if err != nil {
log.Println("[ERROR] unable to add start script placeholder")
}
}
if strings.Contains(fStr, `YOUR_SERVER_HERE.js`) {
log.Println("[ERROR] start script is required. Please edit the placeholder in package.json")
}
return err
}
// AppsStacksCmd lists available stacks to create new apps with
type AppsStacksCmd struct{}
// Run executes the command
func (c *AppsStacksCmd) Run() (err error) {
s := NewSpinner("Looking up stacks")
s.Start()
k, err := api.Stacks()
s.Stop()
if err != nil {
return fmt.Errorf("unable to look up stacks: %w", err)
}
table := NewTable(os.Stdout)
table.SetHeader([]string{"Name", "Label", "Description", "Type"})
for _, s := range k {
r := []string{s.Name, s.Label, s.Description, s.Type}
table.Append(r)
}
table.Render()
return err
} |
api.Timeout = 120 * time.Second // this specific request can take a long time
r, err := api.ApplicationCreate(c.AccountID, c.Hostname, c.Origin, c.StackName) | random_line_split |
Week_05.py | # Functions
"""
Functions give us the ability to make our programs much more powerful and clean
while also saving us time. We use functions is because of the ability to write once and call repeatedly.
Overview
• How to use functions and what they are
• Passing data around using parameters
• Returning data from functions
• Understanding scope and its importance
• Creating a shopping cart program
"""
# Creating and Calling Functions
"""
A function is a block of code which only runs when it is called.
You can pass data, known as parameters, into a function.
A function can return data as a result.
"""
# writing a function
def printInfo(): # defines what the function does when called
print("Name: John Smith") # calls the function to run
print("Age: 45") # calls the function again
printInfo()
printInfo()
# Function Stages
"""
There are two stages: function definition and function call.
Function definition is where you define the function name, any parameters it's supposed to accept,
and what it's supposed to do in the block of code associated with it.
The second stage is known as the function call. Functions will never run until called,
so you can define as many functions as you’d like, but if you never call one of them, then
nothing will happen. When you call a function, it will run the block of code within the
definition
"""
# UDF vs. Built-in
"""
Built-in functions are included in Python to serve a specific purpose to help build applications.
UDFs are user-defined functions.
"""
# performing a calculation in a function
def calc():
x, y = 5, 10
print(x+y)
calc() # will run the block of code within calc and output 15
# Exercises
"""
1. Print Name: Define a function called myName, and have it print out your name
when called.
"""
# solution
def myName():
name = "Nick"
print(name)
myName()
"""
2. Pizza Toppings: Define a function that prints out all your favorite pizza toppings
called pizzaToppings. Call the function three times.
"""
# solution
def pizzaToppings():
topping1, topping2, topping3 = "Cheese,", "Pepperoni,", "Chicken"
print(topping1, topping2, topping3)
pizzaToppings()
pizzaToppings()
pizzaToppings()
# Parameters
"""
Parameters are temporary variables declared on the function definition. To call a function with
different values, you need to use parameters. This is an arbitrary variable name that you use to reference the value within the function block; however, you usually want it to
be relevent to the data that you're working with. When calling the function, you would pass in the necessary value to run the block of code with.
"""
# Passing a Single Parameter
def printName(full_name):
print("Your name is: {}".format(full_name))
printName("John Smith")
printName("Amanda")
# Multiple parameters
# passing multiple parameters into a function
def addNums(num1, num2):
result = num1+num2
print("{} + {} = {}".format(num1, num2, result))
addNums(5, 8)
addNums(3.5, 5.5)
# passing a list
# using a function to square all information
numbers1 = [2, 4, 5, 10]
numbers2 = [1, 3, 6]
# def squares(nums):
# for num in nums:
# print(nums**2)
# squares(numbers1)
# squares(numbers2)
# Default Parameters
"""
A parameter can be associated with a default value. Take the value of pi for instance, it will always be 3.14,
so we can set a parameter called pi to that exact value to allow us to call the function with an already defined value for pi.
"""
# setting default parameter values
def calcArea(r, pi=3.14):
area = pi*(r**2)
print("Area: {}".format(area))
calcArea(2) # assuming radius is the value of 2
"""
Note: Default parameters must always go after non-default parameters.
"""
# Making Parameters Optional
"""
Sometimes you need to make functions that take optional arguments. The best example
is always middle names; some people have them, and some don’t. If we wanted to write
a function that would print out properly for both situations, we would need to make the
middle name an optional parameter. We do this by assigning an empty string value as
the default:
"""
# setting default parameter values
def printName(first, last, middle=""):
if middle:
print("{} {} {}".format(first, middle, last))
else:
print("{} {}".format(first, last))
printName("John", "Smith")
printName("John", "Smith", "Paul") # will output with middle name
# Keep in mind the order of our parameters! Parameters must line up from left to right according to the function definition.
# Named Parameter Assignment
"""
During the function call, you can explicity assign values into parameter names. This is useful
when you don’t want to mix up the order of values being passed in, as they work from left
to right by default. You can use parameter names to assign values for every parameter if
you choose, but it’s not necessary most of the time. Let’s check out an example:
"""
# explicity assigning values to parameters by referencing the name
def addNums(num1, num2):
print(num2)
print(num1)
addNums(5, num2=2.5)
# *args
"""
The use of *args allows you to pass a variable number of arguments into a function. This
allows you to make functions more modular. The magic isn’t the “args” keyword here
though; it’s really the unary operator ( * ) that allows us to perform this feature. You could
theoretically replace the word args with anyone, like “ *data”, and it would still work.
However, args is the default and general standard throughout the industry.
"""
# using args parameter to take in a tuple of arbitrary value
def outputData(name, *args):
print(type(args))
for arg in args:
print(arg)
outputData("John Smith", 5, True, "Jess")
# **kwargs
"""
Like args, kwargs allows us to take in an arbitrary number of values in a function;
however, it works as a dictionary with keyword arguments instead. Keyword arguments
are values passed in with keys, which allow us to access them easily within the function
block. Again, the magic here is in the two unary operators ( ** ) not the keyword of
kwargs. Let’s check it out:
"""
# using kwargs parameter to take in a dictionary of arbitrary value
def outputData(**kwargs):
print(type(kwargs))
print(kwargs["name"])
print(kwargs["num"])
outputData(name="John Smith", num=5, b=True)
# Exercises
"""
1. User Input: Ask the user to input a word, and pass that word into a function
that checks if the word starts with an uppercase. If it does output “True”,
otherwise “False”.
"""
# solution
def checkCaps(name):
if name[0].isupper() == True:
print("True")
else:
print("Fal | input("Enter your name")
checkCaps(name)
"""
2. No Name: Define a function that takes in two arguments, first_name and last_
name, and makes both optional. If no values are passed into the parameters, it
should output “No name passed in”; otherwise, it should print out the name.
"""
# solution
# def checkName(first_name, last_name):
# print("{} {}".format(first_name, last_name))
# checkName()
# Return Statement - is used to send info back to where the function call occured.
# using return keyword to return the sum of two numbers
def addNums(num1, num2):
return num1+num2
num = addNums(5.5, 4.5) # saves returned value into num
print(num)
print(addNums(10, 10)) # doesn't save returned value
# Ternary Operator
"""
A ternary operator is a shorthand python branching statement.
These operations can be used to assign values into a variable, or in this case, deciding what the return from a function
"""
# shorthand syntax using a ternary operator
def searchList(aList, el):
return True if el in aList else False
result = searchList(["one", 2, "three"], 2) # result = True
print(result)
# Exercises
"""
Full Name: Create a function that takes in a first and last name and returns the
two names joined together.
"""
# solution
def joinNames(firstName, lastName):
return firstName + lastName
firstName, lastName = "Nick ", "Mwangemi"
print(joinNames(firstName, lastName))
"""
2. User Input: Within a function, ask for user input. Have this function return that
input to be stored in a variable outside of the function. Then print out the input.
"""
# solution
def userInput(userInput):
faveSport = input("What's your favourite sport?")
return faveSport
faveSport = userInput("rugby")
print(faveSport)
# Scope -deals with the accessibility of variables declared within a program
# Types: global, function, class
# Global - when you declare a variable to be accessible to an entire file or application
# Function scope is in reference to variables being declared and accessible only withi functions
# Global Scope Access - accessible to the rest of the file
# where global variables can be accessed
# number = 5
# def scopeTest():
# number += 1 # not accessible due to function level scope
# scopeTest()
"""
Note: When passed in, it only passes the value, not the variable
"""
# Handling Function Scope
"""
When dealing with variables declared in a function, you generally won't need to access
it outside of the function. However, in order to access that value, best practice is to return it:
"""
# accessing variables defined in a function
def scopeTest():
word = "function"
return word
value = scopeTest()
print(value)
# In-Place Algorithms
"""
When passing variables into a function, you're simply passing the value of that variable and not the variable
itself. Such that the following will not alter the variable num
"""
num = 5
def changeNum(n):
n += 5
print(num)
changeNum(num)
# Exercises
"""
1. Names: Create a function that will change the list passed in with a parameter
of name at a given index. Such that if I were to pass in “Bill” and index 1,
it would change “Rich” to “Bill.” Use the list and function definition in the
following:
>>> names = ['Bob', 'Rich', 'Amanda']
>>> def changeValue(aList, name, index):
"""
# solution
# names = ['Bob', 'Rich', 'Amanda']
# def changeValue(aList, name, index):
# for name in names:
# names.insert(1, "Bill")
# return names
# print(changeValue(names, "Bill", 1))
| se")
name = | conditional_block |
Week_05.py | # Functions
"""
Functions give us the ability to make our programs much more powerful and clean
while also saving us time. We use functions is because of the ability to write once and call repeatedly.
Overview
• How to use functions and what they are
• Passing data around using parameters
• Returning data from functions
• Understanding scope and its importance
• Creating a shopping cart program
"""
# Creating and Calling Functions
"""
A function is a block of code which only runs when it is called.
You can pass data, known as parameters, into a function.
A function can return data as a result.
"""
# writing a function
def printInfo(): # defines what the function does when called
print("Name: John Smith") # calls the function to run
print("Age: 45") # calls the function again
printInfo()
printInfo()
# Function Stages
"""
There are two stages: function definition and function call.
Function definition is where you define the function name, any parameters it's supposed to accept,
and what it's supposed to do in the block of code associated with it.
The second stage is known as the function call. Functions will never run until called,
so you can define as many functions as you’d like, but if you never call one of them, then
nothing will happen. When you call a function, it will run the block of code within the
definition
"""
# UDF vs. Built-in
"""
Built-in functions are included in Python to serve a specific purpose to help build applications.
UDFs are user-defined functions.
"""
# performing a calculation in a function
def calc():
x, y = 5, 10
print(x+y)
calc() # will run the block of code within calc and output 15
# Exercises
"""
1. Print Name: Define a function called myName, and have it print out your name
when called.
"""
# solution
def myName():
name = "Nick"
print(name)
myName()
"""
2. Pizza Toppings: Define a function that prints out all your favorite pizza toppings
called pizzaToppings. Call the function three times.
"""
# solution
def pizzaToppings():
topping1, topping2, topping3 = "Cheese,", "Pepperoni,", "Chicken"
print(topping1, topping2, topping3)
pizzaToppings()
pizzaToppings()
pizzaToppings()
# Parameters
"""
Parameters are temporary variables declared on the function definition. To call a function with
different values, you need to use parameters. This is an arbitrary variable name that you use to reference the value within the function block; however, you usually want it to
be relevent to the data that you're working with. When calling the function, you would pass in the necessary value to run the block of code with.
"""
# Passing a Single Parameter
def printName(full_name):
print("Your name is: {}".format(full_name))
printName("John Smith")
printName("Amanda")
# Multiple parameters
# passing multiple parameters into a function
def addNums(num1, num2):
result = num1+num2
print("{} + {} = {}".format(num1, num2, result))
addNums(5, 8)
addNums(3.5, 5.5)
# passing a list
# using a function to square all information
numbers1 = [2, 4, 5, 10]
numbers2 = [1, 3, 6]
# def squares(nums):
# for num in nums:
# print(nums**2)
# squares(numbers1)
# squares(numbers2)
# Default Parameters
"""
A parameter can be associated with a default value. Take the value of pi for instance, it will always be 3.14,
so we can set a parameter called pi to that exact value to allow us to call the function with an already defined value for pi.
"""
# setting default parameter values
def calcArea(r, pi=3.14):
area = pi*(r**2)
print("Area: {}".format(area))
calcArea(2) # assuming radius is the value of 2
"""
Note: Default parameters must always go after non-default parameters.
"""
# Making Parameters Optional
"""
Sometimes you need to make functions that take optional arguments. The best example
is always middle names; some people have them, and some don’t. If we wanted to write
a function that would print out properly for both situations, we would need to make the
middle name an optional parameter. We do this by assigning an empty string value as
the default:
"""
# setting default parameter values
def printName(first, last, middle=""):
if middle:
print("{} {} {}".format(first, middle, last))
else:
print("{} {}".format(first, last))
printName("John", "Smith")
printName("John", "Smith", "Paul") # will output with middle name
# Keep in mind the order of our parameters! Parameters must line up from left to right according to the function definition.
# Named Parameter Assignment
"""
During the function call, you can explicity assign values into parameter names. This is useful
when you don’t want to mix up the order of values being passed in, as they work from left
to right by default. You can use parameter names to assign values for every parameter if
you choose, but it’s not necessary most of the time. Let’s check out an example:
"""
# explicity assigning values to parameters by referencing the name
def addNums(num1, num2):
print(num2)
prin | .5)
# *args
"""
The use of *args allows you to pass a variable number of arguments into a function. This
allows you to make functions more modular. The magic isn’t the “args” keyword here
though; it’s really the unary operator ( * ) that allows us to perform this feature. You could
theoretically replace the word args with anyone, like “ *data”, and it would still work.
However, args is the default and general standard throughout the industry.
"""
# using args parameter to take in a tuple of arbitrary value
def outputData(name, *args):
print(type(args))
for arg in args:
print(arg)
outputData("John Smith", 5, True, "Jess")
# **kwargs
"""
Like args, kwargs allows us to take in an arbitrary number of values in a function;
however, it works as a dictionary with keyword arguments instead. Keyword arguments
are values passed in with keys, which allow us to access them easily within the function
block. Again, the magic here is in the two unary operators ( ** ) not the keyword of
kwargs. Let’s check it out:
"""
# using kwargs parameter to take in a dictionary of arbitrary value
def outputData(**kwargs):
print(type(kwargs))
print(kwargs["name"])
print(kwargs["num"])
outputData(name="John Smith", num=5, b=True)
# Exercises
"""
1. User Input: Ask the user to input a word, and pass that word into a function
that checks if the word starts with an uppercase. If it does output “True”,
otherwise “False”.
"""
# solution
def checkCaps(name):
if name[0].isupper() == True:
print("True")
else:
print("False")
name = input("Enter your name")
checkCaps(name)
"""
2. No Name: Define a function that takes in two arguments, first_name and last_
name, and makes both optional. If no values are passed into the parameters, it
should output “No name passed in”; otherwise, it should print out the name.
"""
# solution
# def checkName(first_name, last_name):
# print("{} {}".format(first_name, last_name))
# checkName()
# Return Statement - is used to send info back to where the function call occured.
# using return keyword to return the sum of two numbers
def addNums(num1, num2):
return num1+num2
num = addNums(5.5, 4.5) # saves returned value into num
print(num)
print(addNums(10, 10)) # doesn't save returned value
# Ternary Operator
"""
A ternary operator is a shorthand python branching statement.
These operations can be used to assign values into a variable, or in this case, deciding what the return from a function
"""
# shorthand syntax using a ternary operator
def searchList(aList, el):
return True if el in aList else False
result = searchList(["one", 2, "three"], 2) # result = True
print(result)
# Exercises
"""
Full Name: Create a function that takes in a first and last name and returns the
two names joined together.
"""
# solution
def joinNames(firstName, lastName):
return firstName + lastName
firstName, lastName = "Nick ", "Mwangemi"
print(joinNames(firstName, lastName))
"""
2. User Input: Within a function, ask for user input. Have this function return that
input to be stored in a variable outside of the function. Then print out the input.
"""
# solution
def userInput(userInput):
faveSport = input("What's your favourite sport?")
return faveSport
faveSport = userInput("rugby")
print(faveSport)
# Scope -deals with the accessibility of variables declared within a program
# Types: global, function, class
# Global - when you declare a variable to be accessible to an entire file or application
# Function scope is in reference to variables being declared and accessible only withi functions
# Global Scope Access - accessible to the rest of the file
# where global variables can be accessed
# number = 5
# def scopeTest():
# number += 1 # not accessible due to function level scope
# scopeTest()
"""
Note: When passed in, it only passes the value, not the variable
"""
# Handling Function Scope
"""
When dealing with variables declared in a function, you generally won't need to access
it outside of the function. However, in order to access that value, best practice is to return it:
"""
# accessing variables defined in a function
def scopeTest():
word = "function"
return word
value = scopeTest()
print(value)
# In-Place Algorithms
"""
When passing variables into a function, you're simply passing the value of that variable and not the variable
itself. Such that the following will not alter the variable num
"""
num = 5
def changeNum(n):
n += 5
print(num)
changeNum(num)
# Exercises
"""
1. Names: Create a function that will change the list passed in with a parameter
of name at a given index. Such that if I were to pass in “Bill” and index 1,
it would change “Rich” to “Bill.” Use the list and function definition in the
following:
>>> names = ['Bob', 'Rich', 'Amanda']
>>> def changeValue(aList, name, index):
"""
# solution
# names = ['Bob', 'Rich', 'Amanda']
# def changeValue(aList, name, index):
# for name in names:
# names.insert(1, "Bill")
# return names
# print(changeValue(names, "Bill", 1))
| t(num1)
addNums(5, num2=2 | identifier_body |
Week_05.py | # Functions
"""
Functions give us the ability to make our programs much more powerful and clean
while also saving us time. We use functions is because of the ability to write once and call repeatedly.
Overview
• How to use functions and what they are
• Passing data around using parameters
• Returning data from functions
• Understanding scope and its importance
• Creating a shopping cart program
"""
# Creating and Calling Functions
"""
A function is a block of code which only runs when it is called.
You can pass data, known as parameters, into a function.
A function can return data as a result.
"""
# writing a function
def printInfo(): # defines what the function does when called
print("Name: John Smith") # calls the function to run
print("Age: 45") # calls the function again
printInfo()
printInfo()
# Function Stages
"""
There are two stages: function definition and function call.
Function definition is where you define the function name, any parameters it's supposed to accept,
and what it's supposed to do in the block of code associated with it.
The second stage is known as the function call. Functions will never run until called,
so you can define as many functions as you’d like, but if you never call one of them, then
nothing will happen. When you call a function, it will run the block of code within the
definition
"""
# UDF vs. Built-in
"""
Built-in functions are included in Python to serve a specific purpose to help build applications.
UDFs are user-defined functions.
"""
# performing a calculation in a function
def calc():
x, y = 5, 10
print(x+y)
calc() # will run the block of code within calc and output 15
# Exercises
"""
1. Print Name: Define a function called myName, and have it print out your name
when called.
"""
# solution
def myName():
name = "Nick"
print(name)
myName()
"""
2. Pizza Toppings: Define a function that prints out all your favorite pizza toppings
called pizzaToppings. Call the function three times.
"""
# solution
def pizzaToppings():
topping1, topping2, topping3 = "Cheese,", "Pepperoni,", "Chicken"
print(topping1, topping2, topping3)
pizzaToppings()
pizzaToppings()
pizzaToppings()
# Parameters
"""
Parameters are temporary variables declared on the function definition. To call a function with
different values, you need to use parameters. This is an arbitrary variable name that you use to reference the value within the function block; however, you usually want it to
be relevent to the data that you're working with. When calling the function, you would pass in the necessary value to run the block of code with.
"""
# Passing a Single Parameter
def printName(full_name):
print("Your name is: {}".format(full_name))
printName("John Smith")
printName("Amanda")
# Multiple parameters
# passing multiple parameters into a function
def addNums(num1, num2):
result = num1+num2
print("{} + {} = {}".format(num1, num2, result))
addNums(5, 8)
addNums(3.5, 5.5)
# passing a list
# using a function to square all information
numbers1 = [2, 4, 5, 10]
numbers2 = [1, 3, 6]
# def squares(nums):
# for num in nums:
# print(nums**2)
# squares(numbers1)
# squares(numbers2)
# Default Parameters
"""
A parameter can be associated with a default value. Take the value of pi for instance, it will always be 3.14,
so we can set a parameter called pi to that exact value to allow us to call the function with an already defined value for pi.
"""
# setting default parameter values
def calcArea(r, pi=3.14):
area = pi*(r**2)
print("Area: {}".format(area))
calcArea(2) # assuming radius is the value of 2
"""
Note: Default parameters must always go after non-default parameters.
"""
# Making Parameters Optional
"""
Sometimes you need to make functions that take optional arguments. The best example
is always middle names; some people have them, and some don’t. If we wanted to write
a function that would print out properly for both situations, we would need to make the
middle name an optional parameter. We do this by assigning an empty string value as
the default:
"""
# setting default parameter values
def printName(first, last, middle=""):
if middle:
print("{} {} {}".format(first, middle, last))
else:
print("{} {}".format(first, last))
printName("John", "Smith")
printName("John", "Smith", "Paul") # will output with middle name
# Keep in mind the order of our parameters! Parameters must line up from left to right according to the function definition.
# Named Parameter Assignment
"""
During the function call, you can explicity assign values into parameter names. This is useful
when you don’t want to mix up the order of values being passed in, as they work from left
to right by default. You can use parameter names to assign values for every parameter if
you choose, but it’s not necessary most of the time. Let’s check out an example:
"""
# explicity assigning values to parameters by referencing the name
def addNums(num1, num2):
print(num2)
print(num1)
addNums(5, num2=2.5)
# *args
"""
The use of *args allows you to pass a variable number of arguments into a function. This
allows you to make functions more modular. The magic isn’t the “args” keyword here
though; it’s really the unary operator ( * ) that allows us to perform this feature. You could
theoretically replace the word args with anyone, like “ *data”, and it would still work.
However, args is the default and general standard throughout the industry.
"""
# using args parameter to take in a tuple of arbitrary value
|
outputData("John Smith", 5, True, "Jess")
# **kwargs
"""
Like args, kwargs allows us to take in an arbitrary number of values in a function;
however, it works as a dictionary with keyword arguments instead. Keyword arguments
are values passed in with keys, which allow us to access them easily within the function
block. Again, the magic here is in the two unary operators ( ** ) not the keyword of
kwargs. Let’s check it out:
"""
# using kwargs parameter to take in a dictionary of arbitrary value
def outputData(**kwargs):
print(type(kwargs))
print(kwargs["name"])
print(kwargs["num"])
outputData(name="John Smith", num=5, b=True)
# Exercises
"""
1. User Input: Ask the user to input a word, and pass that word into a function
that checks if the word starts with an uppercase. If it does output “True”,
otherwise “False”.
"""
# solution
def checkCaps(name):
if name[0].isupper() == True:
print("True")
else:
print("False")
name = input("Enter your name")
checkCaps(name)
"""
2. No Name: Define a function that takes in two arguments, first_name and last_
name, and makes both optional. If no values are passed into the parameters, it
should output “No name passed in”; otherwise, it should print out the name.
"""
# solution
# def checkName(first_name, last_name):
# print("{} {}".format(first_name, last_name))
# checkName()
# Return Statement - is used to send info back to where the function call occured.
# using return keyword to return the sum of two numbers
def addNums(num1, num2):
return num1+num2
num = addNums(5.5, 4.5) # saves returned value into num
print(num)
print(addNums(10, 10)) # doesn't save returned value
# Ternary Operator
"""
A ternary operator is a shorthand python branching statement.
These operations can be used to assign values into a variable, or in this case, deciding what the return from a function
"""
# shorthand syntax using a ternary operator
def searchList(aList, el):
return True if el in aList else False
result = searchList(["one", 2, "three"], 2) # result = True
print(result)
# Exercises
"""
Full Name: Create a function that takes in a first and last name and returns the
two names joined together.
"""
# solution
def joinNames(firstName, lastName):
return firstName + lastName
firstName, lastName = "Nick ", "Mwangemi"
print(joinNames(firstName, lastName))
"""
2. User Input: Within a function, ask for user input. Have this function return that
input to be stored in a variable outside of the function. Then print out the input.
"""
# solution
def userInput(userInput):
faveSport = input("What's your favourite sport?")
return faveSport
faveSport = userInput("rugby")
print(faveSport)
# Scope -deals with the accessibility of variables declared within a program
# Types: global, function, class
# Global - when you declare a variable to be accessible to an entire file or application
# Function scope is in reference to variables being declared and accessible only withi functions
# Global Scope Access - accessible to the rest of the file
# where global variables can be accessed
# number = 5
# def scopeTest():
# number += 1 # not accessible due to function level scope
# scopeTest()
"""
Note: When passed in, it only passes the value, not the variable
"""
# Handling Function Scope
"""
When dealing with variables declared in a function, you generally won't need to access
it outside of the function. However, in order to access that value, best practice is to return it:
"""
# accessing variables defined in a function
def scopeTest():
word = "function"
return word
value = scopeTest()
print(value)
# In-Place Algorithms
"""
When passing variables into a function, you're simply passing the value of that variable and not the variable
itself. Such that the following will not alter the variable num
"""
num = 5
def changeNum(n):
n += 5
print(num)
changeNum(num)
# Exercises
"""
1. Names: Create a function that will change the list passed in with a parameter
of name at a given index. Such that if I were to pass in “Bill” and index 1,
it would change “Rich” to “Bill.” Use the list and function definition in the
following:
>>> names = ['Bob', 'Rich', 'Amanda']
>>> def changeValue(aList, name, index):
"""
# solution
# names = ['Bob', 'Rich', 'Amanda']
# def changeValue(aList, name, index):
# for name in names:
# names.insert(1, "Bill")
# return names
# print(changeValue(names, "Bill", 1)) | def outputData(name, *args):
print(type(args))
for arg in args:
print(arg) | random_line_split |
Week_05.py | # Functions
"""
Functions give us the ability to make our programs much more powerful and clean
while also saving us time. We use functions is because of the ability to write once and call repeatedly.
Overview
• How to use functions and what they are
• Passing data around using parameters
• Returning data from functions
• Understanding scope and its importance
• Creating a shopping cart program
"""
# Creating and Calling Functions
"""
A function is a block of code which only runs when it is called.
You can pass data, known as parameters, into a function.
A function can return data as a result.
"""
# writing a function
def printInfo(): # defines what the function does when called
print("Name: John Smith") # calls the function to run
print("Age: 45") # calls the function again
printInfo()
printInfo()
# Function Stages
"""
There are two stages: function definition and function call.
Function definition is where you define the function name, any parameters it's supposed to accept,
and what it's supposed to do in the block of code associated with it.
The second stage is known as the function call. Functions will never run until called,
so you can define as many functions as you’d like, but if you never call one of them, then
nothing will happen. When you call a function, it will run the block of code within the
definition
"""
# UDF vs. Built-in
"""
Built-in functions are included in Python to serve a specific purpose to help build applications.
UDFs are user-defined functions.
"""
# performing a calculation in a function
def calc():
x, y = 5, 10
print(x+y)
calc() # will run the block of code within calc and output 15
# Exercises
"""
1. Print Name: Define a function called myName, and have it print out your name
when called.
"""
# solution
def myName():
name = "Nick"
print(name)
myName()
"""
2. Pizza Toppings: Define a function that prints out all your favorite pizza toppings
called pizzaToppings. Call the function three times.
"""
# solution
def pizzaToppings():
topping1, topping2, topping3 = "Cheese,", "Pepperoni,", "Chicken"
print(topping1, topping2, topping3)
pizzaToppings()
pizzaToppings()
pizzaToppings()
# Parameters
"""
Parameters are temporary variables declared on the function definition. To call a function with
different values, you need to use parameters. This is an arbitrary variable name that you use to reference the value within the function block; however, you usually want it to
be relevent to the data that you're working with. When calling the function, you would pass in the necessary value to run the block of code with.
"""
# Passing a Single Parameter
def printName(fu |
print("Your name is: {}".format(full_name))
printName("John Smith")
printName("Amanda")
# Multiple parameters
# passing multiple parameters into a function
def addNums(num1, num2):
result = num1+num2
print("{} + {} = {}".format(num1, num2, result))
addNums(5, 8)
addNums(3.5, 5.5)
# passing a list
# using a function to square all information
numbers1 = [2, 4, 5, 10]
numbers2 = [1, 3, 6]
# def squares(nums):
# for num in nums:
# print(nums**2)
# squares(numbers1)
# squares(numbers2)
# Default Parameters
"""
A parameter can be associated with a default value. Take the value of pi for instance, it will always be 3.14,
so we can set a parameter called pi to that exact value to allow us to call the function with an already defined value for pi.
"""
# setting default parameter values
def calcArea(r, pi=3.14):
area = pi*(r**2)
print("Area: {}".format(area))
calcArea(2) # assuming radius is the value of 2
"""
Note: Default parameters must always go after non-default parameters.
"""
# Making Parameters Optional
"""
Sometimes you need to make functions that take optional arguments. The best example
is always middle names; some people have them, and some don’t. If we wanted to write
a function that would print out properly for both situations, we would need to make the
middle name an optional parameter. We do this by assigning an empty string value as
the default:
"""
# setting default parameter values
def printName(first, last, middle=""):
if middle:
print("{} {} {}".format(first, middle, last))
else:
print("{} {}".format(first, last))
printName("John", "Smith")
printName("John", "Smith", "Paul") # will output with middle name
# Keep in mind the order of our parameters! Parameters must line up from left to right according to the function definition.
# Named Parameter Assignment
"""
During the function call, you can explicity assign values into parameter names. This is useful
when you don’t want to mix up the order of values being passed in, as they work from left
to right by default. You can use parameter names to assign values for every parameter if
you choose, but it’s not necessary most of the time. Let’s check out an example:
"""
# explicity assigning values to parameters by referencing the name
def addNums(num1, num2):
print(num2)
print(num1)
addNums(5, num2=2.5)
# *args
"""
The use of *args allows you to pass a variable number of arguments into a function. This
allows you to make functions more modular. The magic isn’t the “args” keyword here
though; it’s really the unary operator ( * ) that allows us to perform this feature. You could
theoretically replace the word args with anyone, like “ *data”, and it would still work.
However, args is the default and general standard throughout the industry.
"""
# using args parameter to take in a tuple of arbitrary value
def outputData(name, *args):
print(type(args))
for arg in args:
print(arg)
outputData("John Smith", 5, True, "Jess")
# **kwargs
"""
Like args, kwargs allows us to take in an arbitrary number of values in a function;
however, it works as a dictionary with keyword arguments instead. Keyword arguments
are values passed in with keys, which allow us to access them easily within the function
block. Again, the magic here is in the two unary operators ( ** ) not the keyword of
kwargs. Let’s check it out:
"""
# using kwargs parameter to take in a dictionary of arbitrary value
def outputData(**kwargs):
print(type(kwargs))
print(kwargs["name"])
print(kwargs["num"])
outputData(name="John Smith", num=5, b=True)
# Exercises
"""
1. User Input: Ask the user to input a word, and pass that word into a function
that checks if the word starts with an uppercase. If it does output “True”,
otherwise “False”.
"""
# solution
def checkCaps(name):
if name[0].isupper() == True:
print("True")
else:
print("False")
name = input("Enter your name")
checkCaps(name)
"""
2. No Name: Define a function that takes in two arguments, first_name and last_
name, and makes both optional. If no values are passed into the parameters, it
should output “No name passed in”; otherwise, it should print out the name.
"""
# solution
# def checkName(first_name, last_name):
# print("{} {}".format(first_name, last_name))
# checkName()
# Return Statement - is used to send info back to where the function call occured.
# using return keyword to return the sum of two numbers
def addNums(num1, num2):
return num1+num2
num = addNums(5.5, 4.5) # saves returned value into num
print(num)
print(addNums(10, 10)) # doesn't save returned value
# Ternary Operator
"""
A ternary operator is a shorthand python branching statement.
These operations can be used to assign values into a variable, or in this case, deciding what the return from a function
"""
# shorthand syntax using a ternary operator
def searchList(aList, el):
return True if el in aList else False
result = searchList(["one", 2, "three"], 2) # result = True
print(result)
# Exercises
"""
Full Name: Create a function that takes in a first and last name and returns the
two names joined together.
"""
# solution
def joinNames(firstName, lastName):
return firstName + lastName
firstName, lastName = "Nick ", "Mwangemi"
print(joinNames(firstName, lastName))
"""
2. User Input: Within a function, ask for user input. Have this function return that
input to be stored in a variable outside of the function. Then print out the input.
"""
# solution
def userInput(userInput):
faveSport = input("What's your favourite sport?")
return faveSport
faveSport = userInput("rugby")
print(faveSport)
# Scope -deals with the accessibility of variables declared within a program
# Types: global, function, class
# Global - when you declare a variable to be accessible to an entire file or application
# Function scope is in reference to variables being declared and accessible only withi functions
# Global Scope Access - accessible to the rest of the file
# where global variables can be accessed
# number = 5
# def scopeTest():
# number += 1 # not accessible due to function level scope
# scopeTest()
"""
Note: When passed in, it only passes the value, not the variable
"""
# Handling Function Scope
"""
When dealing with variables declared in a function, you generally won't need to access
it outside of the function. However, in order to access that value, best practice is to return it:
"""
# accessing variables defined in a function
def scopeTest():
word = "function"
return word
value = scopeTest()
print(value)
# In-Place Algorithms
"""
When passing variables into a function, you're simply passing the value of that variable and not the variable
itself. Such that the following will not alter the variable num
"""
num = 5
def changeNum(n):
n += 5
print(num)
changeNum(num)
# Exercises
"""
1. Names: Create a function that will change the list passed in with a parameter
of name at a given index. Such that if I were to pass in “Bill” and index 1,
it would change “Rich” to “Bill.” Use the list and function definition in the
following:
>>> names = ['Bob', 'Rich', 'Amanda']
>>> def changeValue(aList, name, index):
"""
# solution
# names = ['Bob', 'Rich', 'Amanda']
# def changeValue(aList, name, index):
# for name in names:
# names.insert(1, "Bill")
# return names
# print(changeValue(names, "Bill", 1))
| ll_name): | identifier_name |
controller.go | /*
Copyright 2018 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package nfs to manage an NFS export.
package nfs
import (
"fmt"
"reflect"
s "strings"
"github.com/coreos/pkg/capnslog"
opkit "github.com/rook/operator-kit"
nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/operator/k8sutil"
"k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/tools/cache"
)
const (
customResourceName = "nfsserver"
customResourceNamePlural = "nfsservers"
appName = "rook-nfs"
nfsConfigMapName = "nfs-ganesha-config"
nfsConfigMapPath = "/nfs-ganesha/config"
nfsPort = 2049
rpcPort = 111
)
var logger = capnslog.NewPackageLogger("github.com/rook/rook", "nfs-operator")
// NFSResource represents the nfs export custom resource
var NFSResource = opkit.CustomResource{
Name: customResourceName,
Plural: customResourceNamePlural,
Group: nfsv1alpha1.CustomResourceGroup,
Version: nfsv1alpha1.Version,
Scope: apiextensionsv1beta1.NamespaceScoped,
Kind: reflect.TypeOf(nfsv1alpha1.NFSServer{}).Name(),
}
// Controller represents a controller object for nfs server custom resources
type Controller struct {
context *clusterd.Context
containerImage string
}
// NewController create controller for watching nfsserver custom resources created
func NewController(context *clusterd.Context, containerImage string) *Controller {
return &Controller{
context: context,
containerImage: containerImage,
}
}
// StartWatch watches for instances of nfsserver custom resources and acts on them
func (c *Controller) StartWatch(namespace string, stopCh chan struct{}) error {
resourceHandlerFuncs := cache.ResourceEventHandlerFuncs{
AddFunc: c.onAdd,
UpdateFunc: c.onUpdate,
DeleteFunc: c.onDelete,
}
logger.Infof("start watching nfs server resources in namespace %s", namespace)
watcher := opkit.NewWatcher(NFSResource, namespace, resourceHandlerFuncs, c.context.RookClientset.NfsV1alpha1().RESTClient())
go watcher.Watch(&nfsv1alpha1.NFSServer{}, stopCh)
return nil
}
type nfsServer struct {
name string
context *clusterd.Context
namespace string
spec nfsv1alpha1.NFSServerSpec
ownerRef metav1.OwnerReference
}
func newNfsServer(c *nfsv1alpha1.NFSServer, context *clusterd.Context) *nfsServer {
return &nfsServer{
name: appName,
context: context,
namespace: c.Namespace,
spec: c.Spec,
ownerRef: nfsOwnerRef(c.Namespace, string(c.UID)),
}
}
func nfsOwnerRef(namespace, nfsServerID string) metav1.OwnerReference {
blockOwner := true
return metav1.OwnerReference{
APIVersion: NFSResource.Version,
Kind: NFSResource.Kind,
Name: namespace,
UID: types.UID(nfsServerID),
BlockOwnerDeletion: &blockOwner,
}
}
func getServerConfig(exports []nfsv1alpha1.ExportsSpec) map[string]map[string]string {
claimConfigOpt := make(map[string]map[string]string)
configOpt := make(map[string]string)
for _, export := range exports {
claimName := export.PersistentVolumeClaim.ClaimName
if claimName != "" {
configOpt["accessMode"] = export.Server.AccessMode
configOpt["squash"] = export.Server.Squash
claimConfigOpt[claimName] = configOpt
}
}
return claimConfigOpt
}
func createAppLabels() map[string]string {
return map[string]string{
k8sutil.AppAttr: appName,
}
}
func createServicePorts() []v1.ServicePort {
return []v1.ServicePort{
{
Name: "nfs",
Port: int32(nfsPort),
TargetPort: intstr.FromInt(int(nfsPort)),
},
{
Name: "rpc",
Port: int32(rpcPort),
TargetPort: intstr.FromInt(int(rpcPort)),
},
}
}
func (c *Controller) createNFSService(nfsServer *nfsServer) error {
// This service is meant to be used by clients to access NFS.
nfsService := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: nfsServer.name,
Namespace: nfsServer.namespace,
OwnerReferences: []metav1.OwnerReference{nfsServer.ownerRef},
Labels: createAppLabels(),
},
Spec: v1.ServiceSpec{
Selector: createAppLabels(),
Type: v1.ServiceTypeClusterIP,
Ports: createServicePorts(),
},
}
if _, err := c.context.Clientset.CoreV1().Services(nfsServer.namespace).Create(nfsService); err != nil {
if !errors.IsAlreadyExists(err) {
return err
}
logger.Infof("nfs service %s already exists in namespace %s", nfsService.Name, nfsService.Namespace)
} else {
logger.Infof("nfs service %s started in namespace %s", nfsService.Name, nfsService.Namespace)
}
return nil
}
func createGaneshaExport(id int, path string, access string, squash string) string {
var accessType string
// validateNFSServerSpec guarantees `access` will be one of these values at this point
switch s.ToLower(access) {
case "readwrite":
accessType = "RW"
case "readonly":
accessType = "RO"
case "none":
accessType = "None"
}
idStr := fmt.Sprintf("%v", id)
nfsGaneshaConfig := `
EXPORT {
Export_Id = ` + idStr + `;
Path = /` + path + `;
Pseudo = /` + path + `;
Protocols = 4;
Transports = TCP;
Sectype = sys;
Access_Type = ` + accessType + `;
Squash = ` + s.ToLower(squash) + `;
FSAL {
Name = VFS;
}
}`
return nfsGaneshaConfig
}
func createGaneshaConfig(spec *nfsv1alpha1.NFSServerSpec) string {
serverConfig := getServerConfig(spec.Exports)
exportsList := make([]string, 0)
id := 10
for claimName, claimConfig := range serverConfig {
exportsList = append(exportsList, createGaneshaExport(id, claimName, claimConfig["accessMode"], claimConfig["squash"]))
id++
}
// fsid_device parameter is important as in case of an overlayfs there is a chance that the fsid of the mounted share is same as that of the fsid of "/"
// so setting this to true uses device number as fsid
// related issue https://github.com/nfs-ganesha/nfs-ganesha/issues/140
exportsList = append(exportsList, `NFS_Core_Param
{
fsid_device = true;
}`)
nfsGaneshaConfig := s.Join(exportsList, "\n")
return nfsGaneshaConfig
}
func (c *Controller) createNFSConfigMap(nfsServer *nfsServer) error {
nfsGaneshaConfig := createGaneshaConfig(&nfsServer.spec)
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: nfsConfigMapName,
Namespace: nfsServer.namespace,
OwnerReferences: []metav1.OwnerReference{nfsServer.ownerRef},
Labels: createAppLabels(),
},
Data: map[string]string{
nfsConfigMapName: nfsGaneshaConfig,
},
}
_, err := c.context.Clientset.CoreV1().ConfigMaps(nfsServer.namespace).Create(configMap)
if err != nil {
return err
}
return nil
}
func getPVCNameList(spec *nfsv1alpha1.NFSServerSpec) []string {
exports := spec.Exports
pvcNameList := make([]string, 0)
for _, export := range exports {
claimName := export.PersistentVolumeClaim.ClaimName
if claimName != "" {
pvcNameList = append(pvcNameList, claimName)
}
}
return pvcNameList
}
func createPVCSpecList(spec *nfsv1alpha1.NFSServerSpec) []v1.Volume {
pvcSpecList := make([]v1.Volume, 0)
pvcNameList := getPVCNameList(spec)
for _, claimName := range pvcNameList {
pvcSpecList = append(pvcSpecList, v1.Volume{
Name: claimName,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
},
},
})
}
configMapSrc := &v1.ConfigMapVolumeSource{
Items: []v1.KeyToPath{
{
Key: nfsConfigMapName,
Path: nfsConfigMapName,
},
},
}
configMapSrc.Name = nfsConfigMapName
configMapVol := v1.Volume{
Name: nfsConfigMapName,
VolumeSource: v1.VolumeSource{
ConfigMap: configMapSrc,
},
}
pvcSpecList = append(pvcSpecList, configMapVol)
return pvcSpecList
}
func createVolumeMountList(spec *nfsv1alpha1.NFSServerSpec) []v1.VolumeMount {
volumeMountList := make([]v1.VolumeMount, 0)
pvcNameList := getPVCNameList(spec)
for _, claimName := range pvcNameList {
volumeMountList = append(volumeMountList, v1.VolumeMount{
Name: claimName,
MountPath: "/" + claimName,
})
}
configMapVolMount := v1.VolumeMount{
Name: nfsConfigMapName,
MountPath: nfsConfigMapPath,
}
volumeMountList = append(volumeMountList, configMapVolMount)
return volumeMountList
}
func (c *Controller) createNfsPodSpec(nfsServer *nfsServer) v1.PodTemplateSpec {
nfsPodSpec := v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: nfsServer.name,
Namespace: nfsServer.namespace,
Labels: createAppLabels(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: nfsServer.name,
Image: c.containerImage,
Command: []string{"/start.sh"},
Ports: []v1.ContainerPort{
{
Name: "nfs-port",
ContainerPort: int32(nfsPort),
},
{
Name: "rpc-port",
ContainerPort: int32(rpcPort),
},
},
VolumeMounts: createVolumeMountList(&nfsServer.spec),
SecurityContext: &v1.SecurityContext{
Capabilities: &v1.Capabilities{
Add: []v1.Capability{
"SYS_ADMIN",
"DAC_READ_SEARCH",
},
},
},
},
},
Volumes: createPVCSpecList(&nfsServer.spec),
},
}
return nfsPodSpec
}
func (c *Controller) createNfsStatefulSet(nfsServer *nfsServer, replicas int32) error {
appsClient := c.context.Clientset.AppsV1beta1()
nfsPodSpec := c.createNfsPodSpec(nfsServer)
statefulSet := v1beta1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: nfsServer.name,
Namespace: nfsServer.namespace,
Labels: createAppLabels(),
OwnerReferences: []metav1.OwnerReference{nfsServer.ownerRef},
},
Spec: v1beta1.StatefulSetSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: createAppLabels(),
},
Template: nfsPodSpec,
ServiceName: nfsServer.name,
},
}
if _, err := appsClient.StatefulSets(nfsServer.namespace).Create(&statefulSet); err != nil {
if !errors.IsAlreadyExists(err) {
return err
}
logger.Infof("stateful set %s already exists in namespace %s", statefulSet.Name, statefulSet.Namespace)
} else {
logger.Infof("stateful set %s created in namespace %s", statefulSet.Name, statefulSet.Namespace)
}
return nil
}
func (c *Controller) onAdd(obj interface{}) {
nfsObj := obj.(*nfsv1alpha1.NFSServer).DeepCopy()
nfsServer := newNfsServer(nfsObj, c.context)
logger.Infof("new NFS server %s added to namespace %s", nfsObj.Name, nfsServer.namespace)
logger.Infof("validating nfs server spec in namespace %s", nfsServer.namespace)
if err := validateNFSServerSpec(nfsServer.spec); err != nil {
logger.Errorf("Invalid NFS Server spec: %+v", err)
return
}
logger.Infof("creating nfs server service in namespace %s", nfsServer.namespace)
if err := c.createNFSService(nfsServer); err != nil {
logger.Errorf("Unable to create NFS service %+v", err)
}
logger.Infof("creating nfs server configuration in namespace %s", nfsServer.namespace)
if err := c.createNFSConfigMap(nfsServer); err != nil {
logger.Errorf("Unable to create NFS ConfigMap %+v", err)
}
logger.Infof("creating nfs server stateful set in namespace %s", nfsServer.namespace)
if err := c.createNfsStatefulSet(nfsServer, int32(nfsServer.spec.Replicas)); err != nil {
logger.Errorf("Unable to create NFS stateful set %+v", err)
}
}
func (c *Controller) onUpdate(oldObj, newObj interface{}) {
oldNfsServ := oldObj.(*nfsv1alpha1.NFSServer).DeepCopy()
logger.Infof("Received update on NFS server %s in namespace %s. This is currently unsupported.", oldNfsServ.Name, oldNfsServ.Namespace)
}
func (c *Controller) onDelete(obj interface{}) {
cluster := obj.(*nfsv1alpha1.NFSServer).DeepCopy()
logger.Infof("cluster %s deleted from namespace %s", cluster.Name, cluster.Namespace)
}
func validateNFSServerSpec(spec nfsv1alpha1.NFSServerSpec) error {
serverConfig := spec.Exports
for _, export := range serverConfig {
if err := validateAccessMode(export.Server.AccessMode); err != nil {
return err
}
if err := validateSquashMode(export.Server.Squash); err != nil {
return err
}
}
return nil
}
func | (mode string) error {
switch s.ToLower(mode) {
case "readonly":
case "readwrite":
case "none":
default:
return fmt.Errorf("Invalid value (%s) for accessMode, valid values are (ReadOnly, ReadWrite, none)", mode)
}
return nil
}
func validateSquashMode(mode string) error {
switch s.ToLower(mode) {
case "none":
case "rootid":
case "root":
case "all":
default:
return fmt.Errorf("Invalid value (%s) for squash, valid values are (none, rootId, root, all)", mode)
}
return nil
}
| validateAccessMode | identifier_name |
controller.go | /*
Copyright 2018 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package nfs to manage an NFS export.
package nfs
import (
"fmt"
"reflect"
s "strings"
"github.com/coreos/pkg/capnslog"
opkit "github.com/rook/operator-kit"
nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/operator/k8sutil"
"k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/tools/cache"
)
const (
customResourceName = "nfsserver"
customResourceNamePlural = "nfsservers"
appName = "rook-nfs"
nfsConfigMapName = "nfs-ganesha-config"
nfsConfigMapPath = "/nfs-ganesha/config"
nfsPort = 2049
rpcPort = 111
)
var logger = capnslog.NewPackageLogger("github.com/rook/rook", "nfs-operator")
// NFSResource represents the nfs export custom resource
var NFSResource = opkit.CustomResource{
Name: customResourceName,
Plural: customResourceNamePlural,
Group: nfsv1alpha1.CustomResourceGroup,
Version: nfsv1alpha1.Version,
Scope: apiextensionsv1beta1.NamespaceScoped,
Kind: reflect.TypeOf(nfsv1alpha1.NFSServer{}).Name(),
}
// Controller represents a controller object for nfs server custom resources
type Controller struct {
context *clusterd.Context
containerImage string
}
// NewController create controller for watching nfsserver custom resources created
func NewController(context *clusterd.Context, containerImage string) *Controller {
return &Controller{
context: context,
containerImage: containerImage,
}
}
// StartWatch watches for instances of nfsserver custom resources and acts on them
func (c *Controller) StartWatch(namespace string, stopCh chan struct{}) error {
resourceHandlerFuncs := cache.ResourceEventHandlerFuncs{
AddFunc: c.onAdd,
UpdateFunc: c.onUpdate,
DeleteFunc: c.onDelete,
}
logger.Infof("start watching nfs server resources in namespace %s", namespace)
watcher := opkit.NewWatcher(NFSResource, namespace, resourceHandlerFuncs, c.context.RookClientset.NfsV1alpha1().RESTClient())
go watcher.Watch(&nfsv1alpha1.NFSServer{}, stopCh)
return nil
}
type nfsServer struct {
name string
context *clusterd.Context
namespace string
spec nfsv1alpha1.NFSServerSpec
ownerRef metav1.OwnerReference
}
func newNfsServer(c *nfsv1alpha1.NFSServer, context *clusterd.Context) *nfsServer {
return &nfsServer{
name: appName,
context: context,
namespace: c.Namespace,
spec: c.Spec,
ownerRef: nfsOwnerRef(c.Namespace, string(c.UID)),
}
}
func nfsOwnerRef(namespace, nfsServerID string) metav1.OwnerReference {
blockOwner := true
return metav1.OwnerReference{
APIVersion: NFSResource.Version,
Kind: NFSResource.Kind,
Name: namespace,
UID: types.UID(nfsServerID),
BlockOwnerDeletion: &blockOwner,
}
}
func getServerConfig(exports []nfsv1alpha1.ExportsSpec) map[string]map[string]string {
claimConfigOpt := make(map[string]map[string]string)
configOpt := make(map[string]string)
for _, export := range exports {
claimName := export.PersistentVolumeClaim.ClaimName
if claimName != "" {
configOpt["accessMode"] = export.Server.AccessMode
configOpt["squash"] = export.Server.Squash
claimConfigOpt[claimName] = configOpt
}
}
return claimConfigOpt
}
func createAppLabels() map[string]string {
return map[string]string{
k8sutil.AppAttr: appName,
}
}
func createServicePorts() []v1.ServicePort {
return []v1.ServicePort{
{
Name: "nfs",
Port: int32(nfsPort),
TargetPort: intstr.FromInt(int(nfsPort)),
},
{
Name: "rpc",
Port: int32(rpcPort),
TargetPort: intstr.FromInt(int(rpcPort)),
},
}
}
func (c *Controller) createNFSService(nfsServer *nfsServer) error {
// This service is meant to be used by clients to access NFS.
nfsService := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: nfsServer.name,
Namespace: nfsServer.namespace,
OwnerReferences: []metav1.OwnerReference{nfsServer.ownerRef},
Labels: createAppLabels(),
},
Spec: v1.ServiceSpec{
Selector: createAppLabels(),
Type: v1.ServiceTypeClusterIP,
Ports: createServicePorts(),
},
}
if _, err := c.context.Clientset.CoreV1().Services(nfsServer.namespace).Create(nfsService); err != nil {
if !errors.IsAlreadyExists(err) {
return err
}
logger.Infof("nfs service %s already exists in namespace %s", nfsService.Name, nfsService.Namespace)
} else {
logger.Infof("nfs service %s started in namespace %s", nfsService.Name, nfsService.Namespace)
}
return nil
}
func createGaneshaExport(id int, path string, access string, squash string) string {
var accessType string
// validateNFSServerSpec guarantees `access` will be one of these values at this point
switch s.ToLower(access) {
case "readwrite":
accessType = "RW"
case "readonly":
accessType = "RO"
case "none":
accessType = "None"
}
idStr := fmt.Sprintf("%v", id)
nfsGaneshaConfig := `
EXPORT {
Export_Id = ` + idStr + `;
Path = /` + path + `;
Pseudo = /` + path + `;
Protocols = 4;
Transports = TCP;
Sectype = sys;
Access_Type = ` + accessType + `;
Squash = ` + s.ToLower(squash) + `;
FSAL {
Name = VFS;
}
}`
return nfsGaneshaConfig
}
func createGaneshaConfig(spec *nfsv1alpha1.NFSServerSpec) string {
serverConfig := getServerConfig(spec.Exports)
exportsList := make([]string, 0)
id := 10
for claimName, claimConfig := range serverConfig {
exportsList = append(exportsList, createGaneshaExport(id, claimName, claimConfig["accessMode"], claimConfig["squash"]))
id++
}
// fsid_device parameter is important as in case of an overlayfs there is a chance that the fsid of the mounted share is same as that of the fsid of "/"
// so setting this to true uses device number as fsid
// related issue https://github.com/nfs-ganesha/nfs-ganesha/issues/140
exportsList = append(exportsList, `NFS_Core_Param
{
fsid_device = true;
}`)
nfsGaneshaConfig := s.Join(exportsList, "\n")
return nfsGaneshaConfig
}
func (c *Controller) createNFSConfigMap(nfsServer *nfsServer) error {
nfsGaneshaConfig := createGaneshaConfig(&nfsServer.spec)
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: nfsConfigMapName,
Namespace: nfsServer.namespace,
OwnerReferences: []metav1.OwnerReference{nfsServer.ownerRef},
Labels: createAppLabels(),
},
Data: map[string]string{
nfsConfigMapName: nfsGaneshaConfig,
},
}
_, err := c.context.Clientset.CoreV1().ConfigMaps(nfsServer.namespace).Create(configMap)
if err != nil {
return err
}
return nil
}
func getPVCNameList(spec *nfsv1alpha1.NFSServerSpec) []string {
exports := spec.Exports
pvcNameList := make([]string, 0)
for _, export := range exports {
claimName := export.PersistentVolumeClaim.ClaimName
if claimName != "" {
pvcNameList = append(pvcNameList, claimName)
}
}
return pvcNameList
}
func createPVCSpecList(spec *nfsv1alpha1.NFSServerSpec) []v1.Volume {
pvcSpecList := make([]v1.Volume, 0)
pvcNameList := getPVCNameList(spec)
for _, claimName := range pvcNameList {
pvcSpecList = append(pvcSpecList, v1.Volume{
Name: claimName,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
},
},
})
}
configMapSrc := &v1.ConfigMapVolumeSource{
Items: []v1.KeyToPath{
{
Key: nfsConfigMapName,
Path: nfsConfigMapName,
},
},
}
configMapSrc.Name = nfsConfigMapName
configMapVol := v1.Volume{
Name: nfsConfigMapName,
VolumeSource: v1.VolumeSource{
ConfigMap: configMapSrc,
},
}
pvcSpecList = append(pvcSpecList, configMapVol)
return pvcSpecList
}
func createVolumeMountList(spec *nfsv1alpha1.NFSServerSpec) []v1.VolumeMount {
volumeMountList := make([]v1.VolumeMount, 0)
pvcNameList := getPVCNameList(spec)
for _, claimName := range pvcNameList {
volumeMountList = append(volumeMountList, v1.VolumeMount{
Name: claimName,
MountPath: "/" + claimName,
})
}
configMapVolMount := v1.VolumeMount{
Name: nfsConfigMapName,
MountPath: nfsConfigMapPath,
}
volumeMountList = append(volumeMountList, configMapVolMount)
return volumeMountList
}
func (c *Controller) createNfsPodSpec(nfsServer *nfsServer) v1.PodTemplateSpec {
nfsPodSpec := v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: nfsServer.name,
Namespace: nfsServer.namespace,
Labels: createAppLabels(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: nfsServer.name,
Image: c.containerImage,
Command: []string{"/start.sh"},
Ports: []v1.ContainerPort{
{
Name: "nfs-port",
ContainerPort: int32(nfsPort),
},
{
Name: "rpc-port",
ContainerPort: int32(rpcPort),
},
},
VolumeMounts: createVolumeMountList(&nfsServer.spec),
SecurityContext: &v1.SecurityContext{
Capabilities: &v1.Capabilities{
Add: []v1.Capability{
"SYS_ADMIN",
"DAC_READ_SEARCH",
},
},
},
},
},
Volumes: createPVCSpecList(&nfsServer.spec),
},
}
return nfsPodSpec
}
func (c *Controller) createNfsStatefulSet(nfsServer *nfsServer, replicas int32) error {
appsClient := c.context.Clientset.AppsV1beta1()
nfsPodSpec := c.createNfsPodSpec(nfsServer)
statefulSet := v1beta1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: nfsServer.name,
Namespace: nfsServer.namespace,
Labels: createAppLabels(),
OwnerReferences: []metav1.OwnerReference{nfsServer.ownerRef},
},
Spec: v1beta1.StatefulSetSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: createAppLabels(),
},
Template: nfsPodSpec,
ServiceName: nfsServer.name,
},
}
if _, err := appsClient.StatefulSets(nfsServer.namespace).Create(&statefulSet); err != nil {
if !errors.IsAlreadyExists(err) {
return err
}
logger.Infof("stateful set %s already exists in namespace %s", statefulSet.Name, statefulSet.Namespace)
} else {
logger.Infof("stateful set %s created in namespace %s", statefulSet.Name, statefulSet.Namespace)
}
return nil
}
func (c *Controller) onAdd(obj interface{}) {
nfsObj := obj.(*nfsv1alpha1.NFSServer).DeepCopy()
nfsServer := newNfsServer(nfsObj, c.context)
logger.Infof("new NFS server %s added to namespace %s", nfsObj.Name, nfsServer.namespace)
logger.Infof("validating nfs server spec in namespace %s", nfsServer.namespace)
if err := validateNFSServerSpec(nfsServer.spec); err != nil {
logger.Errorf("Invalid NFS Server spec: %+v", err)
return
}
logger.Infof("creating nfs server service in namespace %s", nfsServer.namespace)
if err := c.createNFSService(nfsServer); err != nil {
logger.Errorf("Unable to create NFS service %+v", err)
}
logger.Infof("creating nfs server configuration in namespace %s", nfsServer.namespace)
if err := c.createNFSConfigMap(nfsServer); err != nil {
logger.Errorf("Unable to create NFS ConfigMap %+v", err)
}
logger.Infof("creating nfs server stateful set in namespace %s", nfsServer.namespace)
if err := c.createNfsStatefulSet(nfsServer, int32(nfsServer.spec.Replicas)); err != nil {
logger.Errorf("Unable to create NFS stateful set %+v", err)
}
}
func (c *Controller) onUpdate(oldObj, newObj interface{}) {
oldNfsServ := oldObj.(*nfsv1alpha1.NFSServer).DeepCopy()
logger.Infof("Received update on NFS server %s in namespace %s. This is currently unsupported.", oldNfsServ.Name, oldNfsServ.Namespace)
}
func (c *Controller) onDelete(obj interface{}) {
cluster := obj.(*nfsv1alpha1.NFSServer).DeepCopy()
logger.Infof("cluster %s deleted from namespace %s", cluster.Name, cluster.Namespace)
}
func validateNFSServerSpec(spec nfsv1alpha1.NFSServerSpec) error {
serverConfig := spec.Exports
for _, export := range serverConfig {
if err := validateAccessMode(export.Server.AccessMode); err != nil {
return err
}
if err := validateSquashMode(export.Server.Squash); err != nil {
return err
}
}
return nil
}
func validateAccessMode(mode string) error {
switch s.ToLower(mode) {
case "readonly":
case "readwrite":
case "none":
default:
return fmt.Errorf("Invalid value (%s) for accessMode, valid values are (ReadOnly, ReadWrite, none)", mode)
}
return nil
}
func validateSquashMode(mode string) error | {
switch s.ToLower(mode) {
case "none":
case "rootid":
case "root":
case "all":
default:
return fmt.Errorf("Invalid value (%s) for squash, valid values are (none, rootId, root, all)", mode)
}
return nil
} | identifier_body | |
controller.go | /*
Copyright 2018 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package nfs to manage an NFS export.
package nfs
import (
"fmt"
"reflect"
s "strings"
"github.com/coreos/pkg/capnslog"
opkit "github.com/rook/operator-kit"
nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/operator/k8sutil"
"k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/tools/cache"
)
const (
customResourceName = "nfsserver"
customResourceNamePlural = "nfsservers"
appName = "rook-nfs"
nfsConfigMapName = "nfs-ganesha-config"
nfsConfigMapPath = "/nfs-ganesha/config"
nfsPort = 2049
rpcPort = 111
)
var logger = capnslog.NewPackageLogger("github.com/rook/rook", "nfs-operator")
// NFSResource represents the nfs export custom resource
var NFSResource = opkit.CustomResource{
Name: customResourceName,
Plural: customResourceNamePlural, | }
// Controller represents a controller object for nfs server custom resources
type Controller struct {
context *clusterd.Context
containerImage string
}
// NewController create controller for watching nfsserver custom resources created
func NewController(context *clusterd.Context, containerImage string) *Controller {
return &Controller{
context: context,
containerImage: containerImage,
}
}
// StartWatch watches for instances of nfsserver custom resources and acts on them
func (c *Controller) StartWatch(namespace string, stopCh chan struct{}) error {
resourceHandlerFuncs := cache.ResourceEventHandlerFuncs{
AddFunc: c.onAdd,
UpdateFunc: c.onUpdate,
DeleteFunc: c.onDelete,
}
logger.Infof("start watching nfs server resources in namespace %s", namespace)
watcher := opkit.NewWatcher(NFSResource, namespace, resourceHandlerFuncs, c.context.RookClientset.NfsV1alpha1().RESTClient())
go watcher.Watch(&nfsv1alpha1.NFSServer{}, stopCh)
return nil
}
type nfsServer struct {
name string
context *clusterd.Context
namespace string
spec nfsv1alpha1.NFSServerSpec
ownerRef metav1.OwnerReference
}
func newNfsServer(c *nfsv1alpha1.NFSServer, context *clusterd.Context) *nfsServer {
return &nfsServer{
name: appName,
context: context,
namespace: c.Namespace,
spec: c.Spec,
ownerRef: nfsOwnerRef(c.Namespace, string(c.UID)),
}
}
func nfsOwnerRef(namespace, nfsServerID string) metav1.OwnerReference {
blockOwner := true
return metav1.OwnerReference{
APIVersion: NFSResource.Version,
Kind: NFSResource.Kind,
Name: namespace,
UID: types.UID(nfsServerID),
BlockOwnerDeletion: &blockOwner,
}
}
func getServerConfig(exports []nfsv1alpha1.ExportsSpec) map[string]map[string]string {
claimConfigOpt := make(map[string]map[string]string)
configOpt := make(map[string]string)
for _, export := range exports {
claimName := export.PersistentVolumeClaim.ClaimName
if claimName != "" {
configOpt["accessMode"] = export.Server.AccessMode
configOpt["squash"] = export.Server.Squash
claimConfigOpt[claimName] = configOpt
}
}
return claimConfigOpt
}
func createAppLabels() map[string]string {
return map[string]string{
k8sutil.AppAttr: appName,
}
}
func createServicePorts() []v1.ServicePort {
return []v1.ServicePort{
{
Name: "nfs",
Port: int32(nfsPort),
TargetPort: intstr.FromInt(int(nfsPort)),
},
{
Name: "rpc",
Port: int32(rpcPort),
TargetPort: intstr.FromInt(int(rpcPort)),
},
}
}
func (c *Controller) createNFSService(nfsServer *nfsServer) error {
// This service is meant to be used by clients to access NFS.
nfsService := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: nfsServer.name,
Namespace: nfsServer.namespace,
OwnerReferences: []metav1.OwnerReference{nfsServer.ownerRef},
Labels: createAppLabels(),
},
Spec: v1.ServiceSpec{
Selector: createAppLabels(),
Type: v1.ServiceTypeClusterIP,
Ports: createServicePorts(),
},
}
if _, err := c.context.Clientset.CoreV1().Services(nfsServer.namespace).Create(nfsService); err != nil {
if !errors.IsAlreadyExists(err) {
return err
}
logger.Infof("nfs service %s already exists in namespace %s", nfsService.Name, nfsService.Namespace)
} else {
logger.Infof("nfs service %s started in namespace %s", nfsService.Name, nfsService.Namespace)
}
return nil
}
func createGaneshaExport(id int, path string, access string, squash string) string {
var accessType string
// validateNFSServerSpec guarantees `access` will be one of these values at this point
switch s.ToLower(access) {
case "readwrite":
accessType = "RW"
case "readonly":
accessType = "RO"
case "none":
accessType = "None"
}
idStr := fmt.Sprintf("%v", id)
nfsGaneshaConfig := `
EXPORT {
Export_Id = ` + idStr + `;
Path = /` + path + `;
Pseudo = /` + path + `;
Protocols = 4;
Transports = TCP;
Sectype = sys;
Access_Type = ` + accessType + `;
Squash = ` + s.ToLower(squash) + `;
FSAL {
Name = VFS;
}
}`
return nfsGaneshaConfig
}
func createGaneshaConfig(spec *nfsv1alpha1.NFSServerSpec) string {
serverConfig := getServerConfig(spec.Exports)
exportsList := make([]string, 0)
id := 10
for claimName, claimConfig := range serverConfig {
exportsList = append(exportsList, createGaneshaExport(id, claimName, claimConfig["accessMode"], claimConfig["squash"]))
id++
}
// fsid_device parameter is important as in case of an overlayfs there is a chance that the fsid of the mounted share is same as that of the fsid of "/"
// so setting this to true uses device number as fsid
// related issue https://github.com/nfs-ganesha/nfs-ganesha/issues/140
exportsList = append(exportsList, `NFS_Core_Param
{
fsid_device = true;
}`)
nfsGaneshaConfig := s.Join(exportsList, "\n")
return nfsGaneshaConfig
}
func (c *Controller) createNFSConfigMap(nfsServer *nfsServer) error {
nfsGaneshaConfig := createGaneshaConfig(&nfsServer.spec)
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: nfsConfigMapName,
Namespace: nfsServer.namespace,
OwnerReferences: []metav1.OwnerReference{nfsServer.ownerRef},
Labels: createAppLabels(),
},
Data: map[string]string{
nfsConfigMapName: nfsGaneshaConfig,
},
}
_, err := c.context.Clientset.CoreV1().ConfigMaps(nfsServer.namespace).Create(configMap)
if err != nil {
return err
}
return nil
}
func getPVCNameList(spec *nfsv1alpha1.NFSServerSpec) []string {
exports := spec.Exports
pvcNameList := make([]string, 0)
for _, export := range exports {
claimName := export.PersistentVolumeClaim.ClaimName
if claimName != "" {
pvcNameList = append(pvcNameList, claimName)
}
}
return pvcNameList
}
func createPVCSpecList(spec *nfsv1alpha1.NFSServerSpec) []v1.Volume {
pvcSpecList := make([]v1.Volume, 0)
pvcNameList := getPVCNameList(spec)
for _, claimName := range pvcNameList {
pvcSpecList = append(pvcSpecList, v1.Volume{
Name: claimName,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
},
},
})
}
configMapSrc := &v1.ConfigMapVolumeSource{
Items: []v1.KeyToPath{
{
Key: nfsConfigMapName,
Path: nfsConfigMapName,
},
},
}
configMapSrc.Name = nfsConfigMapName
configMapVol := v1.Volume{
Name: nfsConfigMapName,
VolumeSource: v1.VolumeSource{
ConfigMap: configMapSrc,
},
}
pvcSpecList = append(pvcSpecList, configMapVol)
return pvcSpecList
}
func createVolumeMountList(spec *nfsv1alpha1.NFSServerSpec) []v1.VolumeMount {
volumeMountList := make([]v1.VolumeMount, 0)
pvcNameList := getPVCNameList(spec)
for _, claimName := range pvcNameList {
volumeMountList = append(volumeMountList, v1.VolumeMount{
Name: claimName,
MountPath: "/" + claimName,
})
}
configMapVolMount := v1.VolumeMount{
Name: nfsConfigMapName,
MountPath: nfsConfigMapPath,
}
volumeMountList = append(volumeMountList, configMapVolMount)
return volumeMountList
}
func (c *Controller) createNfsPodSpec(nfsServer *nfsServer) v1.PodTemplateSpec {
nfsPodSpec := v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: nfsServer.name,
Namespace: nfsServer.namespace,
Labels: createAppLabels(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: nfsServer.name,
Image: c.containerImage,
Command: []string{"/start.sh"},
Ports: []v1.ContainerPort{
{
Name: "nfs-port",
ContainerPort: int32(nfsPort),
},
{
Name: "rpc-port",
ContainerPort: int32(rpcPort),
},
},
VolumeMounts: createVolumeMountList(&nfsServer.spec),
SecurityContext: &v1.SecurityContext{
Capabilities: &v1.Capabilities{
Add: []v1.Capability{
"SYS_ADMIN",
"DAC_READ_SEARCH",
},
},
},
},
},
Volumes: createPVCSpecList(&nfsServer.spec),
},
}
return nfsPodSpec
}
func (c *Controller) createNfsStatefulSet(nfsServer *nfsServer, replicas int32) error {
appsClient := c.context.Clientset.AppsV1beta1()
nfsPodSpec := c.createNfsPodSpec(nfsServer)
statefulSet := v1beta1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: nfsServer.name,
Namespace: nfsServer.namespace,
Labels: createAppLabels(),
OwnerReferences: []metav1.OwnerReference{nfsServer.ownerRef},
},
Spec: v1beta1.StatefulSetSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: createAppLabels(),
},
Template: nfsPodSpec,
ServiceName: nfsServer.name,
},
}
if _, err := appsClient.StatefulSets(nfsServer.namespace).Create(&statefulSet); err != nil {
if !errors.IsAlreadyExists(err) {
return err
}
logger.Infof("stateful set %s already exists in namespace %s", statefulSet.Name, statefulSet.Namespace)
} else {
logger.Infof("stateful set %s created in namespace %s", statefulSet.Name, statefulSet.Namespace)
}
return nil
}
func (c *Controller) onAdd(obj interface{}) {
nfsObj := obj.(*nfsv1alpha1.NFSServer).DeepCopy()
nfsServer := newNfsServer(nfsObj, c.context)
logger.Infof("new NFS server %s added to namespace %s", nfsObj.Name, nfsServer.namespace)
logger.Infof("validating nfs server spec in namespace %s", nfsServer.namespace)
if err := validateNFSServerSpec(nfsServer.spec); err != nil {
logger.Errorf("Invalid NFS Server spec: %+v", err)
return
}
logger.Infof("creating nfs server service in namespace %s", nfsServer.namespace)
if err := c.createNFSService(nfsServer); err != nil {
logger.Errorf("Unable to create NFS service %+v", err)
}
logger.Infof("creating nfs server configuration in namespace %s", nfsServer.namespace)
if err := c.createNFSConfigMap(nfsServer); err != nil {
logger.Errorf("Unable to create NFS ConfigMap %+v", err)
}
logger.Infof("creating nfs server stateful set in namespace %s", nfsServer.namespace)
if err := c.createNfsStatefulSet(nfsServer, int32(nfsServer.spec.Replicas)); err != nil {
logger.Errorf("Unable to create NFS stateful set %+v", err)
}
}
func (c *Controller) onUpdate(oldObj, newObj interface{}) {
oldNfsServ := oldObj.(*nfsv1alpha1.NFSServer).DeepCopy()
logger.Infof("Received update on NFS server %s in namespace %s. This is currently unsupported.", oldNfsServ.Name, oldNfsServ.Namespace)
}
func (c *Controller) onDelete(obj interface{}) {
cluster := obj.(*nfsv1alpha1.NFSServer).DeepCopy()
logger.Infof("cluster %s deleted from namespace %s", cluster.Name, cluster.Namespace)
}
func validateNFSServerSpec(spec nfsv1alpha1.NFSServerSpec) error {
serverConfig := spec.Exports
for _, export := range serverConfig {
if err := validateAccessMode(export.Server.AccessMode); err != nil {
return err
}
if err := validateSquashMode(export.Server.Squash); err != nil {
return err
}
}
return nil
}
func validateAccessMode(mode string) error {
switch s.ToLower(mode) {
case "readonly":
case "readwrite":
case "none":
default:
return fmt.Errorf("Invalid value (%s) for accessMode, valid values are (ReadOnly, ReadWrite, none)", mode)
}
return nil
}
func validateSquashMode(mode string) error {
switch s.ToLower(mode) {
case "none":
case "rootid":
case "root":
case "all":
default:
return fmt.Errorf("Invalid value (%s) for squash, valid values are (none, rootId, root, all)", mode)
}
return nil
} | Group: nfsv1alpha1.CustomResourceGroup,
Version: nfsv1alpha1.Version,
Scope: apiextensionsv1beta1.NamespaceScoped,
Kind: reflect.TypeOf(nfsv1alpha1.NFSServer{}).Name(), | random_line_split |
controller.go | /*
Copyright 2018 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package nfs to manage an NFS export.
package nfs
import (
"fmt"
"reflect"
s "strings"
"github.com/coreos/pkg/capnslog"
opkit "github.com/rook/operator-kit"
nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/operator/k8sutil"
"k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/tools/cache"
)
const (
customResourceName = "nfsserver"
customResourceNamePlural = "nfsservers"
appName = "rook-nfs"
nfsConfigMapName = "nfs-ganesha-config"
nfsConfigMapPath = "/nfs-ganesha/config"
nfsPort = 2049
rpcPort = 111
)
var logger = capnslog.NewPackageLogger("github.com/rook/rook", "nfs-operator")
// NFSResource represents the nfs export custom resource
var NFSResource = opkit.CustomResource{
Name: customResourceName,
Plural: customResourceNamePlural,
Group: nfsv1alpha1.CustomResourceGroup,
Version: nfsv1alpha1.Version,
Scope: apiextensionsv1beta1.NamespaceScoped,
Kind: reflect.TypeOf(nfsv1alpha1.NFSServer{}).Name(),
}
// Controller represents a controller object for nfs server custom resources
type Controller struct {
context *clusterd.Context
containerImage string
}
// NewController create controller for watching nfsserver custom resources created
func NewController(context *clusterd.Context, containerImage string) *Controller {
return &Controller{
context: context,
containerImage: containerImage,
}
}
// StartWatch watches for instances of nfsserver custom resources and acts on them
func (c *Controller) StartWatch(namespace string, stopCh chan struct{}) error {
resourceHandlerFuncs := cache.ResourceEventHandlerFuncs{
AddFunc: c.onAdd,
UpdateFunc: c.onUpdate,
DeleteFunc: c.onDelete,
}
logger.Infof("start watching nfs server resources in namespace %s", namespace)
watcher := opkit.NewWatcher(NFSResource, namespace, resourceHandlerFuncs, c.context.RookClientset.NfsV1alpha1().RESTClient())
go watcher.Watch(&nfsv1alpha1.NFSServer{}, stopCh)
return nil
}
type nfsServer struct {
name string
context *clusterd.Context
namespace string
spec nfsv1alpha1.NFSServerSpec
ownerRef metav1.OwnerReference
}
func newNfsServer(c *nfsv1alpha1.NFSServer, context *clusterd.Context) *nfsServer {
return &nfsServer{
name: appName,
context: context,
namespace: c.Namespace,
spec: c.Spec,
ownerRef: nfsOwnerRef(c.Namespace, string(c.UID)),
}
}
func nfsOwnerRef(namespace, nfsServerID string) metav1.OwnerReference {
blockOwner := true
return metav1.OwnerReference{
APIVersion: NFSResource.Version,
Kind: NFSResource.Kind,
Name: namespace,
UID: types.UID(nfsServerID),
BlockOwnerDeletion: &blockOwner,
}
}
func getServerConfig(exports []nfsv1alpha1.ExportsSpec) map[string]map[string]string {
claimConfigOpt := make(map[string]map[string]string)
configOpt := make(map[string]string)
for _, export := range exports {
claimName := export.PersistentVolumeClaim.ClaimName
if claimName != "" {
configOpt["accessMode"] = export.Server.AccessMode
configOpt["squash"] = export.Server.Squash
claimConfigOpt[claimName] = configOpt
}
}
return claimConfigOpt
}
func createAppLabels() map[string]string {
return map[string]string{
k8sutil.AppAttr: appName,
}
}
func createServicePorts() []v1.ServicePort {
return []v1.ServicePort{
{
Name: "nfs",
Port: int32(nfsPort),
TargetPort: intstr.FromInt(int(nfsPort)),
},
{
Name: "rpc",
Port: int32(rpcPort),
TargetPort: intstr.FromInt(int(rpcPort)),
},
}
}
func (c *Controller) createNFSService(nfsServer *nfsServer) error {
// This service is meant to be used by clients to access NFS.
nfsService := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: nfsServer.name,
Namespace: nfsServer.namespace,
OwnerReferences: []metav1.OwnerReference{nfsServer.ownerRef},
Labels: createAppLabels(),
},
Spec: v1.ServiceSpec{
Selector: createAppLabels(),
Type: v1.ServiceTypeClusterIP,
Ports: createServicePorts(),
},
}
if _, err := c.context.Clientset.CoreV1().Services(nfsServer.namespace).Create(nfsService); err != nil {
if !errors.IsAlreadyExists(err) {
return err
}
logger.Infof("nfs service %s already exists in namespace %s", nfsService.Name, nfsService.Namespace)
} else {
logger.Infof("nfs service %s started in namespace %s", nfsService.Name, nfsService.Namespace)
}
return nil
}
func createGaneshaExport(id int, path string, access string, squash string) string {
var accessType string
// validateNFSServerSpec guarantees `access` will be one of these values at this point
switch s.ToLower(access) {
case "readwrite":
accessType = "RW"
case "readonly":
accessType = "RO"
case "none":
accessType = "None"
}
idStr := fmt.Sprintf("%v", id)
nfsGaneshaConfig := `
EXPORT {
Export_Id = ` + idStr + `;
Path = /` + path + `;
Pseudo = /` + path + `;
Protocols = 4;
Transports = TCP;
Sectype = sys;
Access_Type = ` + accessType + `;
Squash = ` + s.ToLower(squash) + `;
FSAL {
Name = VFS;
}
}`
return nfsGaneshaConfig
}
func createGaneshaConfig(spec *nfsv1alpha1.NFSServerSpec) string {
serverConfig := getServerConfig(spec.Exports)
exportsList := make([]string, 0)
id := 10
for claimName, claimConfig := range serverConfig {
exportsList = append(exportsList, createGaneshaExport(id, claimName, claimConfig["accessMode"], claimConfig["squash"]))
id++
}
// fsid_device parameter is important as in case of an overlayfs there is a chance that the fsid of the mounted share is same as that of the fsid of "/"
// so setting this to true uses device number as fsid
// related issue https://github.com/nfs-ganesha/nfs-ganesha/issues/140
exportsList = append(exportsList, `NFS_Core_Param
{
fsid_device = true;
}`)
nfsGaneshaConfig := s.Join(exportsList, "\n")
return nfsGaneshaConfig
}
func (c *Controller) createNFSConfigMap(nfsServer *nfsServer) error {
nfsGaneshaConfig := createGaneshaConfig(&nfsServer.spec)
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: nfsConfigMapName,
Namespace: nfsServer.namespace,
OwnerReferences: []metav1.OwnerReference{nfsServer.ownerRef},
Labels: createAppLabels(),
},
Data: map[string]string{
nfsConfigMapName: nfsGaneshaConfig,
},
}
_, err := c.context.Clientset.CoreV1().ConfigMaps(nfsServer.namespace).Create(configMap)
if err != nil {
return err
}
return nil
}
func getPVCNameList(spec *nfsv1alpha1.NFSServerSpec) []string {
exports := spec.Exports
pvcNameList := make([]string, 0)
for _, export := range exports |
return pvcNameList
}
func createPVCSpecList(spec *nfsv1alpha1.NFSServerSpec) []v1.Volume {
pvcSpecList := make([]v1.Volume, 0)
pvcNameList := getPVCNameList(spec)
for _, claimName := range pvcNameList {
pvcSpecList = append(pvcSpecList, v1.Volume{
Name: claimName,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
},
},
})
}
configMapSrc := &v1.ConfigMapVolumeSource{
Items: []v1.KeyToPath{
{
Key: nfsConfigMapName,
Path: nfsConfigMapName,
},
},
}
configMapSrc.Name = nfsConfigMapName
configMapVol := v1.Volume{
Name: nfsConfigMapName,
VolumeSource: v1.VolumeSource{
ConfigMap: configMapSrc,
},
}
pvcSpecList = append(pvcSpecList, configMapVol)
return pvcSpecList
}
func createVolumeMountList(spec *nfsv1alpha1.NFSServerSpec) []v1.VolumeMount {
volumeMountList := make([]v1.VolumeMount, 0)
pvcNameList := getPVCNameList(spec)
for _, claimName := range pvcNameList {
volumeMountList = append(volumeMountList, v1.VolumeMount{
Name: claimName,
MountPath: "/" + claimName,
})
}
configMapVolMount := v1.VolumeMount{
Name: nfsConfigMapName,
MountPath: nfsConfigMapPath,
}
volumeMountList = append(volumeMountList, configMapVolMount)
return volumeMountList
}
func (c *Controller) createNfsPodSpec(nfsServer *nfsServer) v1.PodTemplateSpec {
nfsPodSpec := v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: nfsServer.name,
Namespace: nfsServer.namespace,
Labels: createAppLabels(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: nfsServer.name,
Image: c.containerImage,
Command: []string{"/start.sh"},
Ports: []v1.ContainerPort{
{
Name: "nfs-port",
ContainerPort: int32(nfsPort),
},
{
Name: "rpc-port",
ContainerPort: int32(rpcPort),
},
},
VolumeMounts: createVolumeMountList(&nfsServer.spec),
SecurityContext: &v1.SecurityContext{
Capabilities: &v1.Capabilities{
Add: []v1.Capability{
"SYS_ADMIN",
"DAC_READ_SEARCH",
},
},
},
},
},
Volumes: createPVCSpecList(&nfsServer.spec),
},
}
return nfsPodSpec
}
func (c *Controller) createNfsStatefulSet(nfsServer *nfsServer, replicas int32) error {
appsClient := c.context.Clientset.AppsV1beta1()
nfsPodSpec := c.createNfsPodSpec(nfsServer)
statefulSet := v1beta1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: nfsServer.name,
Namespace: nfsServer.namespace,
Labels: createAppLabels(),
OwnerReferences: []metav1.OwnerReference{nfsServer.ownerRef},
},
Spec: v1beta1.StatefulSetSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: createAppLabels(),
},
Template: nfsPodSpec,
ServiceName: nfsServer.name,
},
}
if _, err := appsClient.StatefulSets(nfsServer.namespace).Create(&statefulSet); err != nil {
if !errors.IsAlreadyExists(err) {
return err
}
logger.Infof("stateful set %s already exists in namespace %s", statefulSet.Name, statefulSet.Namespace)
} else {
logger.Infof("stateful set %s created in namespace %s", statefulSet.Name, statefulSet.Namespace)
}
return nil
}
func (c *Controller) onAdd(obj interface{}) {
nfsObj := obj.(*nfsv1alpha1.NFSServer).DeepCopy()
nfsServer := newNfsServer(nfsObj, c.context)
logger.Infof("new NFS server %s added to namespace %s", nfsObj.Name, nfsServer.namespace)
logger.Infof("validating nfs server spec in namespace %s", nfsServer.namespace)
if err := validateNFSServerSpec(nfsServer.spec); err != nil {
logger.Errorf("Invalid NFS Server spec: %+v", err)
return
}
logger.Infof("creating nfs server service in namespace %s", nfsServer.namespace)
if err := c.createNFSService(nfsServer); err != nil {
logger.Errorf("Unable to create NFS service %+v", err)
}
logger.Infof("creating nfs server configuration in namespace %s", nfsServer.namespace)
if err := c.createNFSConfigMap(nfsServer); err != nil {
logger.Errorf("Unable to create NFS ConfigMap %+v", err)
}
logger.Infof("creating nfs server stateful set in namespace %s", nfsServer.namespace)
if err := c.createNfsStatefulSet(nfsServer, int32(nfsServer.spec.Replicas)); err != nil {
logger.Errorf("Unable to create NFS stateful set %+v", err)
}
}
func (c *Controller) onUpdate(oldObj, newObj interface{}) {
oldNfsServ := oldObj.(*nfsv1alpha1.NFSServer).DeepCopy()
logger.Infof("Received update on NFS server %s in namespace %s. This is currently unsupported.", oldNfsServ.Name, oldNfsServ.Namespace)
}
func (c *Controller) onDelete(obj interface{}) {
cluster := obj.(*nfsv1alpha1.NFSServer).DeepCopy()
logger.Infof("cluster %s deleted from namespace %s", cluster.Name, cluster.Namespace)
}
func validateNFSServerSpec(spec nfsv1alpha1.NFSServerSpec) error {
serverConfig := spec.Exports
for _, export := range serverConfig {
if err := validateAccessMode(export.Server.AccessMode); err != nil {
return err
}
if err := validateSquashMode(export.Server.Squash); err != nil {
return err
}
}
return nil
}
func validateAccessMode(mode string) error {
switch s.ToLower(mode) {
case "readonly":
case "readwrite":
case "none":
default:
return fmt.Errorf("Invalid value (%s) for accessMode, valid values are (ReadOnly, ReadWrite, none)", mode)
}
return nil
}
func validateSquashMode(mode string) error {
switch s.ToLower(mode) {
case "none":
case "rootid":
case "root":
case "all":
default:
return fmt.Errorf("Invalid value (%s) for squash, valid values are (none, rootId, root, all)", mode)
}
return nil
}
| {
claimName := export.PersistentVolumeClaim.ClaimName
if claimName != "" {
pvcNameList = append(pvcNameList, claimName)
}
} | conditional_block |
role_conversion.py | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The role conversion updates are defined in this module.
"""
__authors__ = [
'"Daniel Hans" <daniel.m.hans@gmail.com>',
]
import gae_django
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.runtime import DeadlineExceededError
from django import http
from soc.models.host import Host
from soc.models.linkable import Linkable
from soc.models.mentor import Mentor
from soc.models.org_admin import OrgAdmin
from soc.models.role import StudentInfo
from soc.modules.gsoc.models.mentor import GSoCMentor
from soc.modules.gsoc.models.organization import GSoCOrganization
from soc.modules.gsoc.models.org_admin import GSoCOrgAdmin
from soc.modules.gsoc.models.profile import GSoCProfile
from soc.modules.gsoc.models.program import GSoCProgram
from soc.modules.gsoc.models.student import GSoCStudent
from soc.modules.gsoc.models.student_project import StudentProject
from soc.modules.gsoc.models.student_proposal import StudentProposal
ROLE_MODELS = [GSoCMentor, GSoCOrgAdmin, GSoCStudent]
POPULATED_PROFILE_PROPS = set(
GSoCProfile.properties()) - set(Linkable.properties())
POPULATED_STUDENT_PROPS = StudentInfo.properties()
def getDjangoURLPatterns():
"""Returns the URL patterns for the tasks in this module.
"""
patterns = [
(r'^tasks/role_conversion/update_references',
'soc.tasks.updates.role_conversion.updateReferences'),
(r'^tasks/role_conversion/update_project_references',
'soc.tasks.updates.role_conversion.updateStudentProjectReferences'),
(r'^tasks/role_conversion/update_proposal_references',
'soc.tasks.updates.role_conversion.updateStudentProposalReferences'),
(r'^tasks/role_conversion/update_roles$',
'soc.tasks.updates.role_conversion.updateRoles'),
(r'^tasks/role_conversion/update_mentors$',
'soc.tasks.updates.role_conversion.updateMentors'),
(r'^tasks/role_conversion/update_org_admins$',
'soc.tasks.updates.role_conversion.updateOrgAdmins'),
(r'^tasks/role_conversion/update_students$',
'soc.tasks.updates.role_conversion.updateStudents'),
(r'^tasks/role_conversion/update_hosts$',
'soc.tasks.updates.role_conversion.updateHosts'),
]
return patterns
class HostUpdater(object):
"""Class which is responsible for updating Host entities.
"""
def run(self, batch_size=25):
"""Starts the updater.
"""
self._process(None, batch_size)
def _process(self, start_key, batch_size):
"""Retrieves Host entities and updates them.
"""
query = Host.all()
if start_key:
query.filter('__key__ > ', start_key)
try:
entities = query.fetch(batch_size)
if not entities:
# all entities has already been processed
return
for entity in entities:
sponsor = entity.scope
host_for = entity.user.host_for
if not host_for:
host_for = []
user = entity.user
if sponsor.key() not in host_for:
host_for.append(sponsor.key())
user.host_for = host_for
db.put(user)
# process the next batch of entities
start_key = entities[-1].key()
deferred.defer(self._process, start_key, batch_size)
except DeadlineExceededError:
# here we should probably be more careful
deferred.defer(self._process, start_key, batch_size)
class RoleUpdater(object):
"""Class which is responsible for updating the entities.
"""
def __init__(self, model, profile_model, program_field, role_field=None):
self.MODEL = model
self.PROFILE_MODEL = profile_model
self.PROGRAM_FIELD = program_field
self.ROLE_FIELD = role_field
def run(self, batch_size=25):
"""Starts the updater.
"""
self._process(None, batch_size)
def _processEntity(self, entity):
program = getattr(entity, self.PROGRAM_FIELD)
user = entity.user
# try to find an existing Profile entity or create a new one
key_name = program.key().name() + '/' + user.link_id
properties = {
'link_id': entity.link_id,
'scope_path': program.key().name(),
'scope': program,
'parent': user,
}
for prop in POPULATED_PROFILE_PROPS:
properties[prop] = getattr(entity, prop)
profile = self.PROFILE_MODEL.get_or_insert(
key_name=key_name, **properties)
# do not update anything if the role is already in the profile
if profile.student_info and self.MODEL == GSoCStudent:
return
elif self.ROLE_FIELD:
if entity.scope.key() in getattr(profile, self.ROLE_FIELD):
return
to_put = [profile]
# a non-invalid role is found, we should re-populate the profile
if profile.status == 'invalid' and entity.status != 'invalid':
for prop_name in entity.properties():
value = getattr(entity, prop_name)
setattr(profile, prop_name, value)
if profile.student_info:
profile.student_info = None
if self.ROLE_FIELD:
# the role is either Mentor or OrgAdmin
getattr(profile, self.ROLE_FIELD).append(entity.scope.key())
else:
# the role is certainly Student; we have to create a new StudentInfo
properties = {}
for prop in POPULATED_STUDENT_PROPS:
properties[prop] = getattr(entity, prop)
key_name = profile.key().name()
student_info = StudentInfo(key_name=key_name,
parent=profile, **properties)
profile.student_info = student_info
to_put.append(student_info)
db.run_in_transaction(db.put, to_put)
def _process(self, start_key, batch_size):
"""Retrieves entities and creates or updates a corresponding
Profile entity.
"""
query = self.MODEL.all()
if start_key:
query.filter('__key__ > ', start_key)
try:
entities = query.fetch(batch_size)
if not entities:
# all entities has already been processed
return
for entity in entities:
try:
self._processEntity(entity)
except db.Error, e:
import logging
logging.exception(e)
logging.error("Broke on %s: %s" % (entity.key().name(), self.MODEL))
# process the next batch of entities
start_key = entities[-1].key()
deferred.defer(self._process, start_key, batch_size)
except DeadlineExceededError:
# here we should probably be more careful
deferred.defer(self._process, start_key, batch_size)
def updateHosts(request):
"""Starts a task which updates Host entities.
"""
updater = HostUpdater()
updater.run()
return http.HttpResponse("Ok")
def updateRole(role_name):
"""Starts a task which updates a particular role.
"""
if role_name == 'gsoc_mentor':
updater = RoleUpdater(GSoCMentor, GSoCProfile, 'program', 'mentor_for')
elif role_name == 'gsoc_org_admin':
updater = RoleUpdater(
GSoCOrgAdmin, GSoCProfile, 'program', 'org_admin_for')
elif role_name == 'gsoc_student':
updater = RoleUpdater(GSoCStudent, GSoCProfile, 'scope')
updater.run()
return http.HttpResponse("Ok")
def updateRoles(request):
"""Starts a bunch of iterative tasks which update particular roles.
In order to prevent issues with concurrent access to entities, we set
ETA so that each role is processed in separation.
"""
# update org admins
#updateRole('gsoc_org_admin')
# update mentors
#updateRole('gsoc_mentor')
# update students
# we can assume that students cannot have any other roles, so we do not
# need to set ETA
updateRole('gsoc_student')
def updateMentors(request):
"""Starts an iterative task which update mentors.
"""
return updateRole('gsoc_mentor')
def updateOrgAdmins(request):
"""Starts an iterative task which update org admins.
"""
return updateRole('gsoc_org_admin')
def updateStudents(request):
"""Starts an iterative task which update students.
"""
return updateRole('gsoc_student')
def _getProfileForRole(entity, profile_model):
"""Returns GSoCProfile or GCIProfile which corresponds to the specified
entity.
"""
if isinstance(entity, profile_model):
return entity
if isinstance(entity, OrgAdmin) or isinstance(entity, Mentor):
key_name = entity.program.key().name() + '/' + entity.user.key().name()
else:
key_name = entity.key().name()
parent = entity.user
return profile_model.get_by_key_name(key_name, parent=parent)
def _getProfileKeyForRoleKey(key, profile_model):
"""Returns Key instance of the Profile which corresponds to the Role which
is represented by the specified Key.
"""
entity = db.get(key)
profile = _getProfileForRole(entity, profile_model)
return profile.key()
class ReferenceUpdater(object):
"""Class which is responsible for updating references to Profile in
the specified model.
"""
def __init__(self, model, profile_model, fields_to_update,
lists_to_update=[]):
self.MODEL = model
self.PROFILE_MODEL = profile_model
self.FIELDS_TO_UPDATE = fields_to_update
self.LISTS_TO_UPDATE = lists_to_update
def run(self, batch_size=25):
"""Starts the updater.
"""
self._process(None, batch_size)
def _process(self, start_key, batch_size):
"""Iterates through the entities and updates the references.
"""
query = self.MODEL.all()
if start_key:
query.filter('__key__ > ', start_key)
try:
entities = query.fetch(batch_size)
if not entities:
# all entities has already been processed
return
for entity in entities:
for field in self.FIELDS_TO_UPDATE:
old_reference = getattr(entity, field)
if not old_reference:
continue
# check if the field has not been updated
if isinstance(old_reference, self.PROFILE_MODEL):
continue
profile = _getProfileForRole(old_reference, self.PROFILE_MODEL)
setattr(entity, field, profile)
for list_property in self.LISTS_TO_UPDATE:
l = getattr(entity, list_property)
new_l = []
for key in l:
new_l.append(_getProfileKeyForRoleKey(key, self.PROFILE_MODEL))
setattr(entity, list_property, new_l)
db.put(entities)
start_key = entities[-1].key()
deferred.defer(self._process, start_key, batch_size)
except DeadlineExceededError:
# here we should probably be more careful
deferred.defer(self._process, start_key, batch_size)
def updateReferencesForModel(model):
"""Starts a task which updates references for a particular model.
"""
if model == 'student_proposal':
updater = ReferenceUpdater(StudentProposal, GSoCProfile,
['scope', 'mentor'], ['possible_mentors'])
elif model == 'student_project':
updater = ReferenceUpdater(StudentProject, GSoCProfile,
['mentor', 'student'], ['additional_mentors'])
updater.run()
return http.HttpResponse("Ok")
def updateStudentProjectReferences(request):
|
def updateStudentProposalReferences(request):
"""Starts a bunch of iterative tasks which update references in
StudentProposals.
"""
return updateReferencesForModel('student_proposal')
def updateReferences(request):
"""Starts a bunch of iterative tasks which update references to various roles.
"""
# updates student proposals
updateReferencesForModel('student_proposal')
# updates student projects
updateReferencesForModel('student_project')
return http.HttpResponse("Ok")
| """Starts a bunch of iterative tasks which update references in
StudentProjects.
"""
return updateReferencesForModel('student_project') | identifier_body |
role_conversion.py | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The role conversion updates are defined in this module.
"""
__authors__ = [
'"Daniel Hans" <daniel.m.hans@gmail.com>',
]
import gae_django
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.runtime import DeadlineExceededError
from django import http
from soc.models.host import Host
from soc.models.linkable import Linkable
from soc.models.mentor import Mentor
from soc.models.org_admin import OrgAdmin
from soc.models.role import StudentInfo
from soc.modules.gsoc.models.mentor import GSoCMentor
from soc.modules.gsoc.models.organization import GSoCOrganization
from soc.modules.gsoc.models.org_admin import GSoCOrgAdmin
from soc.modules.gsoc.models.profile import GSoCProfile
from soc.modules.gsoc.models.program import GSoCProgram
from soc.modules.gsoc.models.student import GSoCStudent
from soc.modules.gsoc.models.student_project import StudentProject
from soc.modules.gsoc.models.student_proposal import StudentProposal
ROLE_MODELS = [GSoCMentor, GSoCOrgAdmin, GSoCStudent]
POPULATED_PROFILE_PROPS = set(
GSoCProfile.properties()) - set(Linkable.properties())
POPULATED_STUDENT_PROPS = StudentInfo.properties()
def getDjangoURLPatterns():
"""Returns the URL patterns for the tasks in this module.
"""
patterns = [
(r'^tasks/role_conversion/update_references',
'soc.tasks.updates.role_conversion.updateReferences'),
(r'^tasks/role_conversion/update_project_references',
'soc.tasks.updates.role_conversion.updateStudentProjectReferences'),
(r'^tasks/role_conversion/update_proposal_references',
'soc.tasks.updates.role_conversion.updateStudentProposalReferences'),
(r'^tasks/role_conversion/update_roles$',
'soc.tasks.updates.role_conversion.updateRoles'),
(r'^tasks/role_conversion/update_mentors$',
'soc.tasks.updates.role_conversion.updateMentors'),
(r'^tasks/role_conversion/update_org_admins$',
'soc.tasks.updates.role_conversion.updateOrgAdmins'),
(r'^tasks/role_conversion/update_students$',
'soc.tasks.updates.role_conversion.updateStudents'),
(r'^tasks/role_conversion/update_hosts$',
'soc.tasks.updates.role_conversion.updateHosts'),
]
return patterns
class HostUpdater(object):
"""Class which is responsible for updating Host entities.
"""
def run(self, batch_size=25):
"""Starts the updater.
"""
self._process(None, batch_size)
def _process(self, start_key, batch_size):
"""Retrieves Host entities and updates them.
"""
query = Host.all()
if start_key:
query.filter('__key__ > ', start_key)
try:
entities = query.fetch(batch_size)
if not entities:
# all entities has already been processed
return
for entity in entities:
sponsor = entity.scope
host_for = entity.user.host_for
if not host_for:
host_for = []
user = entity.user
if sponsor.key() not in host_for:
host_for.append(sponsor.key())
user.host_for = host_for
db.put(user)
# process the next batch of entities
start_key = entities[-1].key()
deferred.defer(self._process, start_key, batch_size)
except DeadlineExceededError:
# here we should probably be more careful
deferred.defer(self._process, start_key, batch_size)
class RoleUpdater(object):
"""Class which is responsible for updating the entities.
"""
def __init__(self, model, profile_model, program_field, role_field=None):
self.MODEL = model
self.PROFILE_MODEL = profile_model
self.PROGRAM_FIELD = program_field
self.ROLE_FIELD = role_field
def run(self, batch_size=25):
"""Starts the updater.
"""
self._process(None, batch_size)
def _processEntity(self, entity):
program = getattr(entity, self.PROGRAM_FIELD)
user = entity.user
# try to find an existing Profile entity or create a new one
key_name = program.key().name() + '/' + user.link_id
properties = {
'link_id': entity.link_id,
'scope_path': program.key().name(),
'scope': program,
'parent': user,
}
for prop in POPULATED_PROFILE_PROPS:
properties[prop] = getattr(entity, prop)
profile = self.PROFILE_MODEL.get_or_insert(
key_name=key_name, **properties)
# do not update anything if the role is already in the profile
if profile.student_info and self.MODEL == GSoCStudent:
return
elif self.ROLE_FIELD:
if entity.scope.key() in getattr(profile, self.ROLE_FIELD):
return
to_put = [profile]
# a non-invalid role is found, we should re-populate the profile
if profile.status == 'invalid' and entity.status != 'invalid':
for prop_name in entity.properties():
value = getattr(entity, prop_name)
setattr(profile, prop_name, value)
if profile.student_info:
profile.student_info = None
if self.ROLE_FIELD:
# the role is either Mentor or OrgAdmin
getattr(profile, self.ROLE_FIELD).append(entity.scope.key())
else:
# the role is certainly Student; we have to create a new StudentInfo
properties = {}
for prop in POPULATED_STUDENT_PROPS:
properties[prop] = getattr(entity, prop)
key_name = profile.key().name()
student_info = StudentInfo(key_name=key_name,
parent=profile, **properties)
profile.student_info = student_info
to_put.append(student_info)
db.run_in_transaction(db.put, to_put)
def _process(self, start_key, batch_size):
"""Retrieves entities and creates or updates a corresponding
Profile entity.
"""
query = self.MODEL.all()
if start_key:
query.filter('__key__ > ', start_key)
try:
entities = query.fetch(batch_size)
if not entities:
# all entities has already been processed
return
for entity in entities:
try:
self._processEntity(entity)
except db.Error, e:
import logging
logging.exception(e)
logging.error("Broke on %s: %s" % (entity.key().name(), self.MODEL))
# process the next batch of entities
start_key = entities[-1].key()
deferred.defer(self._process, start_key, batch_size)
except DeadlineExceededError:
# here we should probably be more careful
deferred.defer(self._process, start_key, batch_size)
def | (request):
"""Starts a task which updates Host entities.
"""
updater = HostUpdater()
updater.run()
return http.HttpResponse("Ok")
def updateRole(role_name):
"""Starts a task which updates a particular role.
"""
if role_name == 'gsoc_mentor':
updater = RoleUpdater(GSoCMentor, GSoCProfile, 'program', 'mentor_for')
elif role_name == 'gsoc_org_admin':
updater = RoleUpdater(
GSoCOrgAdmin, GSoCProfile, 'program', 'org_admin_for')
elif role_name == 'gsoc_student':
updater = RoleUpdater(GSoCStudent, GSoCProfile, 'scope')
updater.run()
return http.HttpResponse("Ok")
def updateRoles(request):
"""Starts a bunch of iterative tasks which update particular roles.
In order to prevent issues with concurrent access to entities, we set
ETA so that each role is processed in separation.
"""
# update org admins
#updateRole('gsoc_org_admin')
# update mentors
#updateRole('gsoc_mentor')
# update students
# we can assume that students cannot have any other roles, so we do not
# need to set ETA
updateRole('gsoc_student')
def updateMentors(request):
"""Starts an iterative task which update mentors.
"""
return updateRole('gsoc_mentor')
def updateOrgAdmins(request):
"""Starts an iterative task which update org admins.
"""
return updateRole('gsoc_org_admin')
def updateStudents(request):
"""Starts an iterative task which update students.
"""
return updateRole('gsoc_student')
def _getProfileForRole(entity, profile_model):
"""Returns GSoCProfile or GCIProfile which corresponds to the specified
entity.
"""
if isinstance(entity, profile_model):
return entity
if isinstance(entity, OrgAdmin) or isinstance(entity, Mentor):
key_name = entity.program.key().name() + '/' + entity.user.key().name()
else:
key_name = entity.key().name()
parent = entity.user
return profile_model.get_by_key_name(key_name, parent=parent)
def _getProfileKeyForRoleKey(key, profile_model):
"""Returns Key instance of the Profile which corresponds to the Role which
is represented by the specified Key.
"""
entity = db.get(key)
profile = _getProfileForRole(entity, profile_model)
return profile.key()
class ReferenceUpdater(object):
"""Class which is responsible for updating references to Profile in
the specified model.
"""
def __init__(self, model, profile_model, fields_to_update,
lists_to_update=[]):
self.MODEL = model
self.PROFILE_MODEL = profile_model
self.FIELDS_TO_UPDATE = fields_to_update
self.LISTS_TO_UPDATE = lists_to_update
def run(self, batch_size=25):
"""Starts the updater.
"""
self._process(None, batch_size)
def _process(self, start_key, batch_size):
"""Iterates through the entities and updates the references.
"""
query = self.MODEL.all()
if start_key:
query.filter('__key__ > ', start_key)
try:
entities = query.fetch(batch_size)
if not entities:
# all entities has already been processed
return
for entity in entities:
for field in self.FIELDS_TO_UPDATE:
old_reference = getattr(entity, field)
if not old_reference:
continue
# check if the field has not been updated
if isinstance(old_reference, self.PROFILE_MODEL):
continue
profile = _getProfileForRole(old_reference, self.PROFILE_MODEL)
setattr(entity, field, profile)
for list_property in self.LISTS_TO_UPDATE:
l = getattr(entity, list_property)
new_l = []
for key in l:
new_l.append(_getProfileKeyForRoleKey(key, self.PROFILE_MODEL))
setattr(entity, list_property, new_l)
db.put(entities)
start_key = entities[-1].key()
deferred.defer(self._process, start_key, batch_size)
except DeadlineExceededError:
# here we should probably be more careful
deferred.defer(self._process, start_key, batch_size)
def updateReferencesForModel(model):
"""Starts a task which updates references for a particular model.
"""
if model == 'student_proposal':
updater = ReferenceUpdater(StudentProposal, GSoCProfile,
['scope', 'mentor'], ['possible_mentors'])
elif model == 'student_project':
updater = ReferenceUpdater(StudentProject, GSoCProfile,
['mentor', 'student'], ['additional_mentors'])
updater.run()
return http.HttpResponse("Ok")
def updateStudentProjectReferences(request):
"""Starts a bunch of iterative tasks which update references in
StudentProjects.
"""
return updateReferencesForModel('student_project')
def updateStudentProposalReferences(request):
"""Starts a bunch of iterative tasks which update references in
StudentProposals.
"""
return updateReferencesForModel('student_proposal')
def updateReferences(request):
"""Starts a bunch of iterative tasks which update references to various roles.
"""
# updates student proposals
updateReferencesForModel('student_proposal')
# updates student projects
updateReferencesForModel('student_project')
return http.HttpResponse("Ok")
| updateHosts | identifier_name |
role_conversion.py | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. | # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The role conversion updates are defined in this module.
"""
__authors__ = [
'"Daniel Hans" <daniel.m.hans@gmail.com>',
]
import gae_django
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.runtime import DeadlineExceededError
from django import http
from soc.models.host import Host
from soc.models.linkable import Linkable
from soc.models.mentor import Mentor
from soc.models.org_admin import OrgAdmin
from soc.models.role import StudentInfo
from soc.modules.gsoc.models.mentor import GSoCMentor
from soc.modules.gsoc.models.organization import GSoCOrganization
from soc.modules.gsoc.models.org_admin import GSoCOrgAdmin
from soc.modules.gsoc.models.profile import GSoCProfile
from soc.modules.gsoc.models.program import GSoCProgram
from soc.modules.gsoc.models.student import GSoCStudent
from soc.modules.gsoc.models.student_project import StudentProject
from soc.modules.gsoc.models.student_proposal import StudentProposal
ROLE_MODELS = [GSoCMentor, GSoCOrgAdmin, GSoCStudent]
POPULATED_PROFILE_PROPS = set(
GSoCProfile.properties()) - set(Linkable.properties())
POPULATED_STUDENT_PROPS = StudentInfo.properties()
def getDjangoURLPatterns():
    """Returns the URL patterns for the tasks in this module.

    Each entry maps a task URL regex to the dotted path of the view
    function in this module that handles it.
    """
    url_prefix = r'^tasks/role_conversion/'
    view_prefix = 'soc.tasks.updates.role_conversion.'
    routes = [
        ('update_references', 'updateReferences'),
        ('update_project_references', 'updateStudentProjectReferences'),
        ('update_proposal_references', 'updateStudentProposalReferences'),
        ('update_roles$', 'updateRoles'),
        ('update_mentors$', 'updateMentors'),
        ('update_org_admins$', 'updateOrgAdmins'),
        ('update_students$', 'updateStudents'),
        ('update_hosts$', 'updateHosts'),
    ]
    return [(url_prefix + url, view_prefix + view) for url, view in routes]
class HostUpdater(object):
    """Class which is responsible for updating Host entities.

    For every Host entity, the sponsor's key is appended to the host_for
    list on the corresponding User entity (if not already present), moving
    host information from Host entities onto the User model.
    """

    def run(self, batch_size=25):
        """Starts the updater.

        Args:
          batch_size: number of Host entities handled per deferred task.
        """
        self._process(None, batch_size)

    def _process(self, start_key, batch_size):
        """Retrieves Host entities and updates them.

        Args:
          start_key: key of the last entity handled by the previous batch;
              None on the first invocation.
          batch_size: number of entities to fetch in this batch.
        """
        query = Host.all()
        if start_key:
            # resume iteration right after the last processed entity
            query.filter('__key__ > ', start_key)

        try:
            entities = query.fetch(batch_size)
            if not entities:
                # all entities has already been processed
                return

            for entity in entities:
                sponsor = entity.scope
                host_for = entity.user.host_for
                if not host_for:
                    host_for = []
                user = entity.user
                # NOTE(review): indentation reconstructed — the user is
                # written only when the sponsor key was newly added; confirm
                # against upstream history.
                if sponsor.key() not in host_for:
                    host_for.append(sponsor.key())
                    user.host_for = host_for
                    db.put(user)

            # process the next batch of entities
            start_key = entities[-1].key()
            deferred.defer(self._process, start_key, batch_size)
        except DeadlineExceededError:
            # here we should probably be more careful
            deferred.defer(self._process, start_key, batch_size)
class RoleUpdater(object):
    """Class which is responsible for updating the entities.

    Converts old role entities (GSoCMentor, GSoCOrgAdmin, GSoCStudent) into
    per-program profile entities of PROFILE_MODEL, iterating in deferred
    batches.
    """

    def __init__(self, model, profile_model, program_field, role_field=None):
        # model: role model class whose entities are converted
        # profile_model: profile model class that receives the data
        # program_field: name of the property on the role entity that points
        #     to its program (e.g. 'program' or 'scope')
        # role_field: name of the key-list property on the profile for this
        #     role (e.g. 'mentor_for'); None for the student role
        self.MODEL = model
        self.PROFILE_MODEL = profile_model
        self.PROGRAM_FIELD = program_field
        self.ROLE_FIELD = role_field

    def run(self, batch_size=25):
        """Starts the updater.

        Args:
          batch_size: number of role entities handled per deferred task.
        """
        self._process(None, batch_size)

    def _processEntity(self, entity):
        # Creates or updates the profile that corresponds to a single role
        # entity and persists it transactionally.
        program = getattr(entity, self.PROGRAM_FIELD)
        user = entity.user

        # try to find an existing Profile entity or create a new one
        key_name = program.key().name() + '/' + user.link_id
        properties = {
            'link_id': entity.link_id,
            'scope_path': program.key().name(),
            'scope': program,
            'parent': user,
            }
        for prop in POPULATED_PROFILE_PROPS:
            properties[prop] = getattr(entity, prop)
        profile = self.PROFILE_MODEL.get_or_insert(
            key_name=key_name, **properties)

        # do not update anything if the role is already in the profile
        if profile.student_info and self.MODEL == GSoCStudent:
            return
        elif self.ROLE_FIELD:
            if entity.scope.key() in getattr(profile, self.ROLE_FIELD):
                return

        to_put = [profile]

        # a non-invalid role is found, we should re-populate the profile
        if profile.status == 'invalid' and entity.status != 'invalid':
            for prop_name in entity.properties():
                value = getattr(entity, prop_name)
                setattr(profile, prop_name, value)
            if profile.student_info:
                profile.student_info = None

        if self.ROLE_FIELD:
            # the role is either Mentor or OrgAdmin
            getattr(profile, self.ROLE_FIELD).append(entity.scope.key())
        else:
            # the role is certainly Student; we have to create a new StudentInfo
            properties = {}
            for prop in POPULATED_STUDENT_PROPS:
                properties[prop] = getattr(entity, prop)
            key_name = profile.key().name()
            student_info = StudentInfo(key_name=key_name,
                parent=profile, **properties)
            profile.student_info = student_info
            to_put.append(student_info)

        db.run_in_transaction(db.put, to_put)

    def _process(self, start_key, batch_size):
        """Retrieves entities and creates or updates a corresponding
        Profile entity.

        Args:
          start_key: key of the last entity handled by the previous batch;
              None on the first invocation.
          batch_size: number of entities to fetch in this batch.
        """
        query = self.MODEL.all()
        if start_key:
            query.filter('__key__ > ', start_key)

        try:
            entities = query.fetch(batch_size)
            if not entities:
                # all entities has already been processed
                return

            for entity in entities:
                try:
                    self._processEntity(entity)
                # Python 2 except syntax; log and continue with the next
                # entity rather than aborting the whole batch
                except db.Error, e:
                    import logging
                    logging.exception(e)
                    logging.error("Broke on %s: %s" % (entity.key().name(), self.MODEL))

            # process the next batch of entities
            start_key = entities[-1].key()
            deferred.defer(self._process, start_key, batch_size)
        except DeadlineExceededError:
            # here we should probably be more careful
            deferred.defer(self._process, start_key, batch_size)
def updateHosts(request):
    """Spawns the deferred task chain that migrates Host entities.

    Args:
      request: the Django HTTP request that triggered this task.
    """
    HostUpdater().run()
    return http.HttpResponse("Ok")
def updateRole(role_name):
    """Starts a task which updates a particular role.

    Args:
      role_name: one of 'gsoc_mentor', 'gsoc_org_admin' or 'gsoc_student'.

    Returns:
      an "Ok" HttpResponse once the first batch has been scheduled.

    Raises:
      ValueError: if role_name does not denote a supported role.
    """
    if role_name == 'gsoc_mentor':
        updater = RoleUpdater(GSoCMentor, GSoCProfile, 'program', 'mentor_for')
    elif role_name == 'gsoc_org_admin':
        updater = RoleUpdater(
            GSoCOrgAdmin, GSoCProfile, 'program', 'org_admin_for')
    elif role_name == 'gsoc_student':
        updater = RoleUpdater(GSoCStudent, GSoCProfile, 'scope')
    else:
        # previously an unknown role name crashed with UnboundLocalError on
        # 'updater'; fail fast with a meaningful error instead
        raise ValueError('Unknown role name: %s' % role_name)
    updater.run()
    return http.HttpResponse("Ok")
def updateRoles(request):
    """Starts a bunch of iterative tasks which update particular roles.

    In order to prevent issues with concurrent access to entities, we set
    ETA so that each role is processed in separation.

    Args:
      request: the Django HTTP request that triggered this task.

    Returns:
      the HttpResponse produced by the student update. Previously this
      view returned None, which Django rejects for a view function.
    """
    # update org admins
    #updateRole('gsoc_org_admin')
    # update mentors
    #updateRole('gsoc_mentor')
    # update students
    # we can assume that students cannot have any other roles, so we do not
    # need to set ETA
    return updateRole('gsoc_student')
def updateMentors(request):
    """Kicks off the iterative mentor-role conversion.

    Args:
      request: the Django HTTP request that triggered this task.
    """
    response = updateRole('gsoc_mentor')
    return response
def updateOrgAdmins(request):
    """Kicks off the iterative org-admin-role conversion.

    Args:
      request: the Django HTTP request that triggered this task.
    """
    response = updateRole('gsoc_org_admin')
    return response
def updateStudents(request):
    """Kicks off the iterative student-role conversion.

    Args:
      request: the Django HTTP request that triggered this task.
    """
    response = updateRole('gsoc_student')
    return response
def _getProfileForRole(entity, profile_model):
"""Returns GSoCProfile or GCIProfile which corresponds to the specified
entity.
"""
if isinstance(entity, profile_model):
return entity
if isinstance(entity, OrgAdmin) or isinstance(entity, Mentor):
key_name = entity.program.key().name() + '/' + entity.user.key().name()
else:
key_name = entity.key().name()
parent = entity.user
return profile_model.get_by_key_name(key_name, parent=parent)
def _getProfileKeyForRoleKey(key, profile_model):
    """Maps a role entity key to the key of its corresponding profile.

    Args:
      key: db.Key of a role entity.
      profile_model: profile model class used for the lookup.

    Returns:
      the db.Key of the profile which corresponds to the role.
    """
    role_entity = db.get(key)
    return _getProfileForRole(role_entity, profile_model).key()
class ReferenceUpdater(object):
    """Class which is responsible for updating references to Profile in
    the specified model.

    Reference properties named in fields_to_update and key-list properties
    named in lists_to_update are translated from old role entities/keys to
    the corresponding profile entities/keys, in deferred batches.
    """

    def __init__(self, model, profile_model, fields_to_update,
                 lists_to_update=None):
        """Initializes the updater.

        Args:
          model: model class whose entities should be updated.
          profile_model: profile model class the references should point to.
          fields_to_update: names of reference properties to rewrite.
          lists_to_update: optional names of key-list properties to rewrite.
        """
        self.MODEL = model
        self.PROFILE_MODEL = profile_model
        self.FIELDS_TO_UPDATE = fields_to_update
        # BUGFIX: the default used to be a mutable [] shared by every
        # instance; use a None sentinel so each instance gets its own list.
        self.LISTS_TO_UPDATE = [] if lists_to_update is None else lists_to_update

    def run(self, batch_size=25):
        """Starts the updater.

        Args:
          batch_size: number of entities handled per deferred task.
        """
        self._process(None, batch_size)

    def _process(self, start_key, batch_size):
        """Iterates through the entities and updates the references.

        Args:
          start_key: key of the last entity handled by the previous batch;
              None on the first invocation.
          batch_size: number of entities to fetch in this batch.
        """
        query = self.MODEL.all()
        if start_key:
            # resume iteration right after the last processed entity
            query.filter('__key__ > ', start_key)

        try:
            entities = query.fetch(batch_size)
            if not entities:
                # all entities has already been processed
                return

            for entity in entities:
                for field in self.FIELDS_TO_UPDATE:
                    old_reference = getattr(entity, field)
                    if not old_reference:
                        continue
                    # check if the field has not been updated
                    if isinstance(old_reference, self.PROFILE_MODEL):
                        continue
                    profile = _getProfileForRole(
                        old_reference, self.PROFILE_MODEL)
                    setattr(entity, field, profile)

                for list_property in self.LISTS_TO_UPDATE:
                    old_keys = getattr(entity, list_property)
                    new_keys = []
                    for key in old_keys:
                        new_keys.append(
                            _getProfileKeyForRoleKey(key, self.PROFILE_MODEL))
                    setattr(entity, list_property, new_keys)

            db.put(entities)

            # process the next batch of entities
            start_key = entities[-1].key()
            deferred.defer(self._process, start_key, batch_size)
        except DeadlineExceededError:
            # here we should probably be more careful
            deferred.defer(self._process, start_key, batch_size)
def updateReferencesForModel(model):
    """Starts a task which updates references for a particular model.

    Args:
      model: either 'student_proposal' or 'student_project'.

    Returns:
      an "Ok" HttpResponse once the first batch has been scheduled.

    Raises:
      ValueError: if model is not one of the supported identifiers.
    """
    if model == 'student_proposal':
        updater = ReferenceUpdater(StudentProposal, GSoCProfile,
                                   ['scope', 'mentor'], ['possible_mentors'])
    elif model == 'student_project':
        updater = ReferenceUpdater(StudentProject, GSoCProfile,
                                   ['mentor', 'student'], ['additional_mentors'])
    else:
        # previously an unknown model crashed with UnboundLocalError on
        # 'updater'; fail fast with a meaningful error instead
        raise ValueError('Unknown model identifier: %s' % model)
    updater.run()
    return http.HttpResponse("Ok")
def updateStudentProjectReferences(request):
    """Schedules the iterative reference update for StudentProject entities.

    Args:
      request: the Django HTTP request that triggered this task.
    """
    response = updateReferencesForModel('student_project')
    return response
def updateStudentProposalReferences(request):
    """Schedules the iterative reference update for StudentProposal entities.

    Args:
      request: the Django HTTP request that triggered this task.
    """
    response = updateReferencesForModel('student_proposal')
    return response
def updateReferences(request):
"""Starts a bunch of iterative tasks which update references to various roles.
"""
# updates student proposals
updateReferencesForModel('student_proposal')
# updates student projects
updateReferencesForModel('student_project')
return http.HttpResponse("Ok") | # You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.