file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
ledger_cleanup_service.rs | //! The `ledger_cleanup_service` drops older ledger data to limit disk space usage
use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore_db::Result as BlockstoreResult;
use solana_measure::measure::Measure;
use solana_metrics::datapoint_debug;
use solana_sdk::clock::Slot;
use std::string::ToString;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError};
use std::sync::Arc;
use std::thread;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
// - To try and keep the RocksDB size under 400GB:
// Seeing about 1600b/shred, using 2000b/shred for margin, so 200m shreds can be stored in 400gb.
// at 5k shreds/slot at 50k tps, this is 500k slots (~5 hours).
// At idle, 60 shreds/slot this is about 4m slots (18 days)
// This is chosen to allow enough time for
// - A validator to download a snapshot from a peer and boot from it
// - To make sure that if a validator needs to reboot from its own snapshot, it has enough slots locally
// to catch back up to where it was when it stopped
pub const DEFAULT_MAX_LEDGER_SHREDS: u64 = 200_000_000;
// Allow down to 50m, or 3.5 days at idle, 1hr at 50k load, around ~100GB
pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000;
// Check for removing slots at this interval so we don't purge too often
// and starve other blockstore users.
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
// Remove a limited number of slots at a time, so the operation
// does not take too long and block other blockstore users.
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;
pub struct LedgerCleanupService {
t_cleanup: JoinHandle<()>,
}
impl LedgerCleanupService {
pub fn new(
new_root_receiver: Receiver<Slot>,
blockstore: Arc<Blockstore>,
max_ledger_shreds: u64,
exit: &Arc<AtomicBool>,
) -> Self {
info!(
"LedgerCleanupService active. Max Ledger Slots {}",
max_ledger_shreds
);
let exit = exit.clone();
let mut last_purge_slot = 0;
let t_cleanup = Builder::new()
.name("solana-ledger-cleanup".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
if let Err(e) = Self::cleanup_ledger(
&new_root_receiver,
&blockstore,
max_ledger_shreds,
&mut last_purge_slot,
DEFAULT_PURGE_SLOT_INTERVAL,
) {
match e {
RecvTimeoutError::Disconnected => break,
RecvTimeoutError::Timeout => (),
}
}
})
.unwrap();
Self { t_cleanup }
}
fn find_slots_to_clean(
blockstore: &Arc<Blockstore>,
root: Slot,
max_ledger_shreds: u64,
) -> (u64, Slot, Slot) {
let mut shreds = Vec::new();
let mut iterate_time = Measure::start("iterate_time");
let mut total_shreds = 0;
let mut first_slot = 0;
for (i, (slot, meta)) in blockstore.slot_meta_iterator(0).unwrap().enumerate() {
if i == 0 {
first_slot = slot;
debug!("purge: searching from slot: {}", slot);
}
// Not exact since non-full slots will have holes
total_shreds += meta.received;
shreds.push((slot, meta.received));
if slot > root {
break;
}
}
iterate_time.stop();
info!(
"checking for ledger purge: max_shreds: {} slots: {} total_shreds: {} {}",
max_ledger_shreds,
shreds.len(),
total_shreds,
iterate_time
);
if (total_shreds as u64) < max_ledger_shreds {
return (0, 0, 0);
}
let mut cur_shreds = 0;
let mut lowest_slot_to_clean = shreds[0].0;
for (slot, num_shreds) in shreds.iter().rev() {
cur_shreds += *num_shreds as u64;
if cur_shreds > max_ledger_shreds {
lowest_slot_to_clean = *slot;
break;
}
}
(cur_shreds, lowest_slot_to_clean, first_slot)
}
pub fn cleanup_ledger(
new_root_receiver: &Receiver<Slot>,
blockstore: &Arc<Blockstore>,
max_ledger_shreds: u64,
last_purge_slot: &mut u64,
purge_interval: u64,
) -> Result<(), RecvTimeoutError> {
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Get the newest root
while let Ok(new_root) = new_root_receiver.try_recv() {
root = new_root;
}
if root - *last_purge_slot > purge_interval {
let disk_utilization_pre = blockstore.storage_size();
info!(
"purge: new root: {} last_purge: {} purge_interval: {} disk: {:?}",
root, last_purge_slot, purge_interval, disk_utilization_pre
);
*last_purge_slot = root;
let (num_shreds_to_clean, lowest_slot_to_clean, mut first_slot) =
Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
if num_shreds_to_clean > 0 {
debug!(
"cleaning up to: {} shreds: {} first: {}",
lowest_slot_to_clean, num_shreds_to_clean, first_slot
);
loop {
let current_lowest =
std::cmp::min(lowest_slot_to_clean, first_slot + DEFAULT_PURGE_BATCH_SIZE);
let mut slot_update_time = Measure::start("slot_update");
*blockstore.lowest_cleanup_slot.write().unwrap() = current_lowest;
slot_update_time.stop();
let mut clean_time = Measure::start("ledger_clean");
blockstore.purge_slots(first_slot, Some(current_lowest));
clean_time.stop();
debug!(
"ledger purge {} -> {}: {} {}",
first_slot, current_lowest, slot_update_time, clean_time
);
first_slot += DEFAULT_PURGE_BATCH_SIZE;
if current_lowest == lowest_slot_to_clean {
break;
}
thread::sleep(Duration::from_millis(500));
}
}
let disk_utilization_post = blockstore.storage_size();
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post);
}
Ok(())
}
fn report_disk_metrics(pre: BlockstoreResult<u64>, post: BlockstoreResult<u64>) {
if let (Ok(pre), Ok(post)) = (pre, post) {
datapoint_debug!(
"ledger_disk_utilization",
("disk_utilization_pre", pre as i64, i64),
("disk_utilization_post", post as i64, i64),
("disk_utilization_delta", (pre as i64 - post as i64), i64)
);
}
}
pub fn join(self) -> thread::Result<()> {
self.t_cleanup.join()
}
}
#[cfg(test)]
mod tests {
use super::*;
use solana_ledger::blockstore::make_many_slot_entries;
use solana_ledger::get_tmp_ledger_path;
use std::sync::mpsc::channel;
#[test]
fn test_cleanup() {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_many_slot_entries(0, 50, 5);
blockstore.insert_shreds(shreds, None, false).unwrap(); |
//send a signal to kill all but 5 shreds, which will be in the newest slots
let mut last_purge_slot = 0;
sender.send(50).unwrap();
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10)
.unwrap();
//check that 0-40 don't exist
blockstore
.slot_meta_iterator(0)
.unwrap()
.for_each(|(slot, _)| assert!(slot > 40));
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_cleanup_speed() {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
let mut blockstore = Blockstore::open(&blockstore_path).unwrap();
blockstore.set_no_compaction(true);
let blockstore = Arc::new(blockstore);
let (sender, receiver) = channel();
let mut first_insert = Measure::start("first_insert");
let initial_slots = 50;
let initial_entries = 5;
let (shreds, _) = make_many_slot_entries(0, initial_slots, initial_entries);
blockstore.insert_shreds(shreds, None, false).unwrap();
first_insert.stop();
info!("{}", first_insert);
let mut last_purge_slot = 0;
let mut slot = initial_slots;
let mut num_slots = 6;
for _ in 0..5 {
let mut insert_time = Measure::start("insert time");
let batch_size = 2;
let batches = num_slots / batch_size;
for i in 0..batches {
let (shreds, _) = make_many_slot_entries(slot + i * batch_size, batch_size, 5);
blockstore.insert_shreds(shreds, None, false).unwrap();
if i % 100 == 0 {
info!("inserting..{} of {}", i, batches);
}
}
insert_time.stop();
let mut time = Measure::start("purge time");
sender.send(slot + num_slots).unwrap();
LedgerCleanupService::cleanup_ledger(
&receiver,
&blockstore,
initial_slots,
&mut last_purge_slot,
10,
)
.unwrap();
time.stop();
info!(
"slot: {} size: {} {} {}",
slot, num_slots, insert_time, time
);
slot += num_slots;
num_slots *= 2;
}
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
} | let blockstore = Arc::new(blockstore);
let (sender, receiver) = channel(); | random_line_split |
space_group.py | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Ignore false-positives for redefined `product` functions:
# pylint: disable=function-redefined
import numpy as np
from functools import reduce
from math import pi
from typing import Optional
from collections.abc import Iterable, Sequence
from .lattice import Lattice
from netket.utils import struct
from netket.utils.types import Array, Union
from netket.utils.float import prune_zeros
from netket.utils.dispatch import dispatch
from netket.utils.group import (
Identity,
PointGroup,
Permutation,
PermutationGroup,
)
class Translation(Permutation):
r"""
Custom subclass of `Permutation` that represents a lattice permutation.
Stores translation lattice vector and generates a sensible name from it.
The product of two `Translation`s carries the appropriate displacement vector.
"""
def __init__(self, permutation: Array, displacement: Array):
r"""
Creates a `Translation` from a permutation array and a displacement vector
Arguments:
permutation: a 1D array listing :math:`g^{-1}(x)` for all
:math:`0\le x < N` (i.e., `V[permutation]` permutes the
elements of `V` as desired)
displacement: displacement vector is units of lattice basis vectors
Returns:
a `Translation` object encoding the same information
"""
super().__init__(permutation)
self._vector = np.asarray(displacement)
@property
def _name(self):
return f"Translation({self._vector.tolist()})"
@dispatch
def product(p: Translation, q: Translation):
return Translation(p(np.asarray(q)), p._vector + q._vector)
def _ensure_iterable(x):
"""Extracts iterables given in varargs"""
if isinstance(x[0], Iterable):
if len(x) > 1:
raise TypeError("Either Iterable or variable argument list expected")
return x[0]
else:
return x
@struct.dataclass
class SpaceGroupBuilder:
"""
Class to handle the space group symmetries of `Lattice`.
Constructs `PermutationGroup`s that represent the action on a `Lattice` of
* a geometrical point group given as a constructor argument,
* its rotational subgroup (i.e. point group symmetries with determinant +1)
* the translation group of the same lattice
* and the space group that is generated as the semidirect product of
the supplied point group and the translation group.
Also generates space group irreps for symmetrising wave functions.
"""
lattice: Lattice
point_group_: PointGroup
def __post_init__(self):
object.__setattr__(
self,
"point_group_",
self.point_group_.replace(unit_cell=self.lattice.basis_vectors),
)
# TODO describe ordering of group elements here and later in docstring
@struct.property_cached
def point_group(self) -> PermutationGroup:
"""
The point group as a `PermutationGroup` acting on the sites of `self.lattice`.
"""
perms = []
for p in self.point_group_:
if isinstance(p, Identity):
perms.append(Identity())
else:
# note that we need the preimages in the permutation
|
return PermutationGroup(perms, degree=self.lattice.n_nodes)
@struct.property_cached
def rotation_group(self) -> PermutationGroup:
"""The group of rotations (i.e. point group symmetries with determinant +1)
as a `PermutationGroup` acting on the sites of `self.lattice`."""
perms = []
for p in self.point_group_.rotation_group():
if isinstance(p, Identity):
perms.append(Identity())
else:
# note that we need the preimages in the permutation
perm = self.lattice.id_from_position(p.preimage(self.lattice.positions))
perms.append(Permutation(perm, name=str(p)))
return PermutationGroup(perms, degree=self.lattice.n_nodes)
def _translations_along_axis(self, axis: int) -> PermutationGroup:
"""
The group of valid translations along an axis as a `PermutationGroup`
acting on the sites of `self.lattice.`
"""
if self.lattice._pbc[axis]:
trans_list = [Identity()]
# note that we need the preimages in the permutation
trans_perm = self.lattice.id_from_position(
self.lattice.positions - self.lattice.basis_vectors[axis]
)
vector = np.zeros(self.lattice.ndim, dtype=int)
vector[axis] = 1
trans_by_one = Translation(trans_perm, vector)
for _ in range(1, self.lattice.extent[axis]):
trans_list.append(trans_list[-1] @ trans_by_one)
return PermutationGroup(trans_list, degree=self.lattice.n_nodes)
else:
return PermutationGroup([Identity()], degree=self.lattice.n_nodes)
@struct.property_cached
def _full_translation_group(self) -> PermutationGroup:
"""
The group of valid translations of `self.lattice` as a `PermutationGroup`
acting on the sites of the same.
"""
return reduce(
PermutationGroup.__matmul__,
[self._translations_along_axis(i) for i in range(self.lattice.ndim)],
)
def translation_group(
self, axes: Optional[Union[int, Sequence[int]]] = None
) -> PermutationGroup:
"""
The group of valid translations of `self.lattice` as a `PermutationGroup`
acting on the sites of the same.
"""
if axes is None:
return self._full_translation_group
elif isinstance(axes, int):
return self._translations_along_axis(axes)
else:
return reduce(
PermutationGroup.__matmul__,
[self._translations_along_axis(i) for i in axes],
)
@struct.property_cached
def space_group(self) -> PermutationGroup:
"""
The space group generated by `self.point_group` and `self.translation_group`.
"""
return self._full_translation_group @ self.point_group
def _little_group_index(self, k: Array) -> Array:
"""
Returns the indices of the elements of the little group corresponding to
wave vector `k`.
"""
# calculate k' = p(k) for all p in the point group
big_star = np.tensordot(self.point_group_.matrices(), k, axes=1)
big_star = self.lattice.to_reciprocal_lattice(big_star) % self.lattice.extent
# should test for pbc before taking the modulus, but the only valid wave
# vector for non-pbc axes is 0 and 0 % anything == 0
# assumes point_group_[0] is the identity
is_in_little_group = np.all(big_star == big_star[0], axis=1)
return np.arange(len(self.point_group_))[is_in_little_group]
def little_group(self, *k: Array) -> PointGroup:
"""
Returns the little co-group corresponding to wave vector *k*.
This is the subgroup of the point group that leaves *k* invariant.
Arguments:
k: the wave vector in Cartesian axes
Returns:
the little co-group as a `PointGroup`
"""
k = _ensure_iterable(k)
return PointGroup(
[self.point_group_[i] for i in self._little_group_index(k)],
ndim=self.point_group_.ndim,
unit_cell=self.lattice.basis_vectors,
)
def _little_group_irreps(self, k: Array, divide: bool = False) -> Array:
"""
Returns the character table of the little group embedded in the full point
group. Symmetries outside the little group get 0.
If `divide` is `True`, the result gets divided by the size of the little group.
This is convenient when calculating space group irreps.
"""
idx = self._little_group_index(k)
CT = self.little_group(k).character_table()
CT_full = np.zeros((CT.shape[0], len(self.point_group_)))
CT_full[:, idx] = CT
return CT_full / idx.size if divide else CT_full
def space_group_irreps(self, *k: Array) -> Array:
"""
Returns the portion of the character table of the full space group corresponding
to the star of the wave vector *k*.
Arguments:
k: the wave vector in Cartesian axes
Returns:
An array `CT` listing the characters for a number of irreps of the
space group.
`CT[i]` for each `i` gives a distinct irrep, each corresponding to
`self.little_group(k).character_table[i].
`CT[i,j]` gives the character of `self.space_group[j]` in the same.
"""
k = _ensure_iterable(k)
# Wave vectors
big_star_Cart = np.tensordot(self.point_group_.matrices(), k, axes=1)
big_star = self.lattice.to_reciprocal_lattice(big_star_Cart) * (
2 * pi / self.lattice.extent
)
# Little-group-irrep factors
# Conjugacy_table[g,p] lists p^{-1}gp, so point_group_factors[i,:,p]
# of irrep #i for the little group of p(k) is the equivalent
# Phase factor for non-symmorphic symmetries is exp(-i w_g . p(k))
point_group_factors = self._little_group_irreps(k, divide=True)[
:, self.point_group_.conjugacy_table
] * np.exp(
-1j
* np.tensordot(
self.point_group_.translations(), big_star_Cart, axes=(-1, -1)
)
)
# Translational factors
trans_factors = []
for axis in range(self.lattice.ndim):
n_trans = self.lattice.extent[axis] if self.lattice.pbc[axis] else 1
factors = np.exp(-1j * np.outer(np.arange(n_trans), big_star[:, axis]))
shape = (
[1] * axis
+ [n_trans]
+ [1] * (self.lattice.ndim - 1 - axis)
+ [len(self.point_group_)]
)
trans_factors.append(factors.reshape(shape))
trans_factors = reduce(np.multiply, trans_factors).reshape(
-1, len(self.point_group_)
)
# Multiply the factors together and sum over the "p" PGSymmetry axis
# Translations are more major than point group operations
result = np.einsum(
"igp, tp -> itg", point_group_factors, trans_factors
).reshape(point_group_factors.shape[0], -1)
return prune_zeros(result)
def one_arm_irreps(self, *k: Array) -> Array:
"""
Returns the portion of the character table of the full space group corresponding
to the star of the wave vector *k*, projected onto *k* itself.
Arguments:
k: the wave vector in Cartesian axes
Returns:
An array `CT` listing the projected characters for a number of irreps of
the space group.
`CT[i]` for each `i` gives a distinct irrep, each corresponding to
`self.little_group(k).character_table[i].
`CT[i,j]` gives the character of `self.space_group[j]` in the same.
"""
# Convert k to reciprocal lattice vectors
k = _ensure_iterable(k)
# Little-group irrep factors
# Phase factor for non-symmorphic symmetries is exp(-i w_g . p(k))
point_group_factors = self._little_group_irreps(k) * np.exp(
-1j * (self.point_group_.translations() @ k)
)
# Translational factors
trans_factors = []
for axis in range(self.lattice.ndim):
n_trans = self.lattice.extent[axis] if self.lattice.pbc[axis] else 1
factors = np.exp(-1j * k[axis] * np.arange(n_trans))
shape = [1] * axis + [n_trans] + [1] * (self.lattice.ndim - 1 - axis)
trans_factors.append(factors.reshape(shape))
trans_factors = reduce(np.multiply, trans_factors).ravel()
# Multiply the factors together
# Translations are more major than point group operations
result = np.einsum("ig, t -> itg", point_group_factors, trans_factors).reshape(
point_group_factors.shape[0], -1
)
return prune_zeros(result)
| perm = self.lattice.id_from_position(p.preimage(self.lattice.positions))
perms.append(Permutation(perm, name=str(p))) | conditional_block |
space_group.py | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Ignore false-positives for redefined `product` functions:
# pylint: disable=function-redefined
import numpy as np
from functools import reduce
from math import pi
from typing import Optional
from collections.abc import Iterable, Sequence
from .lattice import Lattice
from netket.utils import struct
from netket.utils.types import Array, Union
from netket.utils.float import prune_zeros
from netket.utils.dispatch import dispatch
from netket.utils.group import (
Identity,
PointGroup,
Permutation,
PermutationGroup,
)
class Translation(Permutation):
r"""
Custom subclass of `Permutation` that represents a lattice permutation.
Stores translation lattice vector and generates a sensible name from it.
The product of two `Translation`s carries the appropriate displacement vector.
"""
def __init__(self, permutation: Array, displacement: Array):
r"""
Creates a `Translation` from a permutation array and a displacement vector
Arguments:
permutation: a 1D array listing :math:`g^{-1}(x)` for all
:math:`0\le x < N` (i.e., `V[permutation]` permutes the
elements of `V` as desired)
displacement: displacement vector is units of lattice basis vectors
Returns:
a `Translation` object encoding the same information
"""
super().__init__(permutation)
self._vector = np.asarray(displacement)
@property
def _name(self):
return f"Translation({self._vector.tolist()})"
@dispatch
def product(p: Translation, q: Translation):
return Translation(p(np.asarray(q)), p._vector + q._vector)
def _ensure_iterable(x):
"""Extracts iterables given in varargs"""
if isinstance(x[0], Iterable):
if len(x) > 1:
raise TypeError("Either Iterable or variable argument list expected")
return x[0]
else:
return x
@struct.dataclass
class SpaceGroupBuilder:
"""
Class to handle the space group symmetries of `Lattice`.
Constructs `PermutationGroup`s that represent the action on a `Lattice` of
* a geometrical point group given as a constructor argument,
* its rotational subgroup (i.e. point group symmetries with determinant +1)
* the translation group of the same lattice
* and the space group that is generated as the semidirect product of
the supplied point group and the translation group.
Also generates space group irreps for symmetrising wave functions.
"""
lattice: Lattice
point_group_: PointGroup
def __post_init__(self):
object.__setattr__(
self,
"point_group_",
self.point_group_.replace(unit_cell=self.lattice.basis_vectors),
)
# TODO describe ordering of group elements here and later in docstring
@struct.property_cached
def point_group(self) -> PermutationGroup:
"""
The point group as a `PermutationGroup` acting on the sites of `self.lattice`.
"""
perms = []
for p in self.point_group_:
if isinstance(p, Identity):
perms.append(Identity())
else:
# note that we need the preimages in the permutation
perm = self.lattice.id_from_position(p.preimage(self.lattice.positions))
perms.append(Permutation(perm, name=str(p)))
return PermutationGroup(perms, degree=self.lattice.n_nodes)
@struct.property_cached
def rotation_group(self) -> PermutationGroup:
"""The group of rotations (i.e. point group symmetries with determinant +1)
as a `PermutationGroup` acting on the sites of `self.lattice`."""
perms = []
for p in self.point_group_.rotation_group():
if isinstance(p, Identity):
perms.append(Identity())
else:
# note that we need the preimages in the permutation
perm = self.lattice.id_from_position(p.preimage(self.lattice.positions))
perms.append(Permutation(perm, name=str(p)))
return PermutationGroup(perms, degree=self.lattice.n_nodes)
def _translations_along_axis(self, axis: int) -> PermutationGroup:
"""
The group of valid translations along an axis as a `PermutationGroup`
acting on the sites of `self.lattice.`
"""
if self.lattice._pbc[axis]:
trans_list = [Identity()]
# note that we need the preimages in the permutation
trans_perm = self.lattice.id_from_position(
self.lattice.positions - self.lattice.basis_vectors[axis]
)
vector = np.zeros(self.lattice.ndim, dtype=int)
vector[axis] = 1
trans_by_one = Translation(trans_perm, vector)
for _ in range(1, self.lattice.extent[axis]):
trans_list.append(trans_list[-1] @ trans_by_one)
return PermutationGroup(trans_list, degree=self.lattice.n_nodes)
else:
return PermutationGroup([Identity()], degree=self.lattice.n_nodes)
@struct.property_cached
def _full_translation_group(self) -> PermutationGroup:
|
def translation_group(
self, axes: Optional[Union[int, Sequence[int]]] = None
) -> PermutationGroup:
"""
The group of valid translations of `self.lattice` as a `PermutationGroup`
acting on the sites of the same.
"""
if axes is None:
return self._full_translation_group
elif isinstance(axes, int):
return self._translations_along_axis(axes)
else:
return reduce(
PermutationGroup.__matmul__,
[self._translations_along_axis(i) for i in axes],
)
@struct.property_cached
def space_group(self) -> PermutationGroup:
"""
The space group generated by `self.point_group` and `self.translation_group`.
"""
return self._full_translation_group @ self.point_group
def _little_group_index(self, k: Array) -> Array:
"""
Returns the indices of the elements of the little group corresponding to
wave vector `k`.
"""
# calculate k' = p(k) for all p in the point group
big_star = np.tensordot(self.point_group_.matrices(), k, axes=1)
big_star = self.lattice.to_reciprocal_lattice(big_star) % self.lattice.extent
# should test for pbc before taking the modulus, but the only valid wave
# vector for non-pbc axes is 0 and 0 % anything == 0
# assumes point_group_[0] is the identity
is_in_little_group = np.all(big_star == big_star[0], axis=1)
return np.arange(len(self.point_group_))[is_in_little_group]
def little_group(self, *k: Array) -> PointGroup:
"""
Returns the little co-group corresponding to wave vector *k*.
This is the subgroup of the point group that leaves *k* invariant.
Arguments:
k: the wave vector in Cartesian axes
Returns:
the little co-group as a `PointGroup`
"""
k = _ensure_iterable(k)
return PointGroup(
[self.point_group_[i] for i in self._little_group_index(k)],
ndim=self.point_group_.ndim,
unit_cell=self.lattice.basis_vectors,
)
def _little_group_irreps(self, k: Array, divide: bool = False) -> Array:
"""
Returns the character table of the little group embedded in the full point
group. Symmetries outside the little group get 0.
If `divide` is `True`, the result gets divided by the size of the little group.
This is convenient when calculating space group irreps.
"""
idx = self._little_group_index(k)
CT = self.little_group(k).character_table()
CT_full = np.zeros((CT.shape[0], len(self.point_group_)))
CT_full[:, idx] = CT
return CT_full / idx.size if divide else CT_full
def space_group_irreps(self, *k: Array) -> Array:
"""
Returns the portion of the character table of the full space group corresponding
to the star of the wave vector *k*.
Arguments:
k: the wave vector in Cartesian axes
Returns:
An array `CT` listing the characters for a number of irreps of the
space group.
`CT[i]` for each `i` gives a distinct irrep, each corresponding to
`self.little_group(k).character_table[i].
`CT[i,j]` gives the character of `self.space_group[j]` in the same.
"""
k = _ensure_iterable(k)
# Wave vectors
big_star_Cart = np.tensordot(self.point_group_.matrices(), k, axes=1)
big_star = self.lattice.to_reciprocal_lattice(big_star_Cart) * (
2 * pi / self.lattice.extent
)
# Little-group-irrep factors
# Conjugacy_table[g,p] lists p^{-1}gp, so point_group_factors[i,:,p]
# of irrep #i for the little group of p(k) is the equivalent
# Phase factor for non-symmorphic symmetries is exp(-i w_g . p(k))
point_group_factors = self._little_group_irreps(k, divide=True)[
:, self.point_group_.conjugacy_table
] * np.exp(
-1j
* np.tensordot(
self.point_group_.translations(), big_star_Cart, axes=(-1, -1)
)
)
# Translational factors
trans_factors = []
for axis in range(self.lattice.ndim):
n_trans = self.lattice.extent[axis] if self.lattice.pbc[axis] else 1
factors = np.exp(-1j * np.outer(np.arange(n_trans), big_star[:, axis]))
shape = (
[1] * axis
+ [n_trans]
+ [1] * (self.lattice.ndim - 1 - axis)
+ [len(self.point_group_)]
)
trans_factors.append(factors.reshape(shape))
trans_factors = reduce(np.multiply, trans_factors).reshape(
-1, len(self.point_group_)
)
# Multiply the factors together and sum over the "p" PGSymmetry axis
# Translations are more major than point group operations
result = np.einsum(
"igp, tp -> itg", point_group_factors, trans_factors
).reshape(point_group_factors.shape[0], -1)
return prune_zeros(result)
def one_arm_irreps(self, *k: Array) -> Array:
"""
Returns the portion of the character table of the full space group corresponding
to the star of the wave vector *k*, projected onto *k* itself.
Arguments:
k: the wave vector in Cartesian axes
Returns:
An array `CT` listing the projected characters for a number of irreps of
the space group.
`CT[i]` for each `i` gives a distinct irrep, each corresponding to
`self.little_group(k).character_table[i].
`CT[i,j]` gives the character of `self.space_group[j]` in the same.
"""
# Convert k to reciprocal lattice vectors
k = _ensure_iterable(k)
# Little-group irrep factors
# Phase factor for non-symmorphic symmetries is exp(-i w_g . p(k))
point_group_factors = self._little_group_irreps(k) * np.exp(
-1j * (self.point_group_.translations() @ k)
)
# Translational factors
trans_factors = []
for axis in range(self.lattice.ndim):
n_trans = self.lattice.extent[axis] if self.lattice.pbc[axis] else 1
factors = np.exp(-1j * k[axis] * np.arange(n_trans))
shape = [1] * axis + [n_trans] + [1] * (self.lattice.ndim - 1 - axis)
trans_factors.append(factors.reshape(shape))
trans_factors = reduce(np.multiply, trans_factors).ravel()
# Multiply the factors together
# Translations are more major than point group operations
result = np.einsum("ig, t -> itg", point_group_factors, trans_factors).reshape(
point_group_factors.shape[0], -1
)
return prune_zeros(result)
| """
The group of valid translations of `self.lattice` as a `PermutationGroup`
acting on the sites of the same.
"""
return reduce(
PermutationGroup.__matmul__,
[self._translations_along_axis(i) for i in range(self.lattice.ndim)],
) | identifier_body |
space_group.py | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Ignore false-positives for redefined `product` functions:
# pylint: disable=function-redefined
import numpy as np
from functools import reduce
from math import pi
from typing import Optional
from collections.abc import Iterable, Sequence
from .lattice import Lattice
from netket.utils import struct
from netket.utils.types import Array, Union
from netket.utils.float import prune_zeros
from netket.utils.dispatch import dispatch
from netket.utils.group import (
Identity,
PointGroup,
Permutation,
PermutationGroup,
)
class Translation(Permutation):
r"""
Custom subclass of `Permutation` that represents a lattice permutation.
Stores translation lattice vector and generates a sensible name from it.
The product of two `Translation`s carries the appropriate displacement vector.
"""
def | (self, permutation: Array, displacement: Array):
r"""
Creates a `Translation` from a permutation array and a displacement vector
Arguments:
permutation: a 1D array listing :math:`g^{-1}(x)` for all
:math:`0\le x < N` (i.e., `V[permutation]` permutes the
elements of `V` as desired)
displacement: displacement vector is units of lattice basis vectors
Returns:
a `Translation` object encoding the same information
"""
super().__init__(permutation)
self._vector = np.asarray(displacement)
@property
def _name(self):
return f"Translation({self._vector.tolist()})"
@dispatch
def product(p: Translation, q: Translation):
return Translation(p(np.asarray(q)), p._vector + q._vector)
def _ensure_iterable(x):
"""Extracts iterables given in varargs"""
if isinstance(x[0], Iterable):
if len(x) > 1:
raise TypeError("Either Iterable or variable argument list expected")
return x[0]
else:
return x
@struct.dataclass
class SpaceGroupBuilder:
"""
Class to handle the space group symmetries of `Lattice`.
Constructs `PermutationGroup`s that represent the action on a `Lattice` of
* a geometrical point group given as a constructor argument,
* its rotational subgroup (i.e. point group symmetries with determinant +1)
* the translation group of the same lattice
* and the space group that is generated as the semidirect product of
the supplied point group and the translation group.
Also generates space group irreps for symmetrising wave functions.
"""
lattice: Lattice
point_group_: PointGroup
def __post_init__(self):
object.__setattr__(
self,
"point_group_",
self.point_group_.replace(unit_cell=self.lattice.basis_vectors),
)
# TODO describe ordering of group elements here and later in docstring
@struct.property_cached
def point_group(self) -> PermutationGroup:
"""
The point group as a `PermutationGroup` acting on the sites of `self.lattice`.
"""
perms = []
for p in self.point_group_:
if isinstance(p, Identity):
perms.append(Identity())
else:
# note that we need the preimages in the permutation
perm = self.lattice.id_from_position(p.preimage(self.lattice.positions))
perms.append(Permutation(perm, name=str(p)))
return PermutationGroup(perms, degree=self.lattice.n_nodes)
@struct.property_cached
def rotation_group(self) -> PermutationGroup:
"""The group of rotations (i.e. point group symmetries with determinant +1)
as a `PermutationGroup` acting on the sites of `self.lattice`."""
perms = []
for p in self.point_group_.rotation_group():
if isinstance(p, Identity):
perms.append(Identity())
else:
# note that we need the preimages in the permutation
perm = self.lattice.id_from_position(p.preimage(self.lattice.positions))
perms.append(Permutation(perm, name=str(p)))
return PermutationGroup(perms, degree=self.lattice.n_nodes)
def _translations_along_axis(self, axis: int) -> PermutationGroup:
"""
The group of valid translations along an axis as a `PermutationGroup`
acting on the sites of `self.lattice.`
"""
if self.lattice._pbc[axis]:
trans_list = [Identity()]
# note that we need the preimages in the permutation
trans_perm = self.lattice.id_from_position(
self.lattice.positions - self.lattice.basis_vectors[axis]
)
vector = np.zeros(self.lattice.ndim, dtype=int)
vector[axis] = 1
trans_by_one = Translation(trans_perm, vector)
for _ in range(1, self.lattice.extent[axis]):
trans_list.append(trans_list[-1] @ trans_by_one)
return PermutationGroup(trans_list, degree=self.lattice.n_nodes)
else:
return PermutationGroup([Identity()], degree=self.lattice.n_nodes)
@struct.property_cached
def _full_translation_group(self) -> PermutationGroup:
"""
The group of valid translations of `self.lattice` as a `PermutationGroup`
acting on the sites of the same.
"""
return reduce(
PermutationGroup.__matmul__,
[self._translations_along_axis(i) for i in range(self.lattice.ndim)],
)
def translation_group(
self, axes: Optional[Union[int, Sequence[int]]] = None
) -> PermutationGroup:
"""
The group of valid translations of `self.lattice` as a `PermutationGroup`
acting on the sites of the same.
"""
if axes is None:
return self._full_translation_group
elif isinstance(axes, int):
return self._translations_along_axis(axes)
else:
return reduce(
PermutationGroup.__matmul__,
[self._translations_along_axis(i) for i in axes],
)
@struct.property_cached
def space_group(self) -> PermutationGroup:
"""
The space group generated by `self.point_group` and `self.translation_group`.
"""
return self._full_translation_group @ self.point_group
def _little_group_index(self, k: Array) -> Array:
"""
Returns the indices of the elements of the little group corresponding to
wave vector `k`.
"""
# calculate k' = p(k) for all p in the point group
big_star = np.tensordot(self.point_group_.matrices(), k, axes=1)
big_star = self.lattice.to_reciprocal_lattice(big_star) % self.lattice.extent
# should test for pbc before taking the modulus, but the only valid wave
# vector for non-pbc axes is 0 and 0 % anything == 0
# assumes point_group_[0] is the identity
is_in_little_group = np.all(big_star == big_star[0], axis=1)
return np.arange(len(self.point_group_))[is_in_little_group]
def little_group(self, *k: Array) -> PointGroup:
"""
Returns the little co-group corresponding to wave vector *k*.
This is the subgroup of the point group that leaves *k* invariant.
Arguments:
k: the wave vector in Cartesian axes
Returns:
the little co-group as a `PointGroup`
"""
k = _ensure_iterable(k)
return PointGroup(
[self.point_group_[i] for i in self._little_group_index(k)],
ndim=self.point_group_.ndim,
unit_cell=self.lattice.basis_vectors,
)
def _little_group_irreps(self, k: Array, divide: bool = False) -> Array:
"""
Returns the character table of the little group embedded in the full point
group. Symmetries outside the little group get 0.
If `divide` is `True`, the result gets divided by the size of the little group.
This is convenient when calculating space group irreps.
"""
idx = self._little_group_index(k)
CT = self.little_group(k).character_table()
CT_full = np.zeros((CT.shape[0], len(self.point_group_)))
CT_full[:, idx] = CT
return CT_full / idx.size if divide else CT_full
def space_group_irreps(self, *k: Array) -> Array:
"""
Returns the portion of the character table of the full space group corresponding
to the star of the wave vector *k*.
Arguments:
k: the wave vector in Cartesian axes
Returns:
An array `CT` listing the characters for a number of irreps of the
space group.
`CT[i]` for each `i` gives a distinct irrep, each corresponding to
`self.little_group(k).character_table[i].
`CT[i,j]` gives the character of `self.space_group[j]` in the same.
"""
k = _ensure_iterable(k)
# Wave vectors
big_star_Cart = np.tensordot(self.point_group_.matrices(), k, axes=1)
big_star = self.lattice.to_reciprocal_lattice(big_star_Cart) * (
2 * pi / self.lattice.extent
)
# Little-group-irrep factors
# Conjugacy_table[g,p] lists p^{-1}gp, so point_group_factors[i,:,p]
# of irrep #i for the little group of p(k) is the equivalent
# Phase factor for non-symmorphic symmetries is exp(-i w_g . p(k))
point_group_factors = self._little_group_irreps(k, divide=True)[
:, self.point_group_.conjugacy_table
] * np.exp(
-1j
* np.tensordot(
self.point_group_.translations(), big_star_Cart, axes=(-1, -1)
)
)
# Translational factors
trans_factors = []
for axis in range(self.lattice.ndim):
n_trans = self.lattice.extent[axis] if self.lattice.pbc[axis] else 1
factors = np.exp(-1j * np.outer(np.arange(n_trans), big_star[:, axis]))
shape = (
[1] * axis
+ [n_trans]
+ [1] * (self.lattice.ndim - 1 - axis)
+ [len(self.point_group_)]
)
trans_factors.append(factors.reshape(shape))
trans_factors = reduce(np.multiply, trans_factors).reshape(
-1, len(self.point_group_)
)
# Multiply the factors together and sum over the "p" PGSymmetry axis
# Translations are more major than point group operations
result = np.einsum(
"igp, tp -> itg", point_group_factors, trans_factors
).reshape(point_group_factors.shape[0], -1)
return prune_zeros(result)
def one_arm_irreps(self, *k: Array) -> Array:
"""
Returns the portion of the character table of the full space group corresponding
to the star of the wave vector *k*, projected onto *k* itself.
Arguments:
k: the wave vector in Cartesian axes
Returns:
An array `CT` listing the projected characters for a number of irreps of
the space group.
`CT[i]` for each `i` gives a distinct irrep, each corresponding to
`self.little_group(k).character_table[i].
`CT[i,j]` gives the character of `self.space_group[j]` in the same.
"""
# Convert k to reciprocal lattice vectors
k = _ensure_iterable(k)
# Little-group irrep factors
# Phase factor for non-symmorphic symmetries is exp(-i w_g . p(k))
point_group_factors = self._little_group_irreps(k) * np.exp(
-1j * (self.point_group_.translations() @ k)
)
# Translational factors
trans_factors = []
for axis in range(self.lattice.ndim):
n_trans = self.lattice.extent[axis] if self.lattice.pbc[axis] else 1
factors = np.exp(-1j * k[axis] * np.arange(n_trans))
shape = [1] * axis + [n_trans] + [1] * (self.lattice.ndim - 1 - axis)
trans_factors.append(factors.reshape(shape))
trans_factors = reduce(np.multiply, trans_factors).ravel()
# Multiply the factors together
# Translations are more major than point group operations
result = np.einsum("ig, t -> itg", point_group_factors, trans_factors).reshape(
point_group_factors.shape[0], -1
)
return prune_zeros(result)
| __init__ | identifier_name |
space_group.py | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Ignore false-positives for redefined `product` functions:
# pylint: disable=function-redefined
import numpy as np
from functools import reduce
from math import pi
from typing import Optional
from collections.abc import Iterable, Sequence
from .lattice import Lattice
from netket.utils import struct
from netket.utils.types import Array, Union
from netket.utils.float import prune_zeros
from netket.utils.dispatch import dispatch
from netket.utils.group import (
Identity,
PointGroup,
Permutation,
PermutationGroup,
)
class Translation(Permutation):
r"""
Custom subclass of `Permutation` that represents a lattice permutation.
Stores translation lattice vector and generates a sensible name from it.
The product of two `Translation`s carries the appropriate displacement vector.
"""
def __init__(self, permutation: Array, displacement: Array):
r"""
Creates a `Translation` from a permutation array and a displacement vector
Arguments:
permutation: a 1D array listing :math:`g^{-1}(x)` for all
:math:`0\le x < N` (i.e., `V[permutation]` permutes the
elements of `V` as desired)
displacement: displacement vector is units of lattice basis vectors
Returns:
a `Translation` object encoding the same information
"""
super().__init__(permutation)
self._vector = np.asarray(displacement)
@property
def _name(self):
return f"Translation({self._vector.tolist()})"
@dispatch
def product(p: Translation, q: Translation):
return Translation(p(np.asarray(q)), p._vector + q._vector)
def _ensure_iterable(x):
"""Extracts iterables given in varargs"""
if isinstance(x[0], Iterable):
if len(x) > 1:
raise TypeError("Either Iterable or variable argument list expected")
return x[0]
else:
return x
@struct.dataclass
class SpaceGroupBuilder:
"""
Class to handle the space group symmetries of `Lattice`.
Constructs `PermutationGroup`s that represent the action on a `Lattice` of
* a geometrical point group given as a constructor argument,
* its rotational subgroup (i.e. point group symmetries with determinant +1)
* the translation group of the same lattice
* and the space group that is generated as the semidirect product of
the supplied point group and the translation group.
Also generates space group irreps for symmetrising wave functions.
"""
lattice: Lattice
point_group_: PointGroup
def __post_init__(self):
object.__setattr__(
self,
"point_group_",
self.point_group_.replace(unit_cell=self.lattice.basis_vectors),
)
# TODO describe ordering of group elements here and later in docstring
@struct.property_cached
def point_group(self) -> PermutationGroup:
"""
The point group as a `PermutationGroup` acting on the sites of `self.lattice`. | if isinstance(p, Identity):
perms.append(Identity())
else:
# note that we need the preimages in the permutation
perm = self.lattice.id_from_position(p.preimage(self.lattice.positions))
perms.append(Permutation(perm, name=str(p)))
return PermutationGroup(perms, degree=self.lattice.n_nodes)
@struct.property_cached
def rotation_group(self) -> PermutationGroup:
"""The group of rotations (i.e. point group symmetries with determinant +1)
as a `PermutationGroup` acting on the sites of `self.lattice`."""
perms = []
for p in self.point_group_.rotation_group():
if isinstance(p, Identity):
perms.append(Identity())
else:
# note that we need the preimages in the permutation
perm = self.lattice.id_from_position(p.preimage(self.lattice.positions))
perms.append(Permutation(perm, name=str(p)))
return PermutationGroup(perms, degree=self.lattice.n_nodes)
def _translations_along_axis(self, axis: int) -> PermutationGroup:
"""
The group of valid translations along an axis as a `PermutationGroup`
acting on the sites of `self.lattice.`
"""
if self.lattice._pbc[axis]:
trans_list = [Identity()]
# note that we need the preimages in the permutation
trans_perm = self.lattice.id_from_position(
self.lattice.positions - self.lattice.basis_vectors[axis]
)
vector = np.zeros(self.lattice.ndim, dtype=int)
vector[axis] = 1
trans_by_one = Translation(trans_perm, vector)
for _ in range(1, self.lattice.extent[axis]):
trans_list.append(trans_list[-1] @ trans_by_one)
return PermutationGroup(trans_list, degree=self.lattice.n_nodes)
else:
return PermutationGroup([Identity()], degree=self.lattice.n_nodes)
@struct.property_cached
def _full_translation_group(self) -> PermutationGroup:
"""
The group of valid translations of `self.lattice` as a `PermutationGroup`
acting on the sites of the same.
"""
return reduce(
PermutationGroup.__matmul__,
[self._translations_along_axis(i) for i in range(self.lattice.ndim)],
)
def translation_group(
self, axes: Optional[Union[int, Sequence[int]]] = None
) -> PermutationGroup:
"""
The group of valid translations of `self.lattice` as a `PermutationGroup`
acting on the sites of the same.
"""
if axes is None:
return self._full_translation_group
elif isinstance(axes, int):
return self._translations_along_axis(axes)
else:
return reduce(
PermutationGroup.__matmul__,
[self._translations_along_axis(i) for i in axes],
)
@struct.property_cached
def space_group(self) -> PermutationGroup:
"""
The space group generated by `self.point_group` and `self.translation_group`.
"""
return self._full_translation_group @ self.point_group
def _little_group_index(self, k: Array) -> Array:
"""
Returns the indices of the elements of the little group corresponding to
wave vector `k`.
"""
# calculate k' = p(k) for all p in the point group
big_star = np.tensordot(self.point_group_.matrices(), k, axes=1)
big_star = self.lattice.to_reciprocal_lattice(big_star) % self.lattice.extent
# should test for pbc before taking the modulus, but the only valid wave
# vector for non-pbc axes is 0 and 0 % anything == 0
# assumes point_group_[0] is the identity
is_in_little_group = np.all(big_star == big_star[0], axis=1)
return np.arange(len(self.point_group_))[is_in_little_group]
def little_group(self, *k: Array) -> PointGroup:
"""
Returns the little co-group corresponding to wave vector *k*.
This is the subgroup of the point group that leaves *k* invariant.
Arguments:
k: the wave vector in Cartesian axes
Returns:
the little co-group as a `PointGroup`
"""
k = _ensure_iterable(k)
return PointGroup(
[self.point_group_[i] for i in self._little_group_index(k)],
ndim=self.point_group_.ndim,
unit_cell=self.lattice.basis_vectors,
)
def _little_group_irreps(self, k: Array, divide: bool = False) -> Array:
"""
Returns the character table of the little group embedded in the full point
group. Symmetries outside the little group get 0.
If `divide` is `True`, the result gets divided by the size of the little group.
This is convenient when calculating space group irreps.
"""
idx = self._little_group_index(k)
CT = self.little_group(k).character_table()
CT_full = np.zeros((CT.shape[0], len(self.point_group_)))
CT_full[:, idx] = CT
return CT_full / idx.size if divide else CT_full
def space_group_irreps(self, *k: Array) -> Array:
"""
Returns the portion of the character table of the full space group corresponding
to the star of the wave vector *k*.
Arguments:
k: the wave vector in Cartesian axes
Returns:
An array `CT` listing the characters for a number of irreps of the
space group.
`CT[i]` for each `i` gives a distinct irrep, each corresponding to
`self.little_group(k).character_table[i].
`CT[i,j]` gives the character of `self.space_group[j]` in the same.
"""
k = _ensure_iterable(k)
# Wave vectors
big_star_Cart = np.tensordot(self.point_group_.matrices(), k, axes=1)
big_star = self.lattice.to_reciprocal_lattice(big_star_Cart) * (
2 * pi / self.lattice.extent
)
# Little-group-irrep factors
# Conjugacy_table[g,p] lists p^{-1}gp, so point_group_factors[i,:,p]
# of irrep #i for the little group of p(k) is the equivalent
# Phase factor for non-symmorphic symmetries is exp(-i w_g . p(k))
point_group_factors = self._little_group_irreps(k, divide=True)[
:, self.point_group_.conjugacy_table
] * np.exp(
-1j
* np.tensordot(
self.point_group_.translations(), big_star_Cart, axes=(-1, -1)
)
)
# Translational factors
trans_factors = []
for axis in range(self.lattice.ndim):
n_trans = self.lattice.extent[axis] if self.lattice.pbc[axis] else 1
factors = np.exp(-1j * np.outer(np.arange(n_trans), big_star[:, axis]))
shape = (
[1] * axis
+ [n_trans]
+ [1] * (self.lattice.ndim - 1 - axis)
+ [len(self.point_group_)]
)
trans_factors.append(factors.reshape(shape))
trans_factors = reduce(np.multiply, trans_factors).reshape(
-1, len(self.point_group_)
)
# Multiply the factors together and sum over the "p" PGSymmetry axis
# Translations are more major than point group operations
result = np.einsum(
"igp, tp -> itg", point_group_factors, trans_factors
).reshape(point_group_factors.shape[0], -1)
return prune_zeros(result)
def one_arm_irreps(self, *k: Array) -> Array:
"""
Returns the portion of the character table of the full space group corresponding
to the star of the wave vector *k*, projected onto *k* itself.
Arguments:
k: the wave vector in Cartesian axes
Returns:
An array `CT` listing the projected characters for a number of irreps of
the space group.
`CT[i]` for each `i` gives a distinct irrep, each corresponding to
`self.little_group(k).character_table[i].
`CT[i,j]` gives the character of `self.space_group[j]` in the same.
"""
# Convert k to reciprocal lattice vectors
k = _ensure_iterable(k)
# Little-group irrep factors
# Phase factor for non-symmorphic symmetries is exp(-i w_g . p(k))
point_group_factors = self._little_group_irreps(k) * np.exp(
-1j * (self.point_group_.translations() @ k)
)
# Translational factors
trans_factors = []
for axis in range(self.lattice.ndim):
n_trans = self.lattice.extent[axis] if self.lattice.pbc[axis] else 1
factors = np.exp(-1j * k[axis] * np.arange(n_trans))
shape = [1] * axis + [n_trans] + [1] * (self.lattice.ndim - 1 - axis)
trans_factors.append(factors.reshape(shape))
trans_factors = reduce(np.multiply, trans_factors).ravel()
# Multiply the factors together
# Translations are more major than point group operations
result = np.einsum("ig, t -> itg", point_group_factors, trans_factors).reshape(
point_group_factors.shape[0], -1
)
return prune_zeros(result) | """
perms = []
for p in self.point_group_: | random_line_split |
basic_model.py | from collections import defaultdict
# from torchtext.vocab import Vocab
from torch.utils.data.dataset import Dataset, TensorDataset
from pathlib import Path
from collections import Counter
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
from torch.utils.data.dataloader import DataLoader
from utils import * | # taken from the paper
MLP_HIDDEN_DIM = 100
EPOCHS = 150
WORD_EMBEDDING_DIM = 100
POS_EMBEDDING_DIM = 25
HIDDEN_DIM = 125
LEARNING_RATE = 0.01
EARLY_STOPPING = 10 # num epochs with no validation acc improvement to stop training
PATH = "./basic_model_best_params"
cross_entropy_loss = nn.CrossEntropyLoss(reduction='mean')
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# class not_efficientMLP(nn.Module):
# def __init__(self, lstm_dim, mlp_hidden_dim):
# super(not_efficientMLP, self).__init__()
# self.first_linear = nn.Linear(2 * lstm_dim, mlp_hidden_dim)
# self.non_linearity = nn.ReLU()
# self.second_mlp = nn.Linear(mlp_hidden_dim, 1, bias=True) # will output a score of a pair
#
# def forward(self, lstm_out):
# sentence_length = lstm_out.shape[0]
# scores = torch.zeros(size=(sentence_length, sentence_length)).to(device)
# for i, v_i in enumerate(lstm_out):
# for j, v_j in enumerate(lstm_out):
# if i == j:
# scores[i, j] = 0
# else:
# a = torch.cat((v_i, v_j), dim=0)
# x = self.first_linear(a)
# y = self.non_linearity(x)
# scores[i, j] = self.second_mlp(y)
# return scores
class SplittedMLP(nn.Module):
def __init__(self, lstm_dim, mlp_hidden_dim):
super(SplittedMLP, self).__init__()
self.fc_h = nn.Linear(lstm_dim, mlp_hidden_dim, bias=True) # fully-connected to output mu
self.fc_m = nn.Linear(lstm_dim, mlp_hidden_dim, bias=False) # fully-connected to output mu
def forward(self, lstm_out):
heads_hidden = self.fc_h(lstm_out)
mods_hidden = self.fc_m(lstm_out)
return heads_hidden, mods_hidden
class MLP(nn.Module):
def __init__(self, lstm_dim, mlp_hidden_dim):
super(MLP, self).__init__()
self.first_mlp = SplittedMLP(lstm_dim, mlp_hidden_dim)
self.non_linearity = nn.Tanh()
self.second_mlp = nn.Linear(mlp_hidden_dim, 1, bias=True) # will output a score of a pair
def forward(self, lstm_out):
sentence_length = lstm_out.shape[0]
heads_hidden, mods_hidden = self.first_mlp(lstm_out)
scores = torch.zeros(size=(sentence_length, sentence_length)).to(device)
# we will fill the table row by row, using broadcasting
for mod in range(sentence_length):
mod_hidden = mods_hidden[mod]
summed_values = mod_hidden + heads_hidden # a single mod with all heads possibilities
x = self.non_linearity(summed_values)
scores[:, mod] = torch.flatten(self.second_mlp(x))
scores[mod, mod] = -np.inf # a word cant be its head
return scores
class DnnDependencyParser(nn.Module):
def __init__(self, word_embedding_dim, pos_embedding_dim, hidden_dim, word_vocab_size, tag_vocab_size):
super(DnnDependencyParser, self).__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# get a tensor of size word_vocab_size and return a word embedding
self.word_embedding = nn.Embedding(word_vocab_size, word_embedding_dim)
# get a tensor of size tag_vocab_size and return a pos embedding
self.pos_embedding = nn.Embedding(tag_vocab_size, pos_embedding_dim)
self.lstm = nn.LSTM(input_size=word_embedding_dim + pos_embedding_dim, hidden_size=hidden_dim, num_layers=2,
bidirectional=True, batch_first=False)
self.mlp = MLP(2*hidden_dim, MLP_HIDDEN_DIM)
# self.mlp = not_efficientMLP(2*hidden_dim, MLP_HIDDEN_DIM)
def forward(self, word_idx_tensor, pos_idx_tensor):
# get x = concat(e(w), e(p))
e_w = self.word_embedding(word_idx_tensor.to(self.device)) # [batch_size, seq_length, e_w]
e_p = self.pos_embedding(pos_idx_tensor.to(self.device)) # [batch_size, seq_length, e_p]
embeds = torch.cat((e_w, e_p), dim=2).to(self.device) # [batch_size, seq_length, e_w + e_p]
# assert embeds.shape[0] == 1 and embeds.shape[2] == POS_EMBEDDING_DIM + WORD_EMBEDDING_DIM
lstm_out, _ = self.lstm(embeds.view(embeds.shape[1], 1, -1)) # [seq_length, batch_size, 2*hidden_dim]
# Turns the output into one big tensor, each line is rep of a word in the sentence
lstm_out = lstm_out.view(lstm_out.shape[0], -1) # [seq_length, 2*hidden_dim]
out = self.mlp(lstm_out)
return out
def NLLL_function(scores, true_tree):
"""
Parameters
----------
scores - a matrix of size (sentence_length x sentence length)
true_tree - ground truth dependency tree
Returns the loss
-------
"""
clean_scores = scores[:, 1:] # ROOT cant be modifier
clean_true_tree = true_tree[1:]
sentence_length = clean_scores.shape[1] # without root
loss = 0
for mod in range(sentence_length):
loss += cross_entropy_loss(clean_scores[:, mod].unsqueeze(dim=0), clean_true_tree[mod:mod+1])
return (1.0/sentence_length) * loss
# def NLLL(output, target):
# """
# :param output: The table of MLP scores of each word pair
# :param target: The ground truth of the actual arcs
# :return:
# """
# # loss = -1/|Y|*[S_gt - sum(log(sum(exp(s_j_m))))]
# S_gt = 0
# mod_score = 0
# for idx, head in enumerate(target[0]):
# if idx == 0:
# continue
# head_idx = head.item()
# mod_idx = idx
# S_gt += output[head_idx, mod_idx]
# #
# S_j_m = output[:, mod_idx]
# mod_score += torch.log(torch.sum(torch.exp(S_j_m)))
# Y_i = target[0].shape[0]
# final_loss = (-1./Y_i)*(S_gt - mod_score)
# return final_loss
#
#
# def get_acc_measurements(GT, energy_table):
# predicted_mst, _ = decode_mst(energy=energy_table, length=energy_table.shape[0], has_labels=False)
# y_pred = torch.from_numpy(predicted_mst[1:])
# y_true = GT[1:]
# print("y_pred", y_pred)
# print("y_true = ", y_true)
# print((y_pred == y_true).sum())
# acc = (y_pred == y_true).sum()/float(y_true.shape[0])
# return acc.item()
def accuracy(ground_truth, energy_table):
predicted_mst, _ = decode_mst(energy=energy_table.detach(), length=energy_table.shape[0], has_labels=False)
# first one is the HEAD of root so we avoid taking it into account
y_pred = torch.from_numpy(predicted_mst[1:])
y_true = ground_truth[1:]
acc = (y_pred == y_true).sum()/float(y_true.shape[0])
return acc.item()
def evaluate(model, data_loader):
val_acc = 0
val_size = 0
for batch_idx, input_data in enumerate(data_loader):
val_size += 1
with torch.no_grad():
words_idx_tensor, pos_idx_tensor, heads_tensor = input_data
tag_scores = model(words_idx_tensor, pos_idx_tensor)
val_acc += (accuracy(heads_tensor[0].cpu(), tag_scores.cpu()))
return val_acc / val_size
def main():
# sanity check
data_dir = "HW2-files/"
path_train = data_dir + "train.labeled"
print("path_train -", path_train)
path_test = data_dir + "test.labeled"
print("path_test -", path_test)
paths_list = [path_train, path_test]
word_cnt, word_dict, pos_dict = get_vocabs(paths_list)
train = PosDataset(word_cnt, word_dict, pos_dict, data_dir, 'train')
# split into validation
train_set, val_set = torch.utils.data.random_split(train, [4000, 1000])
train_dataloader = DataLoader(train_set, shuffle=False) # TODO return to true after debugging
val_dataloader = DataLoader(val_set, shuffle=False)
test = PosDataset(word_cnt, word_dict, pos_dict, data_dir, 'test')
test_dataloader = DataLoader(test, shuffle=False)
a = next(iter(train_dataloader))
# a[0] -> word - idx of a sentence
# a[1] -> pos - idx of a sentence
# a[2] -> head token per sentence
assert len(a[0]) == len(a[1]) == len(a[2])
word_vocab_size = len(train.word2idx)
print(word_vocab_size)
tag_vocab_size = len(train.pos_idx_mappings)
print(tag_vocab_size)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
model = DnnDependencyParser(WORD_EMBEDDING_DIM, POS_EMBEDDING_DIM, HIDDEN_DIM, word_vocab_size, tag_vocab_size).to(device)
if use_cuda:
model.cuda()
# Define the loss function as the Negative Log Likelihood loss (NLLLoss)
loss_function = nn.NLLLoss()
# We will be using a simple SGD optimizer to minimize the loss function
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
acumulate_grad_steps = 128 # This is the actual batch_size, while we officially use batch_size=1
# Training start
print("Training Started")
epoch_loss_list = []
epoch_train_acc_list = []
epoch_test_acc_list = []
best_val_acc = 0
num_epochs_wo_improvement = 0
for epoch in range(EPOCHS):
val_acc = evaluate(model, val_dataloader)
print("EPOCH = ", epoch)
print("EPOCH val acc = ", val_acc)
if val_acc < best_val_acc: # no improvement
num_epochs_wo_improvement += 1
if num_epochs_wo_improvement >= EARLY_STOPPING:
print("STOPPED TRAINING DUE TO EARLY STOPPING")
return
else: # improvement
print("saving model since it improved on validation :)")
torch.save(model.state_dict(), PATH)
num_epochs_wo_improvement = 0
best_val_acc = val_acc
fig = plt.figure()
plt.subplot(3, 1, 1)
plt.plot(epoch_loss_list)
plt.title("loss")
plt.subplot(3, 1, 2)
plt.plot(epoch_train_acc_list)
plt.title("train UAS")
plt.subplot(3, 1, 3)
plt.plot(epoch_test_acc_list)
plt.title("test UAS")
print(epoch_train_acc_list)
plt.savefig('./basic_model_graphs.png')
# train
acc = 0 # to keep track of accuracy
printable_loss = 0 # To keep track of the loss value
i = 0
batch_loss = 0
batch_acc = 0
epoch_loss = 0
for batch_idx, input_data in enumerate(train_dataloader):
i += 1
words_idx_tensor, pos_idx_tensor, heads_tensor = input_data
tag_scores = model(words_idx_tensor, pos_idx_tensor)
loss = NLLL_function(tag_scores, heads_tensor[0].to(device))
# epoch statistics
epoch_loss += loss
#
loss = loss / acumulate_grad_steps
loss.backward()
batch_loss += loss
acc = (accuracy(heads_tensor[0].cpu(), tag_scores.cpu())) / acumulate_grad_steps
batch_acc += acc
if i % acumulate_grad_steps == 0:
optimizer.step()
model.zero_grad()
print("batch_loss = ", batch_loss.item())
print("batch_acc = ", batch_acc)
batch_loss = 0
batch_acc = 0
# end of epoch - get statistics
epoch_loss_list.append(epoch_loss / i)
epoch_train_acc_list.append(evaluate(model, train_dataloader))
epoch_test_acc_list.append(evaluate(model, test_dataloader))
# end of train - plot the two graphs
fig = plt.figure()
plt.subplot(3, 1, 1)
plt.plot(epoch_loss_list)
plt.title("loss")
plt.subplot(3, 1, 2)
plt.plot(epoch_train_acc_list)
plt.title("train UAS")
plt.subplot(3, 1, 3)
plt.plot(epoch_test_acc_list)
plt.title("test UAS")
plt.show()
plt.savefig('basic_model_graphs.png')
if __name__ == "__main__" :
if HYPER_PARAMETER_TUNING:
hyper_parameter_tuning()
else:
main() | import matplotlib.pyplot as plt
from chu_liu_edmonds import *
from os import path
| random_line_split |
basic_model.py | from collections import defaultdict
# from torchtext.vocab import Vocab
from torch.utils.data.dataset import Dataset, TensorDataset
from pathlib import Path
from collections import Counter
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
from torch.utils.data.dataloader import DataLoader
from utils import *
import matplotlib.pyplot as plt
from chu_liu_edmonds import *
from os import path
# taken from the paper
MLP_HIDDEN_DIM = 100
EPOCHS = 150
WORD_EMBEDDING_DIM = 100
POS_EMBEDDING_DIM = 25
HIDDEN_DIM = 125
LEARNING_RATE = 0.01
EARLY_STOPPING = 10 # num epochs with no validation acc improvement to stop training
PATH = "./basic_model_best_params"
cross_entropy_loss = nn.CrossEntropyLoss(reduction='mean')
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# class not_efficientMLP(nn.Module):
# def __init__(self, lstm_dim, mlp_hidden_dim):
# super(not_efficientMLP, self).__init__()
# self.first_linear = nn.Linear(2 * lstm_dim, mlp_hidden_dim)
# self.non_linearity = nn.ReLU()
# self.second_mlp = nn.Linear(mlp_hidden_dim, 1, bias=True) # will output a score of a pair
#
# def forward(self, lstm_out):
# sentence_length = lstm_out.shape[0]
# scores = torch.zeros(size=(sentence_length, sentence_length)).to(device)
# for i, v_i in enumerate(lstm_out):
# for j, v_j in enumerate(lstm_out):
# if i == j:
# scores[i, j] = 0
# else:
# a = torch.cat((v_i, v_j), dim=0)
# x = self.first_linear(a)
# y = self.non_linearity(x)
# scores[i, j] = self.second_mlp(y)
# return scores
class SplittedMLP(nn.Module):
def __init__(self, lstm_dim, mlp_hidden_dim):
super(SplittedMLP, self).__init__()
self.fc_h = nn.Linear(lstm_dim, mlp_hidden_dim, bias=True) # fully-connected to output mu
self.fc_m = nn.Linear(lstm_dim, mlp_hidden_dim, bias=False) # fully-connected to output mu
def forward(self, lstm_out):
heads_hidden = self.fc_h(lstm_out)
mods_hidden = self.fc_m(lstm_out)
return heads_hidden, mods_hidden
class MLP(nn.Module):
def | (self, lstm_dim, mlp_hidden_dim):
super(MLP, self).__init__()
self.first_mlp = SplittedMLP(lstm_dim, mlp_hidden_dim)
self.non_linearity = nn.Tanh()
self.second_mlp = nn.Linear(mlp_hidden_dim, 1, bias=True) # will output a score of a pair
def forward(self, lstm_out):
sentence_length = lstm_out.shape[0]
heads_hidden, mods_hidden = self.first_mlp(lstm_out)
scores = torch.zeros(size=(sentence_length, sentence_length)).to(device)
# we will fill the table row by row, using broadcasting
for mod in range(sentence_length):
mod_hidden = mods_hidden[mod]
summed_values = mod_hidden + heads_hidden # a single mod with all heads possibilities
x = self.non_linearity(summed_values)
scores[:, mod] = torch.flatten(self.second_mlp(x))
scores[mod, mod] = -np.inf # a word cant be its head
return scores
class DnnDependencyParser(nn.Module):
def __init__(self, word_embedding_dim, pos_embedding_dim, hidden_dim, word_vocab_size, tag_vocab_size):
super(DnnDependencyParser, self).__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# get a tensor of size word_vocab_size and return a word embedding
self.word_embedding = nn.Embedding(word_vocab_size, word_embedding_dim)
# get a tensor of size tag_vocab_size and return a pos embedding
self.pos_embedding = nn.Embedding(tag_vocab_size, pos_embedding_dim)
self.lstm = nn.LSTM(input_size=word_embedding_dim + pos_embedding_dim, hidden_size=hidden_dim, num_layers=2,
bidirectional=True, batch_first=False)
self.mlp = MLP(2*hidden_dim, MLP_HIDDEN_DIM)
# self.mlp = not_efficientMLP(2*hidden_dim, MLP_HIDDEN_DIM)
def forward(self, word_idx_tensor, pos_idx_tensor):
# get x = concat(e(w), e(p))
e_w = self.word_embedding(word_idx_tensor.to(self.device)) # [batch_size, seq_length, e_w]
e_p = self.pos_embedding(pos_idx_tensor.to(self.device)) # [batch_size, seq_length, e_p]
embeds = torch.cat((e_w, e_p), dim=2).to(self.device) # [batch_size, seq_length, e_w + e_p]
# assert embeds.shape[0] == 1 and embeds.shape[2] == POS_EMBEDDING_DIM + WORD_EMBEDDING_DIM
lstm_out, _ = self.lstm(embeds.view(embeds.shape[1], 1, -1)) # [seq_length, batch_size, 2*hidden_dim]
# Turns the output into one big tensor, each line is rep of a word in the sentence
lstm_out = lstm_out.view(lstm_out.shape[0], -1) # [seq_length, 2*hidden_dim]
out = self.mlp(lstm_out)
return out
def NLLL_function(scores, true_tree):
"""
Parameters
----------
scores - a matrix of size (sentence_length x sentence length)
true_tree - ground truth dependency tree
Returns the loss
-------
"""
clean_scores = scores[:, 1:] # ROOT cant be modifier
clean_true_tree = true_tree[1:]
sentence_length = clean_scores.shape[1] # without root
loss = 0
for mod in range(sentence_length):
loss += cross_entropy_loss(clean_scores[:, mod].unsqueeze(dim=0), clean_true_tree[mod:mod+1])
return (1.0/sentence_length) * loss
# def NLLL(output, target):
# """
# :param output: The table of MLP scores of each word pair
# :param target: The ground truth of the actual arcs
# :return:
# """
# # loss = -1/|Y|*[S_gt - sum(log(sum(exp(s_j_m))))]
# S_gt = 0
# mod_score = 0
# for idx, head in enumerate(target[0]):
# if idx == 0:
# continue
# head_idx = head.item()
# mod_idx = idx
# S_gt += output[head_idx, mod_idx]
# #
# S_j_m = output[:, mod_idx]
# mod_score += torch.log(torch.sum(torch.exp(S_j_m)))
# Y_i = target[0].shape[0]
# final_loss = (-1./Y_i)*(S_gt - mod_score)
# return final_loss
#
#
# def get_acc_measurements(GT, energy_table):
# predicted_mst, _ = decode_mst(energy=energy_table, length=energy_table.shape[0], has_labels=False)
# y_pred = torch.from_numpy(predicted_mst[1:])
# y_true = GT[1:]
# print("y_pred", y_pred)
# print("y_true = ", y_true)
# print((y_pred == y_true).sum())
# acc = (y_pred == y_true).sum()/float(y_true.shape[0])
# return acc.item()
def accuracy(ground_truth, energy_table):
predicted_mst, _ = decode_mst(energy=energy_table.detach(), length=energy_table.shape[0], has_labels=False)
# first one is the HEAD of root so we avoid taking it into account
y_pred = torch.from_numpy(predicted_mst[1:])
y_true = ground_truth[1:]
acc = (y_pred == y_true).sum()/float(y_true.shape[0])
return acc.item()
def evaluate(model, data_loader):
val_acc = 0
val_size = 0
for batch_idx, input_data in enumerate(data_loader):
val_size += 1
with torch.no_grad():
words_idx_tensor, pos_idx_tensor, heads_tensor = input_data
tag_scores = model(words_idx_tensor, pos_idx_tensor)
val_acc += (accuracy(heads_tensor[0].cpu(), tag_scores.cpu()))
return val_acc / val_size
def main():
# sanity check
data_dir = "HW2-files/"
path_train = data_dir + "train.labeled"
print("path_train -", path_train)
path_test = data_dir + "test.labeled"
print("path_test -", path_test)
paths_list = [path_train, path_test]
word_cnt, word_dict, pos_dict = get_vocabs(paths_list)
train = PosDataset(word_cnt, word_dict, pos_dict, data_dir, 'train')
# split into validation
train_set, val_set = torch.utils.data.random_split(train, [4000, 1000])
train_dataloader = DataLoader(train_set, shuffle=False) # TODO return to true after debugging
val_dataloader = DataLoader(val_set, shuffle=False)
test = PosDataset(word_cnt, word_dict, pos_dict, data_dir, 'test')
test_dataloader = DataLoader(test, shuffle=False)
a = next(iter(train_dataloader))
# a[0] -> word - idx of a sentence
# a[1] -> pos - idx of a sentence
# a[2] -> head token per sentence
assert len(a[0]) == len(a[1]) == len(a[2])
word_vocab_size = len(train.word2idx)
print(word_vocab_size)
tag_vocab_size = len(train.pos_idx_mappings)
print(tag_vocab_size)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
model = DnnDependencyParser(WORD_EMBEDDING_DIM, POS_EMBEDDING_DIM, HIDDEN_DIM, word_vocab_size, tag_vocab_size).to(device)
if use_cuda:
model.cuda()
# Define the loss function as the Negative Log Likelihood loss (NLLLoss)
loss_function = nn.NLLLoss()
# We will be using a simple SGD optimizer to minimize the loss function
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
acumulate_grad_steps = 128 # This is the actual batch_size, while we officially use batch_size=1
# Training start
print("Training Started")
epoch_loss_list = []
epoch_train_acc_list = []
epoch_test_acc_list = []
best_val_acc = 0
num_epochs_wo_improvement = 0
for epoch in range(EPOCHS):
val_acc = evaluate(model, val_dataloader)
print("EPOCH = ", epoch)
print("EPOCH val acc = ", val_acc)
if val_acc < best_val_acc: # no improvement
num_epochs_wo_improvement += 1
if num_epochs_wo_improvement >= EARLY_STOPPING:
print("STOPPED TRAINING DUE TO EARLY STOPPING")
return
else: # improvement
print("saving model since it improved on validation :)")
torch.save(model.state_dict(), PATH)
num_epochs_wo_improvement = 0
best_val_acc = val_acc
fig = plt.figure()
plt.subplot(3, 1, 1)
plt.plot(epoch_loss_list)
plt.title("loss")
plt.subplot(3, 1, 2)
plt.plot(epoch_train_acc_list)
plt.title("train UAS")
plt.subplot(3, 1, 3)
plt.plot(epoch_test_acc_list)
plt.title("test UAS")
print(epoch_train_acc_list)
plt.savefig('./basic_model_graphs.png')
# train
acc = 0 # to keep track of accuracy
printable_loss = 0 # To keep track of the loss value
i = 0
batch_loss = 0
batch_acc = 0
epoch_loss = 0
for batch_idx, input_data in enumerate(train_dataloader):
i += 1
words_idx_tensor, pos_idx_tensor, heads_tensor = input_data
tag_scores = model(words_idx_tensor, pos_idx_tensor)
loss = NLLL_function(tag_scores, heads_tensor[0].to(device))
# epoch statistics
epoch_loss += loss
#
loss = loss / acumulate_grad_steps
loss.backward()
batch_loss += loss
acc = (accuracy(heads_tensor[0].cpu(), tag_scores.cpu())) / acumulate_grad_steps
batch_acc += acc
if i % acumulate_grad_steps == 0:
optimizer.step()
model.zero_grad()
print("batch_loss = ", batch_loss.item())
print("batch_acc = ", batch_acc)
batch_loss = 0
batch_acc = 0
# end of epoch - get statistics
epoch_loss_list.append(epoch_loss / i)
epoch_train_acc_list.append(evaluate(model, train_dataloader))
epoch_test_acc_list.append(evaluate(model, test_dataloader))
# end of train - plot the two graphs
fig = plt.figure()
plt.subplot(3, 1, 1)
plt.plot(epoch_loss_list)
plt.title("loss")
plt.subplot(3, 1, 2)
plt.plot(epoch_train_acc_list)
plt.title("train UAS")
plt.subplot(3, 1, 3)
plt.plot(epoch_test_acc_list)
plt.title("test UAS")
plt.show()
plt.savefig('basic_model_graphs.png')
if __name__ == "__main__" :
if HYPER_PARAMETER_TUNING:
hyper_parameter_tuning()
else:
main() | __init__ | identifier_name |
basic_model.py | from collections import defaultdict
# from torchtext.vocab import Vocab
from torch.utils.data.dataset import Dataset, TensorDataset
from pathlib import Path
from collections import Counter
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
from torch.utils.data.dataloader import DataLoader
from utils import *
import matplotlib.pyplot as plt
from chu_liu_edmonds import *
from os import path
# taken from the paper
MLP_HIDDEN_DIM = 100
EPOCHS = 150
WORD_EMBEDDING_DIM = 100
POS_EMBEDDING_DIM = 25
HIDDEN_DIM = 125
LEARNING_RATE = 0.01
EARLY_STOPPING = 10 # num epochs with no validation acc improvement to stop training
PATH = "./basic_model_best_params"
cross_entropy_loss = nn.CrossEntropyLoss(reduction='mean')
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# class not_efficientMLP(nn.Module):
# def __init__(self, lstm_dim, mlp_hidden_dim):
# super(not_efficientMLP, self).__init__()
# self.first_linear = nn.Linear(2 * lstm_dim, mlp_hidden_dim)
# self.non_linearity = nn.ReLU()
# self.second_mlp = nn.Linear(mlp_hidden_dim, 1, bias=True) # will output a score of a pair
#
# def forward(self, lstm_out):
# sentence_length = lstm_out.shape[0]
# scores = torch.zeros(size=(sentence_length, sentence_length)).to(device)
# for i, v_i in enumerate(lstm_out):
# for j, v_j in enumerate(lstm_out):
# if i == j:
# scores[i, j] = 0
# else:
# a = torch.cat((v_i, v_j), dim=0)
# x = self.first_linear(a)
# y = self.non_linearity(x)
# scores[i, j] = self.second_mlp(y)
# return scores
class SplittedMLP(nn.Module):
def __init__(self, lstm_dim, mlp_hidden_dim):
super(SplittedMLP, self).__init__()
self.fc_h = nn.Linear(lstm_dim, mlp_hidden_dim, bias=True) # fully-connected to output mu
self.fc_m = nn.Linear(lstm_dim, mlp_hidden_dim, bias=False) # fully-connected to output mu
def forward(self, lstm_out):
heads_hidden = self.fc_h(lstm_out)
mods_hidden = self.fc_m(lstm_out)
return heads_hidden, mods_hidden
class MLP(nn.Module):
def __init__(self, lstm_dim, mlp_hidden_dim):
super(MLP, self).__init__()
self.first_mlp = SplittedMLP(lstm_dim, mlp_hidden_dim)
self.non_linearity = nn.Tanh()
self.second_mlp = nn.Linear(mlp_hidden_dim, 1, bias=True) # will output a score of a pair
def forward(self, lstm_out):
sentence_length = lstm_out.shape[0]
heads_hidden, mods_hidden = self.first_mlp(lstm_out)
scores = torch.zeros(size=(sentence_length, sentence_length)).to(device)
# we will fill the table row by row, using broadcasting
for mod in range(sentence_length):
mod_hidden = mods_hidden[mod]
summed_values = mod_hidden + heads_hidden # a single mod with all heads possibilities
x = self.non_linearity(summed_values)
scores[:, mod] = torch.flatten(self.second_mlp(x))
scores[mod, mod] = -np.inf # a word cant be its head
return scores
class DnnDependencyParser(nn.Module):
def __init__(self, word_embedding_dim, pos_embedding_dim, hidden_dim, word_vocab_size, tag_vocab_size):
super(DnnDependencyParser, self).__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# get a tensor of size word_vocab_size and return a word embedding
self.word_embedding = nn.Embedding(word_vocab_size, word_embedding_dim)
# get a tensor of size tag_vocab_size and return a pos embedding
self.pos_embedding = nn.Embedding(tag_vocab_size, pos_embedding_dim)
self.lstm = nn.LSTM(input_size=word_embedding_dim + pos_embedding_dim, hidden_size=hidden_dim, num_layers=2,
bidirectional=True, batch_first=False)
self.mlp = MLP(2*hidden_dim, MLP_HIDDEN_DIM)
# self.mlp = not_efficientMLP(2*hidden_dim, MLP_HIDDEN_DIM)
def forward(self, word_idx_tensor, pos_idx_tensor):
# get x = concat(e(w), e(p))
e_w = self.word_embedding(word_idx_tensor.to(self.device)) # [batch_size, seq_length, e_w]
e_p = self.pos_embedding(pos_idx_tensor.to(self.device)) # [batch_size, seq_length, e_p]
embeds = torch.cat((e_w, e_p), dim=2).to(self.device) # [batch_size, seq_length, e_w + e_p]
# assert embeds.shape[0] == 1 and embeds.shape[2] == POS_EMBEDDING_DIM + WORD_EMBEDDING_DIM
lstm_out, _ = self.lstm(embeds.view(embeds.shape[1], 1, -1)) # [seq_length, batch_size, 2*hidden_dim]
# Turns the output into one big tensor, each line is rep of a word in the sentence
lstm_out = lstm_out.view(lstm_out.shape[0], -1) # [seq_length, 2*hidden_dim]
out = self.mlp(lstm_out)
return out
def NLLL_function(scores, true_tree):
"""
Parameters
----------
scores - a matrix of size (sentence_length x sentence length)
true_tree - ground truth dependency tree
Returns the loss
-------
"""
clean_scores = scores[:, 1:] # ROOT cant be modifier
clean_true_tree = true_tree[1:]
sentence_length = clean_scores.shape[1] # without root
loss = 0
for mod in range(sentence_length):
loss += cross_entropy_loss(clean_scores[:, mod].unsqueeze(dim=0), clean_true_tree[mod:mod+1])
return (1.0/sentence_length) * loss
# def NLLL(output, target):
# """
# :param output: The table of MLP scores of each word pair
# :param target: The ground truth of the actual arcs
# :return:
# """
# # loss = -1/|Y|*[S_gt - sum(log(sum(exp(s_j_m))))]
# S_gt = 0
# mod_score = 0
# for idx, head in enumerate(target[0]):
# if idx == 0:
# continue
# head_idx = head.item()
# mod_idx = idx
# S_gt += output[head_idx, mod_idx]
# #
# S_j_m = output[:, mod_idx]
# mod_score += torch.log(torch.sum(torch.exp(S_j_m)))
# Y_i = target[0].shape[0]
# final_loss = (-1./Y_i)*(S_gt - mod_score)
# return final_loss
#
#
# def get_acc_measurements(GT, energy_table):
# predicted_mst, _ = decode_mst(energy=energy_table, length=energy_table.shape[0], has_labels=False)
# y_pred = torch.from_numpy(predicted_mst[1:])
# y_true = GT[1:]
# print("y_pred", y_pred)
# print("y_true = ", y_true)
# print((y_pred == y_true).sum())
# acc = (y_pred == y_true).sum()/float(y_true.shape[0])
# return acc.item()
def accuracy(ground_truth, energy_table):
predicted_mst, _ = decode_mst(energy=energy_table.detach(), length=energy_table.shape[0], has_labels=False)
# first one is the HEAD of root so we avoid taking it into account
y_pred = torch.from_numpy(predicted_mst[1:])
y_true = ground_truth[1:]
acc = (y_pred == y_true).sum()/float(y_true.shape[0])
return acc.item()
def evaluate(model, data_loader):
val_acc = 0
val_size = 0
for batch_idx, input_data in enumerate(data_loader):
val_size += 1
with torch.no_grad():
words_idx_tensor, pos_idx_tensor, heads_tensor = input_data
tag_scores = model(words_idx_tensor, pos_idx_tensor)
val_acc += (accuracy(heads_tensor[0].cpu(), tag_scores.cpu()))
return val_acc / val_size
def main():
# sanity check
data_dir = "HW2-files/"
path_train = data_dir + "train.labeled"
print("path_train -", path_train)
path_test = data_dir + "test.labeled"
print("path_test -", path_test)
paths_list = [path_train, path_test]
word_cnt, word_dict, pos_dict = get_vocabs(paths_list)
train = PosDataset(word_cnt, word_dict, pos_dict, data_dir, 'train')
# split into validation
train_set, val_set = torch.utils.data.random_split(train, [4000, 1000])
train_dataloader = DataLoader(train_set, shuffle=False) # TODO return to true after debugging
val_dataloader = DataLoader(val_set, shuffle=False)
test = PosDataset(word_cnt, word_dict, pos_dict, data_dir, 'test')
test_dataloader = DataLoader(test, shuffle=False)
a = next(iter(train_dataloader))
# a[0] -> word - idx of a sentence
# a[1] -> pos - idx of a sentence
# a[2] -> head token per sentence
assert len(a[0]) == len(a[1]) == len(a[2])
word_vocab_size = len(train.word2idx)
print(word_vocab_size)
tag_vocab_size = len(train.pos_idx_mappings)
print(tag_vocab_size)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
model = DnnDependencyParser(WORD_EMBEDDING_DIM, POS_EMBEDDING_DIM, HIDDEN_DIM, word_vocab_size, tag_vocab_size).to(device)
if use_cuda:
model.cuda()
# Define the loss function as the Negative Log Likelihood loss (NLLLoss)
loss_function = nn.NLLLoss()
# We will be using a simple SGD optimizer to minimize the loss function
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
acumulate_grad_steps = 128 # This is the actual batch_size, while we officially use batch_size=1
# Training start
print("Training Started")
epoch_loss_list = []
epoch_train_acc_list = []
epoch_test_acc_list = []
best_val_acc = 0
num_epochs_wo_improvement = 0
for epoch in range(EPOCHS):
val_acc = evaluate(model, val_dataloader)
print("EPOCH = ", epoch)
print("EPOCH val acc = ", val_acc)
if val_acc < best_val_acc: # no improvement
num_epochs_wo_improvement += 1
if num_epochs_wo_improvement >= EARLY_STOPPING:
print("STOPPED TRAINING DUE TO EARLY STOPPING")
return
else: # improvement
print("saving model since it improved on validation :)")
torch.save(model.state_dict(), PATH)
num_epochs_wo_improvement = 0
best_val_acc = val_acc
fig = plt.figure()
plt.subplot(3, 1, 1)
plt.plot(epoch_loss_list)
plt.title("loss")
plt.subplot(3, 1, 2)
plt.plot(epoch_train_acc_list)
plt.title("train UAS")
plt.subplot(3, 1, 3)
plt.plot(epoch_test_acc_list)
plt.title("test UAS")
print(epoch_train_acc_list)
plt.savefig('./basic_model_graphs.png')
# train
acc = 0 # to keep track of accuracy
printable_loss = 0 # To keep track of the loss value
i = 0
batch_loss = 0
batch_acc = 0
epoch_loss = 0
for batch_idx, input_data in enumerate(train_dataloader):
i += 1
words_idx_tensor, pos_idx_tensor, heads_tensor = input_data
tag_scores = model(words_idx_tensor, pos_idx_tensor)
loss = NLLL_function(tag_scores, heads_tensor[0].to(device))
# epoch statistics
epoch_loss += loss
#
loss = loss / acumulate_grad_steps
loss.backward()
batch_loss += loss
acc = (accuracy(heads_tensor[0].cpu(), tag_scores.cpu())) / acumulate_grad_steps
batch_acc += acc
if i % acumulate_grad_steps == 0:
optimizer.step()
model.zero_grad()
print("batch_loss = ", batch_loss.item())
print("batch_acc = ", batch_acc)
batch_loss = 0
batch_acc = 0
# end of epoch - get statistics
epoch_loss_list.append(epoch_loss / i)
epoch_train_acc_list.append(evaluate(model, train_dataloader))
epoch_test_acc_list.append(evaluate(model, test_dataloader))
# end of train - plot the two graphs
fig = plt.figure()
plt.subplot(3, 1, 1)
plt.plot(epoch_loss_list)
plt.title("loss")
plt.subplot(3, 1, 2)
plt.plot(epoch_train_acc_list)
plt.title("train UAS")
plt.subplot(3, 1, 3)
plt.plot(epoch_test_acc_list)
plt.title("test UAS")
plt.show()
plt.savefig('basic_model_graphs.png')
if __name__ == "__main__" :
if HYPER_PARAMETER_TUNING:
|
else:
main() | hyper_parameter_tuning() | conditional_block |
basic_model.py | from collections import defaultdict
# from torchtext.vocab import Vocab
from torch.utils.data.dataset import Dataset, TensorDataset
from pathlib import Path
from collections import Counter
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
from torch.utils.data.dataloader import DataLoader
from utils import *
import matplotlib.pyplot as plt
from chu_liu_edmonds import *
from os import path
# taken from the paper
MLP_HIDDEN_DIM = 100
EPOCHS = 150
WORD_EMBEDDING_DIM = 100
POS_EMBEDDING_DIM = 25
HIDDEN_DIM = 125
LEARNING_RATE = 0.01
EARLY_STOPPING = 10 # num epochs with no validation acc improvement to stop training
PATH = "./basic_model_best_params"
cross_entropy_loss = nn.CrossEntropyLoss(reduction='mean')
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# class not_efficientMLP(nn.Module):
# def __init__(self, lstm_dim, mlp_hidden_dim):
# super(not_efficientMLP, self).__init__()
# self.first_linear = nn.Linear(2 * lstm_dim, mlp_hidden_dim)
# self.non_linearity = nn.ReLU()
# self.second_mlp = nn.Linear(mlp_hidden_dim, 1, bias=True) # will output a score of a pair
#
# def forward(self, lstm_out):
# sentence_length = lstm_out.shape[0]
# scores = torch.zeros(size=(sentence_length, sentence_length)).to(device)
# for i, v_i in enumerate(lstm_out):
# for j, v_j in enumerate(lstm_out):
# if i == j:
# scores[i, j] = 0
# else:
# a = torch.cat((v_i, v_j), dim=0)
# x = self.first_linear(a)
# y = self.non_linearity(x)
# scores[i, j] = self.second_mlp(y)
# return scores
class SplittedMLP(nn.Module):
def __init__(self, lstm_dim, mlp_hidden_dim):
super(SplittedMLP, self).__init__()
self.fc_h = nn.Linear(lstm_dim, mlp_hidden_dim, bias=True) # fully-connected to output mu
self.fc_m = nn.Linear(lstm_dim, mlp_hidden_dim, bias=False) # fully-connected to output mu
def forward(self, lstm_out):
|
class MLP(nn.Module):
def __init__(self, lstm_dim, mlp_hidden_dim):
super(MLP, self).__init__()
self.first_mlp = SplittedMLP(lstm_dim, mlp_hidden_dim)
self.non_linearity = nn.Tanh()
self.second_mlp = nn.Linear(mlp_hidden_dim, 1, bias=True) # will output a score of a pair
def forward(self, lstm_out):
sentence_length = lstm_out.shape[0]
heads_hidden, mods_hidden = self.first_mlp(lstm_out)
scores = torch.zeros(size=(sentence_length, sentence_length)).to(device)
# we will fill the table row by row, using broadcasting
for mod in range(sentence_length):
mod_hidden = mods_hidden[mod]
summed_values = mod_hidden + heads_hidden # a single mod with all heads possibilities
x = self.non_linearity(summed_values)
scores[:, mod] = torch.flatten(self.second_mlp(x))
scores[mod, mod] = -np.inf # a word cant be its head
return scores
class DnnDependencyParser(nn.Module):
def __init__(self, word_embedding_dim, pos_embedding_dim, hidden_dim, word_vocab_size, tag_vocab_size):
super(DnnDependencyParser, self).__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# get a tensor of size word_vocab_size and return a word embedding
self.word_embedding = nn.Embedding(word_vocab_size, word_embedding_dim)
# get a tensor of size tag_vocab_size and return a pos embedding
self.pos_embedding = nn.Embedding(tag_vocab_size, pos_embedding_dim)
self.lstm = nn.LSTM(input_size=word_embedding_dim + pos_embedding_dim, hidden_size=hidden_dim, num_layers=2,
bidirectional=True, batch_first=False)
self.mlp = MLP(2*hidden_dim, MLP_HIDDEN_DIM)
# self.mlp = not_efficientMLP(2*hidden_dim, MLP_HIDDEN_DIM)
def forward(self, word_idx_tensor, pos_idx_tensor):
# get x = concat(e(w), e(p))
e_w = self.word_embedding(word_idx_tensor.to(self.device)) # [batch_size, seq_length, e_w]
e_p = self.pos_embedding(pos_idx_tensor.to(self.device)) # [batch_size, seq_length, e_p]
embeds = torch.cat((e_w, e_p), dim=2).to(self.device) # [batch_size, seq_length, e_w + e_p]
# assert embeds.shape[0] == 1 and embeds.shape[2] == POS_EMBEDDING_DIM + WORD_EMBEDDING_DIM
lstm_out, _ = self.lstm(embeds.view(embeds.shape[1], 1, -1)) # [seq_length, batch_size, 2*hidden_dim]
# Turns the output into one big tensor, each line is rep of a word in the sentence
lstm_out = lstm_out.view(lstm_out.shape[0], -1) # [seq_length, 2*hidden_dim]
out = self.mlp(lstm_out)
return out
def NLLL_function(scores, true_tree):
"""
Parameters
----------
scores - a matrix of size (sentence_length x sentence length)
true_tree - ground truth dependency tree
Returns the loss
-------
"""
clean_scores = scores[:, 1:] # ROOT cant be modifier
clean_true_tree = true_tree[1:]
sentence_length = clean_scores.shape[1] # without root
loss = 0
for mod in range(sentence_length):
loss += cross_entropy_loss(clean_scores[:, mod].unsqueeze(dim=0), clean_true_tree[mod:mod+1])
return (1.0/sentence_length) * loss
# def NLLL(output, target):
# """
# :param output: The table of MLP scores of each word pair
# :param target: The ground truth of the actual arcs
# :return:
# """
# # loss = -1/|Y|*[S_gt - sum(log(sum(exp(s_j_m))))]
# S_gt = 0
# mod_score = 0
# for idx, head in enumerate(target[0]):
# if idx == 0:
# continue
# head_idx = head.item()
# mod_idx = idx
# S_gt += output[head_idx, mod_idx]
# #
# S_j_m = output[:, mod_idx]
# mod_score += torch.log(torch.sum(torch.exp(S_j_m)))
# Y_i = target[0].shape[0]
# final_loss = (-1./Y_i)*(S_gt - mod_score)
# return final_loss
#
#
# def get_acc_measurements(GT, energy_table):
# predicted_mst, _ = decode_mst(energy=energy_table, length=energy_table.shape[0], has_labels=False)
# y_pred = torch.from_numpy(predicted_mst[1:])
# y_true = GT[1:]
# print("y_pred", y_pred)
# print("y_true = ", y_true)
# print((y_pred == y_true).sum())
# acc = (y_pred == y_true).sum()/float(y_true.shape[0])
# return acc.item()
def accuracy(ground_truth, energy_table):
predicted_mst, _ = decode_mst(energy=energy_table.detach(), length=energy_table.shape[0], has_labels=False)
# first one is the HEAD of root so we avoid taking it into account
y_pred = torch.from_numpy(predicted_mst[1:])
y_true = ground_truth[1:]
acc = (y_pred == y_true).sum()/float(y_true.shape[0])
return acc.item()
def evaluate(model, data_loader):
val_acc = 0
val_size = 0
for batch_idx, input_data in enumerate(data_loader):
val_size += 1
with torch.no_grad():
words_idx_tensor, pos_idx_tensor, heads_tensor = input_data
tag_scores = model(words_idx_tensor, pos_idx_tensor)
val_acc += (accuracy(heads_tensor[0].cpu(), tag_scores.cpu()))
return val_acc / val_size
def main():
# sanity check
data_dir = "HW2-files/"
path_train = data_dir + "train.labeled"
print("path_train -", path_train)
path_test = data_dir + "test.labeled"
print("path_test -", path_test)
paths_list = [path_train, path_test]
word_cnt, word_dict, pos_dict = get_vocabs(paths_list)
train = PosDataset(word_cnt, word_dict, pos_dict, data_dir, 'train')
# split into validation
train_set, val_set = torch.utils.data.random_split(train, [4000, 1000])
train_dataloader = DataLoader(train_set, shuffle=False) # TODO return to true after debugging
val_dataloader = DataLoader(val_set, shuffle=False)
test = PosDataset(word_cnt, word_dict, pos_dict, data_dir, 'test')
test_dataloader = DataLoader(test, shuffle=False)
a = next(iter(train_dataloader))
# a[0] -> word - idx of a sentence
# a[1] -> pos - idx of a sentence
# a[2] -> head token per sentence
assert len(a[0]) == len(a[1]) == len(a[2])
word_vocab_size = len(train.word2idx)
print(word_vocab_size)
tag_vocab_size = len(train.pos_idx_mappings)
print(tag_vocab_size)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
model = DnnDependencyParser(WORD_EMBEDDING_DIM, POS_EMBEDDING_DIM, HIDDEN_DIM, word_vocab_size, tag_vocab_size).to(device)
if use_cuda:
model.cuda()
# Define the loss function as the Negative Log Likelihood loss (NLLLoss)
loss_function = nn.NLLLoss()
# We will be using a simple SGD optimizer to minimize the loss function
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
acumulate_grad_steps = 128 # This is the actual batch_size, while we officially use batch_size=1
# Training start
print("Training Started")
epoch_loss_list = []
epoch_train_acc_list = []
epoch_test_acc_list = []
best_val_acc = 0
num_epochs_wo_improvement = 0
for epoch in range(EPOCHS):
val_acc = evaluate(model, val_dataloader)
print("EPOCH = ", epoch)
print("EPOCH val acc = ", val_acc)
if val_acc < best_val_acc: # no improvement
num_epochs_wo_improvement += 1
if num_epochs_wo_improvement >= EARLY_STOPPING:
print("STOPPED TRAINING DUE TO EARLY STOPPING")
return
else: # improvement
print("saving model since it improved on validation :)")
torch.save(model.state_dict(), PATH)
num_epochs_wo_improvement = 0
best_val_acc = val_acc
fig = plt.figure()
plt.subplot(3, 1, 1)
plt.plot(epoch_loss_list)
plt.title("loss")
plt.subplot(3, 1, 2)
plt.plot(epoch_train_acc_list)
plt.title("train UAS")
plt.subplot(3, 1, 3)
plt.plot(epoch_test_acc_list)
plt.title("test UAS")
print(epoch_train_acc_list)
plt.savefig('./basic_model_graphs.png')
# train
acc = 0 # to keep track of accuracy
printable_loss = 0 # To keep track of the loss value
i = 0
batch_loss = 0
batch_acc = 0
epoch_loss = 0
for batch_idx, input_data in enumerate(train_dataloader):
i += 1
words_idx_tensor, pos_idx_tensor, heads_tensor = input_data
tag_scores = model(words_idx_tensor, pos_idx_tensor)
loss = NLLL_function(tag_scores, heads_tensor[0].to(device))
# epoch statistics
epoch_loss += loss
#
loss = loss / acumulate_grad_steps
loss.backward()
batch_loss += loss
acc = (accuracy(heads_tensor[0].cpu(), tag_scores.cpu())) / acumulate_grad_steps
batch_acc += acc
if i % acumulate_grad_steps == 0:
optimizer.step()
model.zero_grad()
print("batch_loss = ", batch_loss.item())
print("batch_acc = ", batch_acc)
batch_loss = 0
batch_acc = 0
# end of epoch - get statistics
epoch_loss_list.append(epoch_loss / i)
epoch_train_acc_list.append(evaluate(model, train_dataloader))
epoch_test_acc_list.append(evaluate(model, test_dataloader))
# end of train - plot the two graphs
fig = plt.figure()
plt.subplot(3, 1, 1)
plt.plot(epoch_loss_list)
plt.title("loss")
plt.subplot(3, 1, 2)
plt.plot(epoch_train_acc_list)
plt.title("train UAS")
plt.subplot(3, 1, 3)
plt.plot(epoch_test_acc_list)
plt.title("test UAS")
plt.show()
plt.savefig('basic_model_graphs.png')
if __name__ == "__main__" :
if HYPER_PARAMETER_TUNING:
hyper_parameter_tuning()
else:
main() | heads_hidden = self.fc_h(lstm_out)
mods_hidden = self.fc_m(lstm_out)
return heads_hidden, mods_hidden | identifier_body |
main.go | package main
import (
"encoding/json"
"fmt"
"math"
"math/rand"
"net/http"
"os"
"sort"
"strings"
"time"
m3o "github.com/micro/services/clients/go"
db "github.com/micro/services/clients/go/db"
user "github.com/micro/services/clients/go/user"
uuid "github.com/satori/go.uuid"
)
var client = m3o.NewClient(os.Getenv("MICRO_API_TOKEN"))
// csv of user ids
var mods = os.Getenv("TIREDD_MODS")
// Types
type Post struct {
Id string `json:"id"`
UserId string `json:"userId"`
UserName string `json:"userName"`
Content string `json:"content"`
Created string `json:"created"`
Upvotes float32 `json:"upvotes"`
Downvotes float32 `json:"downvotes"`
Score float32 `json:"score"`
Title string `json:"title"`
Url string `json:"url"`
Sub string `json:"sub"`
CommentCount float32 `json:"commentCount"`
}
type Comment struct {
Content string `json:"content"`
Parent string `json:"sub"`
Upvotes float32 `json:"upvotes"`
Downvotes float32 `json:"downvotes"`
PostId string `json:"postId"`
UserName string `json:"usernName"`
UserId string `json:"userId"`
}
type PostRequest struct {
Post Post `json:"post"`
SessionID string `json:"sessionId"`
}
type VoteRequest struct {
Id string `json:"id"`
SessionID string `json:"sessionId"`
}
type LoginRequest struct {
Username string `json:"username"`
Password string `json:"password"`
}
type CommentRequest struct {
Comment Comment `json:"comment"`
SessionID string `json:"sessionId"`
}
type CommentsRequest struct {
PostId string `json:"postId"`
}
type PostsRequest struct {
Min int32 `json:"min"`
Max int32 `json:"max"`
Limit int32 `json:"limit"`
Sub string `json:"sub"`
}
// Endpoints
// upvote or downvote a post or a comment
func vote(w http.ResponseWriter, req *http.Request, upvote bool, isComment bool, t VoteRequest) error {
if t.Id == "" {
return fmt.Errorf("missing post id")
}
table := "posts"
if isComment {
table = "comments"
}
rsp, err := client.DbService.Read(&db.ReadRequest{
Table: table,
Id: t.Id,
})
if err != nil {
return err
}
if len(rsp.Records) == 0 {
return fmt.Errorf("post or comment not found")
}
// auth
sessionRsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
return err
}
if sessionRsp.Session.UserId == "" {
return fmt.Errorf("user id not found")
}
// prevent double votes
checkTable := table + "votecheck"
checkId := t.Id + sessionRsp.Session.UserId
checkRsp, err := client.DbService.Read(&db.ReadRequest{
Table: checkTable,
Id: checkId,
})
mod := isMod(sessionRsp.Session.UserId, mods)
if err == nil && (checkRsp != nil && len(checkRsp.Records) > 0) {
if !mod {
return fmt.Errorf("already voted")
}
}
val := float64(1)
if mod {
rand.Seed(time.Now().UnixNano())
val = float64(rand.Intn(17-4) + 4)
}
if !mod {
_, err = client.DbService.Create(&db.CreateRequest{
Table: checkTable,
Record: map[string]interface{}{
"id": checkId,
},
})
if err != nil {
return err
}
}
obj := rsp.Records[0]
key := "upvotes"
if !upvote {
key = "downvotes"
}
if _, ok := obj["upvotes"].(float64); !ok {
obj["upvotes"] = float64(0)
}
if _, ok := obj["downvotes"].(float64); !ok {
obj["downvotes"] = float64(0)
}
obj[key] = obj[key].(float64) + val
obj["score"] = obj["upvotes"].(float64) - obj["downvotes"].(float64)
_, err = client.DbService.Update(&db.UpdateRequest{
Table: table,
Id: t.Id,
Record: obj,
})
return err
}
func isMod(userId, s string) bool {
arr := strings.Split(s, ",")
for _, v := range arr {
if v == userId {
return true
}
}
return false
}
func voteWrapper(upvote bool, isComment bool) func(w http.ResponseWriter, req *http.Request) {
return func(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t VoteRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
err = vote(w, req, upvote, isComment, t)
respond(w, nil, err)
}
}
func login(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t LoginRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, err, err)
return
}
_, err = client.UserService.Read(&user.ReadRequest{
Username: t.Username,
})
if err != nil {
createRsp, err := client.UserService.Create(&user.CreateRequest{
Username: t.Username,
Email: t.Username + "@" + t.Username + ".com",
Password: t.Password,
})
if err != nil {
respond(w, createRsp, err)
return
}
}
logRsp, err := client.UserService.Login(&user.LoginRequest{
Username: t.Username,
Password: t.Password,
})
respond(w, logRsp, err)
}
func readSession(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t user.ReadSessionRequest
err := decoder.Decode(&t)
if err != nil {
fmt.Fprintf(w, fmt.Sprintf("%v", err.Error()))
}
rsp, err := client.UserService.ReadSession(&t)
if err != nil {
respond(w, rsp, err)
return
}
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: rsp.Session.UserId,
})
respond(w, map[string]interface{}{
"session": rsp.Session,
"account": readRsp.Account,
}, err)
}
func post(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t PostRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
if t.Post.Sub == "" || t.Post.Title == "" {
respond(w, nil, fmt.Errorf("both title and sub are required"))
return
}
if t.Post.Url == "" && t.Post.Content == "" {
respond(w, nil, fmt.Errorf("url or content required"))
return
}
if len(t.Post.Title) > 200 || len(t.Post.Url) > 200 {
respond(w, nil, fmt.Errorf("post url or title too long"))
return
}
if len(t.Post.Sub) > 50 {
respond(w, nil, fmt.Errorf("post sub too long"))
return
}
if len(t.Post.Content) > 3000 {
respond(w, nil, fmt.Errorf("post content too long"))
return
}
userID := ""
userName := ""
if t.SessionID != "" {
rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
respond(w, rsp, err)
return
}
userID = rsp.Session.UserId
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: userID,
})
if err != nil {
respond(w, rsp, err)
return
}
userName = readRsp.Account.Username
}
client.DbService.Create(&db.CreateRequest{
Table: "posts",
Record: map[string]interface{}{
"id": uuid.NewV4(),
"userId": userID,
"userName": userName,
"content": t.Post.Content,
"url": t.Post.Url,
"upvotes": float64(0),
"downvotes": float64(0),
"score": float64(0),
"sub": t.Post.Sub,
"title": t.Post.Title,
"created": time.Now(),
},
})
}
func comment(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t CommentRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
userID := ""
userName := ""
// get user if available
if t.SessionID != "" {
rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
respond(w, rsp, err)
return
}
userID = rsp.Session.UserId
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: userID,
})
if err != nil {
respond(w, rsp, err)
return
}
userName = readRsp.Account.Username
}
if t.Comment.PostId == "" {
respond(w, nil, fmt.Errorf("no post id"))
return
}
// get post to update comment counter
readRsp, err := client.DbService.Read(&db.ReadRequest{
Table: "posts",
Id: t.Comment.PostId,
})
if err != nil {
respond(w, nil, err)
return
}
if readRsp == nil || len(readRsp.Records) == 0 {
respond(w, nil, fmt.Errorf("post not found"))
return
}
if len(readRsp.Records) > 1 {
respond(w, nil, fmt.Errorf("multiple posts found"))
return
}
// create comment
_, err = client.DbService.Create(&db.CreateRequest{
Table: "comments",
Record: map[string]interface{}{
"id": uuid.NewV4(),
"userId": userID,
"userName": userName,
"content": t.Comment.Content,
"parent": t.Comment.Parent,
"postId": t.Comment.PostId,
"upvotes": float64(0),
"downvotes": float64(0),
"score": float64(0),
"created": time.Now(),
},
})
if err != nil {
respond(w, nil, err)
return
}
// update counter
oldCount, ok := readRsp.Records[0]["commentCount"].(float64)
if !ok {
oldCount = 0
}
oldCount++
readRsp.Records[0]["commentCount"] = oldCount
_, err = client.DbService.Update(&db.UpdateRequest{
Table: "posts",
Id: t.Comment.PostId,
Record: readRsp.Records[0],
})
respond(w, nil, err)
}
func score(m map[string]interface{}) float64 {
score, ok := m["score"].(float64)
if !ok {
return -10000
}
sign := float64(1)
if score == 0 {
sign = 0
}
if score < 0 {
sign = -1
}
order := math.Log10(math.Max(math.Abs(score), 1))
var created int64
switch v := m["created"].(type) {
case string:
t, err := time.Parse(time.RFC3339, v)
if err != nil {
fmt.Println(err)
}
created = t.Unix()
case float64:
created = int64(v)
case int64:
created = v
}
seconds := created - 1134028003
return sign*order + float64(seconds)/45000
}
func posts(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
var t PostsRequest
decoder := json.NewDecoder(req.Body)
err := decoder.Decode(&t)
r := &db.ReadRequest{
Table: "posts",
Order: "desc",
OrderBy: "created",
Limit: 1000,
}
query := ""
// @TODO this should be != 0 but that causes an empty new page
if t.Min > 0 {
query += "score >= " + fmt.Sprintf("%v", t.Min)
}
if t.Max > 0 {
if query != "" {
query += " and "
}
query += "score <= " + fmt.Sprintf("%v", t.Max)
}
if t.Sub != "all" && t.Sub != "" {
if query != "" {
query += " and "
}
query += fmt.Sprintf("sub == '%v'", t.Sub)
}
if query != "" {
r.Query = query
}
rsp, err := client.DbService.Read(r)
sort.Slice(rsp.Records, func(i, j int) bool {
return score(rsp.Records[i]) > score(rsp.Records[j])
})
respond(w, rsp, err)
}
func comments(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
var t CommentsRequest
decoder := json.NewDecoder(req.Body)
err := decoder.Decode(&t)
if err != nil {
fmt.Fprintf(w, fmt.Sprintf("%v", err.Error()))
}
rsp, err := client.DbService.Read(&db.ReadRequest{
Table: "comments",
Order: "desc",
Query: "postId == '" + t.PostId + "'",
OrderBy: "created",
})
sort.Slice(rsp.Records, func(i, j int) bool {
return score(rsp.Records[i]) > score(rsp.Records[j])
})
respond(w, rsp, err)
}
// Utils
func cors(w http.ResponseWriter, req *http.Request) bool {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "*")
w.Header().Set("Access-Control-Allow-Headers", "*")
w.Header().Set("Content-Type", "application/json")
if req.Method == "OPTIONS" {
w.WriteHeader(http.StatusOK)
return true
}
return false
}
func respond(w http.ResponseWriter, i interface{}, err error) |
func main() {
http.HandleFunc("/upvotePost", voteWrapper(true, false))
http.HandleFunc("/downvotePost", voteWrapper(false, false))
http.HandleFunc("/upvoteComment", voteWrapper(true, true))
http.HandleFunc("/downvoteComment", voteWrapper(false, true))
http.HandleFunc("/posts", posts)
http.HandleFunc("/post", post)
http.HandleFunc("/comment", comment)
http.HandleFunc("/comments", comments)
http.HandleFunc("/login", login)
http.HandleFunc("/readSession", readSession)
http.ListenAndServe(":8090", nil)
}
| {
if err != nil {
w.WriteHeader(500)
fmt.Println(err)
}
if i == nil {
i = map[string]interface{}{}
}
if err != nil {
i = map[string]interface{}{
"error": err.Error(),
}
}
bs, _ := json.Marshal(i)
fmt.Fprintf(w, fmt.Sprintf("%v", string(bs)))
} | identifier_body |
main.go | package main
import (
"encoding/json"
"fmt"
"math"
"math/rand"
"net/http"
"os"
"sort"
"strings"
"time"
m3o "github.com/micro/services/clients/go"
db "github.com/micro/services/clients/go/db"
user "github.com/micro/services/clients/go/user"
uuid "github.com/satori/go.uuid"
)
var client = m3o.NewClient(os.Getenv("MICRO_API_TOKEN"))
// csv of user ids
var mods = os.Getenv("TIREDD_MODS")
// Types
type Post struct {
Id string `json:"id"`
UserId string `json:"userId"`
UserName string `json:"userName"`
Content string `json:"content"`
Created string `json:"created"`
Upvotes float32 `json:"upvotes"`
Downvotes float32 `json:"downvotes"`
Score float32 `json:"score"`
Title string `json:"title"`
Url string `json:"url"`
Sub string `json:"sub"`
CommentCount float32 `json:"commentCount"`
}
type Comment struct {
Content string `json:"content"`
Parent string `json:"sub"`
Upvotes float32 `json:"upvotes"`
Downvotes float32 `json:"downvotes"`
PostId string `json:"postId"`
UserName string `json:"usernName"`
UserId string `json:"userId"`
}
type PostRequest struct {
Post Post `json:"post"`
SessionID string `json:"sessionId"`
}
type VoteRequest struct {
Id string `json:"id"`
SessionID string `json:"sessionId"`
}
type LoginRequest struct {
Username string `json:"username"`
Password string `json:"password"`
}
type CommentRequest struct {
Comment Comment `json:"comment"`
SessionID string `json:"sessionId"`
}
type CommentsRequest struct {
PostId string `json:"postId"`
}
type PostsRequest struct {
Min int32 `json:"min"`
Max int32 `json:"max"`
Limit int32 `json:"limit"`
Sub string `json:"sub"`
}
// Endpoints
// upvote or downvote a post or a comment
func vote(w http.ResponseWriter, req *http.Request, upvote bool, isComment bool, t VoteRequest) error {
if t.Id == "" {
return fmt.Errorf("missing post id")
}
table := "posts"
if isComment {
table = "comments"
}
rsp, err := client.DbService.Read(&db.ReadRequest{
Table: table,
Id: t.Id,
})
if err != nil {
return err
}
if len(rsp.Records) == 0 {
return fmt.Errorf("post or comment not found")
}
// auth
sessionRsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
return err
}
if sessionRsp.Session.UserId == "" {
return fmt.Errorf("user id not found")
}
// prevent double votes
checkTable := table + "votecheck"
checkId := t.Id + sessionRsp.Session.UserId
checkRsp, err := client.DbService.Read(&db.ReadRequest{
Table: checkTable,
Id: checkId,
})
mod := isMod(sessionRsp.Session.UserId, mods)
if err == nil && (checkRsp != nil && len(checkRsp.Records) > 0) {
if !mod {
return fmt.Errorf("already voted")
}
}
val := float64(1)
if mod {
rand.Seed(time.Now().UnixNano())
val = float64(rand.Intn(17-4) + 4)
}
if !mod {
_, err = client.DbService.Create(&db.CreateRequest{
Table: checkTable,
Record: map[string]interface{}{
"id": checkId,
},
})
if err != nil {
return err
}
}
obj := rsp.Records[0]
key := "upvotes"
if !upvote {
key = "downvotes"
}
if _, ok := obj["upvotes"].(float64); !ok {
obj["upvotes"] = float64(0)
}
if _, ok := obj["downvotes"].(float64); !ok {
obj["downvotes"] = float64(0)
}
obj[key] = obj[key].(float64) + val
obj["score"] = obj["upvotes"].(float64) - obj["downvotes"].(float64)
_, err = client.DbService.Update(&db.UpdateRequest{
Table: table,
Id: t.Id,
Record: obj,
})
return err
}
func isMod(userId, s string) bool {
arr := strings.Split(s, ",")
for _, v := range arr {
if v == userId {
return true
}
}
return false
}
func voteWrapper(upvote bool, isComment bool) func(w http.ResponseWriter, req *http.Request) {
return func(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t VoteRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
err = vote(w, req, upvote, isComment, t)
respond(w, nil, err)
}
}
func login(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t LoginRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, err, err)
return
}
_, err = client.UserService.Read(&user.ReadRequest{
Username: t.Username,
})
if err != nil {
createRsp, err := client.UserService.Create(&user.CreateRequest{
Username: t.Username,
Email: t.Username + "@" + t.Username + ".com",
Password: t.Password,
})
if err != nil {
respond(w, createRsp, err)
return
}
}
logRsp, err := client.UserService.Login(&user.LoginRequest{
Username: t.Username,
Password: t.Password,
})
respond(w, logRsp, err)
}
func readSession(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t user.ReadSessionRequest
err := decoder.Decode(&t)
if err != nil {
fmt.Fprintf(w, fmt.Sprintf("%v", err.Error()))
}
rsp, err := client.UserService.ReadSession(&t)
if err != nil {
respond(w, rsp, err)
return
}
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: rsp.Session.UserId,
})
respond(w, map[string]interface{}{
"session": rsp.Session,
"account": readRsp.Account, |
func post(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t PostRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
if t.Post.Sub == "" || t.Post.Title == "" {
respond(w, nil, fmt.Errorf("both title and sub are required"))
return
}
if t.Post.Url == "" && t.Post.Content == "" {
respond(w, nil, fmt.Errorf("url or content required"))
return
}
if len(t.Post.Title) > 200 || len(t.Post.Url) > 200 {
respond(w, nil, fmt.Errorf("post url or title too long"))
return
}
if len(t.Post.Sub) > 50 {
respond(w, nil, fmt.Errorf("post sub too long"))
return
}
if len(t.Post.Content) > 3000 {
respond(w, nil, fmt.Errorf("post content too long"))
return
}
userID := ""
userName := ""
if t.SessionID != "" {
rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
respond(w, rsp, err)
return
}
userID = rsp.Session.UserId
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: userID,
})
if err != nil {
respond(w, rsp, err)
return
}
userName = readRsp.Account.Username
}
client.DbService.Create(&db.CreateRequest{
Table: "posts",
Record: map[string]interface{}{
"id": uuid.NewV4(),
"userId": userID,
"userName": userName,
"content": t.Post.Content,
"url": t.Post.Url,
"upvotes": float64(0),
"downvotes": float64(0),
"score": float64(0),
"sub": t.Post.Sub,
"title": t.Post.Title,
"created": time.Now(),
},
})
}
func comment(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t CommentRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
userID := ""
userName := ""
// get user if available
if t.SessionID != "" {
rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
respond(w, rsp, err)
return
}
userID = rsp.Session.UserId
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: userID,
})
if err != nil {
respond(w, rsp, err)
return
}
userName = readRsp.Account.Username
}
if t.Comment.PostId == "" {
respond(w, nil, fmt.Errorf("no post id"))
return
}
// get post to update comment counter
readRsp, err := client.DbService.Read(&db.ReadRequest{
Table: "posts",
Id: t.Comment.PostId,
})
if err != nil {
respond(w, nil, err)
return
}
if readRsp == nil || len(readRsp.Records) == 0 {
respond(w, nil, fmt.Errorf("post not found"))
return
}
if len(readRsp.Records) > 1 {
respond(w, nil, fmt.Errorf("multiple posts found"))
return
}
// create comment
_, err = client.DbService.Create(&db.CreateRequest{
Table: "comments",
Record: map[string]interface{}{
"id": uuid.NewV4(),
"userId": userID,
"userName": userName,
"content": t.Comment.Content,
"parent": t.Comment.Parent,
"postId": t.Comment.PostId,
"upvotes": float64(0),
"downvotes": float64(0),
"score": float64(0),
"created": time.Now(),
},
})
if err != nil {
respond(w, nil, err)
return
}
// update counter
oldCount, ok := readRsp.Records[0]["commentCount"].(float64)
if !ok {
oldCount = 0
}
oldCount++
readRsp.Records[0]["commentCount"] = oldCount
_, err = client.DbService.Update(&db.UpdateRequest{
Table: "posts",
Id: t.Comment.PostId,
Record: readRsp.Records[0],
})
respond(w, nil, err)
}
func score(m map[string]interface{}) float64 {
score, ok := m["score"].(float64)
if !ok {
return -10000
}
sign := float64(1)
if score == 0 {
sign = 0
}
if score < 0 {
sign = -1
}
order := math.Log10(math.Max(math.Abs(score), 1))
var created int64
switch v := m["created"].(type) {
case string:
t, err := time.Parse(time.RFC3339, v)
if err != nil {
fmt.Println(err)
}
created = t.Unix()
case float64:
created = int64(v)
case int64:
created = v
}
seconds := created - 1134028003
return sign*order + float64(seconds)/45000
}
func posts(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
var t PostsRequest
decoder := json.NewDecoder(req.Body)
err := decoder.Decode(&t)
r := &db.ReadRequest{
Table: "posts",
Order: "desc",
OrderBy: "created",
Limit: 1000,
}
query := ""
// @TODO this should be != 0 but that causes an empty new page
if t.Min > 0 {
query += "score >= " + fmt.Sprintf("%v", t.Min)
}
if t.Max > 0 {
if query != "" {
query += " and "
}
query += "score <= " + fmt.Sprintf("%v", t.Max)
}
if t.Sub != "all" && t.Sub != "" {
if query != "" {
query += " and "
}
query += fmt.Sprintf("sub == '%v'", t.Sub)
}
if query != "" {
r.Query = query
}
rsp, err := client.DbService.Read(r)
sort.Slice(rsp.Records, func(i, j int) bool {
return score(rsp.Records[i]) > score(rsp.Records[j])
})
respond(w, rsp, err)
}
func comments(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
var t CommentsRequest
decoder := json.NewDecoder(req.Body)
err := decoder.Decode(&t)
if err != nil {
fmt.Fprintf(w, fmt.Sprintf("%v", err.Error()))
}
rsp, err := client.DbService.Read(&db.ReadRequest{
Table: "comments",
Order: "desc",
Query: "postId == '" + t.PostId + "'",
OrderBy: "created",
})
sort.Slice(rsp.Records, func(i, j int) bool {
return score(rsp.Records[i]) > score(rsp.Records[j])
})
respond(w, rsp, err)
}
// Utils
func cors(w http.ResponseWriter, req *http.Request) bool {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "*")
w.Header().Set("Access-Control-Allow-Headers", "*")
w.Header().Set("Content-Type", "application/json")
if req.Method == "OPTIONS" {
w.WriteHeader(http.StatusOK)
return true
}
return false
}
func respond(w http.ResponseWriter, i interface{}, err error) {
if err != nil {
w.WriteHeader(500)
fmt.Println(err)
}
if i == nil {
i = map[string]interface{}{}
}
if err != nil {
i = map[string]interface{}{
"error": err.Error(),
}
}
bs, _ := json.Marshal(i)
fmt.Fprintf(w, fmt.Sprintf("%v", string(bs)))
}
func main() {
http.HandleFunc("/upvotePost", voteWrapper(true, false))
http.HandleFunc("/downvotePost", voteWrapper(false, false))
http.HandleFunc("/upvoteComment", voteWrapper(true, true))
http.HandleFunc("/downvoteComment", voteWrapper(false, true))
http.HandleFunc("/posts", posts)
http.HandleFunc("/post", post)
http.HandleFunc("/comment", comment)
http.HandleFunc("/comments", comments)
http.HandleFunc("/login", login)
http.HandleFunc("/readSession", readSession)
http.ListenAndServe(":8090", nil)
} | }, err)
} | random_line_split |
main.go | package main
import (
"encoding/json"
"fmt"
"math"
"math/rand"
"net/http"
"os"
"sort"
"strings"
"time"
m3o "github.com/micro/services/clients/go"
db "github.com/micro/services/clients/go/db"
user "github.com/micro/services/clients/go/user"
uuid "github.com/satori/go.uuid"
)
var client = m3o.NewClient(os.Getenv("MICRO_API_TOKEN"))
// csv of user ids
var mods = os.Getenv("TIREDD_MODS")
// Types
type Post struct {
Id string `json:"id"`
UserId string `json:"userId"`
UserName string `json:"userName"`
Content string `json:"content"`
Created string `json:"created"`
Upvotes float32 `json:"upvotes"`
Downvotes float32 `json:"downvotes"`
Score float32 `json:"score"`
Title string `json:"title"`
Url string `json:"url"`
Sub string `json:"sub"`
CommentCount float32 `json:"commentCount"`
}
type Comment struct {
Content string `json:"content"`
Parent string `json:"sub"`
Upvotes float32 `json:"upvotes"`
Downvotes float32 `json:"downvotes"`
PostId string `json:"postId"`
UserName string `json:"usernName"`
UserId string `json:"userId"`
}
type PostRequest struct {
Post Post `json:"post"`
SessionID string `json:"sessionId"`
}
type VoteRequest struct {
Id string `json:"id"`
SessionID string `json:"sessionId"`
}
type LoginRequest struct {
Username string `json:"username"`
Password string `json:"password"`
}
type CommentRequest struct {
Comment Comment `json:"comment"`
SessionID string `json:"sessionId"`
}
type CommentsRequest struct {
PostId string `json:"postId"`
}
type PostsRequest struct {
Min int32 `json:"min"`
Max int32 `json:"max"`
Limit int32 `json:"limit"`
Sub string `json:"sub"`
}
// Endpoints
// upvote or downvote a post or a comment
func vote(w http.ResponseWriter, req *http.Request, upvote bool, isComment bool, t VoteRequest) error {
if t.Id == "" {
return fmt.Errorf("missing post id")
}
table := "posts"
if isComment {
table = "comments"
}
rsp, err := client.DbService.Read(&db.ReadRequest{
Table: table,
Id: t.Id,
})
if err != nil {
return err
}
if len(rsp.Records) == 0 {
return fmt.Errorf("post or comment not found")
}
// auth
sessionRsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
return err
}
if sessionRsp.Session.UserId == "" {
return fmt.Errorf("user id not found")
}
// prevent double votes
checkTable := table + "votecheck"
checkId := t.Id + sessionRsp.Session.UserId
checkRsp, err := client.DbService.Read(&db.ReadRequest{
Table: checkTable,
Id: checkId,
})
mod := isMod(sessionRsp.Session.UserId, mods)
if err == nil && (checkRsp != nil && len(checkRsp.Records) > 0) {
if !mod {
return fmt.Errorf("already voted")
}
}
val := float64(1)
if mod {
rand.Seed(time.Now().UnixNano())
val = float64(rand.Intn(17-4) + 4)
}
if !mod {
_, err = client.DbService.Create(&db.CreateRequest{
Table: checkTable,
Record: map[string]interface{}{
"id": checkId,
},
})
if err != nil {
return err
}
}
obj := rsp.Records[0]
key := "upvotes"
if !upvote {
key = "downvotes"
}
if _, ok := obj["upvotes"].(float64); !ok {
obj["upvotes"] = float64(0)
}
if _, ok := obj["downvotes"].(float64); !ok {
obj["downvotes"] = float64(0)
}
obj[key] = obj[key].(float64) + val
obj["score"] = obj["upvotes"].(float64) - obj["downvotes"].(float64)
_, err = client.DbService.Update(&db.UpdateRequest{
Table: table,
Id: t.Id,
Record: obj,
})
return err
}
func isMod(userId, s string) bool {
arr := strings.Split(s, ",")
for _, v := range arr {
if v == userId {
return true
}
}
return false
}
func voteWrapper(upvote bool, isComment bool) func(w http.ResponseWriter, req *http.Request) {
return func(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t VoteRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
err = vote(w, req, upvote, isComment, t)
respond(w, nil, err)
}
}
func login(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t LoginRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, err, err)
return
}
_, err = client.UserService.Read(&user.ReadRequest{
Username: t.Username,
})
if err != nil {
createRsp, err := client.UserService.Create(&user.CreateRequest{
Username: t.Username,
Email: t.Username + "@" + t.Username + ".com",
Password: t.Password,
})
if err != nil {
respond(w, createRsp, err)
return
}
}
logRsp, err := client.UserService.Login(&user.LoginRequest{
Username: t.Username,
Password: t.Password,
})
respond(w, logRsp, err)
}
func readSession(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t user.ReadSessionRequest
err := decoder.Decode(&t)
if err != nil {
fmt.Fprintf(w, fmt.Sprintf("%v", err.Error()))
}
rsp, err := client.UserService.ReadSession(&t)
if err != nil {
respond(w, rsp, err)
return
}
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: rsp.Session.UserId,
})
respond(w, map[string]interface{}{
"session": rsp.Session,
"account": readRsp.Account,
}, err)
}
func post(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t PostRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
if t.Post.Sub == "" || t.Post.Title == "" {
respond(w, nil, fmt.Errorf("both title and sub are required"))
return
}
if t.Post.Url == "" && t.Post.Content == "" {
respond(w, nil, fmt.Errorf("url or content required"))
return
}
if len(t.Post.Title) > 200 || len(t.Post.Url) > 200 {
respond(w, nil, fmt.Errorf("post url or title too long"))
return
}
if len(t.Post.Sub) > 50 {
respond(w, nil, fmt.Errorf("post sub too long"))
return
}
if len(t.Post.Content) > 3000 {
respond(w, nil, fmt.Errorf("post content too long"))
return
}
userID := ""
userName := ""
if t.SessionID != "" {
rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
respond(w, rsp, err)
return
}
userID = rsp.Session.UserId
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: userID,
})
if err != nil {
respond(w, rsp, err)
return
}
userName = readRsp.Account.Username
}
client.DbService.Create(&db.CreateRequest{
Table: "posts",
Record: map[string]interface{}{
"id": uuid.NewV4(),
"userId": userID,
"userName": userName,
"content": t.Post.Content,
"url": t.Post.Url,
"upvotes": float64(0),
"downvotes": float64(0),
"score": float64(0),
"sub": t.Post.Sub,
"title": t.Post.Title,
"created": time.Now(),
},
})
}
func comment(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t CommentRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
userID := ""
userName := ""
// get user if available
if t.SessionID != "" {
rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
respond(w, rsp, err)
return
}
userID = rsp.Session.UserId
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: userID,
})
if err != nil {
respond(w, rsp, err)
return
}
userName = readRsp.Account.Username
}
if t.Comment.PostId == "" {
respond(w, nil, fmt.Errorf("no post id"))
return
}
// get post to update comment counter
readRsp, err := client.DbService.Read(&db.ReadRequest{
Table: "posts",
Id: t.Comment.PostId,
})
if err != nil {
respond(w, nil, err)
return
}
if readRsp == nil || len(readRsp.Records) == 0 {
respond(w, nil, fmt.Errorf("post not found"))
return
}
if len(readRsp.Records) > 1 {
respond(w, nil, fmt.Errorf("multiple posts found"))
return
}
// create comment
_, err = client.DbService.Create(&db.CreateRequest{
Table: "comments",
Record: map[string]interface{}{
"id": uuid.NewV4(),
"userId": userID,
"userName": userName,
"content": t.Comment.Content,
"parent": t.Comment.Parent,
"postId": t.Comment.PostId,
"upvotes": float64(0),
"downvotes": float64(0),
"score": float64(0),
"created": time.Now(),
},
})
if err != nil {
respond(w, nil, err)
return
}
// update counter
oldCount, ok := readRsp.Records[0]["commentCount"].(float64)
if !ok {
oldCount = 0
}
oldCount++
readRsp.Records[0]["commentCount"] = oldCount
_, err = client.DbService.Update(&db.UpdateRequest{
Table: "posts",
Id: t.Comment.PostId,
Record: readRsp.Records[0],
})
respond(w, nil, err)
}
func score(m map[string]interface{}) float64 {
score, ok := m["score"].(float64)
if !ok {
return -10000
}
sign := float64(1)
if score == 0 {
sign = 0
}
if score < 0 |
order := math.Log10(math.Max(math.Abs(score), 1))
var created int64
switch v := m["created"].(type) {
case string:
t, err := time.Parse(time.RFC3339, v)
if err != nil {
fmt.Println(err)
}
created = t.Unix()
case float64:
created = int64(v)
case int64:
created = v
}
seconds := created - 1134028003
return sign*order + float64(seconds)/45000
}
func posts(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
var t PostsRequest
decoder := json.NewDecoder(req.Body)
err := decoder.Decode(&t)
r := &db.ReadRequest{
Table: "posts",
Order: "desc",
OrderBy: "created",
Limit: 1000,
}
query := ""
// @TODO this should be != 0 but that causes an empty new page
if t.Min > 0 {
query += "score >= " + fmt.Sprintf("%v", t.Min)
}
if t.Max > 0 {
if query != "" {
query += " and "
}
query += "score <= " + fmt.Sprintf("%v", t.Max)
}
if t.Sub != "all" && t.Sub != "" {
if query != "" {
query += " and "
}
query += fmt.Sprintf("sub == '%v'", t.Sub)
}
if query != "" {
r.Query = query
}
rsp, err := client.DbService.Read(r)
sort.Slice(rsp.Records, func(i, j int) bool {
return score(rsp.Records[i]) > score(rsp.Records[j])
})
respond(w, rsp, err)
}
func comments(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
var t CommentsRequest
decoder := json.NewDecoder(req.Body)
err := decoder.Decode(&t)
if err != nil {
fmt.Fprintf(w, fmt.Sprintf("%v", err.Error()))
}
rsp, err := client.DbService.Read(&db.ReadRequest{
Table: "comments",
Order: "desc",
Query: "postId == '" + t.PostId + "'",
OrderBy: "created",
})
sort.Slice(rsp.Records, func(i, j int) bool {
return score(rsp.Records[i]) > score(rsp.Records[j])
})
respond(w, rsp, err)
}
// Utils
func cors(w http.ResponseWriter, req *http.Request) bool {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "*")
w.Header().Set("Access-Control-Allow-Headers", "*")
w.Header().Set("Content-Type", "application/json")
if req.Method == "OPTIONS" {
w.WriteHeader(http.StatusOK)
return true
}
return false
}
func respond(w http.ResponseWriter, i interface{}, err error) {
if err != nil {
w.WriteHeader(500)
fmt.Println(err)
}
if i == nil {
i = map[string]interface{}{}
}
if err != nil {
i = map[string]interface{}{
"error": err.Error(),
}
}
bs, _ := json.Marshal(i)
fmt.Fprintf(w, fmt.Sprintf("%v", string(bs)))
}
func main() {
http.HandleFunc("/upvotePost", voteWrapper(true, false))
http.HandleFunc("/downvotePost", voteWrapper(false, false))
http.HandleFunc("/upvoteComment", voteWrapper(true, true))
http.HandleFunc("/downvoteComment", voteWrapper(false, true))
http.HandleFunc("/posts", posts)
http.HandleFunc("/post", post)
http.HandleFunc("/comment", comment)
http.HandleFunc("/comments", comments)
http.HandleFunc("/login", login)
http.HandleFunc("/readSession", readSession)
http.ListenAndServe(":8090", nil)
}
| {
sign = -1
} | conditional_block |
main.go | package main
import (
"encoding/json"
"fmt"
"math"
"math/rand"
"net/http"
"os"
"sort"
"strings"
"time"
m3o "github.com/micro/services/clients/go"
db "github.com/micro/services/clients/go/db"
user "github.com/micro/services/clients/go/user"
uuid "github.com/satori/go.uuid"
)
var client = m3o.NewClient(os.Getenv("MICRO_API_TOKEN"))
// csv of user ids
var mods = os.Getenv("TIREDD_MODS")
// Types
// Post is a submitted link or text post, persisted in the "posts" table.
// Vote counts are float32 because the DB service round-trips numbers as
// JSON floats.
type Post struct {
	Id string `json:"id"`
	UserId string `json:"userId"`
	UserName string `json:"userName"`
	Content string `json:"content"`
	Created string `json:"created"`
	Upvotes float32 `json:"upvotes"`
	Downvotes float32 `json:"downvotes"`
	// Score is maintained as Upvotes - Downvotes by the vote handler.
	Score float32 `json:"score"`
	Title string `json:"title"`
	Url string `json:"url"`
	// Sub is the community/topic name this post belongs to.
	Sub string `json:"sub"`
	CommentCount float32 `json:"commentCount"`
}
// Comment is a comment attached to a Post (stored in the "comments" table).
type Comment struct {
	Content string `json:"content"`
	// Parent is the id of the parent comment for threading.
	// NOTE(review): the json tag is "sub", which looks like a copy/paste
	// slip from Post — confirm against the front-end before changing the
	// wire format.
	Parent string `json:"sub"`
	Upvotes float32 `json:"upvotes"`
	Downvotes float32 `json:"downvotes"`
	PostId string `json:"postId"`
	// NOTE(review): tag "usernName" appears to be a typo for "userName";
	// fixing it would break existing clients — verify first.
	UserName string `json:"usernName"`
	UserId string `json:"userId"`
}
// PostRequest is the body of POST /post: the post to create plus the
// caller's session for attribution.
type PostRequest struct {
	Post Post `json:"post"`
	SessionID string `json:"sessionId"`
}
// VoteRequest is the body of the up/downvote endpoints; Id refers to a
// post or comment depending on the route.
type VoteRequest struct {
	Id string `json:"id"`
	SessionID string `json:"sessionId"`
}
// LoginRequest is the body of POST /login.
type LoginRequest struct {
	Username string `json:"username"`
	Password string `json:"password"`
}
// CommentRequest is the body of POST /comment.
type CommentRequest struct {
	Comment Comment `json:"comment"`
	SessionID string `json:"sessionId"`
}
// CommentsRequest selects the post whose comments are listed.
type CommentsRequest struct {
	PostId string `json:"postId"`
}
// PostsRequest filters the post listing by score range and sub.
type PostsRequest struct {
	Min int32 `json:"min"`
	Max int32 `json:"max"`
	Limit int32 `json:"limit"`
	Sub string `json:"sub"`
}
// Endpoints
// upvote or downvote a post or a comment
// vote applies an up/down vote to a post or comment on behalf of the
// session's user. Regular users may vote once per item (tracked in a
// "<table>votecheck" record keyed by item-id + user-id); moderators may
// vote repeatedly and each vote counts a random 4-16 points.
// Returns an error for the caller (voteWrapper) to report.
func vote(w http.ResponseWriter, req *http.Request, upvote bool, isComment bool, t VoteRequest) error {
	if t.Id == "" {
		return fmt.Errorf("missing post id")
	}
	table := "posts"
	if isComment {
		table = "comments"
	}
	// Load the item being voted on.
	rsp, err := client.DbService.Read(&db.ReadRequest{
		Table: table,
		Id:    t.Id,
	})
	if err != nil {
		return err
	}
	if len(rsp.Records) == 0 {
		return fmt.Errorf("post or comment not found")
	}
	// auth
	sessionRsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
		SessionId: t.SessionID,
	})
	if err != nil {
		return err
	}
	if sessionRsp.Session.UserId == "" {
		return fmt.Errorf("user id not found")
	}
	// prevent double votes
	checkTable := table + "votecheck"
	checkId := t.Id + sessionRsp.Session.UserId
	// Read error here is deliberately tolerated: a missing check record
	// simply means this user hasn't voted yet.
	checkRsp, err := client.DbService.Read(&db.ReadRequest{
		Table: checkTable,
		Id:    checkId,
	})
	mod := isMod(sessionRsp.Session.UserId, mods)
	if err == nil && (checkRsp != nil && len(checkRsp.Records) > 0) {
		if !mod {
			return fmt.Errorf("already voted")
		}
	}
	val := float64(1)
	if mod {
		// Moderator votes carry random extra weight (4..16 inclusive).
		rand.Seed(time.Now().UnixNano())
		val = float64(rand.Intn(17-4) + 4)
	}
	if !mod {
		// Record that this user has voted on this item.
		_, err = client.DbService.Create(&db.CreateRequest{
			Table: checkTable,
			Record: map[string]interface{}{
				"id": checkId,
			},
		})
		if err != nil {
			return err
		}
	}
	obj := rsp.Records[0]
	key := "upvotes"
	if !upvote {
		key = "downvotes"
	}
	// Older records may lack counters; default them to zero so the type
	// assertions below cannot panic.
	if _, ok := obj["upvotes"].(float64); !ok {
		obj["upvotes"] = float64(0)
	}
	if _, ok := obj["downvotes"].(float64); !ok {
		obj["downvotes"] = float64(0)
	}
	obj[key] = obj[key].(float64) + val
	obj["score"] = obj["upvotes"].(float64) - obj["downvotes"].(float64)
	_, err = client.DbService.Update(&db.UpdateRequest{
		Table:  table,
		Id:     t.Id,
		Record: obj,
	})
	return err
}
// isMod reports whether userId appears in s, a comma-separated list of
// moderator user ids.
func isMod(userId, s string) bool {
	ids := strings.Split(s, ",")
	for i := 0; i < len(ids); i++ {
		if ids[i] == userId {
			return true
		}
	}
	return false
}
// voteWrapper builds an http handler for one of the four vote endpoints,
// fixing the vote direction and target kind (post vs comment) at
// registration time.
func voteWrapper(upvote bool, isComment bool) func(w http.ResponseWriter, req *http.Request) {
	return func(w http.ResponseWriter, req *http.Request) {
		if cors(w, req) {
			return
		}
		var body VoteRequest
		if err := json.NewDecoder(req.Body).Decode(&body); err != nil {
			respond(w, nil, err)
			return
		}
		respond(w, nil, vote(w, req, upvote, isComment, body))
	}
}
// login authenticates a user, transparently registering the account on
// first use (a Read failure is treated as "user does not exist").
func login(w http.ResponseWriter, req *http.Request) {
	if cors(w, req) {
		return
	}
	decoder := json.NewDecoder(req.Body)
	var t LoginRequest
	err := decoder.Decode(&t)
	if err != nil {
		respond(w, err, err)
		return
	}
	_, err = client.UserService.Read(&user.ReadRequest{
		Username: t.Username,
	})
	if err != nil {
		// Unknown user: create it with a synthesized email, then fall
		// through to the normal login below.
		createRsp, err := client.UserService.Create(&user.CreateRequest{
			Username: t.Username,
			Email:    t.Username + "@" + t.Username + ".com",
			Password: t.Password,
		})
		if err != nil {
			respond(w, createRsp, err)
			return
		}
	}
	logRsp, err := client.UserService.Login(&user.LoginRequest{
		Username: t.Username,
		Password: t.Password,
	})
	respond(w, logRsp, err)
}
// readSession resolves a session id to the session plus the owning user
// account, responding with {"session": ..., "account": ...}.
func readSession(w http.ResponseWriter, req *http.Request) {
	if cors(w, req) {
		return
	}
	decoder := json.NewDecoder(req.Body)
	var t user.ReadSessionRequest
	err := decoder.Decode(&t)
	if err != nil {
		// Fix: previously the error was written to w without returning,
		// producing a second, garbled response below.
		respond(w, nil, err)
		return
	}
	rsp, err := client.UserService.ReadSession(&t)
	if err != nil {
		respond(w, rsp, err)
		return
	}
	readRsp, err := client.UserService.Read(&user.ReadRequest{
		Id: rsp.Session.UserId,
	})
	if err != nil {
		// Fix: guard before dereferencing readRsp.Account — on error
		// readRsp may be nil, which previously panicked.
		respond(w, nil, err)
		return
	}
	respond(w, map[string]interface{}{
		"session": rsp.Session,
		"account": readRsp.Account,
	}, nil)
}
// post validates and creates a new post for the (optional) session user.
// Responds with an empty JSON object on success or {"error": ...} on
// failure.
func post(w http.ResponseWriter, req *http.Request) {
	if cors(w, req) {
		return
	}
	decoder := json.NewDecoder(req.Body)
	var t PostRequest
	err := decoder.Decode(&t)
	if err != nil {
		respond(w, nil, err)
		return
	}
	// Input validation: required fields and length limits.
	if t.Post.Sub == "" || t.Post.Title == "" {
		respond(w, nil, fmt.Errorf("both title and sub are required"))
		return
	}
	if t.Post.Url == "" && t.Post.Content == "" {
		respond(w, nil, fmt.Errorf("url or content required"))
		return
	}
	if len(t.Post.Title) > 200 || len(t.Post.Url) > 200 {
		respond(w, nil, fmt.Errorf("post url or title too long"))
		return
	}
	if len(t.Post.Sub) > 50 {
		respond(w, nil, fmt.Errorf("post sub too long"))
		return
	}
	if len(t.Post.Content) > 3000 {
		respond(w, nil, fmt.Errorf("post content too long"))
		return
	}
	// Attribute the post to the session's user when a session is given;
	// anonymous posts are allowed with empty user fields.
	userID := ""
	userName := ""
	if t.SessionID != "" {
		rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
			SessionId: t.SessionID,
		})
		if err != nil {
			respond(w, rsp, err)
			return
		}
		userID = rsp.Session.UserId
		readRsp, err := client.UserService.Read(&user.ReadRequest{
			Id: userID,
		})
		if err != nil {
			respond(w, rsp, err)
			return
		}
		userName = readRsp.Account.Username
	}
	// Fix: the Create result was previously discarded and no response was
	// ever written; now errors are reported and success returns "{}".
	_, err = client.DbService.Create(&db.CreateRequest{
		Table: "posts",
		Record: map[string]interface{}{
			"id":        uuid.NewV4(),
			"userId":    userID,
			"userName":  userName,
			"content":   t.Post.Content,
			"url":       t.Post.Url,
			"upvotes":   float64(0),
			"downvotes": float64(0),
			"score":     float64(0),
			"sub":       t.Post.Sub,
			"title":     t.Post.Title,
			"created":   time.Now(),
		},
	})
	respond(w, nil, err)
}
// comment creates a comment on an existing post and bumps that post's
// commentCount. The two DB writes are not transactional, so a failure
// between them can leave the counter stale.
func comment(w http.ResponseWriter, req *http.Request) {
	if cors(w, req) {
		return
	}
	decoder := json.NewDecoder(req.Body)
	var t CommentRequest
	err := decoder.Decode(&t)
	if err != nil {
		respond(w, nil, err)
		return
	}
	userID := ""
	userName := ""
	// get user if available
	if t.SessionID != "" {
		rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
			SessionId: t.SessionID,
		})
		if err != nil {
			respond(w, rsp, err)
			return
		}
		userID = rsp.Session.UserId
		readRsp, err := client.UserService.Read(&user.ReadRequest{
			Id: userID,
		})
		if err != nil {
			respond(w, rsp, err)
			return
		}
		userName = readRsp.Account.Username
	}
	if t.Comment.PostId == "" {
		respond(w, nil, fmt.Errorf("no post id"))
		return
	}
	// get post to update comment counter
	readRsp, err := client.DbService.Read(&db.ReadRequest{
		Table: "posts",
		Id:    t.Comment.PostId,
	})
	if err != nil {
		respond(w, nil, err)
		return
	}
	if readRsp == nil || len(readRsp.Records) == 0 {
		respond(w, nil, fmt.Errorf("post not found"))
		return
	}
	if len(readRsp.Records) > 1 {
		respond(w, nil, fmt.Errorf("multiple posts found"))
		return
	}
	// create comment
	_, err = client.DbService.Create(&db.CreateRequest{
		Table: "comments",
		Record: map[string]interface{}{
			"id":        uuid.NewV4(),
			"userId":    userID,
			"userName":  userName,
			"content":   t.Comment.Content,
			"parent":    t.Comment.Parent,
			"postId":    t.Comment.PostId,
			"upvotes":   float64(0),
			"downvotes": float64(0),
			"score":     float64(0),
			"created":   time.Now(),
		},
	})
	if err != nil {
		respond(w, nil, err)
		return
	}
	// update counter
	// Missing/odd-typed counter defaults to 0 so the increment is safe on
	// old records.
	oldCount, ok := readRsp.Records[0]["commentCount"].(float64)
	if !ok {
		oldCount = 0
	}
	oldCount++
	readRsp.Records[0]["commentCount"] = oldCount
	_, err = client.DbService.Update(&db.UpdateRequest{
		Table:  "posts",
		Id:     t.Comment.PostId,
		Record: readRsp.Records[0],
	})
	respond(w, nil, err)
}
func score(m map[string]interface{}) float64 {
score, ok := m["score"].(float64)
if !ok {
return -10000
}
sign := float64(1)
if score == 0 {
sign = 0
}
if score < 0 {
sign = -1
}
order := math.Log10(math.Max(math.Abs(score), 1))
var created int64
switch v := m["created"].(type) {
case string:
t, err := time.Parse(time.RFC3339, v)
if err != nil {
fmt.Println(err)
}
created = t.Unix()
case float64:
created = int64(v)
case int64:
created = v
}
seconds := created - 1134028003
return sign*order + float64(seconds)/45000
}
// posts lists posts, optionally filtered by score range and sub, sorted
// by the "hot" score() ranking.
func posts(w http.ResponseWriter, req *http.Request) {
	if cors(w, req) {
		return
	}
	var t PostsRequest
	decoder := json.NewDecoder(req.Body)
	// Decode errors are tolerated on purpose: an empty body (plain GET)
	// yields the zero-value request, i.e. the default "all" listing.
	_ = decoder.Decode(&t)
	r := &db.ReadRequest{
		Table:   "posts",
		Order:   "desc",
		OrderBy: "created",
		Limit:   1000,
	}
	query := ""
	// @TODO this should be != 0 but that causes an empty new page
	if t.Min > 0 {
		query += "score >= " + fmt.Sprintf("%v", t.Min)
	}
	if t.Max > 0 {
		if query != "" {
			query += " and "
		}
		query += "score <= " + fmt.Sprintf("%v", t.Max)
	}
	if t.Sub != "all" && t.Sub != "" {
		if query != "" {
			query += " and "
		}
		query += fmt.Sprintf("sub == '%v'", t.Sub)
	}
	if query != "" {
		r.Query = query
	}
	rsp, err := client.DbService.Read(r)
	if err != nil {
		// Fix: previously a failed read left rsp nil and the sort below
		// panicked on rsp.Records.
		respond(w, nil, err)
		return
	}
	sort.Slice(rsp.Records, func(i, j int) bool {
		return score(rsp.Records[i]) > score(rsp.Records[j])
	})
	respond(w, rsp, nil)
}
// comments lists the comments of one post, sorted by the "hot" score()
// ranking.
// NOTE: the query is built by string concatenation; PostId comes from the
// client and should not contain quotes — consider escaping upstream.
func comments(w http.ResponseWriter, req *http.Request) {
	if cors(w, req) {
		return
	}
	var t CommentsRequest
	decoder := json.NewDecoder(req.Body)
	err := decoder.Decode(&t)
	if err != nil {
		// Fix: previously the error was written raw without returning,
		// then the handler carried on with an empty PostId.
		respond(w, nil, err)
		return
	}
	rsp, err := client.DbService.Read(&db.ReadRequest{
		Table:   "comments",
		Order:   "desc",
		Query:   "postId == '" + t.PostId + "'",
		OrderBy: "created",
	})
	if err != nil {
		// Fix: guard against nil rsp before sorting (previous panic).
		respond(w, nil, err)
		return
	}
	sort.Slice(rsp.Records, func(i, j int) bool {
		return score(rsp.Records[i]) > score(rsp.Records[j])
	})
	respond(w, rsp, nil)
}
// Utils
func cors(w http.ResponseWriter, req *http.Request) bool {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "*")
w.Header().Set("Access-Control-Allow-Headers", "*")
w.Header().Set("Content-Type", "application/json")
if req.Method == "OPTIONS" {
w.WriteHeader(http.StatusOK)
return true
}
return false
}
func | (w http.ResponseWriter, i interface{}, err error) {
if err != nil {
w.WriteHeader(500)
fmt.Println(err)
}
if i == nil {
i = map[string]interface{}{}
}
if err != nil {
i = map[string]interface{}{
"error": err.Error(),
}
}
bs, _ := json.Marshal(i)
fmt.Fprintf(w, fmt.Sprintf("%v", string(bs)))
}
// main wires the HTTP routes and serves on :8090.
// NOTE(review): the ListenAndServe error is discarded; a bind failure
// exits silently — consider logging it.
func main() {
	http.HandleFunc("/upvotePost", voteWrapper(true, false))
	http.HandleFunc("/downvotePost", voteWrapper(false, false))
	http.HandleFunc("/upvoteComment", voteWrapper(true, true))
	http.HandleFunc("/downvoteComment", voteWrapper(false, true))
	http.HandleFunc("/posts", posts)
	http.HandleFunc("/post", post)
	http.HandleFunc("/comment", comment)
	http.HandleFunc("/comments", comments)
	http.HandleFunc("/login", login)
	http.HandleFunc("/readSession", readSession)
	http.ListenAndServe(":8090", nil)
}
| respond | identifier_name |
tcp.rs | // "Tifflin" Kernel - Networking Stack
// - By John Hodge (thePowersGang)
//
// Modules/network/tcp.rs
//! Transmission Control Protocol (Layer 4)
use shared_map::SharedMap;
use kernel::sync::Mutex;
use kernel::lib::ring_buffer::{RingBuf,AtomicRingBuf};
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::nic::SparsePacket;
use crate::Address;
const IPV4_PROTO_TCP: u8 = 6;
const MAX_WINDOW_SIZE: u32 = 0x100000; // 4MiB
const DEF_WINDOW_SIZE: u32 = 0x4000; // 16KiB
/// Register the TCP receive handler with the IPv4 layer.
/// Panics if the protocol number is already claimed (boot-time only).
pub fn init()
{
	::ipv4::register_handler(IPV4_PROTO_TCP, rx_handler_v4).unwrap();
}
#[path="tcp-lib/"]
/// Library types just for TCP
mod lib {
pub mod rx_buffer;
}
use self::lib::rx_buffer::RxBuffer;
static CONNECTIONS: SharedMap<Quad, Mutex<Connection>> = SharedMap::new();
static PROTO_CONNECTIONS: SharedMap<Quad, ProtoConnection> = SharedMap::new();
static SERVERS: SharedMap<(Option<Address>,u16), Server> = SharedMap::new();
static S_PORTS: Mutex<PortPool> = Mutex::new(PortPool::new());
/// Find the local source address for the given remote address
/// (via the routing table; `None` when no route exists).
// TODO: Shouldn't this get an interface handle instead?
fn get_outbound_ip_for(addr: &Address) -> Option<Address>
{
	match addr
	{
	Address::Ipv4(addr) => crate::ipv4::route_lookup(crate::ipv4::Address::zero(), *addr).map(|(laddr, _, _)| Address::Ipv4(laddr)),
	}
}
/// Allocate a port for the given local address
/// (currently a single global pool shared by all interfaces).
fn allocate_port(_addr: &Address) -> Option<u16>
{
	// TODO: Could store bitmap against the interface (having a separate bitmap for each interface)
	S_PORTS.lock().allocate()
}
/// Return a dynamically-allocated port to the global pool.
fn release_port(_addr: &Address, idx: u16)
{
	S_PORTS.lock().release(idx)
}
/// IPv4 entry point: wraps the addresses and forwards to the shared handler.
fn rx_handler_v4(int: &::ipv4::Interface, src_addr: ::ipv4::Address, pkt: ::nic::PacketReader)
{
	rx_handler(Address::Ipv4(src_addr), Address::Ipv4(int.addr()), pkt)
}
/// Shared layer-4 receive path: parse and validate the TCP header, then
/// dispatch to (in priority order) an established connection, a
/// proto-connection completing its handshake, or a listening server.
fn rx_handler(src_addr: Address, dest_addr: Address, mut pkt: ::nic::PacketReader)
{
	let pre_header_reader = pkt.clone();
	let hdr = match PktHeader::read(&mut pkt)
		{
		Ok(v) => v,
		Err(_) => {
			log_error!("Undersized packet: Ran out of data reading header");
			return ;
			},
		};
	log_debug!("hdr = {:?}", hdr);
	let hdr_len = hdr.get_header_size();
	// BUGFIX: the comparison was inverted (`<`), which rejected every
	// valid packet (the header is always shorter than the whole segment).
	// Reject only when the claimed header length exceeds the packet.
	if hdr_len > pre_header_reader.remain() {
		log_error!("Undersized or invalid packet: Header length is {} but packet length is {}", hdr_len, pre_header_reader.remain());
		return ;
	}
	// Validate checksum over the pseudo-header + header + payload.
	{
		let packet_len = pre_header_reader.remain();
		// Pseudo header for checksum
		let sum_pseudo = match (src_addr,dest_addr)
			{
			(Address::Ipv4(s), Address::Ipv4(d)) =>
				::ipv4::calculate_checksum([
					// Big endian stores MSB first, so write the high word first
					(s.as_u32() >> 16) as u16, (s.as_u32() >> 0) as u16,
					(d.as_u32() >> 16) as u16, (d.as_u32() >> 0) as u16,
					IPV4_PROTO_TCP as u16, packet_len as u16,
					].iter().copied()),
			};
		let sum_header = hdr.checksum();
		let sum_options_and_data = {
			let mut pkt = pkt.clone();
			let psum_whole = !::ipv4::calculate_checksum( (0 .. (pre_header_reader.remain() - hdr_len) / 2).map(|_| pkt.read_u16n().unwrap()) );
			// Final byte is decoded as if there was a zero after it (so as 0x??00)
			let psum_partial = if pkt.remain() > 0 { (pkt.read_u8().unwrap() as u16) << 8} else { 0 };
			::ipv4::calculate_checksum([psum_whole, psum_partial].iter().copied())
			};
		let sum_total = ::ipv4::calculate_checksum([
			!sum_pseudo, !sum_header, !sum_options_and_data
			].iter().copied());
		if sum_total != 0 {
			log_error!("Incorrect checksum: 0x{:04x} != 0", sum_total);
		}
	}
	// Consume (and currently ignore) TCP options between the fixed header
	// and the payload.
	while pkt.remain() > pre_header_reader.remain() - hdr_len
	{
		match pkt.read_u8().unwrap()
		{
		_ => {},
		}
	}
	let quad = Quad::new(dest_addr, hdr.dest_port, src_addr, hdr.source_port);
	// Search for active connections with this quad
	if let Some(c) = CONNECTIONS.get(&quad)
	{
		c.lock().handle(&quad, &hdr, pkt);
	}
	// Search for proto-connections
	// - Proto-connections are lighter weight than full-blown connections, reducing the impact of a SYN flood
	else if hdr.flags == FLAG_ACK
	{
		if let Some(c) = PROTO_CONNECTIONS.take(&quad)
		{
			// Check the SEQ/ACK numbers, and create the actual connection
			if hdr.sequence_number == c.seen_seq + 1 && hdr.acknowledgement_number == c.sent_seq
			{
				// Make the full connection struct
				CONNECTIONS.insert(quad, Mutex::new(Connection::new_inbound(&hdr)));
				// Add the connection onto the server's accept queue
				// (prefer an address-specific listener over a wildcard one)
				let server = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) ).expect("Can't find server");
				server.accept_queue.push(quad).expect("Acceped connection with full accept queue");
			}
			else
			{
				// - Bad ACK, put the proto connection back into the list
				PROTO_CONNECTIONS.insert(quad, c);
			}
		}
	}
	// If none found, look for servers on the destination (if SYN)
	else if hdr.flags & !FLAG_ACK == FLAG_SYN
	{
		if let Some(s) = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) )
		{
			// Decrement the server's accept space
			if s.accept_space.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |v| if v == 0 { None } else { Some(v - 1) }).is_err() {
				// Reject if no space
				// - Send a RST
				quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST, 0, &[]);
			}
			else {
				// - Add the quad as a proto-connection and send the SYN-ACK
				let pc = ProtoConnection::new(hdr.sequence_number);
				quad.send_packet(pc.sent_seq, pc.seen_seq, FLAG_SYN|FLAG_ACK, hdr.window_size, &[]);
				PROTO_CONNECTIONS.insert(quad, pc);
			}
		}
		else
		{
			// Send a RST
			quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST|(!hdr.flags & FLAG_ACK), 0, &[]);
		}
	}
	// Otherwise, drop
}
/// Connection identity: the (local, remote) address:port pair.
/// Used as the key into `CONNECTIONS`/`PROTO_CONNECTIONS`.
#[derive(Copy,Clone,PartialOrd,PartialEq,Ord,Eq)]
struct Quad
{
	local_addr: Address,
	local_port: u16,
	remote_addr: Address,
	remote_port: u16,
}
// Compact "local -> remote" rendering for log lines.
impl ::core::fmt::Debug for Quad
{
	fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
		write!(f, "Quad({:?}:{} -> {:?}:{})", self.local_addr, self.local_port, self.remote_addr, self.remote_port)
	}
}
impl Quad
{
	fn new(local_addr: Address, local_port: u16, remote_addr: Address, remote_port: u16) -> Quad
	{
		Quad {
			local_addr, local_port, remote_addr, remote_port
			}
	}
	/// Build and transmit a single TCP segment on this quad.
	/// The checksum is currently left zero (see TODO below).
	fn send_packet(&self, seq: u32, ack: u32, flags: u8, window_size: u16, data: &[u8])
	{
		// Make a header
		// TODO: Any options required?
		let options_bytes = &[];
		// Header length is expressed in 32-bit words, so options must be
		// padded to a 4-byte multiple.
		let opts_len_rounded = ((options_bytes.len() + 3) / 4) * 4;
		let hdr = PktHeader {
			source_port: self.local_port,
			dest_port: self.remote_port,
			sequence_number: seq,
			acknowledgement_number: ack,
			data_offset: ((5 + opts_len_rounded/4) << 4) as u8 | 0,
			flags: flags,
			window_size: window_size,
			checksum: 0, // To be filled afterwards
			urgent_pointer: 0,
			}.as_bytes();
		// Calculate checksum
		// Create sparse packet chain
		let data_pkt = SparsePacket::new_root(data);
		// - Padding required to make the header a multiple of 4 bytes long
		let opt_pad_pkt = SparsePacket::new_chained(&[0; 3][.. opts_len_rounded - options_bytes.len()], &data_pkt);
		let opt_pkt = SparsePacket::new_chained(options_bytes, &opt_pad_pkt);
		let hdr_pkt = SparsePacket::new_chained(&hdr, &opt_pkt);
		// Pass packet downstream
		match self.local_addr
		{
		Address::Ipv4(a) => crate::ipv4::send_packet(a, self.remote_addr.unwrap_ipv4(), IPV4_PROTO_TCP, hdr_pkt),
		}
	}
}
/// Fixed 20-byte TCP header (options are handled separately).
#[derive(Debug)]
struct PktHeader
{
	source_port: u16,
	dest_port: u16,
	sequence_number: u32,
	acknowledgement_number: u32,
	/// Packed: top 4 bits are header size in 4byte units, bottom 4 are reserved
	data_offset: u8,
	/// Bitfield:
	/// 0: FIN
	/// 1: SYN
	/// 2: RST
	/// 3: PSH
	/// 4: ACK
	/// 5: URG
	/// 6: ECE
	/// 7: CWR
	flags: u8,
	window_size: u16,
	checksum: u16,
	urgent_pointer: u16,
	//options: [u8],
}
const FLAG_FIN: u8 = 1 << 0;
const FLAG_SYN: u8 = 1 << 1;
const FLAG_RST: u8 = 1 << 2;
const FLAG_PSH: u8 = 1 << 3;
const FLAG_ACK: u8 = 1 << 4;
impl PktHeader
{
	/// Parse the fixed 20-byte header from the packet (network byte order).
	/// Errors if the packet runs out of data.
	fn read(reader: &mut ::nic::PacketReader) -> Result<Self, ()>
	{
		Ok(PktHeader {
			source_port: reader.read_u16n()?,
			dest_port: reader.read_u16n()?,
			sequence_number: reader.read_u32n()?,
			acknowledgement_number: reader.read_u32n()?,
			data_offset: reader.read_u8()?,
			flags: reader.read_u8()?,
			window_size: reader.read_u16n()?,
			checksum: reader.read_u16n()?,
			urgent_pointer: reader.read_u16n()?,
			})
		// TODO: Check checksum?
	}
	/// Total header length in bytes (data_offset is in 32-bit words).
	fn get_header_size(&self) -> usize {
		(self.data_offset >> 4) as usize * 4
	}
	/// Serialise to wire format (big-endian), excluding options.
	fn as_bytes(&self) -> [u8; 5*4]
	{
		[
			(self.source_port >> 8) as u8,
			(self.source_port >> 0) as u8,
			(self.dest_port >> 8) as u8,
			(self.dest_port >> 0) as u8,
			(self.sequence_number >> 24) as u8,
			(self.sequence_number >> 16) as u8,
			(self.sequence_number >> 8) as u8,
			(self.sequence_number >> 0) as u8,
			(self.acknowledgement_number >> 24) as u8,
			(self.acknowledgement_number >> 16) as u8,
			(self.acknowledgement_number >> 8) as u8,
			(self.acknowledgement_number >> 0) as u8,
			self.data_offset,
			self.flags,
			(self.window_size >> 8) as u8,
			(self.window_size >> 0) as u8,
			(self.checksum >> 8) as u8,
			(self.checksum >> 0) as u8,
			(self.urgent_pointer >> 8) as u8,
			(self.urgent_pointer >> 0) as u8,
			]
	}
	/// Header as 16-bit words, as consumed by the checksum routine.
	fn as_u16s(&self) -> [u16; 5*2] {
		[
			self.source_port,
			self.dest_port,
			(self.sequence_number >> 16) as u16,
			(self.sequence_number >> 0) as u16,
			(self.acknowledgement_number >> 16) as u16,
			(self.acknowledgement_number >> 0) as u16,
			(self.data_offset as u16) << 8 | (self.flags as u16),
			self.window_size,
			self.checksum,
			self.urgent_pointer,
			]
	}
	/// Ones-complement checksum of the header words alone (the pseudo
	/// header and payload are folded in by the caller).
	fn checksum(&self) -> u16 {
		::ipv4::calculate_checksum(self.as_u16s().iter().cloned())
	}
}
/// Per-connection TCP state (one per active Quad, behind a Mutex).
struct Connection
{
	/// Current position in the TCP state machine.
	state: ConnectionState,
	/// Sequence number of the next expected remote byte
	next_rx_seq: u32,
	/// Last ACKed sequence number
	last_rx_ack: u32,
	/// Received bytes
	rx_buffer: RxBuffer,
	/// Sequence number of the first byte in the RX buffer
	rx_buffer_seq: u32,
	/// Upper bound on the advertised receive window (user-configurable).
	rx_window_size_max: u32,
	/// Currently advertised receive window.
	rx_window_size: u32,
	/// Sequence number of last transmitted byte
	last_tx_seq: u32,
	/// Buffer of transmitted but not ACKed bytes
	tx_buffer: RingBuf<u8>,
	/// Offset of bytes actually sent (not just buffered)
	tx_bytes_sent: usize,
	/// Last received transmit window size
	tx_window_size: u32,
}
/// TCP state machine states (RFC 793 names, minus states handled
/// elsewhere: LISTEN lives in `SERVERS`, SYN-RECEIVED in
/// `PROTO_CONNECTIONS`).
#[derive(Copy,Clone,Debug,PartialEq)]
enum ConnectionState
{
	//Closed,	// Unused
	SynSent,	// SYN sent by local, waiting for SYN-ACK
	//SynReceived,	// Server only, handled by PROTO_CONNECTIONS
	Established,
	FinWait1,	// FIN sent, waiting for reply (ACK or FIN)
	FinWait2,	// sent FIN acked, waiting for FIN from peer
	Closing,	// Waiting for ACK of FIN (FIN sent and recieved)
	TimeWait,	// Waiting for timeout after local close
	ForceClose,	// RST recieved, waiting for user close
	CloseWait,	// FIN recieved, waiting for user to close (error set, wait for node close)
	LastAck,	// FIN sent and recieved, waiting for ACK
	Finished,
}
impl Connection
{
	/// Create a new connection from the ACK in a SYN-SYN,ACK-ACK
	/// (the handshake itself was tracked by a `ProtoConnection`).
	fn new_inbound(hdr: &PktHeader) -> Self
	{
		Connection {
			state: ConnectionState::Established,
			next_rx_seq: hdr.sequence_number,
			last_rx_ack: hdr.sequence_number,
			rx_buffer_seq: hdr.sequence_number,
			rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
			rx_window_size_max: MAX_WINDOW_SIZE,	// Can be updated by the user
			rx_window_size: DEF_WINDOW_SIZE,
			last_tx_seq: hdr.acknowledgement_number,
			tx_buffer: RingBuf::new(2048),//hdr.window_size as usize),
			tx_bytes_sent: 0,
			tx_window_size: hdr.window_size as u32,
			}
	}
	/// Start an outbound connection: constructs the state in `SynSent`
	/// and immediately transmits the opening SYN.
	fn new_outbound(quad: &Quad, sequence_number: u32) -> Self
	{
		log_trace!("Connection::new_outbound({:?}, {:#x})", quad, sequence_number);
		let mut rv = Connection {
			state: ConnectionState::SynSent,
			next_rx_seq: 0,
			last_rx_ack: 0,
			rx_buffer_seq: 0,
			rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
			rx_window_size_max: MAX_WINDOW_SIZE,	// Can be updated by the user
			rx_window_size: DEF_WINDOW_SIZE,
			last_tx_seq: sequence_number,
			tx_buffer: RingBuf::new(2048),
			tx_bytes_sent: 0,
			tx_window_size: 0,//hdr.window_size as u32,
			};
		rv.send_packet(quad, FLAG_SYN, &[]);
		rv
	}
/// Handle inbound data
fn handle(&mut self, quad: &Quad, hdr: &PktHeader, mut pkt: ::nic::PacketReader)
{
match self.state
{
//ConnectionState::Closed => return,
ConnectionState::Finished => return,
_ => {},
}
// Synchronisation request
if hdr.flags & FLAG_SYN != 0 {
// TODO: Send an ACK of the last recieved byte (should this be conditional?)
if self.last_rx_ack != self.next_rx_seq {
}
//self.next_rx_seq = hdr.sequence_number;
}
// ACK of sent data
if hdr.flags & FLAG_ACK != 0 {
let in_flight = (self.last_tx_seq - hdr.acknowledgement_number) as usize;
if in_flight > self.tx_buffer.len() {
// TODO: Error, something funky has happened
}
else {
let n_bytes = self.tx_buffer.len() - in_flight;
log_debug!("{:?} ACQ {} bytes", quad, n_bytes);
for _ in 0 .. n_bytes {
self.tx_buffer.pop_front();
}
}
}
// Update the window size if it changes
if self.tx_window_size != hdr.window_size as u32 {
self.tx_window_size = hdr.window_size as u32;
}
let new_state = match self.state
{
//ConnectionState::Closed => return,
// SYN sent by local, waiting for SYN-ACK
ConnectionState::SynSent => {
if hdr.flags & FLAG_SYN != 0 {
self.next_rx_seq += 1;
if hdr.flags & FLAG_ACK != 0 {
// Now established
// TODO: Send ACK back
self.send_ack(quad, "SYN-ACK");
ConnectionState::Established
}
else {
// Why did we get a plain SYN in this state?
self.state
}
}
else {
// Ignore non-SYN
self.state
}
},
ConnectionState::Established =>
if hdr.flags & FLAG_RST != 0 {
// RST received, do an unclean close (reset by peer)
// TODO: Signal to user that the connection is closing (error)
ConnectionState::ForceClose
}
else if hdr.flags & FLAG_FIN != 0 {
// FIN received, start a clean shutdown
self.next_rx_seq += 1;
// TODO: Signal to user that the connection is closing (EOF)
ConnectionState::CloseWait
}
else {
if pkt.remain() == 0 {
// Pure ACK, no change
if hdr.flags == FLAG_ACK {
log_trace!("{:?} ACK only", quad);
}
else if self.next_rx_seq != hdr.sequence_number {
log_trace!("{:?} Empty packet, unexpected seqeunce number {:x} != {:x}", quad, hdr.sequence_number, self.next_rx_seq);
}
else {
// Counts as one byte
self.next_rx_seq += 1;
self.send_ack(quad, "Empty");
}
}
else if hdr.sequence_number - self.next_rx_seq + pkt.remain() as u32 > MAX_WINDOW_SIZE {
// Completely out of sequence
}
else {
// In sequence.
let mut start_ofs = (hdr.sequence_number - self.next_rx_seq) as i32;
while start_ofs < 0 {
pkt.read_u8().unwrap();
start_ofs += 1;
}
let mut ofs = start_ofs as usize;
while let Ok(b) = pkt.read_u8() {
match self.rx_buffer.insert( (self.next_rx_seq - self.rx_buffer_seq) as usize + ofs, &[b])
{
Ok(_) => {},
Err(e) => {
log_error!("{:?} RX buffer push {:?}", quad, e);
break;
},
}
ofs += 1;
}
// Better idea: Have an ACQ point, and a window point. Buffer is double the window
// Once the window point reaches 25% of the window from the ACK point
if start_ofs == 0 {
self.next_rx_seq += ofs as u32;
// Calculate a maximum window size based on how much space is left in the buffer
let buffered_len = self.next_rx_seq - self.rx_buffer_seq; // How much data the user has buffered
let cur_max_window = 2*self.rx_window_size_max - buffered_len; // NOTE: 2* for some flex so the window can stay at max size
if cur_max_window < self.rx_window_size {
// Reduce the window size and send an ACQ (with the updated size)
while cur_max_window < self.rx_window_size {
self.rx_window_size /= 2;
}
self.send_ack(quad, "Constrain window");
}
else if self.next_rx_seq - self.last_rx_ack > self.rx_window_size/2 {
// Send an ACK now, we've recieved a burst of data
self.send_ack(quad, "Data burst");
}
else {
// TODO: Schedule an ACK in a few hundred milliseconds
}
}
if hdr.flags & FLAG_PSH != 0 {
// TODO: Prod the user that there's new data?
}
}
self.state
},
ConnectionState::CloseWait => {
// Ignore all packets while waiting for the user to complete teardown
self.state
},
ConnectionState::LastAck => // Waiting for ACK in FIN,FIN/ACK,ACK
if hdr.flags & FLAG_ACK != 0 {
ConnectionState::Finished
}
else {
self.state
},
ConnectionState::FinWait1 => // FIN sent, waiting for reply (ACK or FIN)
if hdr.flags & FLAG_FIN != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
self.send_ack(quad, "SYN-ACK");
ConnectionState::Closing
}
else if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::FinWait2
}
else {
self.state
},
ConnectionState::FinWait2 =>
if hdr.flags & FLAG_FIN != 0 { // Got a FIN after the ACK, close
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::Closing =>
if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::ForceClose => self.state, | ConnectionState::Finished => return,
};
self.state_update(quad, new_state);
}
	/// Commit a state transition: logs it and releases the local port once
	/// the connection reaches `Finished`.
	fn state_update(&mut self, quad: &Quad, new_state: ConnectionState)
	{
		if self.state != new_state
		{
			log_trace!("{:?} {:?} -> {:?}", quad, self.state, new_state);
			self.state = new_state;
			// TODO: If transitioning to `Finished`, release the local port?
			// - Only for client connections.
			if let ConnectionState::Finished = self.state
			{
				release_port(&quad.local_addr, quad.local_port);
			}
		}
	}
	/// Map the current state to the error a user-level send/recv should
	/// observe (`Ok` only while fully established).
	fn state_to_error(&self) -> Result<(), ConnError>
	{
		match self.state
		{
		ConnectionState::SynSent => {
			todo!("(quad=?) send/recv before established");
			},
		ConnectionState::Established => Ok( () ),
		ConnectionState::FinWait1
		| ConnectionState::FinWait2
		| ConnectionState::Closing
		| ConnectionState::TimeWait => Err( ConnError::LocalClosed ),
		ConnectionState::ForceClose => Err( ConnError::RemoteReset ),
		ConnectionState::CloseWait | ConnectionState::LastAck => Err( ConnError::RemoteClosed ),
		ConnectionState::Finished => Err( ConnError::LocalClosed ),
		}
	}
	/// Queue user data for transmission, bounded by the peer's advertised
	/// window. Returns the number of bytes accepted.
	/// NOTE: currently ends in `todo!` — the actual transmit path is
	/// incomplete.
	fn send_data(&mut self, quad: &Quad, buf: &[u8]) -> Result<usize, ConnError>
	{
		// TODO: Is it valid to send before the connection is fully established?
		self.state_to_error()?;
		// 1. Determine how much data we can send (based on the TX window)
		let max_len = usize::saturating_sub(self.tx_window_size as usize, self.tx_buffer.len());
		let rv = ::core::cmp::min(buf.len(), max_len);
		// Add the data to the TX buffer
		for &b in &buf[..rv] {
			self.tx_buffer.push_back(b).expect("Incorrectly calculated `max_len` in tcp::Connection::send_data");
		}
		// If the buffer is full enough, do a send
		if self.tx_buffer.len() - self.tx_bytes_sent > 1400 /*|| self.first_tx_time.map(|t| now() - t > MAX_TX_DELAY).unwrap_or(false)*/
		{
			// Trigger a TX
			self.flush_send(quad);
		}
		else
		{
			// Kick a short timer, which will send data after it expires
			// - Keep kicking the timer as data flows through
			// - Have a maximum elapsed time with no packet sent.
			//if self.tx_timer.reset(MIN_TX_DELAY) == timer::ResetResult::WasStopped
			//{
			//	self.first_tx_time = Some(now());
			//}
		}
		todo!("{:?} send_data( min({}, {})={} )", quad, max_len, buf.len(), rv);
	}
	/// Transmit all buffered-but-unsent bytes.
	/// NOTE: unimplemented — hits `todo!` on first iteration.
	fn flush_send(&mut self, quad: &Quad)
	{
		loop
		{
			let nbytes = self.tx_buffer.len() - self.tx_bytes_sent;
			todo!("{:?} tx {}", quad, nbytes);
		}
		//self.first_tx_time = None;
	}
	/// Copy received bytes into `buf`, returning how many were delivered
	/// (possibly zero when nothing is pending).
	fn recv_data(&mut self, _quad: &Quad, buf: &mut [u8]) -> Result<usize, ConnError>
	{
		self.state_to_error()?;
		//let valid_len = self.rx_buffer.valid_len();
		//let acked_len = u32::wrapping_sub(self.next_rx_seq, self.rx_buffer_seq);
		//let len = usize::min(valid_len, buf.len());
		Ok( self.rx_buffer.take(buf) )
	}
	/// Transmit one segment on this connection, filling seq/ack/window
	/// from the connection state.
	fn send_packet(&mut self, quad: &Quad, flags: u8, data: &[u8])
	{
		log_debug!("{:?} send_packet({:02x} {}b)", quad, flags, data.len());
		quad.send_packet(self.last_tx_seq, self.next_rx_seq, flags, self.rx_window_size as u16, data);
	}
	/// Send an empty ACK segment; `msg` is purely for the debug log.
	fn send_ack(&mut self, quad: &Quad, msg: &str)
	{
		log_debug!("{:?} send_ack({:?})", quad, msg);
		// - TODO: Cancel any pending ACK
		// - Send a new ACK
		self.send_packet(quad, FLAG_ACK, &[]);
	}
	/// User-initiated close: sends FIN where required and advances the
	/// state machine. Errors if the connection is already closing/closed.
	fn close(&mut self, quad: &Quad) -> Result<(), ConnError>
	{
		let new_state = match self.state
			{
			ConnectionState::SynSent => {
				todo!("{:?} close before established", quad);
				},
			ConnectionState::FinWait1
			| ConnectionState::FinWait2
			| ConnectionState::Closing
			| ConnectionState::TimeWait => return Err( ConnError::LocalClosed ),
			ConnectionState::LastAck => return Err( ConnError::RemoteClosed ),
			ConnectionState::Finished => return Err( ConnError::LocalClosed ),
			// Peer already sent FIN: reply FIN|ACK and await the final ACK.
			ConnectionState::CloseWait => {
				self.send_packet(quad, FLAG_FIN|FLAG_ACK, &[]);
				ConnectionState::LastAck
				},
			// Peer reset the connection: nothing to send.
			ConnectionState::ForceClose => {
				ConnectionState::Finished
				},
			// Active close: send our FIN and wait for the reply.
			ConnectionState::Established => {
				self.send_packet(quad, FLAG_FIN, &[]);
				ConnectionState::FinWait1
				},
			};
		self.state_update(quad, new_state);
		Ok( () )
	}
}
/// Minimal handshake state held between SYN and the final ACK, keeping
/// SYN-flood cost low (no buffers are allocated until established).
struct ProtoConnection
{
	/// Remote's initial sequence number (from its SYN).
	seen_seq: u32,
	/// Our initial sequence number (sent in the SYN-ACK).
	sent_seq: u32,
}
impl ProtoConnection
{
	/// Record the peer's initial sequence number.
	fn new(seen_seq: u32) -> ProtoConnection
	{
		ProtoConnection {
			seen_seq: seen_seq,
			sent_seq: 1,	// TODO: Random
			}
	}
}
/// Listening socket state, keyed by (optional local address, port) in
/// `SERVERS`.
struct Server
{
	// Amount of connections that can still be accepted
	accept_space: AtomicUsize,
	// Established connections waiting for the user to accept
	accept_queue: AtomicRingBuf<Quad>,
}
/// User-facing handle to an active connection (newtype over its Quad).
pub struct ConnectionHandle(Quad);
/// Errors surfaced to users of the TCP API.
#[derive(Debug)]
pub enum ConnError
{
	NoRoute,
	LocalClosed,
	RemoteRefused,
	RemoteClosed,
	RemoteReset,
	NoPortAvailable,
}
impl ConnectionHandle
{
	/// Open an outbound connection: picks a route and ephemeral port, then
	/// sends the opening SYN. Does not wait for the handshake to finish.
	pub fn connect(addr: Address, port: u16) -> Result<ConnectionHandle, ConnError>
	{
		log_trace!("ConnectionHandle::connect({:?}, {})", addr, port);
		// 1. Determine the local address for this remote address
		let local_addr = match get_outbound_ip_for(&addr)
			{
			Some(a) => a,
			None => return Err(ConnError::NoRoute),
			};
		// 2. Pick a local port
		let local_port = match allocate_port(&local_addr)
			{
			Some(p) => p,
			None => return Err(ConnError::NoPortAvailable),
			};
		// 3. Create the quad and allocate the connection structure
		let quad = Quad::new(local_addr, local_port, addr, port, );
		log_trace!("ConnectionHandle::connect: quad={:?}", quad);
		// 4. Send the opening SYN (by creating the outbound connection structure)
		// NOTE(review): fixed initial sequence number — should be randomised.
		let conn = Connection::new_outbound(&quad, 0x10000u32);
		CONNECTIONS.insert(quad, Mutex::new(conn));
		Ok( ConnectionHandle(quad) )
	}
	/// Queue data for transmit; returns bytes accepted.
	pub fn send_data(&self, buf: &[u8]) -> Result<usize, ConnError>
	{
		match CONNECTIONS.get(&self.0)
		{
		None => panic!("Connection {:?} removed before handle dropped", self.0),
		Some(v) => v.lock().send_data(&self.0, buf),
		}
	}
	/// Read received data into `buf`; returns bytes delivered.
	pub fn recv_data(&self, buf: &mut [u8]) -> Result<usize, ConnError>
	{
		match CONNECTIONS.get(&self.0)
		{
		None => panic!("Connection {:?} removed before handle dropped", self.0),
		Some(v) => v.lock().recv_data(&self.0, buf),
		}
	}
	/// Begin a graceful shutdown of the connection.
	pub fn close(&mut self) -> Result<(), ConnError>
	{
		match CONNECTIONS.get(&self.0)
		{
		None => panic!("Connection {:?} removed before handle dropped", self.0),
		Some(v) => v.lock().close(&self.0),
		}
	}
}
impl ::core::ops::Drop for ConnectionHandle
{
fn drop(&mut self)
{
// Mark the connection to close
}
}
const MIN_DYN_PORT: u16 = 0xC000;
const N_DYN_PORTS: usize = (1<<16) - MIN_DYN_PORT as usize;
struct PortPool {
bitmap: [u32; N_DYN_PORTS / 32],
//n_free_ports: u16,
next_port: u16,
}
impl PortPool
{
const fn new() -> PortPool
{
PortPool {
bitmap: [0; N_DYN_PORTS / 32],
//n_free_ports: N_DYN_PORTS as u16,
next_port: MIN_DYN_PORT,
}
}
fn ofs_mask(idx: u16) -> Option<(usize, u32)>
{
if idx >= MIN_DYN_PORT
{
let ofs = (idx - MIN_DYN_PORT) as usize / 32;
let mask = 1 << (idx % 32);
Some( (ofs, mask) )
}
else
{
None
}
}
fn take(&mut self, idx: u16) -> Result<(),()>
{
let (ofs,mask) = match Self::ofs_mask(idx)
{
Some(v) => v,
None => return Ok(()),
};
if self.bitmap[ofs] & mask != 0 {
Err( () )
}
else {
self.bitmap[ofs] |= mask;
Ok( () )
}
}
fn release(&mut self, idx: u16)
{
let (ofs,mask) = match Self::ofs_mask(idx)
{
Some(v) => v,
None => return,
};
self.bitmap[ofs] &= !mask;
}
fn allocate(&mut self) -> Option<u16>
{
// Strategy: Linear ('cos it's easy)
for idx in self.next_port ..= 0xFFFF
{
match self.take(idx)
{
Ok(_) => { self.next_port = idx; return Some(idx); },
_ => {},
}
}
for idx in MIN_DYN_PORT .. self.next_port
{
match self.take(idx)
{
Ok(_) => { self.next_port = idx; return Some(idx); },
_ => {},
}
}
None
}
} | ConnectionState::TimeWait => self.state,
| random_line_split |
tcp.rs | // "Tifflin" Kernel - Networking Stack
// - By John Hodge (thePowersGang)
//
// Modules/network/tcp.rs
//! Transmission Control Protocol (Layer 4)
use shared_map::SharedMap;
use kernel::sync::Mutex;
use kernel::lib::ring_buffer::{RingBuf,AtomicRingBuf};
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::nic::SparsePacket;
use crate::Address;
const IPV4_PROTO_TCP: u8 = 6;
const MAX_WINDOW_SIZE: u32 = 0x100000; // 4MiB
const DEF_WINDOW_SIZE: u32 = 0x4000; // 16KiB
pub fn init()
{
::ipv4::register_handler(IPV4_PROTO_TCP, rx_handler_v4).unwrap();
}
#[path="tcp-lib/"]
/// Library types just for TCP
mod lib {
pub mod rx_buffer;
}
use self::lib::rx_buffer::RxBuffer;
static CONNECTIONS: SharedMap<Quad, Mutex<Connection>> = SharedMap::new();
static PROTO_CONNECTIONS: SharedMap<Quad, ProtoConnection> = SharedMap::new();
static SERVERS: SharedMap<(Option<Address>,u16), Server> = SharedMap::new();
static S_PORTS: Mutex<PortPool> = Mutex::new(PortPool::new());
/// Find the local source address for the given remote address
// TODO: Shouldn't this get an interface handle instead?
fn get_outbound_ip_for(addr: &Address) -> Option<Address>
{
match addr
{
Address::Ipv4(addr) => crate::ipv4::route_lookup(crate::ipv4::Address::zero(), *addr).map(|(laddr, _, _)| Address::Ipv4(laddr)),
}
}
/// Allocate a port for the given local address
fn allocate_port(_addr: &Address) -> Option<u16>
{
// TODO: Could store bitmap against the interface (having a separate bitmap for each interface)
S_PORTS.lock().allocate()
}
fn release_port(_addr: &Address, idx: u16)
{
S_PORTS.lock().release(idx)
}
fn rx_handler_v4(int: &::ipv4::Interface, src_addr: ::ipv4::Address, pkt: ::nic::PacketReader)
{
rx_handler(Address::Ipv4(src_addr), Address::Ipv4(int.addr()), pkt)
}
fn rx_handler(src_addr: Address, dest_addr: Address, mut pkt: ::nic::PacketReader)
{
let pre_header_reader = pkt.clone();
let hdr = match PktHeader::read(&mut pkt)
{
Ok(v) => v,
Err(_) => {
log_error!("Undersized packet: Ran out of data reading header");
return ;
},
};
log_debug!("hdr = {:?}", hdr);
let hdr_len = hdr.get_header_size();
if hdr_len < pre_header_reader.remain() {
log_error!("Undersized or invalid packet: Header length is {} but packet length is {}", hdr_len, pre_header_reader.remain());
return ;
}
// TODO: Validate checksum.
{
let packet_len = pre_header_reader.remain();
// Pseudo header for checksum
let sum_pseudo = match (src_addr,dest_addr)
{
(Address::Ipv4(s), Address::Ipv4(d)) =>
::ipv4::calculate_checksum([
// Big endian stores MSB first, so write the high word first
(s.as_u32() >> 16) as u16, (s.as_u32() >> 0) as u16,
(d.as_u32() >> 16) as u16, (d.as_u32() >> 0) as u16,
IPV4_PROTO_TCP as u16, packet_len as u16,
].iter().copied()),
};
let sum_header = hdr.checksum();
let sum_options_and_data = {
let mut pkt = pkt.clone();
let psum_whole = !::ipv4::calculate_checksum( (0 .. (pre_header_reader.remain() - hdr_len) / 2).map(|_| pkt.read_u16n().unwrap()) );
// Final byte is decoded as if there was a zero after it (so as 0x??00)
let psum_partial = if pkt.remain() > 0 { (pkt.read_u8().unwrap() as u16) << 8} else { 0 };
::ipv4::calculate_checksum([psum_whole, psum_partial].iter().copied())
};
let sum_total = ::ipv4::calculate_checksum([
!sum_pseudo, !sum_header, !sum_options_and_data
].iter().copied());
if sum_total != 0 {
log_error!("Incorrect checksum: 0x{:04x} != 0", sum_total);
}
}
// Options
while pkt.remain() > pre_header_reader.remain() - hdr_len
{
match pkt.read_u8().unwrap()
{
_ => {},
}
}
let quad = Quad::new(dest_addr, hdr.dest_port, src_addr, hdr.source_port);
// Search for active connections with this quad
if let Some(c) = CONNECTIONS.get(&quad)
{
c.lock().handle(&quad, &hdr, pkt);
}
// Search for proto-connections
// - Proto-connections are lighter weight than full-blown connections, reducing the impact of a SYN flood
else if hdr.flags == FLAG_ACK
{
if let Some(c) = PROTO_CONNECTIONS.take(&quad)
{
// Check the SEQ/ACK numbers, and create the actual connection
if hdr.sequence_number == c.seen_seq + 1 && hdr.acknowledgement_number == c.sent_seq
{
// Make the full connection struct
CONNECTIONS.insert(quad, Mutex::new(Connection::new_inbound(&hdr)));
// Add the connection onto the server's accept queue
let server = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) ).expect("Can't find server");
server.accept_queue.push(quad).expect("Acceped connection with full accept queue");
}
else
{
// - Bad ACK, put the proto connection back into the list
PROTO_CONNECTIONS.insert(quad, c);
}
}
}
// If none found, look for servers on the destination (if SYN)
else if hdr.flags & !FLAG_ACK == FLAG_SYN
{
if let Some(s) = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) )
{
// Decrement the server's accept space
if s.accept_space.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |v| if v == 0 { None } else { Some(v - 1) }).is_err() {
// Reject if no space
// - Send a RST
quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST, 0, &[]);
}
else {
// - Add the quad as a proto-connection and send the SYN-ACK
let pc = ProtoConnection::new(hdr.sequence_number);
quad.send_packet(pc.sent_seq, pc.seen_seq, FLAG_SYN|FLAG_ACK, hdr.window_size, &[]);
PROTO_CONNECTIONS.insert(quad, pc);
}
}
else
{
// Send a RST
quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST|(!hdr.flags & FLAG_ACK), 0, &[]);
}
}
// Otherwise, drop
}
#[derive(Copy,Clone,PartialOrd,PartialEq,Ord,Eq)]
struct Quad
{
local_addr: Address,
local_port: u16,
remote_addr: Address,
remote_port: u16,
}
impl ::core::fmt::Debug for Quad
{
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "Quad({:?}:{} -> {:?}:{})", self.local_addr, self.local_port, self.remote_addr, self.remote_port)
}
}
impl Quad
{
fn new(local_addr: Address, local_port: u16, remote_addr: Address, remote_port: u16) -> Quad
{
Quad {
local_addr, local_port, remote_addr, remote_port
}
}
fn send_packet(&self, seq: u32, ack: u32, flags: u8, window_size: u16, data: &[u8])
{
// Make a header
// TODO: Any options required?
let options_bytes = &[];
let opts_len_rounded = ((options_bytes.len() + 3) / 4) * 4;
let hdr = PktHeader {
source_port: self.local_port,
dest_port: self.remote_port,
sequence_number: seq,
acknowledgement_number: ack,
data_offset: ((5 + opts_len_rounded/4) << 4) as u8 | 0,
flags: flags,
window_size: window_size,
checksum: 0, // To be filled afterwards
urgent_pointer: 0,
}.as_bytes();
// Calculate checksum
// Create sparse packet chain
let data_pkt = SparsePacket::new_root(data);
// - Padding required to make the header a multiple of 4 bytes long
let opt_pad_pkt = SparsePacket::new_chained(&[0; 3][.. opts_len_rounded - options_bytes.len()], &data_pkt);
let opt_pkt = SparsePacket::new_chained(options_bytes, &opt_pad_pkt);
let hdr_pkt = SparsePacket::new_chained(&hdr, &opt_pkt);
// Pass packet downstream
match self.local_addr
{
Address::Ipv4(a) => crate::ipv4::send_packet(a, self.remote_addr.unwrap_ipv4(), IPV4_PROTO_TCP, hdr_pkt),
}
}
}
#[derive(Debug)]
struct PktHeader
{
source_port: u16,
dest_port: u16,
sequence_number: u32,
acknowledgement_number: u32,
/// Packed: top 4 bits are header size in 4byte units, bottom 4 are reserved
data_offset: u8,
/// Bitfield:
/// 0: FIN
/// 1: SYN
/// 2: RST
/// 3: PSH
/// 4: ACK
/// 5: URG
/// 6: ECE
/// 7: CWR
flags: u8,
window_size: u16,
checksum: u16,
urgent_pointer: u16,
//options: [u8],
}
const FLAG_FIN: u8 = 1 << 0;
const FLAG_SYN: u8 = 1 << 1;
const FLAG_RST: u8 = 1 << 2;
const FLAG_PSH: u8 = 1 << 3;
const FLAG_ACK: u8 = 1 << 4;
impl PktHeader
{
fn read(reader: &mut ::nic::PacketReader) -> Result<Self, ()>
{
Ok(PktHeader {
source_port: reader.read_u16n()?,
dest_port: reader.read_u16n()?,
sequence_number: reader.read_u32n()?,
acknowledgement_number: reader.read_u32n()?,
data_offset: reader.read_u8()?,
flags: reader.read_u8()?,
window_size: reader.read_u16n()?,
checksum: reader.read_u16n()?,
urgent_pointer: reader.read_u16n()?,
})
// TODO: Check checksum?
}
fn get_header_size(&self) -> usize {
(self.data_offset >> 4) as usize * 4
}
fn as_bytes(&self) -> [u8; 5*4]
{
[
(self.source_port >> 8) as u8,
(self.source_port >> 0) as u8,
(self.dest_port >> 8) as u8,
(self.dest_port >> 0) as u8,
(self.sequence_number >> 24) as u8,
(self.sequence_number >> 16) as u8,
(self.sequence_number >> 8) as u8,
(self.sequence_number >> 0) as u8,
(self.acknowledgement_number >> 24) as u8,
(self.acknowledgement_number >> 16) as u8,
(self.acknowledgement_number >> 8) as u8,
(self.acknowledgement_number >> 0) as u8,
self.data_offset,
self.flags,
(self.window_size >> 8) as u8,
(self.window_size >> 0) as u8,
(self.checksum >> 8) as u8,
(self.checksum >> 0) as u8,
(self.urgent_pointer >> 8) as u8,
(self.urgent_pointer >> 0) as u8,
]
}
fn as_u16s(&self) -> [u16; 5*2] {
[
self.source_port,
self.dest_port,
(self.sequence_number >> 16) as u16,
(self.sequence_number >> 0) as u16,
(self.acknowledgement_number >> 16) as u16,
(self.acknowledgement_number >> 0) as u16,
(self.data_offset as u16) << 8 | (self.flags as u16),
self.window_size,
self.checksum,
self.urgent_pointer,
]
}
fn checksum(&self) -> u16 {
::ipv4::calculate_checksum(self.as_u16s().iter().cloned())
}
}
struct Connection
{
state: ConnectionState,
/// Sequence number of the next expected remote byte
next_rx_seq: u32,
/// Last ACKed sequence number
last_rx_ack: u32,
/// Received bytes
rx_buffer: RxBuffer,
/// Sequence number of the first byte in the RX buffer
rx_buffer_seq: u32,
rx_window_size_max: u32,
rx_window_size: u32,
/// Sequence number of last transmitted byte
last_tx_seq: u32,
/// Buffer of transmitted but not ACKed bytes
tx_buffer: RingBuf<u8>,
/// Offset of bytes actually sent (not just buffered)
tx_bytes_sent: usize,
/// Last received transmit window size
tx_window_size: u32,
}
#[derive(Copy,Clone,Debug,PartialEq)]
enum ConnectionState
{
//Closed, // Unused
SynSent, // SYN sent by local, waiting for SYN-ACK
//SynReceived, // Server only, handled by PROTO_CONNECTIONS
Established,
FinWait1, // FIN sent, waiting for reply (ACK or FIN)
FinWait2, // sent FIN acked, waiting for FIN from peer
Closing, // Waiting for ACK of FIN (FIN sent and recieved)
TimeWait, // Waiting for timeout after local close
ForceClose, // RST recieved, waiting for user close
CloseWait, // FIN recieved, waiting for user to close (error set, wait for node close)
LastAck, // FIN sent and recieved, waiting for ACK
Finished,
}
impl Connection
{
/// Create a new connection from the ACK in a SYN-SYN,ACK-ACK
fn new_inbound(hdr: &PktHeader) -> Self
{
Connection {
state: ConnectionState::Established,
next_rx_seq: hdr.sequence_number,
last_rx_ack: hdr.sequence_number,
rx_buffer_seq: hdr.sequence_number,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: hdr.acknowledgement_number,
tx_buffer: RingBuf::new(2048),//hdr.window_size as usize),
tx_bytes_sent: 0,
tx_window_size: hdr.window_size as u32,
}
}
fn new_outbound(quad: &Quad, sequence_number: u32) -> Self
{
log_trace!("Connection::new_outbound({:?}, {:#x})", quad, sequence_number);
let mut rv = Connection {
state: ConnectionState::SynSent,
next_rx_seq: 0,
last_rx_ack: 0,
rx_buffer_seq: 0,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: sequence_number,
tx_buffer: RingBuf::new(2048),
tx_bytes_sent: 0,
tx_window_size: 0,//hdr.window_size as u32,
};
rv.send_packet(quad, FLAG_SYN, &[]);
rv
}
/// Handle inbound data
fn handle(&mut self, quad: &Quad, hdr: &PktHeader, mut pkt: ::nic::PacketReader)
{
match self.state
{
//ConnectionState::Closed => return,
ConnectionState::Finished => return,
_ => {},
}
// Synchronisation request
if hdr.flags & FLAG_SYN != 0 {
// TODO: Send an ACK of the last recieved byte (should this be conditional?)
if self.last_rx_ack != self.next_rx_seq {
}
//self.next_rx_seq = hdr.sequence_number;
}
// ACK of sent data
if hdr.flags & FLAG_ACK != 0 {
let in_flight = (self.last_tx_seq - hdr.acknowledgement_number) as usize;
if in_flight > self.tx_buffer.len() {
// TODO: Error, something funky has happened
}
else {
let n_bytes = self.tx_buffer.len() - in_flight;
log_debug!("{:?} ACQ {} bytes", quad, n_bytes);
for _ in 0 .. n_bytes {
self.tx_buffer.pop_front();
}
}
}
// Update the window size if it changes
if self.tx_window_size != hdr.window_size as u32 {
self.tx_window_size = hdr.window_size as u32;
}
let new_state = match self.state
{
//ConnectionState::Closed => return,
// SYN sent by local, waiting for SYN-ACK
ConnectionState::SynSent => {
if hdr.flags & FLAG_SYN != 0 {
self.next_rx_seq += 1;
if hdr.flags & FLAG_ACK != 0 {
// Now established
// TODO: Send ACK back
self.send_ack(quad, "SYN-ACK");
ConnectionState::Established
}
else {
// Why did we get a plain SYN in this state?
self.state
}
}
else {
// Ignore non-SYN
self.state
}
},
ConnectionState::Established =>
if hdr.flags & FLAG_RST != 0 |
else if hdr.flags & FLAG_FIN != 0 {
// FIN received, start a clean shutdown
self.next_rx_seq += 1;
// TODO: Signal to user that the connection is closing (EOF)
ConnectionState::CloseWait
}
else {
if pkt.remain() == 0 {
// Pure ACK, no change
if hdr.flags == FLAG_ACK {
log_trace!("{:?} ACK only", quad);
}
else if self.next_rx_seq != hdr.sequence_number {
log_trace!("{:?} Empty packet, unexpected seqeunce number {:x} != {:x}", quad, hdr.sequence_number, self.next_rx_seq);
}
else {
// Counts as one byte
self.next_rx_seq += 1;
self.send_ack(quad, "Empty");
}
}
else if hdr.sequence_number - self.next_rx_seq + pkt.remain() as u32 > MAX_WINDOW_SIZE {
// Completely out of sequence
}
else {
// In sequence.
let mut start_ofs = (hdr.sequence_number - self.next_rx_seq) as i32;
while start_ofs < 0 {
pkt.read_u8().unwrap();
start_ofs += 1;
}
let mut ofs = start_ofs as usize;
while let Ok(b) = pkt.read_u8() {
match self.rx_buffer.insert( (self.next_rx_seq - self.rx_buffer_seq) as usize + ofs, &[b])
{
Ok(_) => {},
Err(e) => {
log_error!("{:?} RX buffer push {:?}", quad, e);
break;
},
}
ofs += 1;
}
// Better idea: Have an ACQ point, and a window point. Buffer is double the window
// Once the window point reaches 25% of the window from the ACK point
if start_ofs == 0 {
self.next_rx_seq += ofs as u32;
// Calculate a maximum window size based on how much space is left in the buffer
let buffered_len = self.next_rx_seq - self.rx_buffer_seq; // How much data the user has buffered
let cur_max_window = 2*self.rx_window_size_max - buffered_len; // NOTE: 2* for some flex so the window can stay at max size
if cur_max_window < self.rx_window_size {
// Reduce the window size and send an ACQ (with the updated size)
while cur_max_window < self.rx_window_size {
self.rx_window_size /= 2;
}
self.send_ack(quad, "Constrain window");
}
else if self.next_rx_seq - self.last_rx_ack > self.rx_window_size/2 {
// Send an ACK now, we've recieved a burst of data
self.send_ack(quad, "Data burst");
}
else {
// TODO: Schedule an ACK in a few hundred milliseconds
}
}
if hdr.flags & FLAG_PSH != 0 {
// TODO: Prod the user that there's new data?
}
}
self.state
},
ConnectionState::CloseWait => {
// Ignore all packets while waiting for the user to complete teardown
self.state
},
ConnectionState::LastAck => // Waiting for ACK in FIN,FIN/ACK,ACK
if hdr.flags & FLAG_ACK != 0 {
ConnectionState::Finished
}
else {
self.state
},
ConnectionState::FinWait1 => // FIN sent, waiting for reply (ACK or FIN)
if hdr.flags & FLAG_FIN != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
self.send_ack(quad, "SYN-ACK");
ConnectionState::Closing
}
else if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::FinWait2
}
else {
self.state
},
ConnectionState::FinWait2 =>
if hdr.flags & FLAG_FIN != 0 { // Got a FIN after the ACK, close
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::Closing =>
if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::ForceClose => self.state,
ConnectionState::TimeWait => self.state,
ConnectionState::Finished => return,
};
self.state_update(quad, new_state);
}
fn state_update(&mut self, quad: &Quad, new_state: ConnectionState)
{
if self.state != new_state
{
log_trace!("{:?} {:?} -> {:?}", quad, self.state, new_state);
self.state = new_state;
// TODO: If transitioning to `Finished`, release the local port?
// - Only for client connections.
if let ConnectionState::Finished = self.state
{
release_port(&quad.local_addr, quad.local_port);
}
}
}
fn state_to_error(&self) -> Result<(), ConnError>
{
match self.state
{
ConnectionState::SynSent => {
todo!("(quad=?) send/recv before established");
},
ConnectionState::Established => Ok( () ),
ConnectionState::FinWait1
| ConnectionState::FinWait2
| ConnectionState::Closing
| ConnectionState::TimeWait => Err( ConnError::LocalClosed ),
ConnectionState::ForceClose => Err( ConnError::RemoteReset ),
ConnectionState::CloseWait | ConnectionState::LastAck => Err( ConnError::RemoteClosed ),
ConnectionState::Finished => Err( ConnError::LocalClosed ),
}
}
fn send_data(&mut self, quad: &Quad, buf: &[u8]) -> Result<usize, ConnError>
{
// TODO: Is it valid to send before the connection is fully established?
self.state_to_error()?;
// 1. Determine how much data we can send (based on the TX window)
let max_len = usize::saturating_sub(self.tx_window_size as usize, self.tx_buffer.len());
let rv = ::core::cmp::min(buf.len(), max_len);
// Add the data to the TX buffer
for &b in &buf[..rv] {
self.tx_buffer.push_back(b).expect("Incorrectly calculated `max_len` in tcp::Connection::send_data");
}
// If the buffer is full enough, do a send
if self.tx_buffer.len() - self.tx_bytes_sent > 1400 /*|| self.first_tx_time.map(|t| now() - t > MAX_TX_DELAY).unwrap_or(false)*/
{
// Trigger a TX
self.flush_send(quad);
}
else
{
// Kick a short timer, which will send data after it expires
// - Keep kicking the timer as data flows through
// - Have a maximum elapsed time with no packet sent.
//if self.tx_timer.reset(MIN_TX_DELAY) == timer::ResetResult::WasStopped
//{
// self.first_tx_time = Some(now());
//}
}
todo!("{:?} send_data( min({}, {})={} )", quad, max_len, buf.len(), rv);
}
fn flush_send(&mut self, quad: &Quad)
{
loop
{
let nbytes = self.tx_buffer.len() - self.tx_bytes_sent;
todo!("{:?} tx {}", quad, nbytes);
}
//self.first_tx_time = None;
}
fn recv_data(&mut self, _quad: &Quad, buf: &mut [u8]) -> Result<usize, ConnError>
{
self.state_to_error()?;
//let valid_len = self.rx_buffer.valid_len();
//let acked_len = u32::wrapping_sub(self.next_rx_seq, self.rx_buffer_seq);
//let len = usize::min(valid_len, buf.len());
Ok( self.rx_buffer.take(buf) )
}
fn send_packet(&mut self, quad: &Quad, flags: u8, data: &[u8])
{
log_debug!("{:?} send_packet({:02x} {}b)", quad, flags, data.len());
quad.send_packet(self.last_tx_seq, self.next_rx_seq, flags, self.rx_window_size as u16, data);
}
fn send_ack(&mut self, quad: &Quad, msg: &str)
{
log_debug!("{:?} send_ack({:?})", quad, msg);
// - TODO: Cancel any pending ACK
// - Send a new ACK
self.send_packet(quad, FLAG_ACK, &[]);
}
fn close(&mut self, quad: &Quad) -> Result<(), ConnError>
{
let new_state = match self.state
{
ConnectionState::SynSent => {
todo!("{:?} close before established", quad);
},
ConnectionState::FinWait1
| ConnectionState::FinWait2
| ConnectionState::Closing
| ConnectionState::TimeWait => return Err( ConnError::LocalClosed ),
ConnectionState::LastAck => return Err( ConnError::RemoteClosed ),
ConnectionState::Finished => return Err( ConnError::LocalClosed ),
ConnectionState::CloseWait => {
self.send_packet(quad, FLAG_FIN|FLAG_ACK, &[]);
ConnectionState::LastAck
},
ConnectionState::ForceClose => {
ConnectionState::Finished
},
ConnectionState::Established => {
self.send_packet(quad, FLAG_FIN, &[]);
ConnectionState::FinWait1
},
};
self.state_update(quad, new_state);
Ok( () )
}
}
struct ProtoConnection
{
seen_seq: u32,
sent_seq: u32,
}
impl ProtoConnection
{
fn new(seen_seq: u32) -> ProtoConnection
{
ProtoConnection {
seen_seq: seen_seq,
sent_seq: 1, // TODO: Random
}
}
}
struct Server
{
// Amount of connections that can still be accepted
accept_space: AtomicUsize,
// Established connections waiting for the user to accept
accept_queue: AtomicRingBuf<Quad>,
}
pub struct ConnectionHandle(Quad);
#[derive(Debug)]
pub enum ConnError
{
NoRoute,
LocalClosed,
RemoteRefused,
RemoteClosed,
RemoteReset,
NoPortAvailable,
}
impl ConnectionHandle
{
pub fn connect(addr: Address, port: u16) -> Result<ConnectionHandle, ConnError>
{
log_trace!("ConnectionHandle::connect({:?}, {})", addr, port);
// 1. Determine the local address for this remote address
let local_addr = match get_outbound_ip_for(&addr)
{
Some(a) => a,
None => return Err(ConnError::NoRoute),
};
// 2. Pick a local port
let local_port = match allocate_port(&local_addr)
{
Some(p) => p,
None => return Err(ConnError::NoPortAvailable),
};
// 3. Create the quad and allocate the connection structure
let quad = Quad::new(local_addr, local_port, addr, port, );
log_trace!("ConnectionHandle::connect: quad={:?}", quad);
// 4. Send the opening SYN (by creating the outbound connection structure)
let conn = Connection::new_outbound(&quad, 0x10000u32);
CONNECTIONS.insert(quad, Mutex::new(conn));
Ok( ConnectionHandle(quad) )
}
pub fn send_data(&self, buf: &[u8]) -> Result<usize, ConnError>
{
match CONNECTIONS.get(&self.0)
{
None => panic!("Connection {:?} removed before handle dropped", self.0),
Some(v) => v.lock().send_data(&self.0, buf),
}
}
pub fn recv_data(&self, buf: &mut [u8]) -> Result<usize, ConnError>
{
match CONNECTIONS.get(&self.0)
{
None => panic!("Connection {:?} removed before handle dropped", self.0),
Some(v) => v.lock().recv_data(&self.0, buf),
}
}
pub fn close(&mut self) -> Result<(), ConnError>
{
match CONNECTIONS.get(&self.0)
{
None => panic!("Connection {:?} removed before handle dropped", self.0),
Some(v) => v.lock().close(&self.0),
}
}
}
impl ::core::ops::Drop for ConnectionHandle
{
fn drop(&mut self)
{
// Mark the connection to close
}
}
const MIN_DYN_PORT: u16 = 0xC000;
const N_DYN_PORTS: usize = (1<<16) - MIN_DYN_PORT as usize;
struct PortPool {
bitmap: [u32; N_DYN_PORTS / 32],
//n_free_ports: u16,
next_port: u16,
}
impl PortPool
{
const fn new() -> PortPool
{
PortPool {
bitmap: [0; N_DYN_PORTS / 32],
//n_free_ports: N_DYN_PORTS as u16,
next_port: MIN_DYN_PORT,
}
}
fn ofs_mask(idx: u16) -> Option<(usize, u32)>
{
if idx >= MIN_DYN_PORT
{
let ofs = (idx - MIN_DYN_PORT) as usize / 32;
let mask = 1 << (idx % 32);
Some( (ofs, mask) )
}
else
{
None
}
}
fn take(&mut self, idx: u16) -> Result<(),()>
{
let (ofs,mask) = match Self::ofs_mask(idx)
{
Some(v) => v,
None => return Ok(()),
};
if self.bitmap[ofs] & mask != 0 {
Err( () )
}
else {
self.bitmap[ofs] |= mask;
Ok( () )
}
}
fn release(&mut self, idx: u16)
{
let (ofs,mask) = match Self::ofs_mask(idx)
{
Some(v) => v,
None => return,
};
self.bitmap[ofs] &= !mask;
}
fn allocate(&mut self) -> Option<u16>
{
// Strategy: Linear ('cos it's easy)
for idx in self.next_port ..= 0xFFFF
{
match self.take(idx)
{
Ok(_) => { self.next_port = idx; return Some(idx); },
_ => {},
}
}
for idx in MIN_DYN_PORT .. self.next_port
{
match self.take(idx)
{
Ok(_) => { self.next_port = idx; return Some(idx); },
_ => {},
}
}
None
}
}
| {
// RST received, do an unclean close (reset by peer)
// TODO: Signal to user that the connection is closing (error)
ConnectionState::ForceClose
} | conditional_block |
tcp.rs | // "Tifflin" Kernel - Networking Stack
// - By John Hodge (thePowersGang)
//
// Modules/network/tcp.rs
//! Transmission Control Protocol (Layer 4)
use shared_map::SharedMap;
use kernel::sync::Mutex;
use kernel::lib::ring_buffer::{RingBuf,AtomicRingBuf};
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::nic::SparsePacket;
use crate::Address;
const IPV4_PROTO_TCP: u8 = 6;
const MAX_WINDOW_SIZE: u32 = 0x100000; // 4MiB
const DEF_WINDOW_SIZE: u32 = 0x4000; // 16KiB
pub fn init()
{
::ipv4::register_handler(IPV4_PROTO_TCP, rx_handler_v4).unwrap();
}
#[path="tcp-lib/"]
/// Library types just for TCP
mod lib {
pub mod rx_buffer;
}
use self::lib::rx_buffer::RxBuffer;
static CONNECTIONS: SharedMap<Quad, Mutex<Connection>> = SharedMap::new();
static PROTO_CONNECTIONS: SharedMap<Quad, ProtoConnection> = SharedMap::new();
static SERVERS: SharedMap<(Option<Address>,u16), Server> = SharedMap::new();
static S_PORTS: Mutex<PortPool> = Mutex::new(PortPool::new());
/// Find the local source address for the given remote address
// TODO: Shouldn't this get an interface handle instead?
fn get_outbound_ip_for(addr: &Address) -> Option<Address>
{
match addr
{
Address::Ipv4(addr) => crate::ipv4::route_lookup(crate::ipv4::Address::zero(), *addr).map(|(laddr, _, _)| Address::Ipv4(laddr)),
}
}
/// Allocate a port for the given local address
fn allocate_port(_addr: &Address) -> Option<u16>
{
// TODO: Could store bitmap against the interface (having a separate bitmap for each interface)
S_PORTS.lock().allocate()
}
fn release_port(_addr: &Address, idx: u16)
{
S_PORTS.lock().release(idx)
}
fn rx_handler_v4(int: &::ipv4::Interface, src_addr: ::ipv4::Address, pkt: ::nic::PacketReader)
{
rx_handler(Address::Ipv4(src_addr), Address::Ipv4(int.addr()), pkt)
}
fn rx_handler(src_addr: Address, dest_addr: Address, mut pkt: ::nic::PacketReader)
{
let pre_header_reader = pkt.clone();
let hdr = match PktHeader::read(&mut pkt)
{
Ok(v) => v,
Err(_) => {
log_error!("Undersized packet: Ran out of data reading header");
return ;
},
};
log_debug!("hdr = {:?}", hdr);
let hdr_len = hdr.get_header_size();
if hdr_len < pre_header_reader.remain() {
log_error!("Undersized or invalid packet: Header length is {} but packet length is {}", hdr_len, pre_header_reader.remain());
return ;
}
// TODO: Validate checksum.
{
let packet_len = pre_header_reader.remain();
// Pseudo header for checksum
let sum_pseudo = match (src_addr,dest_addr)
{
(Address::Ipv4(s), Address::Ipv4(d)) =>
::ipv4::calculate_checksum([
// Big endian stores MSB first, so write the high word first
(s.as_u32() >> 16) as u16, (s.as_u32() >> 0) as u16,
(d.as_u32() >> 16) as u16, (d.as_u32() >> 0) as u16,
IPV4_PROTO_TCP as u16, packet_len as u16,
].iter().copied()),
};
let sum_header = hdr.checksum();
let sum_options_and_data = {
let mut pkt = pkt.clone();
let psum_whole = !::ipv4::calculate_checksum( (0 .. (pre_header_reader.remain() - hdr_len) / 2).map(|_| pkt.read_u16n().unwrap()) );
// Final byte is decoded as if there was a zero after it (so as 0x??00)
let psum_partial = if pkt.remain() > 0 { (pkt.read_u8().unwrap() as u16) << 8} else { 0 };
::ipv4::calculate_checksum([psum_whole, psum_partial].iter().copied())
};
let sum_total = ::ipv4::calculate_checksum([
!sum_pseudo, !sum_header, !sum_options_and_data
].iter().copied());
if sum_total != 0 {
log_error!("Incorrect checksum: 0x{:04x} != 0", sum_total);
}
}
// Options
while pkt.remain() > pre_header_reader.remain() - hdr_len
{
match pkt.read_u8().unwrap()
{
_ => {},
}
}
let quad = Quad::new(dest_addr, hdr.dest_port, src_addr, hdr.source_port);
// Search for active connections with this quad
if let Some(c) = CONNECTIONS.get(&quad)
{
c.lock().handle(&quad, &hdr, pkt);
}
// Search for proto-connections
// - Proto-connections are lighter weight than full-blown connections, reducing the impact of a SYN flood
else if hdr.flags == FLAG_ACK
{
if let Some(c) = PROTO_CONNECTIONS.take(&quad)
{
// Check the SEQ/ACK numbers, and create the actual connection
if hdr.sequence_number == c.seen_seq + 1 && hdr.acknowledgement_number == c.sent_seq
{
// Make the full connection struct
CONNECTIONS.insert(quad, Mutex::new(Connection::new_inbound(&hdr)));
// Add the connection onto the server's accept queue
let server = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) ).expect("Can't find server");
server.accept_queue.push(quad).expect("Acceped connection with full accept queue");
}
else
{
// - Bad ACK, put the proto connection back into the list
PROTO_CONNECTIONS.insert(quad, c);
}
}
}
// If none found, look for servers on the destination (if SYN)
else if hdr.flags & !FLAG_ACK == FLAG_SYN
{
if let Some(s) = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) )
{
// Decrement the server's accept space
if s.accept_space.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |v| if v == 0 { None } else { Some(v - 1) }).is_err() {
// Reject if no space
// - Send a RST
quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST, 0, &[]);
}
else {
// - Add the quad as a proto-connection and send the SYN-ACK
let pc = ProtoConnection::new(hdr.sequence_number);
quad.send_packet(pc.sent_seq, pc.seen_seq, FLAG_SYN|FLAG_ACK, hdr.window_size, &[]);
PROTO_CONNECTIONS.insert(quad, pc);
}
}
else
{
// Send a RST
quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST|(!hdr.flags & FLAG_ACK), 0, &[]);
}
}
// Otherwise, drop
}
#[derive(Copy,Clone,PartialOrd,PartialEq,Ord,Eq)]
struct Quad
{
local_addr: Address,
local_port: u16,
remote_addr: Address,
remote_port: u16,
}
impl ::core::fmt::Debug for Quad
{
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "Quad({:?}:{} -> {:?}:{})", self.local_addr, self.local_port, self.remote_addr, self.remote_port)
}
}
impl Quad
{
fn | (local_addr: Address, local_port: u16, remote_addr: Address, remote_port: u16) -> Quad
{
Quad {
local_addr, local_port, remote_addr, remote_port
}
}
fn send_packet(&self, seq: u32, ack: u32, flags: u8, window_size: u16, data: &[u8])
{
// Make a header
// TODO: Any options required?
let options_bytes = &[];
let opts_len_rounded = ((options_bytes.len() + 3) / 4) * 4;
let hdr = PktHeader {
source_port: self.local_port,
dest_port: self.remote_port,
sequence_number: seq,
acknowledgement_number: ack,
data_offset: ((5 + opts_len_rounded/4) << 4) as u8 | 0,
flags: flags,
window_size: window_size,
checksum: 0, // To be filled afterwards
urgent_pointer: 0,
}.as_bytes();
// Calculate checksum
// Create sparse packet chain
let data_pkt = SparsePacket::new_root(data);
// - Padding required to make the header a multiple of 4 bytes long
let opt_pad_pkt = SparsePacket::new_chained(&[0; 3][.. opts_len_rounded - options_bytes.len()], &data_pkt);
let opt_pkt = SparsePacket::new_chained(options_bytes, &opt_pad_pkt);
let hdr_pkt = SparsePacket::new_chained(&hdr, &opt_pkt);
// Pass packet downstream
match self.local_addr
{
Address::Ipv4(a) => crate::ipv4::send_packet(a, self.remote_addr.unwrap_ipv4(), IPV4_PROTO_TCP, hdr_pkt),
}
}
}
#[derive(Debug)]
struct PktHeader
{
source_port: u16,
dest_port: u16,
sequence_number: u32,
acknowledgement_number: u32,
/// Packed: top 4 bits are header size in 4byte units, bottom 4 are reserved
data_offset: u8,
/// Bitfield:
/// 0: FIN
/// 1: SYN
/// 2: RST
/// 3: PSH
/// 4: ACK
/// 5: URG
/// 6: ECE
/// 7: CWR
flags: u8,
window_size: u16,
checksum: u16,
urgent_pointer: u16,
//options: [u8],
}
const FLAG_FIN: u8 = 1 << 0;
const FLAG_SYN: u8 = 1 << 1;
const FLAG_RST: u8 = 1 << 2;
const FLAG_PSH: u8 = 1 << 3;
const FLAG_ACK: u8 = 1 << 4;
impl PktHeader
{
fn read(reader: &mut ::nic::PacketReader) -> Result<Self, ()>
{
Ok(PktHeader {
source_port: reader.read_u16n()?,
dest_port: reader.read_u16n()?,
sequence_number: reader.read_u32n()?,
acknowledgement_number: reader.read_u32n()?,
data_offset: reader.read_u8()?,
flags: reader.read_u8()?,
window_size: reader.read_u16n()?,
checksum: reader.read_u16n()?,
urgent_pointer: reader.read_u16n()?,
})
// TODO: Check checksum?
}
fn get_header_size(&self) -> usize {
(self.data_offset >> 4) as usize * 4
}
fn as_bytes(&self) -> [u8; 5*4]
{
[
(self.source_port >> 8) as u8,
(self.source_port >> 0) as u8,
(self.dest_port >> 8) as u8,
(self.dest_port >> 0) as u8,
(self.sequence_number >> 24) as u8,
(self.sequence_number >> 16) as u8,
(self.sequence_number >> 8) as u8,
(self.sequence_number >> 0) as u8,
(self.acknowledgement_number >> 24) as u8,
(self.acknowledgement_number >> 16) as u8,
(self.acknowledgement_number >> 8) as u8,
(self.acknowledgement_number >> 0) as u8,
self.data_offset,
self.flags,
(self.window_size >> 8) as u8,
(self.window_size >> 0) as u8,
(self.checksum >> 8) as u8,
(self.checksum >> 0) as u8,
(self.urgent_pointer >> 8) as u8,
(self.urgent_pointer >> 0) as u8,
]
}
fn as_u16s(&self) -> [u16; 5*2] {
[
self.source_port,
self.dest_port,
(self.sequence_number >> 16) as u16,
(self.sequence_number >> 0) as u16,
(self.acknowledgement_number >> 16) as u16,
(self.acknowledgement_number >> 0) as u16,
(self.data_offset as u16) << 8 | (self.flags as u16),
self.window_size,
self.checksum,
self.urgent_pointer,
]
}
fn checksum(&self) -> u16 {
::ipv4::calculate_checksum(self.as_u16s().iter().cloned())
}
}
struct Connection
{
state: ConnectionState,
/// Sequence number of the next expected remote byte
next_rx_seq: u32,
/// Last ACKed sequence number
last_rx_ack: u32,
/// Received bytes
rx_buffer: RxBuffer,
/// Sequence number of the first byte in the RX buffer
rx_buffer_seq: u32,
rx_window_size_max: u32,
rx_window_size: u32,
/// Sequence number of last transmitted byte
last_tx_seq: u32,
/// Buffer of transmitted but not ACKed bytes
tx_buffer: RingBuf<u8>,
/// Offset of bytes actually sent (not just buffered)
tx_bytes_sent: usize,
/// Last received transmit window size
tx_window_size: u32,
}
#[derive(Copy,Clone,Debug,PartialEq)]
enum ConnectionState
{
//Closed, // Unused
SynSent, // SYN sent by local, waiting for SYN-ACK
//SynReceived, // Server only, handled by PROTO_CONNECTIONS
Established,
FinWait1, // FIN sent, waiting for reply (ACK or FIN)
FinWait2, // sent FIN acked, waiting for FIN from peer
Closing, // Waiting for ACK of FIN (FIN sent and recieved)
TimeWait, // Waiting for timeout after local close
ForceClose, // RST recieved, waiting for user close
CloseWait, // FIN recieved, waiting for user to close (error set, wait for node close)
LastAck, // FIN sent and recieved, waiting for ACK
Finished,
}
impl Connection
{
/// Create a new connection from the ACK in a SYN-SYN,ACK-ACK
fn new_inbound(hdr: &PktHeader) -> Self
{
Connection {
state: ConnectionState::Established,
next_rx_seq: hdr.sequence_number,
last_rx_ack: hdr.sequence_number,
rx_buffer_seq: hdr.sequence_number,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: hdr.acknowledgement_number,
tx_buffer: RingBuf::new(2048),//hdr.window_size as usize),
tx_bytes_sent: 0,
tx_window_size: hdr.window_size as u32,
}
}
fn new_outbound(quad: &Quad, sequence_number: u32) -> Self
{
log_trace!("Connection::new_outbound({:?}, {:#x})", quad, sequence_number);
let mut rv = Connection {
state: ConnectionState::SynSent,
next_rx_seq: 0,
last_rx_ack: 0,
rx_buffer_seq: 0,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: sequence_number,
tx_buffer: RingBuf::new(2048),
tx_bytes_sent: 0,
tx_window_size: 0,//hdr.window_size as u32,
};
rv.send_packet(quad, FLAG_SYN, &[]);
rv
}
/// Handle inbound data
fn handle(&mut self, quad: &Quad, hdr: &PktHeader, mut pkt: ::nic::PacketReader)
{
match self.state
{
//ConnectionState::Closed => return,
ConnectionState::Finished => return,
_ => {},
}
// Synchronisation request
if hdr.flags & FLAG_SYN != 0 {
// TODO: Send an ACK of the last recieved byte (should this be conditional?)
if self.last_rx_ack != self.next_rx_seq {
}
//self.next_rx_seq = hdr.sequence_number;
}
// ACK of sent data
if hdr.flags & FLAG_ACK != 0 {
let in_flight = (self.last_tx_seq - hdr.acknowledgement_number) as usize;
if in_flight > self.tx_buffer.len() {
// TODO: Error, something funky has happened
}
else {
let n_bytes = self.tx_buffer.len() - in_flight;
log_debug!("{:?} ACQ {} bytes", quad, n_bytes);
for _ in 0 .. n_bytes {
self.tx_buffer.pop_front();
}
}
}
// Update the window size if it changes
if self.tx_window_size != hdr.window_size as u32 {
self.tx_window_size = hdr.window_size as u32;
}
let new_state = match self.state
{
//ConnectionState::Closed => return,
// SYN sent by local, waiting for SYN-ACK
ConnectionState::SynSent => {
if hdr.flags & FLAG_SYN != 0 {
self.next_rx_seq += 1;
if hdr.flags & FLAG_ACK != 0 {
// Now established
// TODO: Send ACK back
self.send_ack(quad, "SYN-ACK");
ConnectionState::Established
}
else {
// Why did we get a plain SYN in this state?
self.state
}
}
else {
// Ignore non-SYN
self.state
}
},
ConnectionState::Established =>
if hdr.flags & FLAG_RST != 0 {
// RST received, do an unclean close (reset by peer)
// TODO: Signal to user that the connection is closing (error)
ConnectionState::ForceClose
}
else if hdr.flags & FLAG_FIN != 0 {
// FIN received, start a clean shutdown
self.next_rx_seq += 1;
// TODO: Signal to user that the connection is closing (EOF)
ConnectionState::CloseWait
}
else {
if pkt.remain() == 0 {
// Pure ACK, no change
if hdr.flags == FLAG_ACK {
log_trace!("{:?} ACK only", quad);
}
else if self.next_rx_seq != hdr.sequence_number {
log_trace!("{:?} Empty packet, unexpected seqeunce number {:x} != {:x}", quad, hdr.sequence_number, self.next_rx_seq);
}
else {
// Counts as one byte
self.next_rx_seq += 1;
self.send_ack(quad, "Empty");
}
}
else if hdr.sequence_number - self.next_rx_seq + pkt.remain() as u32 > MAX_WINDOW_SIZE {
// Completely out of sequence
}
else {
// In sequence.
let mut start_ofs = (hdr.sequence_number - self.next_rx_seq) as i32;
while start_ofs < 0 {
pkt.read_u8().unwrap();
start_ofs += 1;
}
let mut ofs = start_ofs as usize;
while let Ok(b) = pkt.read_u8() {
match self.rx_buffer.insert( (self.next_rx_seq - self.rx_buffer_seq) as usize + ofs, &[b])
{
Ok(_) => {},
Err(e) => {
log_error!("{:?} RX buffer push {:?}", quad, e);
break;
},
}
ofs += 1;
}
// Better idea: Have an ACQ point, and a window point. Buffer is double the window
// Once the window point reaches 25% of the window from the ACK point
if start_ofs == 0 {
self.next_rx_seq += ofs as u32;
// Calculate a maximum window size based on how much space is left in the buffer
let buffered_len = self.next_rx_seq - self.rx_buffer_seq; // How much data the user has buffered
let cur_max_window = 2*self.rx_window_size_max - buffered_len; // NOTE: 2* for some flex so the window can stay at max size
if cur_max_window < self.rx_window_size {
// Reduce the window size and send an ACQ (with the updated size)
while cur_max_window < self.rx_window_size {
self.rx_window_size /= 2;
}
self.send_ack(quad, "Constrain window");
}
else if self.next_rx_seq - self.last_rx_ack > self.rx_window_size/2 {
// Send an ACK now, we've recieved a burst of data
self.send_ack(quad, "Data burst");
}
else {
// TODO: Schedule an ACK in a few hundred milliseconds
}
}
if hdr.flags & FLAG_PSH != 0 {
// TODO: Prod the user that there's new data?
}
}
self.state
},
ConnectionState::CloseWait => {
// Ignore all packets while waiting for the user to complete teardown
self.state
},
ConnectionState::LastAck => // Waiting for ACK in FIN,FIN/ACK,ACK
if hdr.flags & FLAG_ACK != 0 {
ConnectionState::Finished
}
else {
self.state
},
ConnectionState::FinWait1 => // FIN sent, waiting for reply (ACK or FIN)
if hdr.flags & FLAG_FIN != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
self.send_ack(quad, "SYN-ACK");
ConnectionState::Closing
}
else if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::FinWait2
}
else {
self.state
},
ConnectionState::FinWait2 =>
if hdr.flags & FLAG_FIN != 0 { // Got a FIN after the ACK, close
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::Closing =>
if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::ForceClose => self.state,
ConnectionState::TimeWait => self.state,
ConnectionState::Finished => return,
};
self.state_update(quad, new_state);
}
fn state_update(&mut self, quad: &Quad, new_state: ConnectionState)
{
if self.state != new_state
{
log_trace!("{:?} {:?} -> {:?}", quad, self.state, new_state);
self.state = new_state;
// TODO: If transitioning to `Finished`, release the local port?
// - Only for client connections.
if let ConnectionState::Finished = self.state
{
release_port(&quad.local_addr, quad.local_port);
}
}
}
fn state_to_error(&self) -> Result<(), ConnError>
{
match self.state
{
ConnectionState::SynSent => {
todo!("(quad=?) send/recv before established");
},
ConnectionState::Established => Ok( () ),
ConnectionState::FinWait1
| ConnectionState::FinWait2
| ConnectionState::Closing
| ConnectionState::TimeWait => Err( ConnError::LocalClosed ),
ConnectionState::ForceClose => Err( ConnError::RemoteReset ),
ConnectionState::CloseWait | ConnectionState::LastAck => Err( ConnError::RemoteClosed ),
ConnectionState::Finished => Err( ConnError::LocalClosed ),
}
}
fn send_data(&mut self, quad: &Quad, buf: &[u8]) -> Result<usize, ConnError>
{
// TODO: Is it valid to send before the connection is fully established?
self.state_to_error()?;
// 1. Determine how much data we can send (based on the TX window)
let max_len = usize::saturating_sub(self.tx_window_size as usize, self.tx_buffer.len());
let rv = ::core::cmp::min(buf.len(), max_len);
// Add the data to the TX buffer
for &b in &buf[..rv] {
self.tx_buffer.push_back(b).expect("Incorrectly calculated `max_len` in tcp::Connection::send_data");
}
// If the buffer is full enough, do a send
if self.tx_buffer.len() - self.tx_bytes_sent > 1400 /*|| self.first_tx_time.map(|t| now() - t > MAX_TX_DELAY).unwrap_or(false)*/
{
// Trigger a TX
self.flush_send(quad);
}
else
{
// Kick a short timer, which will send data after it expires
// - Keep kicking the timer as data flows through
// - Have a maximum elapsed time with no packet sent.
//if self.tx_timer.reset(MIN_TX_DELAY) == timer::ResetResult::WasStopped
//{
// self.first_tx_time = Some(now());
//}
}
todo!("{:?} send_data( min({}, {})={} )", quad, max_len, buf.len(), rv);
}
fn flush_send(&mut self, quad: &Quad)
{
loop
{
let nbytes = self.tx_buffer.len() - self.tx_bytes_sent;
todo!("{:?} tx {}", quad, nbytes);
}
//self.first_tx_time = None;
}
fn recv_data(&mut self, _quad: &Quad, buf: &mut [u8]) -> Result<usize, ConnError>
{
self.state_to_error()?;
//let valid_len = self.rx_buffer.valid_len();
//let acked_len = u32::wrapping_sub(self.next_rx_seq, self.rx_buffer_seq);
//let len = usize::min(valid_len, buf.len());
Ok( self.rx_buffer.take(buf) )
}
fn send_packet(&mut self, quad: &Quad, flags: u8, data: &[u8])
{
log_debug!("{:?} send_packet({:02x} {}b)", quad, flags, data.len());
quad.send_packet(self.last_tx_seq, self.next_rx_seq, flags, self.rx_window_size as u16, data);
}
fn send_ack(&mut self, quad: &Quad, msg: &str)
{
log_debug!("{:?} send_ack({:?})", quad, msg);
// - TODO: Cancel any pending ACK
// - Send a new ACK
self.send_packet(quad, FLAG_ACK, &[]);
}
fn close(&mut self, quad: &Quad) -> Result<(), ConnError>
{
let new_state = match self.state
{
ConnectionState::SynSent => {
todo!("{:?} close before established", quad);
},
ConnectionState::FinWait1
| ConnectionState::FinWait2
| ConnectionState::Closing
| ConnectionState::TimeWait => return Err( ConnError::LocalClosed ),
ConnectionState::LastAck => return Err( ConnError::RemoteClosed ),
ConnectionState::Finished => return Err( ConnError::LocalClosed ),
ConnectionState::CloseWait => {
self.send_packet(quad, FLAG_FIN|FLAG_ACK, &[]);
ConnectionState::LastAck
},
ConnectionState::ForceClose => {
ConnectionState::Finished
},
ConnectionState::Established => {
self.send_packet(quad, FLAG_FIN, &[]);
ConnectionState::FinWait1
},
};
self.state_update(quad, new_state);
Ok( () )
}
}
struct ProtoConnection
{
seen_seq: u32,
sent_seq: u32,
}
impl ProtoConnection
{
fn new(seen_seq: u32) -> ProtoConnection
{
ProtoConnection {
seen_seq: seen_seq,
sent_seq: 1, // TODO: Random
}
}
}
struct Server
{
// Amount of connections that can still be accepted
accept_space: AtomicUsize,
// Established connections waiting for the user to accept
accept_queue: AtomicRingBuf<Quad>,
}
pub struct ConnectionHandle(Quad);
#[derive(Debug)]
pub enum ConnError
{
NoRoute,
LocalClosed,
RemoteRefused,
RemoteClosed,
RemoteReset,
NoPortAvailable,
}
impl ConnectionHandle
{
pub fn connect(addr: Address, port: u16) -> Result<ConnectionHandle, ConnError>
{
log_trace!("ConnectionHandle::connect({:?}, {})", addr, port);
// 1. Determine the local address for this remote address
let local_addr = match get_outbound_ip_for(&addr)
{
Some(a) => a,
None => return Err(ConnError::NoRoute),
};
// 2. Pick a local port
let local_port = match allocate_port(&local_addr)
{
Some(p) => p,
None => return Err(ConnError::NoPortAvailable),
};
// 3. Create the quad and allocate the connection structure
let quad = Quad::new(local_addr, local_port, addr, port, );
log_trace!("ConnectionHandle::connect: quad={:?}", quad);
// 4. Send the opening SYN (by creating the outbound connection structure)
let conn = Connection::new_outbound(&quad, 0x10000u32);
CONNECTIONS.insert(quad, Mutex::new(conn));
Ok( ConnectionHandle(quad) )
}
pub fn send_data(&self, buf: &[u8]) -> Result<usize, ConnError>
{
match CONNECTIONS.get(&self.0)
{
None => panic!("Connection {:?} removed before handle dropped", self.0),
Some(v) => v.lock().send_data(&self.0, buf),
}
}
pub fn recv_data(&self, buf: &mut [u8]) -> Result<usize, ConnError>
{
match CONNECTIONS.get(&self.0)
{
None => panic!("Connection {:?} removed before handle dropped", self.0),
Some(v) => v.lock().recv_data(&self.0, buf),
}
}
pub fn close(&mut self) -> Result<(), ConnError>
{
match CONNECTIONS.get(&self.0)
{
None => panic!("Connection {:?} removed before handle dropped", self.0),
Some(v) => v.lock().close(&self.0),
}
}
}
impl ::core::ops::Drop for ConnectionHandle
{
fn drop(&mut self)
{
// Mark the connection to close
}
}
const MIN_DYN_PORT: u16 = 0xC000;
const N_DYN_PORTS: usize = (1<<16) - MIN_DYN_PORT as usize;
struct PortPool {
bitmap: [u32; N_DYN_PORTS / 32],
//n_free_ports: u16,
next_port: u16,
}
impl PortPool
{
const fn new() -> PortPool
{
PortPool {
bitmap: [0; N_DYN_PORTS / 32],
//n_free_ports: N_DYN_PORTS as u16,
next_port: MIN_DYN_PORT,
}
}
fn ofs_mask(idx: u16) -> Option<(usize, u32)>
{
if idx >= MIN_DYN_PORT
{
let ofs = (idx - MIN_DYN_PORT) as usize / 32;
let mask = 1 << (idx % 32);
Some( (ofs, mask) )
}
else
{
None
}
}
fn take(&mut self, idx: u16) -> Result<(),()>
{
let (ofs,mask) = match Self::ofs_mask(idx)
{
Some(v) => v,
None => return Ok(()),
};
if self.bitmap[ofs] & mask != 0 {
Err( () )
}
else {
self.bitmap[ofs] |= mask;
Ok( () )
}
}
fn release(&mut self, idx: u16)
{
let (ofs,mask) = match Self::ofs_mask(idx)
{
Some(v) => v,
None => return,
};
self.bitmap[ofs] &= !mask;
}
fn allocate(&mut self) -> Option<u16>
{
// Strategy: Linear ('cos it's easy)
for idx in self.next_port ..= 0xFFFF
{
match self.take(idx)
{
Ok(_) => { self.next_port = idx; return Some(idx); },
_ => {},
}
}
for idx in MIN_DYN_PORT .. self.next_port
{
match self.take(idx)
{
Ok(_) => { self.next_port = idx; return Some(idx); },
_ => {},
}
}
None
}
}
| new | identifier_name |
tcp.rs | // "Tifflin" Kernel - Networking Stack
// - By John Hodge (thePowersGang)
//
// Modules/network/tcp.rs
//! Transmission Control Protocol (Layer 4)
use shared_map::SharedMap;
use kernel::sync::Mutex;
use kernel::lib::ring_buffer::{RingBuf,AtomicRingBuf};
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::nic::SparsePacket;
use crate::Address;
const IPV4_PROTO_TCP: u8 = 6;
const MAX_WINDOW_SIZE: u32 = 0x100000; // 4MiB
const DEF_WINDOW_SIZE: u32 = 0x4000; // 16KiB
pub fn init()
{
::ipv4::register_handler(IPV4_PROTO_TCP, rx_handler_v4).unwrap();
}
#[path="tcp-lib/"]
/// Library types just for TCP
mod lib {
pub mod rx_buffer;
}
use self::lib::rx_buffer::RxBuffer;
static CONNECTIONS: SharedMap<Quad, Mutex<Connection>> = SharedMap::new();
static PROTO_CONNECTIONS: SharedMap<Quad, ProtoConnection> = SharedMap::new();
static SERVERS: SharedMap<(Option<Address>,u16), Server> = SharedMap::new();
static S_PORTS: Mutex<PortPool> = Mutex::new(PortPool::new());
/// Find the local source address for the given remote address
// TODO: Shouldn't this get an interface handle instead?
fn get_outbound_ip_for(addr: &Address) -> Option<Address>
{
match addr
{
Address::Ipv4(addr) => crate::ipv4::route_lookup(crate::ipv4::Address::zero(), *addr).map(|(laddr, _, _)| Address::Ipv4(laddr)),
}
}
/// Allocate a port for the given local address
fn allocate_port(_addr: &Address) -> Option<u16>
{
// TODO: Could store bitmap against the interface (having a separate bitmap for each interface)
S_PORTS.lock().allocate()
}
fn release_port(_addr: &Address, idx: u16)
{
S_PORTS.lock().release(idx)
}
fn rx_handler_v4(int: &::ipv4::Interface, src_addr: ::ipv4::Address, pkt: ::nic::PacketReader)
{
rx_handler(Address::Ipv4(src_addr), Address::Ipv4(int.addr()), pkt)
}
fn rx_handler(src_addr: Address, dest_addr: Address, mut pkt: ::nic::PacketReader)
{
let pre_header_reader = pkt.clone();
let hdr = match PktHeader::read(&mut pkt)
{
Ok(v) => v,
Err(_) => {
log_error!("Undersized packet: Ran out of data reading header");
return ;
},
};
log_debug!("hdr = {:?}", hdr);
let hdr_len = hdr.get_header_size();
if hdr_len < pre_header_reader.remain() {
log_error!("Undersized or invalid packet: Header length is {} but packet length is {}", hdr_len, pre_header_reader.remain());
return ;
}
// TODO: Validate checksum.
{
let packet_len = pre_header_reader.remain();
// Pseudo header for checksum
let sum_pseudo = match (src_addr,dest_addr)
{
(Address::Ipv4(s), Address::Ipv4(d)) =>
::ipv4::calculate_checksum([
// Big endian stores MSB first, so write the high word first
(s.as_u32() >> 16) as u16, (s.as_u32() >> 0) as u16,
(d.as_u32() >> 16) as u16, (d.as_u32() >> 0) as u16,
IPV4_PROTO_TCP as u16, packet_len as u16,
].iter().copied()),
};
let sum_header = hdr.checksum();
let sum_options_and_data = {
let mut pkt = pkt.clone();
let psum_whole = !::ipv4::calculate_checksum( (0 .. (pre_header_reader.remain() - hdr_len) / 2).map(|_| pkt.read_u16n().unwrap()) );
// Final byte is decoded as if there was a zero after it (so as 0x??00)
let psum_partial = if pkt.remain() > 0 { (pkt.read_u8().unwrap() as u16) << 8} else { 0 };
::ipv4::calculate_checksum([psum_whole, psum_partial].iter().copied())
};
let sum_total = ::ipv4::calculate_checksum([
!sum_pseudo, !sum_header, !sum_options_and_data
].iter().copied());
if sum_total != 0 {
log_error!("Incorrect checksum: 0x{:04x} != 0", sum_total);
}
}
// Options
while pkt.remain() > pre_header_reader.remain() - hdr_len
{
match pkt.read_u8().unwrap()
{
_ => {},
}
}
let quad = Quad::new(dest_addr, hdr.dest_port, src_addr, hdr.source_port);
// Search for active connections with this quad
if let Some(c) = CONNECTIONS.get(&quad)
{
c.lock().handle(&quad, &hdr, pkt);
}
// Search for proto-connections
// - Proto-connections are lighter weight than full-blown connections, reducing the impact of a SYN flood
else if hdr.flags == FLAG_ACK
{
if let Some(c) = PROTO_CONNECTIONS.take(&quad)
{
// Check the SEQ/ACK numbers, and create the actual connection
if hdr.sequence_number == c.seen_seq + 1 && hdr.acknowledgement_number == c.sent_seq
{
// Make the full connection struct
CONNECTIONS.insert(quad, Mutex::new(Connection::new_inbound(&hdr)));
// Add the connection onto the server's accept queue
let server = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) ).expect("Can't find server");
server.accept_queue.push(quad).expect("Acceped connection with full accept queue");
}
else
{
// - Bad ACK, put the proto connection back into the list
PROTO_CONNECTIONS.insert(quad, c);
}
}
}
// If none found, look for servers on the destination (if SYN)
else if hdr.flags & !FLAG_ACK == FLAG_SYN
{
if let Some(s) = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) )
{
// Decrement the server's accept space
if s.accept_space.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |v| if v == 0 { None } else { Some(v - 1) }).is_err() {
// Reject if no space
// - Send a RST
quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST, 0, &[]);
}
else {
// - Add the quad as a proto-connection and send the SYN-ACK
let pc = ProtoConnection::new(hdr.sequence_number);
quad.send_packet(pc.sent_seq, pc.seen_seq, FLAG_SYN|FLAG_ACK, hdr.window_size, &[]);
PROTO_CONNECTIONS.insert(quad, pc);
}
}
else
{
// Send a RST
quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST|(!hdr.flags & FLAG_ACK), 0, &[]);
}
}
// Otherwise, drop
}
#[derive(Copy,Clone,PartialOrd,PartialEq,Ord,Eq)]
struct Quad
{
local_addr: Address,
local_port: u16,
remote_addr: Address,
remote_port: u16,
}
impl ::core::fmt::Debug for Quad
{
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "Quad({:?}:{} -> {:?}:{})", self.local_addr, self.local_port, self.remote_addr, self.remote_port)
}
}
impl Quad
{
fn new(local_addr: Address, local_port: u16, remote_addr: Address, remote_port: u16) -> Quad
|
fn send_packet(&self, seq: u32, ack: u32, flags: u8, window_size: u16, data: &[u8])
{
// Make a header
// TODO: Any options required?
let options_bytes = &[];
let opts_len_rounded = ((options_bytes.len() + 3) / 4) * 4;
let hdr = PktHeader {
source_port: self.local_port,
dest_port: self.remote_port,
sequence_number: seq,
acknowledgement_number: ack,
data_offset: ((5 + opts_len_rounded/4) << 4) as u8 | 0,
flags: flags,
window_size: window_size,
checksum: 0, // To be filled afterwards
urgent_pointer: 0,
}.as_bytes();
// Calculate checksum
// Create sparse packet chain
let data_pkt = SparsePacket::new_root(data);
// - Padding required to make the header a multiple of 4 bytes long
let opt_pad_pkt = SparsePacket::new_chained(&[0; 3][.. opts_len_rounded - options_bytes.len()], &data_pkt);
let opt_pkt = SparsePacket::new_chained(options_bytes, &opt_pad_pkt);
let hdr_pkt = SparsePacket::new_chained(&hdr, &opt_pkt);
// Pass packet downstream
match self.local_addr
{
Address::Ipv4(a) => crate::ipv4::send_packet(a, self.remote_addr.unwrap_ipv4(), IPV4_PROTO_TCP, hdr_pkt),
}
}
}
#[derive(Debug)]
struct PktHeader
{
source_port: u16,
dest_port: u16,
sequence_number: u32,
acknowledgement_number: u32,
/// Packed: top 4 bits are header size in 4byte units, bottom 4 are reserved
data_offset: u8,
/// Bitfield:
/// 0: FIN
/// 1: SYN
/// 2: RST
/// 3: PSH
/// 4: ACK
/// 5: URG
/// 6: ECE
/// 7: CWR
flags: u8,
window_size: u16,
checksum: u16,
urgent_pointer: u16,
//options: [u8],
}
const FLAG_FIN: u8 = 1 << 0;
const FLAG_SYN: u8 = 1 << 1;
const FLAG_RST: u8 = 1 << 2;
const FLAG_PSH: u8 = 1 << 3;
const FLAG_ACK: u8 = 1 << 4;
impl PktHeader
{
fn read(reader: &mut ::nic::PacketReader) -> Result<Self, ()>
{
Ok(PktHeader {
source_port: reader.read_u16n()?,
dest_port: reader.read_u16n()?,
sequence_number: reader.read_u32n()?,
acknowledgement_number: reader.read_u32n()?,
data_offset: reader.read_u8()?,
flags: reader.read_u8()?,
window_size: reader.read_u16n()?,
checksum: reader.read_u16n()?,
urgent_pointer: reader.read_u16n()?,
})
// TODO: Check checksum?
}
fn get_header_size(&self) -> usize {
(self.data_offset >> 4) as usize * 4
}
fn as_bytes(&self) -> [u8; 5*4]
{
[
(self.source_port >> 8) as u8,
(self.source_port >> 0) as u8,
(self.dest_port >> 8) as u8,
(self.dest_port >> 0) as u8,
(self.sequence_number >> 24) as u8,
(self.sequence_number >> 16) as u8,
(self.sequence_number >> 8) as u8,
(self.sequence_number >> 0) as u8,
(self.acknowledgement_number >> 24) as u8,
(self.acknowledgement_number >> 16) as u8,
(self.acknowledgement_number >> 8) as u8,
(self.acknowledgement_number >> 0) as u8,
self.data_offset,
self.flags,
(self.window_size >> 8) as u8,
(self.window_size >> 0) as u8,
(self.checksum >> 8) as u8,
(self.checksum >> 0) as u8,
(self.urgent_pointer >> 8) as u8,
(self.urgent_pointer >> 0) as u8,
]
}
fn as_u16s(&self) -> [u16; 5*2] {
[
self.source_port,
self.dest_port,
(self.sequence_number >> 16) as u16,
(self.sequence_number >> 0) as u16,
(self.acknowledgement_number >> 16) as u16,
(self.acknowledgement_number >> 0) as u16,
(self.data_offset as u16) << 8 | (self.flags as u16),
self.window_size,
self.checksum,
self.urgent_pointer,
]
}
fn checksum(&self) -> u16 {
::ipv4::calculate_checksum(self.as_u16s().iter().cloned())
}
}
struct Connection
{
state: ConnectionState,
/// Sequence number of the next expected remote byte
next_rx_seq: u32,
/// Last ACKed sequence number
last_rx_ack: u32,
/// Received bytes
rx_buffer: RxBuffer,
/// Sequence number of the first byte in the RX buffer
rx_buffer_seq: u32,
rx_window_size_max: u32,
rx_window_size: u32,
/// Sequence number of last transmitted byte
last_tx_seq: u32,
/// Buffer of transmitted but not ACKed bytes
tx_buffer: RingBuf<u8>,
/// Offset of bytes actually sent (not just buffered)
tx_bytes_sent: usize,
/// Last received transmit window size
tx_window_size: u32,
}
#[derive(Copy,Clone,Debug,PartialEq)]
enum ConnectionState
{
//Closed, // Unused
SynSent, // SYN sent by local, waiting for SYN-ACK
//SynReceived, // Server only, handled by PROTO_CONNECTIONS
Established,
FinWait1, // FIN sent, waiting for reply (ACK or FIN)
FinWait2, // sent FIN acked, waiting for FIN from peer
Closing, // Waiting for ACK of FIN (FIN sent and recieved)
TimeWait, // Waiting for timeout after local close
ForceClose, // RST recieved, waiting for user close
CloseWait, // FIN recieved, waiting for user to close (error set, wait for node close)
LastAck, // FIN sent and recieved, waiting for ACK
Finished,
}
impl Connection
{
/// Create a new connection from the ACK in a SYN-SYN,ACK-ACK
fn new_inbound(hdr: &PktHeader) -> Self
{
Connection {
state: ConnectionState::Established,
next_rx_seq: hdr.sequence_number,
last_rx_ack: hdr.sequence_number,
rx_buffer_seq: hdr.sequence_number,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: hdr.acknowledgement_number,
tx_buffer: RingBuf::new(2048),//hdr.window_size as usize),
tx_bytes_sent: 0,
tx_window_size: hdr.window_size as u32,
}
}
fn new_outbound(quad: &Quad, sequence_number: u32) -> Self
{
log_trace!("Connection::new_outbound({:?}, {:#x})", quad, sequence_number);
let mut rv = Connection {
state: ConnectionState::SynSent,
next_rx_seq: 0,
last_rx_ack: 0,
rx_buffer_seq: 0,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: sequence_number,
tx_buffer: RingBuf::new(2048),
tx_bytes_sent: 0,
tx_window_size: 0,//hdr.window_size as u32,
};
rv.send_packet(quad, FLAG_SYN, &[]);
rv
}
/// Handle inbound data
fn handle(&mut self, quad: &Quad, hdr: &PktHeader, mut pkt: ::nic::PacketReader)
{
match self.state
{
//ConnectionState::Closed => return,
ConnectionState::Finished => return,
_ => {},
}
// Synchronisation request
if hdr.flags & FLAG_SYN != 0 {
// TODO: Send an ACK of the last recieved byte (should this be conditional?)
if self.last_rx_ack != self.next_rx_seq {
}
//self.next_rx_seq = hdr.sequence_number;
}
// ACK of sent data
if hdr.flags & FLAG_ACK != 0 {
let in_flight = (self.last_tx_seq - hdr.acknowledgement_number) as usize;
if in_flight > self.tx_buffer.len() {
// TODO: Error, something funky has happened
}
else {
let n_bytes = self.tx_buffer.len() - in_flight;
log_debug!("{:?} ACQ {} bytes", quad, n_bytes);
for _ in 0 .. n_bytes {
self.tx_buffer.pop_front();
}
}
}
// Update the window size if it changes
if self.tx_window_size != hdr.window_size as u32 {
self.tx_window_size = hdr.window_size as u32;
}
let new_state = match self.state
{
//ConnectionState::Closed => return,
// SYN sent by local, waiting for SYN-ACK
ConnectionState::SynSent => {
if hdr.flags & FLAG_SYN != 0 {
self.next_rx_seq += 1;
if hdr.flags & FLAG_ACK != 0 {
// Now established
// TODO: Send ACK back
self.send_ack(quad, "SYN-ACK");
ConnectionState::Established
}
else {
// Why did we get a plain SYN in this state?
self.state
}
}
else {
// Ignore non-SYN
self.state
}
},
ConnectionState::Established =>
if hdr.flags & FLAG_RST != 0 {
// RST received, do an unclean close (reset by peer)
// TODO: Signal to user that the connection is closing (error)
ConnectionState::ForceClose
}
else if hdr.flags & FLAG_FIN != 0 {
// FIN received, start a clean shutdown
self.next_rx_seq += 1;
// TODO: Signal to user that the connection is closing (EOF)
ConnectionState::CloseWait
}
else {
if pkt.remain() == 0 {
// Pure ACK, no change
if hdr.flags == FLAG_ACK {
log_trace!("{:?} ACK only", quad);
}
else if self.next_rx_seq != hdr.sequence_number {
log_trace!("{:?} Empty packet, unexpected seqeunce number {:x} != {:x}", quad, hdr.sequence_number, self.next_rx_seq);
}
else {
// Counts as one byte
self.next_rx_seq += 1;
self.send_ack(quad, "Empty");
}
}
else if hdr.sequence_number - self.next_rx_seq + pkt.remain() as u32 > MAX_WINDOW_SIZE {
// Completely out of sequence
}
else {
// In sequence.
let mut start_ofs = (hdr.sequence_number - self.next_rx_seq) as i32;
while start_ofs < 0 {
pkt.read_u8().unwrap();
start_ofs += 1;
}
let mut ofs = start_ofs as usize;
while let Ok(b) = pkt.read_u8() {
match self.rx_buffer.insert( (self.next_rx_seq - self.rx_buffer_seq) as usize + ofs, &[b])
{
Ok(_) => {},
Err(e) => {
log_error!("{:?} RX buffer push {:?}", quad, e);
break;
},
}
ofs += 1;
}
// Better idea: Have an ACQ point, and a window point. Buffer is double the window
// Once the window point reaches 25% of the window from the ACK point
if start_ofs == 0 {
self.next_rx_seq += ofs as u32;
// Calculate a maximum window size based on how much space is left in the buffer
let buffered_len = self.next_rx_seq - self.rx_buffer_seq; // How much data the user has buffered
let cur_max_window = 2*self.rx_window_size_max - buffered_len; // NOTE: 2* for some flex so the window can stay at max size
if cur_max_window < self.rx_window_size {
// Reduce the window size and send an ACQ (with the updated size)
while cur_max_window < self.rx_window_size {
self.rx_window_size /= 2;
}
self.send_ack(quad, "Constrain window");
}
else if self.next_rx_seq - self.last_rx_ack > self.rx_window_size/2 {
// Send an ACK now, we've recieved a burst of data
self.send_ack(quad, "Data burst");
}
else {
// TODO: Schedule an ACK in a few hundred milliseconds
}
}
if hdr.flags & FLAG_PSH != 0 {
// TODO: Prod the user that there's new data?
}
}
self.state
},
ConnectionState::CloseWait => {
// Ignore all packets while waiting for the user to complete teardown
self.state
},
ConnectionState::LastAck => // Waiting for ACK in FIN,FIN/ACK,ACK
if hdr.flags & FLAG_ACK != 0 {
ConnectionState::Finished
}
else {
self.state
},
ConnectionState::FinWait1 => // FIN sent, waiting for reply (ACK or FIN)
if hdr.flags & FLAG_FIN != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
self.send_ack(quad, "SYN-ACK");
ConnectionState::Closing
}
else if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::FinWait2
}
else {
self.state
},
ConnectionState::FinWait2 =>
if hdr.flags & FLAG_FIN != 0 { // Got a FIN after the ACK, close
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::Closing =>
if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::ForceClose => self.state,
ConnectionState::TimeWait => self.state,
ConnectionState::Finished => return,
};
self.state_update(quad, new_state);
}
fn state_update(&mut self, quad: &Quad, new_state: ConnectionState)
{
if self.state != new_state
{
log_trace!("{:?} {:?} -> {:?}", quad, self.state, new_state);
self.state = new_state;
// TODO: If transitioning to `Finished`, release the local port?
// - Only for client connections.
if let ConnectionState::Finished = self.state
{
release_port(&quad.local_addr, quad.local_port);
}
}
}
fn state_to_error(&self) -> Result<(), ConnError>
{
match self.state
{
ConnectionState::SynSent => {
todo!("(quad=?) send/recv before established");
},
ConnectionState::Established => Ok( () ),
ConnectionState::FinWait1
| ConnectionState::FinWait2
| ConnectionState::Closing
| ConnectionState::TimeWait => Err( ConnError::LocalClosed ),
ConnectionState::ForceClose => Err( ConnError::RemoteReset ),
ConnectionState::CloseWait | ConnectionState::LastAck => Err( ConnError::RemoteClosed ),
ConnectionState::Finished => Err( ConnError::LocalClosed ),
}
}
    /// Queue `buf` for transmission, returning the number of bytes accepted.
    ///
    /// Accepts at most as much as fits in the remote's advertised TX window
    /// minus what is already buffered.  Once more than 1400 bytes are
    /// pending, a flush is triggered immediately; otherwise sending is
    /// (intended to be) deferred to a short timer.
    ///
    /// NOTE(review): work-in-progress - the function currently ends in
    /// `todo!`, so the final return of `rv` is never reached.
    fn send_data(&mut self, quad: &Quad, buf: &[u8]) -> Result<usize, ConnError>
    {
        // TODO: Is it valid to send before the connection is fully established?
        self.state_to_error()?;
        // 1. Determine how much data we can send (based on the TX window)
        let max_len = usize::saturating_sub(self.tx_window_size as usize, self.tx_buffer.len());
        let rv = ::core::cmp::min(buf.len(), max_len);
        // Add the data to the TX buffer
        // - `expect` is an invariant check: `max_len` guarantees capacity.
        for &b in &buf[..rv] {
            self.tx_buffer.push_back(b).expect("Incorrectly calculated `max_len` in tcp::Connection::send_data");
        }
        // If the buffer is full enough, do a send
        if self.tx_buffer.len() - self.tx_bytes_sent > 1400 /*|| self.first_tx_time.map(|t| now() - t > MAX_TX_DELAY).unwrap_or(false)*/
        {
            // Trigger a TX
            self.flush_send(quad);
        }
        else
        {
            // Kick a short timer, which will send data after it expires
            // - Keep kicking the timer as data flows through
            // - Have a maximum elapsed time with no packet sent.
            //if self.tx_timer.reset(MIN_TX_DELAY) == timer::ResetResult::WasStopped
            //{
            //	self.first_tx_time = Some(now());
            //}
        }
        todo!("{:?} send_data( min({}, {})={} )", quad, max_len, buf.len(), rv);
    }
    /// Transmit all currently-buffered, not-yet-sent data on this connection.
    ///
    /// NOTE(review): work-in-progress - the loop body is a `todo!`
    /// placeholder, so calling this panics.
    fn flush_send(&mut self, quad: &Quad)
    {
        loop
        {
            // Bytes queued in the TX buffer but not yet handed to the wire.
            let nbytes = self.tx_buffer.len() - self.tx_bytes_sent;
            todo!("{:?} tx {}", quad, nbytes);
        }
        //self.first_tx_time = None;
    }
    /// Pull received bytes out of the RX buffer into `buf`, returning how
    /// many were copied.
    ///
    /// NOTE(review): `state_to_error` makes this fail in `CloseWait`
    /// (RemoteClosed) even if undelivered data is still buffered - confirm
    /// that is the intended semantics.
    fn recv_data(&mut self, _quad: &Quad, buf: &mut [u8]) -> Result<usize, ConnError>
    {
        self.state_to_error()?;
        // Earlier length bookkeeping, superseded by `rx_buffer.take`:
        //let valid_len = self.rx_buffer.valid_len();
        //let acked_len = u32::wrapping_sub(self.next_rx_seq, self.rx_buffer_seq);
        //let len = usize::min(valid_len, buf.len());
        Ok( self.rx_buffer.take(buf) )
    }
    /// Transmit one segment on this connection's quad with the given flags
    /// and payload, stamping the current TX sequence number, the next
    /// expected RX sequence (as the acknowledgement), and our RX window.
    fn send_packet(&mut self, quad: &Quad, flags: u8, data: &[u8])
    {
        log_debug!("{:?} send_packet({:02x} {}b)", quad, flags, data.len());
        quad.send_packet(self.last_tx_seq, self.next_rx_seq, flags, self.rx_window_size as u16, data);
    }
    /// Send an immediate empty ACK segment.  `msg` is only a debug label
    /// identifying which code path requested the ACK.
    fn send_ack(&mut self, quad: &Quad, msg: &str)
    {
        log_debug!("{:?} send_ack({:?})", quad, msg);
        // - TODO: Cancel any pending ACK
        // - Send a new ACK
        self.send_packet(quad, FLAG_ACK, &[]);
    }
fn close(&mut self, quad: &Quad) -> Result<(), ConnError>
{
let new_state = match self.state
{
ConnectionState::SynSent => {
todo!("{:?} close before established", quad);
},
ConnectionState::FinWait1
| ConnectionState::FinWait2
| ConnectionState::Closing
| ConnectionState::TimeWait => return Err( ConnError::LocalClosed ),
ConnectionState::LastAck => return Err( ConnError::RemoteClosed ),
ConnectionState::Finished => return Err( ConnError::LocalClosed ),
ConnectionState::CloseWait => {
self.send_packet(quad, FLAG_FIN|FLAG_ACK, &[]);
ConnectionState::LastAck
},
ConnectionState::ForceClose => {
ConnectionState::Finished
},
ConnectionState::Established => {
self.send_packet(quad, FLAG_FIN, &[]);
ConnectionState::FinWait1
},
};
self.state_update(quad, new_state);
Ok( () )
}
}
/// Handshake-in-progress connection record, tracking the sequence number
/// last seen from the peer and the initial sequence number used locally.
struct ProtoConnection
{
    /// Sequence number seen from the remote side.
    seen_seq: u32,
    /// Initial sequence number sent from the local side.
    sent_seq: u32,
}
impl ProtoConnection
{
    /// Create the record for a newly-observed handshake, seeded with the
    /// peer's sequence number.
    fn new(seen_seq: u32) -> ProtoConnection
    {
        ProtoConnection {
            // Field-init shorthand (was the redundant `seen_seq: seen_seq`).
            seen_seq,
            sent_seq: 1, // TODO: Random
        }
    }
}
/// Per-listener state: how many more connections may be queued, and which
/// established connections are waiting to be accepted by the user.
struct Server
{
    // Amount of connections that can still be accepted
    accept_space: AtomicUsize,
    // Established connections waiting for the user to accept
    accept_queue: AtomicRingBuf<Quad>,
}
/// User-facing handle to a TCP connection, identified by its address quad.
pub struct ConnectionHandle(Quad);
/// Errors surfaced to users of the TCP connection API.
#[derive(Debug)]
pub enum ConnError
{
    /// No outbound route exists for the requested remote address.
    NoRoute,
    /// The local side has already closed (or finished) the connection.
    LocalClosed,
    /// The remote refused the connection attempt.
    RemoteRefused,
    /// The remote has closed its side of the connection.
    RemoteClosed,
    /// The remote reset the connection (RST received).
    RemoteReset,
    /// No dynamic local port could be allocated.
    NoPortAvailable,
}
impl ConnectionHandle
{
    /// Open an outbound TCP connection to `addr:port`.
    ///
    /// Resolves the outbound interface, allocates a dynamic local port,
    /// and registers the connection structure (which sends the opening
    /// SYN).  Returns the handle without blocking for handshake completion.
    pub fn connect(addr: Address, port: u16) -> Result<ConnectionHandle, ConnError>
    {
        log_trace!("ConnectionHandle::connect({:?}, {})", addr, port);
        // 1. Determine the local address for this remote address
        let local_addr = match get_outbound_ip_for(&addr)
            {
            Some(a) => a,
            None => return Err(ConnError::NoRoute),
            };
        // 2. Pick a local port
        let local_port = match allocate_port(&local_addr)
            {
            Some(p) => p,
            None => return Err(ConnError::NoPortAvailable),
            };
        // 3. Create the quad and allocate the connection structure
        let quad = Quad::new(local_addr, local_port, addr, port, );
        log_trace!("ConnectionHandle::connect: quad={:?}", quad);
        // 4. Send the opening SYN (by creating the outbound connection structure)
        let conn = Connection::new_outbound(&quad, 0x10000u32);
        CONNECTIONS.insert(quad, Mutex::new(conn));
        Ok( ConnectionHandle(quad) )
    }
    /// Queue `buf` for transmission; returns how many bytes were accepted.
    ///
    /// # Panics
    /// Panics if the connection entry was removed from `CONNECTIONS` while
    /// this handle is still alive (an invariant violation).
    pub fn send_data(&self, buf: &[u8]) -> Result<usize, ConnError>
    {
        match CONNECTIONS.get(&self.0)
        {
        None => panic!("Connection {:?} removed before handle dropped", self.0),
        Some(v) => v.lock().send_data(&self.0, buf),
        }
    }
    /// Copy received bytes into `buf`; returns how many were delivered.
    ///
    /// # Panics
    /// Panics if the connection entry was removed (see `send_data`).
    pub fn recv_data(&self, buf: &mut [u8]) -> Result<usize, ConnError>
    {
        match CONNECTIONS.get(&self.0)
        {
        None => panic!("Connection {:?} removed before handle dropped", self.0),
        Some(v) => v.lock().recv_data(&self.0, buf),
        }
    }
    /// Begin an orderly close of the connection.
    ///
    /// # Panics
    /// Panics if the connection entry was removed (see `send_data`).
    pub fn close(&mut self) -> Result<(), ConnError>
    {
        match CONNECTIONS.get(&self.0)
        {
        None => panic!("Connection {:?} removed before handle dropped", self.0),
        Some(v) => v.lock().close(&self.0),
        }
    }
}
impl ::core::ops::Drop for ConnectionHandle
{
    fn drop(&mut self)
    {
        // Mark the connection to close
        // NOTE(review): currently a no-op, so a dropped handle never removes
        // its entry from CONNECTIONS - confirm the intended teardown path.
    }
}
/// Lowest port number handed out dynamically (ephemeral range 0xC000-0xFFFF).
const MIN_DYN_PORT: u16 = 0xC000;
/// Number of ports in the dynamic range.
const N_DYN_PORTS: usize = (1<<16) - MIN_DYN_PORT as usize;
/// Allocator for dynamic (ephemeral) local ports: one bit per port.
struct PortPool {
    /// Bit `i` set => port `MIN_DYN_PORT + i` is in use.
    bitmap: [u32; N_DYN_PORTS / 32],
    //n_free_ports: u16,
    /// Next port to try, so successive allocations rotate through the range.
    next_port: u16,
}
impl PortPool
{
    /// An empty pool with every dynamic port free.
    const fn new() -> PortPool
    {
        PortPool {
            bitmap: [0; N_DYN_PORTS / 32],
            //n_free_ports: N_DYN_PORTS as u16,
            next_port: MIN_DYN_PORT,
        }
    }
    /// Bitmap word index and bit mask for `idx`, or `None` when `idx` is
    /// below the dynamic range (such ports are not tracked by this pool).
    fn ofs_mask(idx: u16) -> Option<(usize, u32)>
    {
        if idx >= MIN_DYN_PORT
        {
            let rel = (idx - MIN_DYN_PORT) as usize;
            // The bit position is derived from the range-relative offset.
            // (The previous code shifted by `idx % 32`, which was only
            // correct because MIN_DYN_PORT happens to be 32-aligned; this
            // form does not rely on that hidden invariant.)
            Some( (rel / 32, 1 << (rel % 32)) )
        }
        else
        {
            None
        }
    }
    /// Mark `idx` as in use.  Returns `Err(())` if it was already taken.
    /// Ports below the dynamic range are untracked and always succeed.
    fn take(&mut self, idx: u16) -> Result<(),()>
    {
        let (ofs,mask) = match Self::ofs_mask(idx)
            {
            Some(v) => v,
            None => return Ok(()),
            };
        if self.bitmap[ofs] & mask != 0 {
            Err( () )
        }
        else {
            self.bitmap[ofs] |= mask;
            Ok( () )
        }
    }
    /// Return `idx` to the pool (no-op for untracked low ports).
    fn release(&mut self, idx: u16)
    {
        let (ofs,mask) = match Self::ofs_mask(idx)
            {
            Some(v) => v,
            None => return,
            };
        self.bitmap[ofs] &= !mask;
    }
    /// Allocate any free dynamic port, scanning linearly from `next_port`
    /// and wrapping around once.  `None` when the range is exhausted.
    fn allocate(&mut self) -> Option<u16>
    {
        // Strategy: Linear ('cos it's easy)
        for idx in self.next_port ..= 0xFFFF
        {
            if self.take(idx).is_ok() {
                self.next_port = idx;
                return Some(idx);
            }
        }
        for idx in MIN_DYN_PORT .. self.next_port
        {
            if self.take(idx).is_ok() {
                self.next_port = idx;
                return Some(idx);
            }
        }
        None
    }
}
| {
Quad {
local_addr, local_port, remote_addr, remote_port
}
} | identifier_body |
kegweblib.py | from builtins import str
import pytz
from django.conf import settings
from django.template import (
Library,
Node,
TemplateSyntaxError,
Variable,
VariableDoesNotExist,
)
from django.template.defaultfilters import pluralize
from django.urls import reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from pykeg.core import models
from pykeg.core.util import CtoF
from pykeg.util import kbjson, units
from pykeg.web.charts import charts
register = Library()
@register.inclusion_tag("kegweb/mugshot_box.html", takes_context=True)
def mugshot_box(context, user, boxsize=0):
return {
"user": user,
"boxsize": boxsize,
"guest_info": context.get("guest_info", None),
"STATIC_URL": context.get("STATIC_URL"),
}
@register.inclusion_tag("kegweb/picture-gallery.html")
def gallery(picture_or_pictures, thumb_size="span2", gallery_id=""):
c = {}
if not hasattr(picture_or_pictures, "__iter__"):
c["gallery_pictures"] = [picture_or_pictures]
else:
c["gallery_pictures"] = picture_or_pictures
c["thumb_size"] = thumb_size
c["gallery_id"] = gallery_id
return c
@register.inclusion_tag("kegweb/badge.html")
def badge(amount, caption, style="", is_volume=False, do_pluralize=False):
if is_volume:
amount = mark_safe(VolumeNode.format(amount, "mL"))
if do_pluralize:
caption += pluralize(amount)
return {
"badge_amount": amount,
"badge_caption": caption,
"badge_style": style,
}
@register.inclusion_tag("kegweb/includes/progress_bar.html")
def progress_bar(progress_int, extra_css=""):
c = {}
try:
progress_int = max(int(progress_int), 0)
except ValueError:
progress_int = 0
progress_int = min(progress_int, 100)
c["progress_int"] = progress_int
c["extra_css"] = extra_css
if progress_int < 10:
bar_type = "bar-danger"
elif progress_int < 25:
bar_type = "bar-warning"
else:
bar_type = "bar-success"
c["bar_type"] = bar_type
return c
# navitem
@register.tag("navitem")
def navitem(parser, token):
"""{% navitem <viewname> <title> [exact] %}"""
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("%s requires at least 3 tokens" % tokens[0])
return NavitemNode(*tokens[1:])
class NavitemNode(Node):
def __init__(self, *args):
self._viewname = args[0]
self._title = args[1]
self._exact = "exact" in args[2:]
def render(self, context):
viewname = Variable(self._viewname).resolve(context)
title = Variable(self._title).resolve(context)
if viewname.startswith("/"):
urlbase = viewname
else:
urlbase = reverse(viewname)
request_path = context["request_path"]
if self._exact:
active = request_path == urlbase
else:
active = request_path.startswith(urlbase)
if active:
res = '<li class="active">'
else:
res = "<li>"
res += '<a href="%s">%s</a></li>' % (urlbase, title)
return res
# timeago
@register.tag("timeago")
def | (parser, token):
"""{% timeago <timestamp> %}"""
tokens = token.contents.split()
if len(tokens) != 2:
raise TemplateSyntaxError("%s requires 2 tokens" % tokens[0])
return TimeagoNode(tokens[1])
class TimeagoNode(Node):
def __init__(self, timestamp_varname):
self._timestamp_varname = timestamp_varname
def render(self, context):
tv = Variable(self._timestamp_varname)
ts = tv.resolve(context)
# Try to set time zone information.
if settings.TIME_ZONE and not settings.USE_TZ:
try:
tz = pytz.timezone(settings.TIME_ZONE)
ts = tz.localize(ts)
except pytz.UnknownTimeZoneError:
pass
iso = ts.isoformat()
alt = timezone.localtime(ts).strftime("%A, %B %d, %Y %I:%M%p")
return '<abbr class="timeago" title="%s">%s</abbr>' % (iso, alt)
# temperature
@register.tag("temperature")
def temperature_tag(parser, token):
"""{% temperature <temp_c> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return TemperatureNode(tokens[1])
class TemperatureNode(Node):
TEMPLATE = "%(amount)s° %(unit)s"
def __init__(self, varname):
self.varname = varname
def render(self, context):
v = Variable(self.varname)
try:
amount = v.resolve(context)
except (VariableDoesNotExist, ValueError):
raise
amount = "unknown"
unit = "C"
kbsite = models.KegbotSite.get()
if kbsite.temperature_display_units == "f":
unit = "F"
amount = CtoF(amount)
return self.TEMPLATE % {"amount": amount, "unit": unit}
# volume
@register.tag("volume")
def volumetag(parser, token):
"""{% volume <amount> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return VolumeNode(tokens[1], tokens[2:])
class VolumeNode(Node):
TEMPLATE = """
<span class="hmeasure %(extra_css)s" title="%(title)s">
<span class="num">%(amount)s</span>
<span class="unit">%(units)s</span>
</span>""".strip()
def __init__(self, volume_varname, extra_args):
self._volume_varname = volume_varname
self._extra_args = extra_args
def render(self, context):
tv = Variable(self._volume_varname)
try:
num = float(tv.resolve(context))
except (VariableDoesNotExist, ValueError):
num = "unknown"
unit = "mL"
make_badge = "badge" in self._extra_args
return self.format(num, unit, make_badge)
@classmethod
def format(cls, amount, units, make_badge=False):
if amount < 0:
amount = 0
ctx = {
"units": units,
"amount": amount,
"title": "%s %s" % (amount, units),
"extra_css": "badge " if make_badge else "",
}
return cls.TEMPLATE % ctx
# drinker
@register.tag("drinker_name")
def drinker_name_tag(parser, token):
"""{% drinker_name <drink_or_user_obj> [nolink] %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return DrinkerNameNode(tokens[1], tokens[2:])
class DrinkerNameNode(Node):
def __init__(self, drink_varname, extra_args):
self._varname = drink_varname
self._extra_args = extra_args
def render(self, context):
obj = Variable(self._varname)
try:
obj = obj.resolve(context)
except (VariableDoesNotExist, ValueError):
obj = None
user = None
if obj:
if isinstance(obj, models.Drink) or isinstance(obj, models.SystemEvent):
user = obj.user
elif isinstance(obj, models.User):
user = obj
if user:
if "nolink" in self._extra_args:
return user.get_full_name()
else:
return '<a href="%s">%s</a>' % (
reverse("kb-drinker", args=[user.username]),
user.get_full_name(),
)
return context["guest_info"]["name"]
# chart
@register.tag("chart")
def chart(parser, tokens):
"""{% chart <charttype> <obj> width height %}"""
tokens = tokens.contents.split()
if len(tokens) < 4:
raise TemplateSyntaxError("chart requires at least 4 arguments")
charttype = tokens[1]
try:
width = int(tokens[-2])
height = int(tokens[-1])
except ValueError:
raise TemplateSyntaxError("invalid width or height")
args = tokens[2:-2]
return ChartNode(charttype, width, height, args)
class ChartNode(Node):
CHART_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox"></div>
<script type="text/javascript">
var chart_%(chart_id)s;
$(document).ready(function() {
var chart_data = %(chart_data)s;
chart_%(chart_id)s = new Highcharts.Chart(chart_data);
});
</script>
<!-- end chart %(chart_id)s -->
"""
ERROR_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox-error">
%(error_str)s
</div>
<!-- end chart %(chart_id)s -->
"""
def __init__(self, charttype, width, height, args):
self._charttype = charttype
self._width = width
self._height = height
self._args = args
self._chart_fn = getattr(charts, "chart_%s" % (self._charttype,), None)
def _get_chart_id(self, context):
# TODO(mikey): Is there a better way to store _CHART_ID?
if not hasattr(context, "_CHART_ID"):
context._CHART_ID = 0
context._CHART_ID += 1
return context._CHART_ID
def show_error(self, error_str):
ctx = {
"error_str": error_str,
"chart_id": 0,
"width": self._width,
"height": self._height,
}
return ChartNode.ERROR_TMPL % ctx
def render(self, context):
if not self._chart_fn:
return self.show_error("Unknown chart type: %s" % self._charttype)
chart_id = self._get_chart_id(context)
obj = Variable(self._args[0]).resolve(context)
metric_volumes = context.get("metric_volumes", False)
temperature_units = context.get("temperature_display_units", "f")
try:
chart_result = self._chart_fn(
obj, metric_volumes=metric_volumes, temperature_units=temperature_units
)
except charts.ChartError as e:
return self.show_error(str(e))
chart_base = {
"chart": {
"borderColor": "#eeeeff",
"borderWidth": 0,
"renderTo": "chart-%s-container" % chart_id,
},
"credits": {
"enabled": False,
},
"legend": {
"enabled": False,
},
"margin": [0, 0, 0, 0],
"title": {
"text": None,
},
"yAxis": {
"labels": {"align": "left"},
"title": {
"text": None,
},
},
}
chart_data = chart_base
for k, v in list(chart_result.items()):
if k not in chart_data:
chart_data[k] = v
elif isinstance(v, dict):
chart_data[k].update(v)
else:
chart_data[k] = v
chart_data = kbjson.dumps(chart_data, indent=None)
ctx = {
"chart_id": chart_id,
"width": self._width,
"height": self._height,
"chart_data": chart_data,
}
return ChartNode.CHART_TMPL % ctx
@register.filter
def volume(text, fmt="pints"):
try:
vol = units.Quantity(float(text))
except ValueError:
return text
if fmt == "pints":
res = vol.InPints()
elif fmt == "liters":
res = vol.InLiters()
elif fmt == "ounces":
res = vol.InOunces()
elif fmt == "gallons":
res = vol.InUSGallons()
elif fmt == "twelveounces":
res = vol.InTwelveOunceBeers()
elif fmt == "halfbarrels":
res = vol.InHalfBarrelKegs()
else:
raise TemplateSyntaxError("Unknown volume format: %s" % fmt)
return float(res)
| timeago | identifier_name |
kegweblib.py | from builtins import str
import pytz
from django.conf import settings
from django.template import (
Library,
Node,
TemplateSyntaxError,
Variable,
VariableDoesNotExist,
)
from django.template.defaultfilters import pluralize
from django.urls import reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from pykeg.core import models
from pykeg.core.util import CtoF
from pykeg.util import kbjson, units
from pykeg.web.charts import charts
register = Library()
@register.inclusion_tag("kegweb/mugshot_box.html", takes_context=True)
def mugshot_box(context, user, boxsize=0):
return {
"user": user,
"boxsize": boxsize,
"guest_info": context.get("guest_info", None),
"STATIC_URL": context.get("STATIC_URL"),
}
@register.inclusion_tag("kegweb/picture-gallery.html")
def gallery(picture_or_pictures, thumb_size="span2", gallery_id=""):
c = {}
if not hasattr(picture_or_pictures, "__iter__"):
c["gallery_pictures"] = [picture_or_pictures]
else:
c["gallery_pictures"] = picture_or_pictures
c["thumb_size"] = thumb_size
c["gallery_id"] = gallery_id
return c
@register.inclusion_tag("kegweb/badge.html")
def badge(amount, caption, style="", is_volume=False, do_pluralize=False):
if is_volume:
amount = mark_safe(VolumeNode.format(amount, "mL"))
if do_pluralize:
caption += pluralize(amount)
return {
"badge_amount": amount,
"badge_caption": caption,
"badge_style": style,
}
@register.inclusion_tag("kegweb/includes/progress_bar.html")
def progress_bar(progress_int, extra_css=""):
    """Renders a progress bar with the value clamped to [0, 100].

    Non-numeric input is treated as 0.  The bar style reflects how full it
    is: danger below 10, warning below 25, success otherwise.

    Args:
        progress_int: percentage complete; coerced via int().
        extra_css: additional CSS classes for the bar element.
    """
    c = {}
    try:
        progress_int = max(int(progress_int), 0)
    except (ValueError, TypeError):
        # int() raises ValueError for junk strings but TypeError for None;
        # catch both so a missing value renders an empty bar instead of a 500.
        progress_int = 0
    progress_int = min(progress_int, 100)
    c["progress_int"] = progress_int
    c["extra_css"] = extra_css
    if progress_int < 10:
        bar_type = "bar-danger"
    elif progress_int < 25:
        bar_type = "bar-warning"
    else:
        bar_type = "bar-success"
    c["bar_type"] = bar_type
    return c
return c
# navitem
@register.tag("navitem")
def navitem(parser, token):
"""{% navitem <viewname> <title> [exact] %}"""
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("%s requires at least 3 tokens" % tokens[0])
return NavitemNode(*tokens[1:])
class NavitemNode(Node):
def __init__(self, *args):
self._viewname = args[0]
self._title = args[1]
self._exact = "exact" in args[2:]
def render(self, context):
viewname = Variable(self._viewname).resolve(context)
title = Variable(self._title).resolve(context)
if viewname.startswith("/"):
urlbase = viewname
else:
urlbase = reverse(viewname)
request_path = context["request_path"]
if self._exact:
active = request_path == urlbase
else:
active = request_path.startswith(urlbase)
if active:
res = '<li class="active">'
else:
res = "<li>"
res += '<a href="%s">%s</a></li>' % (urlbase, title)
return res
# timeago
@register.tag("timeago")
def timeago(parser, token):
"""{% timeago <timestamp> %}"""
tokens = token.contents.split()
if len(tokens) != 2:
raise TemplateSyntaxError("%s requires 2 tokens" % tokens[0])
return TimeagoNode(tokens[1])
class TimeagoNode(Node):
def __init__(self, timestamp_varname):
self._timestamp_varname = timestamp_varname
def render(self, context):
tv = Variable(self._timestamp_varname)
ts = tv.resolve(context)
# Try to set time zone information.
if settings.TIME_ZONE and not settings.USE_TZ:
try:
tz = pytz.timezone(settings.TIME_ZONE)
ts = tz.localize(ts)
except pytz.UnknownTimeZoneError:
pass
iso = ts.isoformat()
alt = timezone.localtime(ts).strftime("%A, %B %d, %Y %I:%M%p")
return '<abbr class="timeago" title="%s">%s</abbr>' % (iso, alt)
# temperature
@register.tag("temperature")
def temperature_tag(parser, token):
"""{% temperature <temp_c> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return TemperatureNode(tokens[1])
class TemperatureNode(Node):
    """Renders a temperature (supplied in Celsius) in the site's display units."""

    TEMPLATE = "%(amount)s° %(unit)s"

    def __init__(self, varname):
        # Name of the template variable holding the Celsius value.
        self.varname = varname

    def render(self, context):
        v = Variable(self.varname)
        try:
            amount = v.resolve(context)
        except (VariableDoesNotExist, ValueError):
            # Fall back to a placeholder rather than breaking template
            # rendering.  (Previously a bare `raise` here re-raised the
            # exception, making this fallback unreachable dead code.)
            amount = "unknown"
        unit = "C"
        kbsite = models.KegbotSite.get()
        if kbsite.temperature_display_units == "f":
            unit = "F"
            if amount != "unknown":
                # Only convert real numbers; CtoF would fail on the placeholder.
                amount = CtoF(amount)
        return self.TEMPLATE % {"amount": amount, "unit": unit}
# volume
@register.tag("volume")
def volumetag(parser, token):
"""{% volume <amount> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return VolumeNode(tokens[1], tokens[2:])
class VolumeNode(Node):
TEMPLATE = """
<span class="hmeasure %(extra_css)s" title="%(title)s">
<span class="num">%(amount)s</span>
<span class="unit">%(units)s</span>
</span>""".strip()
def __init__(self, volume_varname, extra_args):
self._volume_varname = volume_varname
self._extra_args = extra_args
def render(self, context):
|
@classmethod
def format(cls, amount, units, make_badge=False):
if amount < 0:
amount = 0
ctx = {
"units": units,
"amount": amount,
"title": "%s %s" % (amount, units),
"extra_css": "badge " if make_badge else "",
}
return cls.TEMPLATE % ctx
# drinker
@register.tag("drinker_name")
def drinker_name_tag(parser, token):
"""{% drinker_name <drink_or_user_obj> [nolink] %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return DrinkerNameNode(tokens[1], tokens[2:])
class DrinkerNameNode(Node):
def __init__(self, drink_varname, extra_args):
self._varname = drink_varname
self._extra_args = extra_args
def render(self, context):
obj = Variable(self._varname)
try:
obj = obj.resolve(context)
except (VariableDoesNotExist, ValueError):
obj = None
user = None
if obj:
if isinstance(obj, models.Drink) or isinstance(obj, models.SystemEvent):
user = obj.user
elif isinstance(obj, models.User):
user = obj
if user:
if "nolink" in self._extra_args:
return user.get_full_name()
else:
return '<a href="%s">%s</a>' % (
reverse("kb-drinker", args=[user.username]),
user.get_full_name(),
)
return context["guest_info"]["name"]
# chart
@register.tag("chart")
def chart(parser, tokens):
"""{% chart <charttype> <obj> width height %}"""
tokens = tokens.contents.split()
if len(tokens) < 4:
raise TemplateSyntaxError("chart requires at least 4 arguments")
charttype = tokens[1]
try:
width = int(tokens[-2])
height = int(tokens[-1])
except ValueError:
raise TemplateSyntaxError("invalid width or height")
args = tokens[2:-2]
return ChartNode(charttype, width, height, args)
class ChartNode(Node):
CHART_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox"></div>
<script type="text/javascript">
var chart_%(chart_id)s;
$(document).ready(function() {
var chart_data = %(chart_data)s;
chart_%(chart_id)s = new Highcharts.Chart(chart_data);
});
</script>
<!-- end chart %(chart_id)s -->
"""
ERROR_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox-error">
%(error_str)s
</div>
<!-- end chart %(chart_id)s -->
"""
def __init__(self, charttype, width, height, args):
self._charttype = charttype
self._width = width
self._height = height
self._args = args
self._chart_fn = getattr(charts, "chart_%s" % (self._charttype,), None)
def _get_chart_id(self, context):
# TODO(mikey): Is there a better way to store _CHART_ID?
if not hasattr(context, "_CHART_ID"):
context._CHART_ID = 0
context._CHART_ID += 1
return context._CHART_ID
def show_error(self, error_str):
ctx = {
"error_str": error_str,
"chart_id": 0,
"width": self._width,
"height": self._height,
}
return ChartNode.ERROR_TMPL % ctx
def render(self, context):
if not self._chart_fn:
return self.show_error("Unknown chart type: %s" % self._charttype)
chart_id = self._get_chart_id(context)
obj = Variable(self._args[0]).resolve(context)
metric_volumes = context.get("metric_volumes", False)
temperature_units = context.get("temperature_display_units", "f")
try:
chart_result = self._chart_fn(
obj, metric_volumes=metric_volumes, temperature_units=temperature_units
)
except charts.ChartError as e:
return self.show_error(str(e))
chart_base = {
"chart": {
"borderColor": "#eeeeff",
"borderWidth": 0,
"renderTo": "chart-%s-container" % chart_id,
},
"credits": {
"enabled": False,
},
"legend": {
"enabled": False,
},
"margin": [0, 0, 0, 0],
"title": {
"text": None,
},
"yAxis": {
"labels": {"align": "left"},
"title": {
"text": None,
},
},
}
chart_data = chart_base
for k, v in list(chart_result.items()):
if k not in chart_data:
chart_data[k] = v
elif isinstance(v, dict):
chart_data[k].update(v)
else:
chart_data[k] = v
chart_data = kbjson.dumps(chart_data, indent=None)
ctx = {
"chart_id": chart_id,
"width": self._width,
"height": self._height,
"chart_data": chart_data,
}
return ChartNode.CHART_TMPL % ctx
@register.filter
def volume(text, fmt="pints"):
    """Template filter converting a raw volume value to the requested unit.

    Non-numeric input is passed through unchanged; an unrecognized format
    name raises TemplateSyntaxError.
    """
    try:
        qty = units.Quantity(float(text))
    except ValueError:
        # Not a number: return the original value untouched.
        return text
    converters = {
        "pints": qty.InPints,
        "liters": qty.InLiters,
        "ounces": qty.InOunces,
        "gallons": qty.InUSGallons,
        "twelveounces": qty.InTwelveOunceBeers,
        "halfbarrels": qty.InHalfBarrelKegs,
    }
    try:
        convert = converters[fmt]
    except KeyError:
        raise TemplateSyntaxError("Unknown volume format: %s" % fmt)
    return float(convert())
| tv = Variable(self._volume_varname)
try:
num = float(tv.resolve(context))
except (VariableDoesNotExist, ValueError):
num = "unknown"
unit = "mL"
make_badge = "badge" in self._extra_args
return self.format(num, unit, make_badge) | identifier_body |
kegweblib.py | from builtins import str
import pytz
from django.conf import settings
from django.template import (
Library,
Node,
TemplateSyntaxError,
Variable,
VariableDoesNotExist,
)
from django.template.defaultfilters import pluralize
from django.urls import reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from pykeg.core import models
from pykeg.core.util import CtoF
from pykeg.util import kbjson, units
from pykeg.web.charts import charts
register = Library()
@register.inclusion_tag("kegweb/mugshot_box.html", takes_context=True)
def mugshot_box(context, user, boxsize=0):
return {
"user": user,
"boxsize": boxsize,
"guest_info": context.get("guest_info", None),
"STATIC_URL": context.get("STATIC_URL"),
}
@register.inclusion_tag("kegweb/picture-gallery.html")
def gallery(picture_or_pictures, thumb_size="span2", gallery_id=""):
c = {}
if not hasattr(picture_or_pictures, "__iter__"):
c["gallery_pictures"] = [picture_or_pictures]
else:
c["gallery_pictures"] = picture_or_pictures
c["thumb_size"] = thumb_size
c["gallery_id"] = gallery_id
return c
@register.inclusion_tag("kegweb/badge.html")
def badge(amount, caption, style="", is_volume=False, do_pluralize=False):
if is_volume:
amount = mark_safe(VolumeNode.format(amount, "mL"))
if do_pluralize:
caption += pluralize(amount)
return {
"badge_amount": amount,
"badge_caption": caption,
"badge_style": style,
}
@register.inclusion_tag("kegweb/includes/progress_bar.html")
def progress_bar(progress_int, extra_css=""):
c = {}
try:
progress_int = max(int(progress_int), 0)
except ValueError:
progress_int = 0
progress_int = min(progress_int, 100)
c["progress_int"] = progress_int
c["extra_css"] = extra_css
if progress_int < 10:
bar_type = "bar-danger"
elif progress_int < 25:
bar_type = "bar-warning"
else:
bar_type = "bar-success"
c["bar_type"] = bar_type
return c
# navitem
@register.tag("navitem")
def navitem(parser, token):
"""{% navitem <viewname> <title> [exact] %}"""
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("%s requires at least 3 tokens" % tokens[0])
return NavitemNode(*tokens[1:])
class NavitemNode(Node):
def __init__(self, *args):
self._viewname = args[0]
self._title = args[1]
self._exact = "exact" in args[2:]
def render(self, context):
viewname = Variable(self._viewname).resolve(context)
title = Variable(self._title).resolve(context)
if viewname.startswith("/"):
urlbase = viewname
else:
urlbase = reverse(viewname)
request_path = context["request_path"]
if self._exact:
active = request_path == urlbase
else:
active = request_path.startswith(urlbase)
if active:
res = '<li class="active">'
else:
res = "<li>"
res += '<a href="%s">%s</a></li>' % (urlbase, title)
return res
# timeago
@register.tag("timeago")
def timeago(parser, token):
"""{% timeago <timestamp> %}"""
tokens = token.contents.split()
if len(tokens) != 2:
raise TemplateSyntaxError("%s requires 2 tokens" % tokens[0])
return TimeagoNode(tokens[1])
class TimeagoNode(Node):
def __init__(self, timestamp_varname):
self._timestamp_varname = timestamp_varname
def render(self, context):
tv = Variable(self._timestamp_varname)
ts = tv.resolve(context)
# Try to set time zone information.
if settings.TIME_ZONE and not settings.USE_TZ:
try:
tz = pytz.timezone(settings.TIME_ZONE)
ts = tz.localize(ts)
except pytz.UnknownTimeZoneError:
pass
iso = ts.isoformat()
alt = timezone.localtime(ts).strftime("%A, %B %d, %Y %I:%M%p")
return '<abbr class="timeago" title="%s">%s</abbr>' % (iso, alt)
# temperature
@register.tag("temperature")
def temperature_tag(parser, token):
"""{% temperature <temp_c> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return TemperatureNode(tokens[1])
class TemperatureNode(Node):
TEMPLATE = "%(amount)s° %(unit)s"
def __init__(self, varname):
self.varname = varname
def render(self, context):
v = Variable(self.varname)
try:
amount = v.resolve(context)
except (VariableDoesNotExist, ValueError):
raise
amount = "unknown"
unit = "C"
kbsite = models.KegbotSite.get()
if kbsite.temperature_display_units == "f":
unit = "F"
amount = CtoF(amount)
return self.TEMPLATE % {"amount": amount, "unit": unit}
# volume
@register.tag("volume")
def volumetag(parser, token):
"""{% volume <amount> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return VolumeNode(tokens[1], tokens[2:])
class VolumeNode(Node):
TEMPLATE = """
<span class="hmeasure %(extra_css)s" title="%(title)s">
<span class="num">%(amount)s</span>
<span class="unit">%(units)s</span>
</span>""".strip()
def __init__(self, volume_varname, extra_args):
self._volume_varname = volume_varname
self._extra_args = extra_args
def render(self, context):
tv = Variable(self._volume_varname)
try:
num = float(tv.resolve(context))
except (VariableDoesNotExist, ValueError):
num = "unknown"
unit = "mL"
make_badge = "badge" in self._extra_args
return self.format(num, unit, make_badge)
@classmethod
def format(cls, amount, units, make_badge=False):
if amount < 0:
amount = 0
ctx = {
"units": units,
"amount": amount,
"title": "%s %s" % (amount, units),
"extra_css": "badge " if make_badge else "",
}
return cls.TEMPLATE % ctx
# drinker
@register.tag("drinker_name")
def drinker_name_tag(parser, token):
"""{% drinker_name <drink_or_user_obj> [nolink] %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return DrinkerNameNode(tokens[1], tokens[2:])
class DrinkerNameNode(Node):
def __init__(self, drink_varname, extra_args):
self._varname = drink_varname
self._extra_args = extra_args
def render(self, context):
obj = Variable(self._varname)
try:
obj = obj.resolve(context)
except (VariableDoesNotExist, ValueError):
obj = None
user = None
if obj:
if isinstance(obj, models.Drink) or isinstance(obj, models.SystemEvent):
user = obj.user
elif isinstance(obj, models.User):
user = obj
if user:
if "nolink" in self._extra_args:
return user.get_full_name()
else:
|
return context["guest_info"]["name"]
# chart
@register.tag("chart")
def chart(parser, tokens):
"""{% chart <charttype> <obj> width height %}"""
tokens = tokens.contents.split()
if len(tokens) < 4:
raise TemplateSyntaxError("chart requires at least 4 arguments")
charttype = tokens[1]
try:
width = int(tokens[-2])
height = int(tokens[-1])
except ValueError:
raise TemplateSyntaxError("invalid width or height")
args = tokens[2:-2]
return ChartNode(charttype, width, height, args)
class ChartNode(Node):
CHART_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox"></div>
<script type="text/javascript">
var chart_%(chart_id)s;
$(document).ready(function() {
var chart_data = %(chart_data)s;
chart_%(chart_id)s = new Highcharts.Chart(chart_data);
});
</script>
<!-- end chart %(chart_id)s -->
"""
ERROR_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox-error">
%(error_str)s
</div>
<!-- end chart %(chart_id)s -->
"""
def __init__(self, charttype, width, height, args):
self._charttype = charttype
self._width = width
self._height = height
self._args = args
self._chart_fn = getattr(charts, "chart_%s" % (self._charttype,), None)
def _get_chart_id(self, context):
# TODO(mikey): Is there a better way to store _CHART_ID?
if not hasattr(context, "_CHART_ID"):
context._CHART_ID = 0
context._CHART_ID += 1
return context._CHART_ID
def show_error(self, error_str):
ctx = {
"error_str": error_str,
"chart_id": 0,
"width": self._width,
"height": self._height,
}
return ChartNode.ERROR_TMPL % ctx
def render(self, context):
if not self._chart_fn:
return self.show_error("Unknown chart type: %s" % self._charttype)
chart_id = self._get_chart_id(context)
obj = Variable(self._args[0]).resolve(context)
metric_volumes = context.get("metric_volumes", False)
temperature_units = context.get("temperature_display_units", "f")
try:
chart_result = self._chart_fn(
obj, metric_volumes=metric_volumes, temperature_units=temperature_units
)
except charts.ChartError as e:
return self.show_error(str(e))
chart_base = {
"chart": {
"borderColor": "#eeeeff",
"borderWidth": 0,
"renderTo": "chart-%s-container" % chart_id,
},
"credits": {
"enabled": False,
},
"legend": {
"enabled": False,
},
"margin": [0, 0, 0, 0],
"title": {
"text": None,
},
"yAxis": {
"labels": {"align": "left"},
"title": {
"text": None,
},
},
}
chart_data = chart_base
for k, v in list(chart_result.items()):
if k not in chart_data:
chart_data[k] = v
elif isinstance(v, dict):
chart_data[k].update(v)
else:
chart_data[k] = v
chart_data = kbjson.dumps(chart_data, indent=None)
ctx = {
"chart_id": chart_id,
"width": self._width,
"height": self._height,
"chart_data": chart_data,
}
return ChartNode.CHART_TMPL % ctx
@register.filter
def volume(text, fmt="pints"):
try:
vol = units.Quantity(float(text))
except ValueError:
return text
if fmt == "pints":
res = vol.InPints()
elif fmt == "liters":
res = vol.InLiters()
elif fmt == "ounces":
res = vol.InOunces()
elif fmt == "gallons":
res = vol.InUSGallons()
elif fmt == "twelveounces":
res = vol.InTwelveOunceBeers()
elif fmt == "halfbarrels":
res = vol.InHalfBarrelKegs()
else:
raise TemplateSyntaxError("Unknown volume format: %s" % fmt)
return float(res)
| return '<a href="%s">%s</a>' % (
reverse("kb-drinker", args=[user.username]),
user.get_full_name(),
) | conditional_block |
kegweblib.py | from builtins import str
import pytz
from django.conf import settings
from django.template import (
Library,
Node,
TemplateSyntaxError,
Variable,
VariableDoesNotExist,
)
from django.template.defaultfilters import pluralize
from django.urls import reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from pykeg.core import models
from pykeg.core.util import CtoF
from pykeg.util import kbjson, units
from pykeg.web.charts import charts
register = Library()
@register.inclusion_tag("kegweb/mugshot_box.html", takes_context=True)
def mugshot_box(context, user, boxsize=0):
return {
"user": user,
"boxsize": boxsize,
"guest_info": context.get("guest_info", None),
"STATIC_URL": context.get("STATIC_URL"),
}
| c = {}
if not hasattr(picture_or_pictures, "__iter__"):
c["gallery_pictures"] = [picture_or_pictures]
else:
c["gallery_pictures"] = picture_or_pictures
c["thumb_size"] = thumb_size
c["gallery_id"] = gallery_id
return c
@register.inclusion_tag("kegweb/badge.html")
def badge(amount, caption, style="", is_volume=False, do_pluralize=False):
if is_volume:
amount = mark_safe(VolumeNode.format(amount, "mL"))
if do_pluralize:
caption += pluralize(amount)
return {
"badge_amount": amount,
"badge_caption": caption,
"badge_style": style,
}
@register.inclusion_tag("kegweb/includes/progress_bar.html")
def progress_bar(progress_int, extra_css=""):
c = {}
try:
progress_int = max(int(progress_int), 0)
except ValueError:
progress_int = 0
progress_int = min(progress_int, 100)
c["progress_int"] = progress_int
c["extra_css"] = extra_css
if progress_int < 10:
bar_type = "bar-danger"
elif progress_int < 25:
bar_type = "bar-warning"
else:
bar_type = "bar-success"
c["bar_type"] = bar_type
return c
# navitem
@register.tag("navitem")
def navitem(parser, token):
"""{% navitem <viewname> <title> [exact] %}"""
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("%s requires at least 3 tokens" % tokens[0])
return NavitemNode(*tokens[1:])
class NavitemNode(Node):
def __init__(self, *args):
self._viewname = args[0]
self._title = args[1]
self._exact = "exact" in args[2:]
def render(self, context):
viewname = Variable(self._viewname).resolve(context)
title = Variable(self._title).resolve(context)
if viewname.startswith("/"):
urlbase = viewname
else:
urlbase = reverse(viewname)
request_path = context["request_path"]
if self._exact:
active = request_path == urlbase
else:
active = request_path.startswith(urlbase)
if active:
res = '<li class="active">'
else:
res = "<li>"
res += '<a href="%s">%s</a></li>' % (urlbase, title)
return res
# timeago
@register.tag("timeago")
def timeago(parser, token):
"""{% timeago <timestamp> %}"""
tokens = token.contents.split()
if len(tokens) != 2:
raise TemplateSyntaxError("%s requires 2 tokens" % tokens[0])
return TimeagoNode(tokens[1])
class TimeagoNode(Node):
def __init__(self, timestamp_varname):
self._timestamp_varname = timestamp_varname
def render(self, context):
tv = Variable(self._timestamp_varname)
ts = tv.resolve(context)
# Try to set time zone information.
if settings.TIME_ZONE and not settings.USE_TZ:
try:
tz = pytz.timezone(settings.TIME_ZONE)
ts = tz.localize(ts)
except pytz.UnknownTimeZoneError:
pass
iso = ts.isoformat()
alt = timezone.localtime(ts).strftime("%A, %B %d, %Y %I:%M%p")
return '<abbr class="timeago" title="%s">%s</abbr>' % (iso, alt)
# temperature
@register.tag("temperature")
def temperature_tag(parser, token):
"""{% temperature <temp_c> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return TemperatureNode(tokens[1])
class TemperatureNode(Node):
TEMPLATE = "%(amount)s° %(unit)s"
def __init__(self, varname):
self.varname = varname
def render(self, context):
v = Variable(self.varname)
try:
amount = v.resolve(context)
except (VariableDoesNotExist, ValueError):
raise
amount = "unknown"
unit = "C"
kbsite = models.KegbotSite.get()
if kbsite.temperature_display_units == "f":
unit = "F"
amount = CtoF(amount)
return self.TEMPLATE % {"amount": amount, "unit": unit}
# volume
@register.tag("volume")
def volumetag(parser, token):
"""{% volume <amount> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return VolumeNode(tokens[1], tokens[2:])
class VolumeNode(Node):
TEMPLATE = """
<span class="hmeasure %(extra_css)s" title="%(title)s">
<span class="num">%(amount)s</span>
<span class="unit">%(units)s</span>
</span>""".strip()
def __init__(self, volume_varname, extra_args):
self._volume_varname = volume_varname
self._extra_args = extra_args
def render(self, context):
tv = Variable(self._volume_varname)
try:
num = float(tv.resolve(context))
except (VariableDoesNotExist, ValueError):
num = "unknown"
unit = "mL"
make_badge = "badge" in self._extra_args
return self.format(num, unit, make_badge)
@classmethod
def format(cls, amount, units, make_badge=False):
if amount < 0:
amount = 0
ctx = {
"units": units,
"amount": amount,
"title": "%s %s" % (amount, units),
"extra_css": "badge " if make_badge else "",
}
return cls.TEMPLATE % ctx
# drinker
@register.tag("drinker_name")
def drinker_name_tag(parser, token):
"""{% drinker_name <drink_or_user_obj> [nolink] %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return DrinkerNameNode(tokens[1], tokens[2:])
class DrinkerNameNode(Node):
def __init__(self, drink_varname, extra_args):
self._varname = drink_varname
self._extra_args = extra_args
def render(self, context):
obj = Variable(self._varname)
try:
obj = obj.resolve(context)
except (VariableDoesNotExist, ValueError):
obj = None
user = None
if obj:
if isinstance(obj, models.Drink) or isinstance(obj, models.SystemEvent):
user = obj.user
elif isinstance(obj, models.User):
user = obj
if user:
if "nolink" in self._extra_args:
return user.get_full_name()
else:
return '<a href="%s">%s</a>' % (
reverse("kb-drinker", args=[user.username]),
user.get_full_name(),
)
return context["guest_info"]["name"]
# chart
@register.tag("chart")
def chart(parser, tokens):
"""{% chart <charttype> <obj> width height %}"""
tokens = tokens.contents.split()
if len(tokens) < 4:
raise TemplateSyntaxError("chart requires at least 4 arguments")
charttype = tokens[1]
try:
width = int(tokens[-2])
height = int(tokens[-1])
except ValueError:
raise TemplateSyntaxError("invalid width or height")
args = tokens[2:-2]
return ChartNode(charttype, width, height, args)
class ChartNode(Node):
CHART_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox"></div>
<script type="text/javascript">
var chart_%(chart_id)s;
$(document).ready(function() {
var chart_data = %(chart_data)s;
chart_%(chart_id)s = new Highcharts.Chart(chart_data);
});
</script>
<!-- end chart %(chart_id)s -->
"""
ERROR_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox-error">
%(error_str)s
</div>
<!-- end chart %(chart_id)s -->
"""
def __init__(self, charttype, width, height, args):
self._charttype = charttype
self._width = width
self._height = height
self._args = args
self._chart_fn = getattr(charts, "chart_%s" % (self._charttype,), None)
def _get_chart_id(self, context):
# TODO(mikey): Is there a better way to store _CHART_ID?
if not hasattr(context, "_CHART_ID"):
context._CHART_ID = 0
context._CHART_ID += 1
return context._CHART_ID
def show_error(self, error_str):
ctx = {
"error_str": error_str,
"chart_id": 0,
"width": self._width,
"height": self._height,
}
return ChartNode.ERROR_TMPL % ctx
def render(self, context):
if not self._chart_fn:
return self.show_error("Unknown chart type: %s" % self._charttype)
chart_id = self._get_chart_id(context)
obj = Variable(self._args[0]).resolve(context)
metric_volumes = context.get("metric_volumes", False)
temperature_units = context.get("temperature_display_units", "f")
try:
chart_result = self._chart_fn(
obj, metric_volumes=metric_volumes, temperature_units=temperature_units
)
except charts.ChartError as e:
return self.show_error(str(e))
chart_base = {
"chart": {
"borderColor": "#eeeeff",
"borderWidth": 0,
"renderTo": "chart-%s-container" % chart_id,
},
"credits": {
"enabled": False,
},
"legend": {
"enabled": False,
},
"margin": [0, 0, 0, 0],
"title": {
"text": None,
},
"yAxis": {
"labels": {"align": "left"},
"title": {
"text": None,
},
},
}
chart_data = chart_base
for k, v in list(chart_result.items()):
if k not in chart_data:
chart_data[k] = v
elif isinstance(v, dict):
chart_data[k].update(v)
else:
chart_data[k] = v
chart_data = kbjson.dumps(chart_data, indent=None)
ctx = {
"chart_id": chart_id,
"width": self._width,
"height": self._height,
"chart_data": chart_data,
}
return ChartNode.CHART_TMPL % ctx
@register.filter
def volume(text, fmt="pints"):
try:
vol = units.Quantity(float(text))
except ValueError:
return text
if fmt == "pints":
res = vol.InPints()
elif fmt == "liters":
res = vol.InLiters()
elif fmt == "ounces":
res = vol.InOunces()
elif fmt == "gallons":
res = vol.InUSGallons()
elif fmt == "twelveounces":
res = vol.InTwelveOunceBeers()
elif fmt == "halfbarrels":
res = vol.InHalfBarrelKegs()
else:
raise TemplateSyntaxError("Unknown volume format: %s" % fmt)
return float(res) | @register.inclusion_tag("kegweb/picture-gallery.html")
def gallery(picture_or_pictures, thumb_size="span2", gallery_id=""): | random_line_split |
ddpg-per.py | import tensorflow as tf
import numpy as np
import gym
import os
##################### hyper parameters ####################
# Hyper Parameters
ENV_NAME = 'Pendulum-v0'
EPISODE = 200
STEP = 200
TEST = 5
LR_A = 0.001 # learning rate for actor
LR_C = 0.002 # learning rate for critic
MEMORY_SIZE = 10000
steps = []
episodes = []
with_noise = False # True = with_noise; False = without_noise
RENDER = False
GAMMA = 0.9 # reward discount
TAU = 0.01 # soft replacement
REPLACE_TARGET_FREQ = 2 # Update frequency of the target network
class OU_noise(object):
def __init__(self, num_actions, action_low_bound= -2, action_high_bound= 2, dt= 0.001,
mu= 0.0, theta= 0.15, max_sigma= 0.2, min_sigma= 0.1):
self.mu = mu # 0.0
self.theta = theta # 0.15
self.sigma = max_sigma # 0.3
self.max_sigma = max_sigma # 0.3
self.min_sigma = min_sigma # 0.1
self.dt = dt # 0.001
self.num_actions = num_actions # 1
self.action_low = action_low_bound # -2
self.action_high = action_high_bound # 2
self.reset()
def reset(self):
self.state = np.zeros(self.num_actions)
# self.state = np.zeros(self.num_actions)
def state_update(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.num_actions) # np.random.randn()生成0,1的随机数
self.state = x + dx
def add_noise(self, action):
self.state_update()
state = self.state
self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, self.dt)
return np.clip(action + state, self.action_low, self.action_high)
class SumTree(object):
data_pointer = 0
def __init__(self, capacity):
self.capacity = capacity # for all priority values
self.tree = np.zeros(2 * capacity - 1)
# [--------------Parent nodes-------------][-------leaves to recode priority-------]
# size: capacity - 1 size: capacity
self.data = list(np.zeros(capacity, dtype=object)) # for all transitions
# [--------------data frame-------------]
# size: capacity
def add(self, p, transition):
tree_idx = self.data_pointer + self.capacity - 1
self.data[self.data_pointer] = transition # update data_frame
self.update(tree_idx, p) # update tree_frame
self.b = np.array(self.data)
self.data_pointer += 1
if self.data_pointer >= self.capacity: # replace when exceed the capacity
self.data_pointer = 0
def update(self, tree_idx, p):
change = p - self.tree[tree_idx]
self.tree[tree_idx] = p
# then propagate the change through tree
while tree_idx != 0: # this method is faster than the recursive loop in the reference code
tree_idx = (tree_idx - 1) // 2
self.tree[tree_idx] += change
def get_leaf(self, v):
parent_idx = 0
while True: # the while loop is faster than the method in the reference code
cl_idx = 2 * parent_idx + 1 # this leaf's left and right kids
cr_idx = cl_idx + 1
if cl_idx >= len(self.tree): # reach bottom, end search
leaf_idx = parent_idx
break
else: # downward search, always search for a higher priority node
if v <= self.tree[cl_idx]:
parent_idx = cl_idx
else:
v -= self.tree[cl_idx]
parent_idx = cr_idx
data_idx = leaf_idx - self.capacity + 1
return leaf_idx, self.tree[leaf_idx], self.data[data_idx]
@property
def total_p(self):
return self.tree[0] # the root
class Memory(object): # stored as ( s, a, r, s_ ) in SumTree
epsilon = 0.001 # small amount to avoid zero priority
alpha = 0.5 # [0~1] convert the importance of TD error to priority
beta = 0.5 # importance-sampling, from initial value increasing to 1
beta_increment_per_sampling = 0.01
abs_err_upper = 1. # clipped abs error
def __init__(self, capacity):
self.tree = SumTree(capacity)
self.full_flag = False
def store(self, transition):
max_p = np.max(self.tree.tree[-self.tree.capacity:])
if max_p == 0:
max_p = self.abs_err_upper
self.tree.add(max_p, transition) # set the max p for new p
def sample(self, n):
b_idx, b_memory, ISWeights = np.empty((n,), dtype=np.int32), \
np.empty((n, self.tree.data[0].size)), \
np.empty((n, 1))
pri_seg = self.tree.total_p / n # priority segment
self.beta = np.min([1., self.beta + self.beta_increment_per_sampling]) # max = 1
min_prob = np.min(self.tree.tree[-self.tree.capacity:]) / self.tree.total_p # for later calculate ISweight
if min_prob == 0:
min_prob = 0.00001
for i in range(n):
a, b = pri_seg * i, pri_seg * (i + 1)
v = np.random.uniform(a, b)
idx, p, data = self.tree.get_leaf(v)
prob = p / self.tree.total_p
ISWeights[i, 0] = np.power(prob/min_prob, -self.beta)
b_idx[i], b_memory[i, :] = idx, data
return b_idx, b_memory, ISWeights
def batch_update(self, tree_idx, abs_errors):
abs_errors += self.epsilon # convert to abs and avoid 0
clipped_errors = np.minimum(abs_errors, self.abs_err_upper)
ps = np.power(clipped_errors, self.alpha)
for ti, p in zip(tree_idx, ps):
self.tree.update(ti, p)
###############################DDPG####################################
class DDPG(object):
def __init__(self, a_dim, s_dim, a_bound, train_dir="./ddpg_models", batch_size=32, MEMORY_SIZE=10000):
self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,
self.memory = Memory(capacity=MEMORY_SIZE)
self.pointer = 0
self.per_batch_size = batch_size
self.learn_step = 0
self.explore_noise = OU_noise(self.a_dim)
self.sess = tf.Session()
self.train_dir = train_dir
if not os.path.isdir(self.train_dir):
os.mkdir(self.train_dir)
self.actor_lr = tf.placeholder(tf.float32, shape=[], name='actor_lr')
self.critic_lr = tf.placeholder(tf.float32, shape=[], name='critic_lr')
self.S = tf.placeholder(tf.float32, [None, s_dim], 's')
self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')
self.R = tf.placeholder(tf.float32, [None, 1], 'r')
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('Actor'):
self.a = self._build_a(self.S, scope='eval', trainable=True)
a_ = self._build_a(self.S_, scope='target', trainable=False)
with tf.variable_scope('Critic'):
# assign self.a = a in memory when calculating q for td_error,
# otherwise the self.a is from Actor when updating Actor
q = self._build_c(self.S, self.a, scope='eval', trainable=True)
q_ = self._build_c(self.S_, a_, scope='target', trainable=False)
# networks parameters
self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')
self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')
# target net replacement
self.soft_replace = [tf.assign(t, (1 - TAU) * t + TAU * e)
for t, e in zip(self.at_params + self.ct_params,
self.ae_params + self.ce_params)]
q_target = self.R + GAMMA * q_
# in the feed_dic for the td_error, the self.a should change to actions in memory
# td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)
self.abs_errors = tf.reduce_sum(tf.abs(q_target - q), axis=1) # for updating Sumtree
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(q_target, q))
self.ctrain = tf.train.AdamOptimizer(self.critic_lr).minimize(self.loss, var_list=self.ce_params)
a_loss = - tf.reduce_mean(q) # maximize the q
self.atrain = tf.train.AdamOptimizer(self.actor_lr).minimize(a_loss, var_list=self.ae_params)
self.sess.run(tf.global_variables_initializer())
def choose_action(self, s, with_noise):
action = self.sess.run(self.a, {self.S: s[np.newaxis, :]})[0]
if with_noise:
noise = self.explore_noise.add_noise(action)
action = action + noise
return action
def learn(self, actor_lr_input, critic_lr_input, per_flag=True):
if per_flag:
tree_idx, batch_memory, ISWeights = self.memory.sample(self.per_batch_size) # sample for learning
batch_states = batch_memory[:,0:3]
batch_actions = batch_memory[:,3:4]
batch_rewards = [data[4] for data in batch_memory]
batch_states_ = batch_memory[:,5:8]
bs = np.array(batch_states)
ba = np.array(batch_actions)
br = np.array(batch_rewards)
bs_ = np.array(batch_states_)
br = br[:, np.newaxis] # Move the original (n,) to the row and add a new column
self.sess.run(self.atrain, {self.S: bs, self.actor_lr: actor_lr_input})
_, abs_errors, cost = self.sess.run([self.ctrain, self.abs_errors, self.loss],
{self.S: bs, self.a: ba, self.R: br, self.S_: bs_,
self.critic_lr: critic_lr_input,
self.ISWeights: ISWeights})
self.memory.batch_update(tree_idx, abs_errors) # update priority
self.learn_step += 1
def store_transition(self, s, a, r, s_):
transition = np.hstack((s, a, r, s_))
self.memory.store(transition)
self.pointer += 1
def _build_a(self, s, scope, trainable):
with tf.variable_scope(scope):
net = tf.layers.dense(s, 30, activation=tf.nn.relu, name='l1', trainable=trainable)
# new_actor_layer = tf.layers.dense(net, 20, activation=tf.nn.relu, name='new_actor_layer', trainable=trainable)
a = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, name='a', trainable=trainable)
return tf.multiply(a, self.a_bound, name='scaled_a')
def _build_c(self, s, a, scope, trainable):
with tf.vari | date_target_q_network(self, episode):
# update target Q netowrk by soft_replace
if episode % REPLACE_TARGET_FREQ == 0:
self.sess.run(self.soft_replace)
# print('episode '+str(episode) +', target Q network params replaced!')
def load_network(self, saver, load_path):
checkpoint = tf.train.get_checkpoint_state(load_path)
if checkpoint and checkpoint.model_checkpoint_path:
# self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
saver.restore(self.sess, tf.train.latest_checkpoint(load_path))
print("Successfully loaded:", checkpoint.model_checkpoint_path)
self.learn_step = int(checkpoint.model_checkpoint_path.split('-')[-1])
else:
print("Could not find old network weights")
def save_network(self, time_step, saver, save_path):
saver.save(self.sess, save_path + 'network', global_step=time_step,
write_meta_graph=False)
############################### training ####################################
def main():
env = gym.make(ENV_NAME)
env = env.unwrapped
env.seed(1)
s_dim = env.observation_space.shape[0]
a_dim = env.action_space.shape[0]
a_bound = env.action_space.high
agent = DDPG(a_dim, s_dim, a_bound)
total_steps = 0
var = 3
for episode in range(EPISODE):
state = env.reset()
ep_reward = 0
# train
for step in range(STEP):
if RENDER:
env.render()
action = agent.choose_action(state, with_noise)
action = np.clip(np.random.normal(action, var), -2, 2)
next_state, reward, done, _ = env.step(action)
agent.store_transition(state,action,reward/10,next_state)
if agent.pointer > MEMORY_SIZE:
var *= .9995 # decay the action randomness
if episode >= 50:
yy = 0
agent.learn(LR_A, LR_C, per_flag=True)
state = next_state
ep_reward += reward
total_steps += 1
if done:
print('episode ', episode, ' finished')
steps.append(total_steps)
episodes.append(episode)
break
if episode % 1 == 0:
if step == STEP - 1:
print('Episode:', episode, ' Reward: %i' % int(ep_reward))
break
# Test every 100 episodes
if episode != 0 and episode % 100 ==0:
total_reward = 0
for i in range(TEST):
state = env.reset()
for j in range(STEP):
if RENDER:
env.render()
action = agent.choose_action(state, False)
state,reward,done,_ = env.step(action)
total_reward += reward
if done:
break
ave_reward = total_reward / TEST
print('episode: ',episode,'Evaluation Average Reward:',ave_reward)
agent.update_target_q_network(episode)
if __name__ == '__main__':
main() | able_scope(scope):
n_l1 = 30
w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)
w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)
net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
# new_critic_layer = tf.layers.dense(net, 300, activation=tf.nn.relu, name='new_critic_layer',
# trainable=trainable)
return tf.layers.dense(net, 1, trainable=trainable) # Q(s,a)
def up | identifier_body |
ddpg-per.py | import tensorflow as tf
import numpy as np
import gym
import os
##################### hyper parameters ####################
# Hyper Parameters
ENV_NAME = 'Pendulum-v0'
EPISODE = 200
STEP = 200
TEST = 5
LR_A = 0.001 # learning rate for actor
LR_C = 0.002 # learning rate for critic
MEMORY_SIZE = 10000
steps = []
episodes = []
with_noise = False # True = with_noise; False = without_noise
RENDER = False
GAMMA = 0.9 # reward discount
TAU = 0.01 # soft replacement
REPLACE_TARGET_FREQ = 2 # Update frequency of the target network
class OU_noise(object):
def __init__(self, num_actions, action_low_bound= -2, action_high_bound= 2, dt= 0.001,
mu= 0.0, theta= 0.15, max_sigma= 0.2, min_sigma= 0.1):
self.mu = mu # 0.0
self.theta = theta # 0.15
self.sigma = max_sigma # 0.3
self.max_sigma = max_sigma # 0.3
self.min_sigma = min_sigma # 0.1
self.dt = dt # 0.001
self.num_actions = num_actions # 1
self.action_low = action_low_bound # -2
self.action_high = action_high_bound # 2
self.reset()
def reset(self):
self.state = np.zeros(self.num_actions)
# self.state = np.zeros(self.num_actions)
def state_update(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.num_actions) # np.random.randn()生成0,1的随机数
self.state = x + dx
def add_noise(self, action):
self.state_update()
state = self.state
self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, self.dt)
return np.clip(action + state, self.action_low, self.action_high)
class SumTree(object):
data_pointer = 0
def __init__(self, capacity):
self.capacity = capacity # for all priority values
self.tree = np.zeros(2 * capacity - 1)
# [--------------Parent nodes-------------][-------leaves to recode priority-------]
# size: capacity - 1 size: capacity
self.data = list(np.zeros(capacity, dtype=object)) # for all transitions
# [--------------data frame-------------]
# size: capacity
def add(self, p, transition):
tree_idx = self.data_pointer + self.capacity - 1
self.data[self.data_pointer] = transition # update data_frame
self.update(tree_idx, p) # update tree_frame
self.b = np.array(self.data)
self.data_pointer += 1
if self.data_pointer >= self.capacity: # replace when exceed the capacity
self.data_pointer = 0
def update(self, tree_idx, p):
change = p - self.tree[tree_idx]
self.tree[tree_idx] = p
# then propagate the change through tree
while tree_idx != 0: # this method is faster than the recursive loop in the reference code
tree_idx = (tree_idx - 1) // 2
self.tree[tree_idx] += change
def get_leaf(self, v):
parent_idx = 0
while True: # the while loop is faster than the method in the reference code
cl_idx = 2 * parent_idx + 1 # this leaf's left and right kids
cr_idx = cl_idx + 1
if cl_idx >= len(self.tree): # reach bottom, end search
leaf_idx = parent_idx
break
else: # downward search, always search for a higher priority node
if v <= self.tree[cl_idx]:
parent_idx = cl_idx
else:
v -= self.tree[cl_idx]
parent_idx = cr_idx
data_idx = leaf_idx - self.capacity + 1
return leaf_idx, self.tree[leaf_idx], self.data[data_idx]
@property
def total_p(self):
return self.tree[0] # the root
class Memory(object): # stored as ( s, a, r, s_ ) in SumTree
epsilon = 0.001 # small amount to avoid zero priority
alpha = 0.5 # [0~1] convert the importance of TD error to priority
beta = 0.5 # importance-sampling, from initial value increasing to 1
beta_increment_per_sampling = 0.01
abs_err_upper = 1. # clipped abs error
def __init__(self, capacity):
self.tree = SumTree(capacity)
self.full_flag = False
def store(self, transition):
max_p = np.max(self.tree.tree[-self.tree.capacity:])
if max_p == 0:
max_p = self.abs_err_upper
self.tree.add(max_p, transition) # set the max p for new p
def sample(self, n):
b_idx, b_memory, ISWeights = np.empty((n,), dtype=np.int32), \
np.empty((n, self.tree.data[0].size)), \
np.empty((n, 1))
pri_seg = self.tree.total_p / n # priority segment
self.beta = np.min([1., self.beta + self.beta_increment_per_sampling]) # max = 1
min_prob = np.min(self.tree.tree[-self.tree.capacity:]) / self.tree.total_p # for later calculate ISweight
if min_prob == 0:
min_prob = 0.00001
for i in range(n):
a, b = pri_seg * i, pri_seg * (i + 1)
v = np.random.uniform(a, b)
idx, p, data = self.tree.get_leaf(v)
prob = p / self.tree.total_p
ISWeights[i, 0] = np.power(prob/min_prob, -self.beta)
b_idx[i], b_memory[i, :] = idx, data
return b_idx, b_memory, ISWeights
def batch_update(self, tree_idx, abs_errors):
abs_errors += self.epsilon # convert to abs and avoid 0
clipped_errors = np.minimum(abs_errors, self.abs_err_upper)
ps = np.power(clipped_errors, self.alpha)
for ti, p in zip(tree_idx, ps):
self.tree.update(ti, p)
###############################DDPG####################################
class DDPG(object):
def __init__(self, a_dim, s_dim, a_bound, train_dir="./ddpg_models", batch_size=32, MEMORY_SIZE=10000):
self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,
self.memory = Memory(capacity=MEMORY_SIZE)
self.pointer = 0
self.per_batch_size = batch_size
self.learn_step = 0
self.explore_noise = OU_noise(self.a_dim)
self.sess = tf.Session()
self.train_dir = train_dir
if not os.path.isdir(self.train_dir):
os.mkdir(self.train_dir)
self.actor_lr = tf.placeholder(tf.float32, shape=[], name='actor_lr')
self.critic_lr = tf.placeholder(tf.float32, shape=[], name='critic_lr')
self.S = tf.placeholder(tf.float32, [None, s_dim], 's')
self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')
self.R = tf.placeholder(tf.float32, [None, 1], 'r')
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('Actor'):
self.a = self._build_a(self.S, scope='eval', trainable=True)
a_ = self._build_a(self.S_, scope='target', trainable=False)
with tf.variable_scope('Critic'):
# assign self.a = a in memory when calculating q for td_error,
# otherwise the self.a is from Actor when updating Actor
q = self._build_c(self.S, self.a, scope='eval', trainable=True)
q_ = self._build_c(self.S_, a_, scope='target', trainable=False)
# networks parameters
self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')
self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')
# target net replacement
self.soft_replace = [tf.assign(t, (1 - TAU) * t + TAU * e)
for t, e in zip(self.at_params + self.ct_params,
self.ae_params + self.ce_params)]
q_target = self.R + GAMMA * q_
# in the feed_dic for the td_error, the self.a should change to actions in memory
# td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)
self.abs_errors = tf.reduce_sum(tf.abs(q_target - q), axis=1) # for updating Sumtree
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(q_target, q))
self.ctrain = tf.train.AdamOptimizer(self.critic_lr).minimize(self.loss, var_list=self.ce_params)
a_loss = - tf.reduce_mean(q) # maximize the q
self.atrain = tf.train.AdamOptimizer(self.actor_lr).minimize(a_loss, var_list=self.ae_params)
self.sess.run(tf.global_variables_initializer())
def choose_action(self, s, with_noise):
action = self.sess.run(self.a, {self.S: s[np.newaxis, :]})[0]
if with_noise:
noise = self.explore_noise.add_noise(action)
action = action + noise
return action
def learn(self, actor_lr_input, critic_lr_input, per_flag=True):
if per_flag:
tree_idx, batch_memory, ISWeights = self.memory.sample(self.per_batch_size) # sample for learning
batch_states = batch_memory[:,0:3]
batch_actions = batch_memory[:,3:4]
batch_rewards = [data[4] for data in batch_memory]
batch_states_ = batch_memory[:,5:8]
bs = np.array(batch_states)
ba = np.array(batch_actions)
br = np.array(batch_rewards)
bs_ = np.array(batch_states_)
br = br[:, np.newaxis] # Move the original (n,) to the row and add a new column
self.sess.run(self.atrain, {self.S: bs, self.actor_lr: actor_lr_input})
_, abs_errors, cost = self.sess.run([self.ctrain, self.abs_errors, self.loss],
{self.S: bs, self.a: ba, self.R: br, self.S_: bs_,
self.critic_lr: critic_lr_input,
self.ISWeights: ISWeights})
self.memory.batch_update(tree_idx, abs_errors) # update priority
self.learn_step += 1
def store_transi | r, s_):
transition = np.hstack((s, a, r, s_))
self.memory.store(transition)
self.pointer += 1
def _build_a(self, s, scope, trainable):
with tf.variable_scope(scope):
net = tf.layers.dense(s, 30, activation=tf.nn.relu, name='l1', trainable=trainable)
# new_actor_layer = tf.layers.dense(net, 20, activation=tf.nn.relu, name='new_actor_layer', trainable=trainable)
a = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, name='a', trainable=trainable)
return tf.multiply(a, self.a_bound, name='scaled_a')
def _build_c(self, s, a, scope, trainable):
with tf.variable_scope(scope):
n_l1 = 30
w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)
w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)
net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
# new_critic_layer = tf.layers.dense(net, 300, activation=tf.nn.relu, name='new_critic_layer',
# trainable=trainable)
return tf.layers.dense(net, 1, trainable=trainable) # Q(s,a)
def update_target_q_network(self, episode):
# update target Q netowrk by soft_replace
if episode % REPLACE_TARGET_FREQ == 0:
self.sess.run(self.soft_replace)
# print('episode '+str(episode) +', target Q network params replaced!')
def load_network(self, saver, load_path):
checkpoint = tf.train.get_checkpoint_state(load_path)
if checkpoint and checkpoint.model_checkpoint_path:
# self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
saver.restore(self.sess, tf.train.latest_checkpoint(load_path))
print("Successfully loaded:", checkpoint.model_checkpoint_path)
self.learn_step = int(checkpoint.model_checkpoint_path.split('-')[-1])
else:
print("Could not find old network weights")
def save_network(self, time_step, saver, save_path):
saver.save(self.sess, save_path + 'network', global_step=time_step,
write_meta_graph=False)
############################### training ####################################
def main():
env = gym.make(ENV_NAME)
env = env.unwrapped
env.seed(1)
s_dim = env.observation_space.shape[0]
a_dim = env.action_space.shape[0]
a_bound = env.action_space.high
agent = DDPG(a_dim, s_dim, a_bound)
total_steps = 0
var = 3
for episode in range(EPISODE):
state = env.reset()
ep_reward = 0
# train
for step in range(STEP):
if RENDER:
env.render()
action = agent.choose_action(state, with_noise)
action = np.clip(np.random.normal(action, var), -2, 2)
next_state, reward, done, _ = env.step(action)
agent.store_transition(state,action,reward/10,next_state)
if agent.pointer > MEMORY_SIZE:
var *= .9995 # decay the action randomness
if episode >= 50:
yy = 0
agent.learn(LR_A, LR_C, per_flag=True)
state = next_state
ep_reward += reward
total_steps += 1
if done:
print('episode ', episode, ' finished')
steps.append(total_steps)
episodes.append(episode)
break
if episode % 1 == 0:
if step == STEP - 1:
print('Episode:', episode, ' Reward: %i' % int(ep_reward))
break
# Test every 100 episodes
if episode != 0 and episode % 100 ==0:
total_reward = 0
for i in range(TEST):
state = env.reset()
for j in range(STEP):
if RENDER:
env.render()
action = agent.choose_action(state, False)
state,reward,done,_ = env.step(action)
total_reward += reward
if done:
break
ave_reward = total_reward / TEST
print('episode: ',episode,'Evaluation Average Reward:',ave_reward)
agent.update_target_q_network(episode)
if __name__ == '__main__':
main() | tion(self, s, a, | identifier_name |
ddpg-per.py | import tensorflow as tf
import numpy as np
import gym
import os
##################### hyper parameters ####################
# Hyper Parameters
ENV_NAME = 'Pendulum-v0'
EPISODE = 200
STEP = 200
TEST = 5
LR_A = 0.001 # learning rate for actor
LR_C = 0.002 # learning rate for critic
MEMORY_SIZE = 10000
steps = []
episodes = []
with_noise = False # True = with_noise; False = without_noise
RENDER = False
GAMMA = 0.9 # reward discount
TAU = 0.01 # soft replacement
REPLACE_TARGET_FREQ = 2 # Update frequency of the target network
class OU_noise(object):
def __init__(self, num_actions, action_low_bound= -2, action_high_bound= 2, dt= 0.001,
mu= 0.0, theta= 0.15, max_sigma= 0.2, min_sigma= 0.1):
self.mu = mu # 0.0
self.theta = theta # 0.15
self.sigma = max_sigma # 0.3
self.max_sigma = max_sigma # 0.3
self.min_sigma = min_sigma # 0.1
self.dt = dt # 0.001
self.num_actions = num_actions # 1
self.action_low = action_low_bound # -2
self.action_high = action_high_bound # 2
self.reset()
def reset(self):
self.state = np.zeros(self.num_actions)
# self.state = np.zeros(self.num_actions)
def state_update(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.num_actions) # np.random.randn()生成0,1的随机数
self.state = x + dx
def add_noise(self, action):
self.state_update()
state = self.state
self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, self.dt)
return np.clip(action + state, self.action_low, self.action_high)
class SumTree(object):
data_pointer = 0
def __init__(self, capacity):
self.capacity = capacity # for all priority values
self.tree = np.zeros(2 * capacity - 1)
# [--------------Parent nodes-------------][-------leaves to recode priority-------]
# size: capacity - 1 size: capacity
self.data = list(np.zeros(capacity, dtype=object)) # for all transitions
# [--------------data frame-------------]
# size: capacity
def add(self, p, transition):
tree_idx = self.data_pointer + self.capacity - 1
self.data[self.data_pointer] = transition # update data_frame
self.update(tree_idx, p) # update tree_frame
self.b = np.array(self.data)
self.data_pointer += 1
if self.data_pointer >= self.capacity: # replace when exceed the capacity
self.data_pointer = 0
def update(self, tree_idx, p):
change = p - self.tree[tree_idx]
self.tree[tree_idx] = p
# then propagate the change through tree
while tree_idx != 0: # this method is faster than the recursive loop in the reference code
tree_idx = (tree_idx - 1) // 2
self.tree[tree_idx] += change
def get_leaf(self, v):
parent_idx = 0
while True: # the while loop is faster than the method in the reference code
cl_idx = 2 * parent_idx + 1 # this leaf's left and right kids
cr_idx = cl_idx + 1
if cl_idx >= len(self.tree): # reach bottom, end search
leaf_idx = parent_idx
break
else: # downward search, always search for a higher priority node
if v <= self.tree[cl_idx]:
parent_idx = cl_idx
else:
v -= self.tree[cl_idx]
parent_idx = cr_idx
data_idx = leaf_idx - self.capacity + 1
return leaf_idx, self.tree[leaf_idx], self.data[data_idx]
@property
def total_p(self):
return self.tree[0] # the root
class Memory(object): # stored as ( s, a, r, s_ ) in SumTree
epsilon = 0.001 # small amount to avoid zero priority
alpha = 0.5 # [0~1] convert the importance of TD error to priority
beta = 0.5 # importance-sampling, from initial value increasing to 1
beta_increment_per_sampling = 0.01
abs_err_upper = 1. # clipped abs error
def __init__(self, capacity):
self.tree = SumTree(capacity)
self.full_flag = False
def store(self, transition):
max_p = np.max(self.tree.tree[-self.tree.capacity:])
if max_p == 0:
max_p = self.abs_err_upper
self.tree.add(max_p, transition) # set the max p for new p
def sample(self, n):
b_idx, b_memory, ISWeights = np.empty((n,), dtype=np.int32), \
np.empty((n, self.tree.data[0].size)), \
np.empty((n, 1))
pri_seg = self.tree.total_p / n # priority segment
self.beta = np.min([1., self.beta + self.beta_increment_per_sampling]) # max = 1
min_prob = np.min(self.tree.tree[-self.tree.capacity:]) / self.tree.total_p # for later calculate ISweight
if min_prob == 0:
min_prob = 0.00001
for i in range(n):
a, b = pri_seg * i, pri_seg * (i + 1)
v = np.random.uniform(a, b)
idx, p, data = self.tree.get_leaf(v)
prob = p / self.tree.total_p
ISWeights[i, 0] = np.power(prob/min_prob, -self.beta)
b_idx[i], b_memory[i, :] = idx, data
return b_idx, b_memory, ISWeights
def batch_update(self, tree_idx, abs_errors):
abs_errors += self.epsilon # convert to abs and avoid 0
clipped_errors = np.minimum(abs_errors, self.abs_err_upper)
ps = np.power(clipped_errors, self.alpha)
for ti, p in zip(tree_idx, ps):
self.tree.update(ti, p)
###############################DDPG####################################
class DDPG(object):
def __init__(self, a_dim, s_dim, a_bound, train_dir="./ddpg_models", batch_size=32, MEMORY_SIZE=10000):
self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,
self.memory = Memory(capacity=MEMORY_SIZE)
self.pointer = 0
self.per_batch_size = batch_size
self.learn_step = 0
self.explore_noise = OU_noise(self.a_dim)
self.sess = tf.Session()
self.train_dir = train_dir
if not os.path.isdir(self.train_dir):
os.mkdir(self.train_dir)
self.actor_lr = tf.placeholder(tf.float32, shape=[], name='actor_lr')
self.critic_lr = tf.placeholder(tf.float32, shape=[], name='critic_lr')
self.S = tf.placeholder(tf.float32, [None, s_dim], 's')
self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')
self.R = tf.placeholder(tf.float32, [None, 1], 'r')
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('Actor'):
self.a = self._build_a(self.S, scope='eval', trainable=True)
a_ = self._build_a(self.S_, scope='target', trainable=False)
with tf.variable_scope('Critic'):
# assign self.a = a in memory when calculating q for td_error,
# otherwise the self.a is from Actor when updating Actor
q = self._build_c(self.S, self.a, scope='eval', trainable=True)
q_ = self._build_c(self.S_, a_, scope='target', trainable=False)
# networks parameters
self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')
self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')
# target net replacement
self.soft_replace = [tf.assign(t, (1 - TAU) * t + TAU * e)
for t, e in zip(self.at_params + self.ct_params,
self.ae_params + self.ce_params)]
q_target = self.R + GAMMA * q_
# in the feed_dic for the td_error, the self.a should change to actions in memory
# td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)
self.abs_errors = tf.reduce_sum(tf.abs(q_target - q), axis=1) # for updating Sumtree
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(q_target, q))
self.ctrain = tf.train.AdamOptimizer(self.critic_lr).minimize(self.loss, var_list=self.ce_params)
a_loss = - tf.reduce_mean(q) # maximize the q
self.atrain = tf.train.AdamOptimizer(self.actor_lr).minimize(a_loss, var_list=self.ae_params)
self.sess.run(tf.global_variables_initializer())
def choose_action(self, s, with_noise):
action = self.sess.run(self.a, {self.S: s[np.newaxis, :]})[0]
if with_noise:
noise = self.explore_noise.add_noise(action)
action = action + noise
return action
def learn(self, actor_lr_input, critic_lr_input, per_flag=True):
if per_flag:
tree_idx, batch_memory, ISWeights = self.memory.sample(self.per_batch_size) # sample for learning
batch_states = batch_memory[:,0:3]
batch_actions = batch_memory[:,3:4]
batch_rewards = [data[4] for data in batch_memory]
batch_states_ = batch_memory[:,5:8]
bs = np.array(batch_states)
ba = np.array(batch_actions)
br = np.array(batch_rewards)
bs_ = np.array(batch_states_)
br = br[:, np.newaxis] # Move the original (n,) to the row and add a new column
self.sess.run(self.atrain, {self.S: bs, self.actor_lr: actor_lr_input})
_, abs_errors, cost = self.sess.run([self.ctrain, self.abs_errors, self.loss],
{self.S: bs, self.a: ba, self.R: br, self.S_: bs_,
self.critic_lr: critic_lr_input,
self.ISWeights: ISWeights})
self.memory.batch_update(tree_idx, abs_errors) # update priority
self.learn_step += 1
def store_transition(self, s, a, r, s_):
transition = np.hstack((s, a, r, s_))
self.memory.store(transition)
self.pointer += 1
def _build_a(self, s, scope, trainable):
with tf.variable_scope(scope):
net = tf.layers.dense(s, 30, activation=tf.nn.relu, name='l1', trainable=trainable)
# new_actor_layer = tf.layers.dense(net, 20, activation=tf.nn.relu, name='new_actor_layer', trainable=trainable)
a = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, name='a', trainable=trainable)
return tf.multiply(a, self.a_bound, name='scaled_a')
def _build_c(self, s, a, scope, trainable):
with tf.variable_scope(scope):
n_l1 = 30
w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)
w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)
net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
# new_critic_layer = tf.layers.dense(net, 300, activation=tf.nn.relu, name='new_critic_layer',
# trainable=trainable)
return tf.layers.dense(net, 1, trainable=trainable) # Q(s,a)
def update_target_q_network(self, episode):
# update target Q netowrk by soft_replace
if episode % REPLACE_TARGET_FREQ == 0:
self.sess.run(self.soft_replace)
# print('episode '+str(episode) +', target Q network params replaced!')
def load_network(self, saver, load_path):
checkpoint = tf.train.get_checkpoint_state(load_path)
if checkpoint and checkpoint.model_checkpoint_path:
# self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
saver.restore(self.sess, tf.train.latest_checkpoint(load_path))
print("Successfully loaded:", checkpoint.model_checkpoint_path)
self.learn_step = int(checkpoint.model_checkpoint_path.split('-')[-1])
else:
print("Could not find old network weights")
def save_network(self, time_step, saver, save_path):
saver.save(self.sess, save_path + 'network', global_step=time_step,
write_meta_graph=False)
############################### training ####################################
def main():
env = gym.make(ENV_NAME)
env = env.unwrapped
env.seed(1)
s_dim = env.observation_space.shape[0]
a_dim = env.action_space.shape[0]
a_bound = env.action_space.high | for episode in range(EPISODE):
state = env.reset()
ep_reward = 0
# train
for step in range(STEP):
if RENDER:
env.render()
action = agent.choose_action(state, with_noise)
action = np.clip(np.random.normal(action, var), -2, 2)
next_state, reward, done, _ = env.step(action)
agent.store_transition(state,action,reward/10,next_state)
if agent.pointer > MEMORY_SIZE:
var *= .9995 # decay the action randomness
if episode >= 50:
yy = 0
agent.learn(LR_A, LR_C, per_flag=True)
state = next_state
ep_reward += reward
total_steps += 1
if done:
print('episode ', episode, ' finished')
steps.append(total_steps)
episodes.append(episode)
break
if episode % 1 == 0:
if step == STEP - 1:
print('Episode:', episode, ' Reward: %i' % int(ep_reward))
break
# Test every 100 episodes
if episode != 0 and episode % 100 ==0:
total_reward = 0
for i in range(TEST):
state = env.reset()
for j in range(STEP):
if RENDER:
env.render()
action = agent.choose_action(state, False)
state,reward,done,_ = env.step(action)
total_reward += reward
if done:
break
ave_reward = total_reward / TEST
print('episode: ',episode,'Evaluation Average Reward:',ave_reward)
agent.update_target_q_network(episode)
if __name__ == '__main__':
main() |
agent = DDPG(a_dim, s_dim, a_bound)
total_steps = 0
var = 3 | random_line_split |
ddpg-per.py | import tensorflow as tf
import numpy as np
import gym
import os
##################### hyper parameters ####################
# Hyper Parameters
ENV_NAME = 'Pendulum-v0'
EPISODE = 200
STEP = 200
TEST = 5
LR_A = 0.001 # learning rate for actor
LR_C = 0.002 # learning rate for critic
MEMORY_SIZE = 10000
steps = []
episodes = []
with_noise = False # True = with_noise; False = without_noise
RENDER = False
GAMMA = 0.9 # reward discount
TAU = 0.01 # soft replacement
REPLACE_TARGET_FREQ = 2 # Update frequency of the target network
class OU_noise(object):
def __init__(self, num_actions, action_low_bound= -2, action_high_bound= 2, dt= 0.001,
mu= 0.0, theta= 0.15, max_sigma= 0.2, min_sigma= 0.1):
self.mu = mu # 0.0
self.theta = theta # 0.15
self.sigma = max_sigma # 0.3
self.max_sigma = max_sigma # 0.3
self.min_sigma = min_sigma # 0.1
self.dt = dt # 0.001
self.num_actions = num_actions # 1
self.action_low = action_low_bound # -2
self.action_high = action_high_bound # 2
self.reset()
def reset(self):
self.state = np.zeros(self.num_actions)
# self.state = np.zeros(self.num_actions)
def state_update(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.num_actions) # np.random.randn()生成0,1的随机数
self.state = x + dx
def add_noise(self, action):
self.state_update()
state = self.state
self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, self.dt)
return np.clip(action + state, self.action_low, self.action_high)
class SumTree(object):
data_pointer = 0
def __init__(self, capacity):
self.capacity = capacity # for all priority values
self.tree = np.zeros(2 * capacity - 1)
# [--------------Parent nodes-------------][-------leaves to recode priority-------]
# size: capacity - 1 size: capacity
self.data = list(np.zeros(capacity, dtype=object)) # for all transitions
# [--------------data frame-------------]
# size: capacity
def add(self, p, transition):
tree_idx = self.data_pointer + self.capacity - 1
self.data[self.data_pointer] = transition # update data_frame
self.update(tree_idx, p) # update tree_frame
self.b = np.array(self.data)
self.data_pointer += 1
if self.data_pointer >= self.capacity: # replace when exceed the capacity
self.data_pointer = 0
def update(self, tree_idx, p):
change = p - self.tree[tree_idx]
self.tree[tree_idx] = p
# then propagate the change through tree
while tree_idx != 0: # this method is faster than the recursive loop in the reference code
tree_idx = (tree_idx - 1) // 2
self.tree[tree_idx] += change
def get_leaf(self, v):
parent_idx = 0
while True: # the while loop is faster than the method in the reference code
cl_idx = 2 * parent_idx + 1 # this leaf's left and right kids
cr_idx = cl_idx + 1
if cl_idx >= len(self.tree): # reach bottom, end search
leaf_idx = parent_idx
break
else: # downward search, always search for a higher priority node
if v <= self.tree[cl_idx]:
parent_idx = cl_idx
else:
v -= self.tree[cl_idx]
parent_idx = cr_idx
data_idx = leaf_idx - self.capacity + 1
return leaf_idx, self.tree[leaf_idx], self.data[data_idx]
@property
def total_p(self):
return self.tree[0] # the root
class Memory(object): # stored as ( s, a, r, s_ ) in SumTree
epsilon = 0.001 # small amount to avoid zero priority
alpha = 0.5 # [0~1] convert the importance of TD error to priority
beta = 0.5 # importance-sampling, from initial value increasing to 1
beta_increment_per_sampling = 0.01
abs_err_upper = 1. # clipped abs error
def __init__(self, capacity):
self.tree = SumTree(capacity)
self.full_flag = False
def store(self, transition):
max_p = np.max(self.tree.tree[-self.tree.capacity:])
if max_p == 0:
max_p = self.abs_err_upper
self.tree.add(max_p, transition) # set the max p for new p
def sample(self, n):
b_idx, b_memory, ISWeights = np.empty((n,), dtype=np.int32), \
np.empty((n, self.tree.data[0].size)), \
np.empty((n, 1))
pri_seg = self.tree.total_p / n # priority segment
self.beta = np.min([1., self.beta + self.beta_increment_per_sampling]) # max = 1
min_prob = np.min(self.tree.tree[-self.tree.capacity:]) / self.tree.total_p # for later calculate ISweight
if min_prob == 0:
min_prob = 0.00001
for i in range(n):
a, b = pri_seg * i, pri_seg * (i + 1)
v = np.random.uniform(a, b)
idx, p, data = self.tree.get_leaf(v)
prob = p / self.tree.total_p
ISWeights[i, 0] = np.power(prob/min_prob, -self.beta)
b_idx[i], b_memory[i, :] = idx, data
return b_idx, b_memory, ISWeights
def batch_update(self, tree_idx, abs_errors):
abs_errors += self.epsilon # convert to abs and avoid 0
clipped_errors = np.minimum(abs_errors, self.abs_err_upper)
ps = np.power(clipped_errors, self.alpha)
for ti, p in zip(tree_idx, ps):
self.tree.update(ti, p)
###############################DDPG####################################
class DDPG(object):
def __init__(self, a_dim, s_dim, a_bound, train_dir="./ddpg_models", batch_size=32, MEMORY_SIZE=10000):
self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,
self.memory = Memory(capacity=MEMORY_SIZE)
self.pointer = 0
self.per_batch_size = batch_size
self.learn_step = 0
self.explore_noise = OU_noise(self.a_dim)
self.sess = tf.Session()
self.train_dir = train_dir
if not os.path.isdir(self.train_dir):
os.mkdir(self.train_dir)
self.actor_lr = tf.placeholder(tf.float32, shape=[], name='actor_lr')
self.critic_lr = tf.placeholder(tf.float32, shape=[], name='critic_lr')
self.S = tf.placeholder(tf.float32, [None, s_dim], 's')
self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')
self.R = tf.placeholder(tf.float32, [None, 1], 'r')
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('Actor'):
self.a = self._build_a(self.S, scope='eval', trainable=True)
a_ = self._build_a(self.S_, scope='target', trainable=False)
with tf.variable_scope('Critic'):
# assign self.a = a in memory when calculating q for td_error,
# otherwise the self.a is from Actor when updating Actor
q = self._build_c(self.S, self.a, scope='eval', trainable=True)
q_ = self._build_c(self.S_, a_, scope='target', trainable=False)
# networks parameters
self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')
self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')
# target net replacement
self.soft_replace = [tf.assign(t, (1 - TAU) * t + TAU * e)
for t, e in zip(self.at_params + self.ct_params,
self.ae_params + self.ce_params)]
q_target = self.R + GAMMA * q_
# in the feed_dic for the td_error, the self.a should change to actions in memory
# td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)
self.abs_errors = tf.reduce_sum(tf.abs(q_target - q), axis=1) # for updating Sumtree
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(q_target, q))
self.ctrain = tf.train.AdamOptimizer(self.critic_lr).minimize(self.loss, var_list=self.ce_params)
a_loss = - tf.reduce_mean(q) # maximize the q
self.atrain = tf.train.AdamOptimizer(self.actor_lr).minimize(a_loss, var_list=self.ae_params)
self.sess.run(tf.global_variables_initializer())
def choose_action(self, s, with_noise):
action = self.sess.run(self.a, {self.S: s[np.newaxis, :]})[0]
if with_noise:
noise = self.explore_noise.add_noise(action)
action = action + noise
return action
def learn(self, actor_lr_input, critic_lr_input, per_flag=True):
if per_flag:
tree_idx, ba | lf.learn_step += 1
def store_transition(self, s, a, r, s_):
transition = np.hstack((s, a, r, s_))
self.memory.store(transition)
self.pointer += 1
def _build_a(self, s, scope, trainable):
with tf.variable_scope(scope):
net = tf.layers.dense(s, 30, activation=tf.nn.relu, name='l1', trainable=trainable)
# new_actor_layer = tf.layers.dense(net, 20, activation=tf.nn.relu, name='new_actor_layer', trainable=trainable)
a = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, name='a', trainable=trainable)
return tf.multiply(a, self.a_bound, name='scaled_a')
def _build_c(self, s, a, scope, trainable):
with tf.variable_scope(scope):
n_l1 = 30
w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)
w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)
net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
# new_critic_layer = tf.layers.dense(net, 300, activation=tf.nn.relu, name='new_critic_layer',
# trainable=trainable)
return tf.layers.dense(net, 1, trainable=trainable) # Q(s,a)
def update_target_q_network(self, episode):
# update target Q netowrk by soft_replace
if episode % REPLACE_TARGET_FREQ == 0:
self.sess.run(self.soft_replace)
# print('episode '+str(episode) +', target Q network params replaced!')
def load_network(self, saver, load_path):
checkpoint = tf.train.get_checkpoint_state(load_path)
if checkpoint and checkpoint.model_checkpoint_path:
# self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
saver.restore(self.sess, tf.train.latest_checkpoint(load_path))
print("Successfully loaded:", checkpoint.model_checkpoint_path)
self.learn_step = int(checkpoint.model_checkpoint_path.split('-')[-1])
else:
print("Could not find old network weights")
def save_network(self, time_step, saver, save_path):
saver.save(self.sess, save_path + 'network', global_step=time_step,
write_meta_graph=False)
############################### training ####################################
def main():
env = gym.make(ENV_NAME)
env = env.unwrapped
env.seed(1)
s_dim = env.observation_space.shape[0]
a_dim = env.action_space.shape[0]
a_bound = env.action_space.high
agent = DDPG(a_dim, s_dim, a_bound)
total_steps = 0
var = 3
for episode in range(EPISODE):
state = env.reset()
ep_reward = 0
# train
for step in range(STEP):
if RENDER:
env.render()
action = agent.choose_action(state, with_noise)
action = np.clip(np.random.normal(action, var), -2, 2)
next_state, reward, done, _ = env.step(action)
agent.store_transition(state,action,reward/10,next_state)
if agent.pointer > MEMORY_SIZE:
var *= .9995 # decay the action randomness
if episode >= 50:
yy = 0
agent.learn(LR_A, LR_C, per_flag=True)
state = next_state
ep_reward += reward
total_steps += 1
if done:
print('episode ', episode, ' finished')
steps.append(total_steps)
episodes.append(episode)
break
if episode % 1 == 0:
if step == STEP - 1:
print('Episode:', episode, ' Reward: %i' % int(ep_reward))
break
# Test every 100 episodes
if episode != 0 and episode % 100 ==0:
total_reward = 0
for i in range(TEST):
state = env.reset()
for j in range(STEP):
if RENDER:
env.render()
action = agent.choose_action(state, False)
state,reward,done,_ = env.step(action)
total_reward += reward
if done:
break
ave_reward = total_reward / TEST
print('episode: ',episode,'Evaluation Average Reward:',ave_reward)
agent.update_target_q_network(episode)
if __name__ == '__main__':
main() | tch_memory, ISWeights = self.memory.sample(self.per_batch_size) # sample for learning
batch_states = batch_memory[:,0:3]
batch_actions = batch_memory[:,3:4]
batch_rewards = [data[4] for data in batch_memory]
batch_states_ = batch_memory[:,5:8]
bs = np.array(batch_states)
ba = np.array(batch_actions)
br = np.array(batch_rewards)
bs_ = np.array(batch_states_)
br = br[:, np.newaxis] # Move the original (n,) to the row and add a new column
self.sess.run(self.atrain, {self.S: bs, self.actor_lr: actor_lr_input})
_, abs_errors, cost = self.sess.run([self.ctrain, self.abs_errors, self.loss],
{self.S: bs, self.a: ba, self.R: br, self.S_: bs_,
self.critic_lr: critic_lr_input,
self.ISWeights: ISWeights})
self.memory.batch_update(tree_idx, abs_errors) # update priority
se | conditional_block |
bot.js | (function () {
"use strict";
const fs = require("fs");
const request = require("request");
require("/code/keys/load-keys.js")();
const Utils = require("/code/global-modules/utils.js");
let CytubeConstructor = require("./my-modules/cytube.js")(Utils);
const DiscordConstructor = require("./my-modules/discord.js");
let CONFIG = JSON.parse(fs.readFileSync("config.json"));
const LINK_REGEX = /(https?:\/\/(?:www\.|(?!www))[ @#a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9]\.[^\s]{2,})/gi;
const WHISPERS_ONLY = Symbol.for("WHISPERS_ONLY");
const WHISPERS_AND_CHATS = Symbol.for("WHISPERS_AND_CHATS");
const NO_COOLDOWN_TRIGGERED = Symbol.for("NO_COOLDOWN");
const IRC = require("coffea");
let client = IRC({
host: CONFIG.CHAT_SERVER_URL,
port: CONFIG.CHAT_SERVER_PORT,
ssl: false,
nick: process.env.TWITCH_USERNAME,
username: process.env.TWITCH_USERNAME,
pass: process.env.TWITCH_OAUTH,
throttling: CONFIG.CLIENT_DEFAULT_THROTTLE,
prefix: CONFIG.COMMAND_PREFIX
});
client.CFG = CONFIG;
CONFIG = null;
client.DISCORD_LINK_COOLDOWNS = {};
client.USER_BAN_TIMERS = {};
client.USER_AFK_DATA = {};
client.USER_COOLDOWNS = {};
client.GLOBAL_COOLDOWNS = {};
client.BAN_EVASION_FLAGS = {};
client.TIMEOUT_POOL = {};
client.PERMABAN_POOL = {};
client.TIMEOUT_TIMER = {};
client.PERMABAN_TIMER = {};
client.HARD_RESET_TIMESTAMP = Number(process.argv[2]) || Date.now();
client.SOFT_RESET_TIMESTAMP = Date.now();
client.HYDRATION_MESSAGE = {};
client.latestTriviaAnswer = null;
let COMMANDS = require("./my-modules/commands.js")(Utils, client, fs);
let SUPINIC_CHANNEL = require("./my-modules/supinic.js")(client);
const DEBUG = COMMANDS.find(i => i.name === "__debug__");
const RESTART = COMMANDS.find(i => i.name === "restart");
const AFK = COMMANDS.find(i => i.name === "afk");
const checkAFK = async (user, chan, evt) => {
if (!client.USER_AFK_DATA[user]) {
return;
}
const data = client.USER_AFK_DATA[user];
if (chan && client.CFG.PAJLADIFIED_CHANNELS.has(chan)) {
evt.reply = pajladify(user, chan, 150, {name: "afk"}).bind(evt);
}
(!data.silent) && setTimeout(() => {
// console.log(user + " is no longer AFK: " + data.text + " (" + Utils.ago(data.date) + ")");
const ago = (new Date(data.date).valueOf() === 0) ? "unknown time" : Utils.ago(data.date);
evt.reply(user + " is no longer AFK: " + data.text + " (" + ago + ")");
}, 1000);
COMMANDS.unsetAFK(data.id).then(() => { client.USER_AFK_DATA[user] = null; });
};
const pajladify = (user, chan, msgLimit, command = {}) => (async function (msg) {
client.BAN_EVASION_FLAGS[chan] = !client.BAN_EVASION_FLAGS[chan];
msg += " " + (client.BAN_EVASION_FLAGS[chan] ? client.CFG.BAN_EVASION_CHARACTER : "");
if (client.CFG.NO_LINK_CHANNELS.has(chan)) {
msg = msg.replace(LINK_REGEX, "[LINK]"); // replace all links with some text
}
const now = new Date();
if (chan === "#forsen") {
msg = msg
.replace(/[ČĘÊÏÑŇŚ]/ig, char => char.normalize("NFD").replace(/[\u0300-\u036f]/g, "")) // replaces all "dangerous" local characters with safe ones
.replace(/poggers/ig, "[POOGERS]") // replace all poggers with something more palatable
.replace(/(PogU)/g, "Pog U") // replace all PogU with Pog U - for better readability
.replace(/twitch\.tv/gi, "[LINK]");
if (now.getDate() === 8 && now.getMonth() === 1 && now.getFullYear() === 2019) {
msg = msg.toLowerCase();
}
}
else if (chan === "#nymn") {
msg = msg
.replace(/https?:\/\/t\.co/g, "twitter") // replace twitter short links
.replace(/\u05E4/g, "9"); // replace "dangerous" hebrew characters
}
let finalMessage = "";
try {
const userBanned = await Utils.pajladaCheck(user, chan, client.CFG);
let fixedUser = (!userBanned) ? user : "[illegal name]";
let ping = "";
if (client.CFG.PING_CHANNELS.has(chan) && !client.CFG.PING_EXCLUDED_COMMANDS.has(command.name)) {
ping = fixedUser + ", ";
}
msg = Utils.safeWrap(ping + msg, msgLimit);
const msgBanned = await Utils.pajladaCheck(msg, chan, client.CFG);
(msgBanned) && console.log("COMMAND REQUEST FAILED - BANNED PHRASE", msg, msgBanned);
finalMessage = (!msgBanned) ? msg : msgBanned.reply;
}
catch (e) {
console.log("COMMAND REQUEST FAILED - NO API AVAILABLE");
console.log(e);
finalMessage = "Cannot comply, pajbot API failed to reply monkaS";
}
if (now.getDate() === 8 && now.getMonth() === 1) {
if (chan === "#nymn") finalMessage = finalMessage.toUpperCase();
if (chan === "#forsen") finalMessage = finalMessage.toLowerCase();
}
this._reply("send", finalMessage);
});
client.checkAFK = checkAFK;
client.restartFn = (msg) => setTimeout(() => RESTART(null, null, null, msg), 5000);
client.reloadModule = (type, evt) => {
switch (type) {
case "commands":
COMMANDS.destroy();
COMMANDS = null;
delete require.cache[require.resolve("./my-modules/commands.js")];
COMMANDS = require("./my-modules/commands.js")(Utils, client, fs);
client.CytubeClient.commands = COMMANDS;
client.DiscordClient.commands = COMMANDS;
break;
case "cytube":
client.CytubeClient.destroy();
client.CytubeClient = null;
delete require.cache[require.resolve("./my-modules/cytube.js")];
CytubeConstructor = require("./my-modules/cytube.js")(Utils);
client.CytubeClient = new CytubeConstructor(client, COMMANDS);
break;
default: return false;
}
evt.reply("Done.");
return true;
};
client.DiscordClient = new DiscordConstructor(client, Utils, process.env.DISCORD_BOT_TOKEN, client.CFG.DISCORD_LINK, COMMANDS);
client.CytubeClient = new CytubeConstructor(client, COMMANDS);
client.on("motd", () => {
client.join(client.CFG.JOIN_CHANNELS);
client.capReq(":twitch.tv/tags twitch.tv/commands twitch.tv/membership");
console.log("Bot: Ready!");
client.send("#supibot", "@Supinic I'm back MrDestructoid");
});
client.on("message", (evt) => {
const user = evt.user.getNick().toLowerCase();
const chan = evt.channel.getName();
const msg = evt.message;
const now = Date.now();
if (chan === "#supinic") {
SUPINIC_CHANNEL.message(
user,
msg,
client.CFG.USER_LEVELS[user] <= -1e6,
client.CFG.USER_LEVELS[user] < 1e6 && (client.GLOBAL_COOLDOWNS[chan] && now <= client.GLOBAL_COOLDOWNS[chan])
);
}
// Skip banned users
if (client.CFG.USER_LEVELS[user] <= -1e6) {
return;
}
// Declare AFK people as non AFK - silently, if necessary
checkAFK(user, chan, evt);
// If it's a stealth channel, skip everything
if (client.CFG.STEALTH_CHANNELS.indexOf(chan) !== -1) {
return;
}
// Mirror messages to discord, if it's a linked channel
if (chan === client.CFG.CHAN.CEREBOT && client.CFG.DISCORD_LINK_ENABLED) {
client.DiscordClient && client.DiscordClient.send(user, msg, chan, evt.tags);
}
// Return if global cooldown did not pass. Does not apply to supermods
if (client.CFG.USER_LEVELS[user] < 1e6 && (client.GLOBAL_COOLDOWNS[chan] && now <= client.GLOBAL_COOLDOWNS[chan])) {
return;
}
if (msg.indexOf("$debug") === 0 && client.CFG.USER_LEVELS[user] >= DEBUG.level) {
DEBUG.exec(user, msg.split("$debug")[1].split(" "), evt);
}
else if (msg === "bot" || msg.indexOf("!afk") === 0) {
let silent = false;
if (msg === "bot") {
evt.reply("smol bot made by @supinic supiniL my commands start with $ - try $help for a list of commands");
}
else if (msg.indexOf("!afk") === 0) {
silent = true;
AFK.exec(user, msg.split(" ").splice(1), evt, true);
}
if (!silent) {
client.GLOBAL_COOLDOWNS[chan] = client.GLOBAL_COOLDOWNS[chan] || 0;
client.GLOBAL_COOLDOWNS[chan] = now + (client.CFG.CHANNEL_GLOBAL_COOLDOWNS[chan] || client.CFG.DEFAULT_GLOBAL_COOLDOWN);
}
}
else if (chan === "#forsen" && (user === "forsenai" || user === "snusbot")) {
if (
(user === "forsenai" && msg.indexOf("forsenThink") !== -1) ||
(user === "snusbot" && msg.indexOf("question/hint/clue") !== -1)
) {
let query = msg
.replace(" forsenThink", "")
.replace(/.*clue is(.*)" OMGScoots(.*)/, "$1")
.replace(/ /g, "+");
let url = "http://www.j-archive.com/search.php?submit=Search&search=" + query;
request(url, (err, data, body) => {
let parsedData = body.match(/class="search_correct_response">(.*?)<\/span>/);
let answer = (parsedData && Utils.removeHTML(parsedData[1])) || null;
if (answer) {
client.latestTriviaAnswer = answer;
client.AUTO_TRIVIA && evt.reply(answer);
// console.log("[" + new Date().simpleDateTime() + "] Answer: ", answer);
}
else {
client.latestTriviaAnswer = "eShrug idk kev";
// console.log("idk");
}
});
}
}
else if (chan === "#forsen" && user === "gazatu2" && msg.has("question:")) {
const question = (msg.match(/question: (.*)/) || [])[1];
COMMANDS.autoGazatu(question)
.then(answer => {
if (client.AUTO_GAZ && answer) {
evt.reply(answer);
}
// console.log(`GAZATU TRIVIA [${answer || "<no answer found>"}] <- ${question}`);
})
.catch(err => console.log("[GAZATU TRIVIA ERROR] ", err));
}
});
client.on("command", (evt) => {
if (!evt.channel) {
console.log("An event with no channel?", evt);
return;
}
const args = (evt.args || []).map(i => i.replace(new RegExp(client.CFG.BAN_EVASION_CHARACTER, "g"), "").trim());
const cmd = evt.cmd.toLowerCase(); // @todo remove this, it is just temporary
const chan = evt.channel.getName().toLowerCase();
const user = evt.user.getNick().toLowerCase();
const now = Date.now();
client.CFG.USER_LEVELS[user] = client.CFG.USER_LEVELS[user] || 1;
const command = COMMANDS.find(i =>
cmd === i.name.toLowerCase() || (Array.isArray(i.aliases) && i.aliases.some(j => cmd === j.toLowerCase()))
);
if (!command) {
return;
}
// Skip own commands, if that would ever happen for some reason.
if (user === "supibot") return;
console.log(`CMD REQUEST (${chan}) [${new Date().simpleDateTime()}] <${user}>: ${client.CFG.COMMAND_PREFIX}${cmd} ${(args && args.join(" ")) || ""}`);
// Skip banned users
if (client.CFG.USER_LEVELS[user] <= -1e6) {
console.log("CMD REQUEST FAILED - BANNED");
return;
}
// Declare AFK people as non AFK - silently, if necessary
// checkAFK(user, chan, evt);
// If it's a stealth channel, skip everything
if (client.CFG.STEALTH_CHANNELS.indexOf(chan) !== -1) {
console.log("CMD REQUEST FAILED - STEALTH CHANNEL");
return;
}
// Skip if global cooldown hasn't passed yet. Doesn't apply to supermods.
// Also doesn't apply to read-only commands, those never reply - no global cooldown is needed.
if (!command.readOnly && client.CFG.USER_LEVELS[user] < 1e6 && now <= client.GLOBAL_COOLDOWNS[chan]) {
console.log("CMD REQUEST FAILED - GLOBAL COOLDOWN", (client.GLOBAL_COOLDOWNS[chan] - now));
return;
}
client.USER_COOLDOWNS[user] = client.USER_COOLDOWNS[user] || {};
// Skip execution if the user cooldown isn't expired
if (client.CFG.USER_LEVELS[user] < 1e6 && now <= client.USER_COOLDOWNS[user][command.name]) {
const time = (client.USER_COOLDOWNS[user][command.name] - now) / 1000;
client.send("#supibot",
".w " + user + " " +
"Your cooldown for " + client.CFG.COMMAND_PREFIX + cmd + " " +
"has not expired yet: " + time + " seconds remaining."
);
console.log("CMD REQUEST FAILED - USER COOLDOWN", (client.USER_COOLDOWNS[user][command.name] - now));
return;
}
// Set the global cooldown in all cases
client.GLOBAL_COOLDOWNS[chan] = now + (client.CFG.CHANNEL_GLOBAL_COOLDOWNS[chan] || client.CFG.DEFAULT_GLOBAL_COOLDOWN);
const msgLimit = client.CFG.CHANNEL_MSG_LIMIT[chan] || client.CFG.DEFAULT_MSG_LIMIT || 450;
// If it's a protected channel, pajbot-check it. This is done by overwriting the reply function with a call to the snusbot API, and checking its result
if (client.CFG.PAJLADIFIED_CHANNELS.indexOf(chan) !== -1) {
evt.reply = pajladify(user, chan, msgLimit, command).bind(evt);
}
// If it isn't, modify the reply function so that we always send the ban-evasion character and do some basic banphrase checking.
else {
evt.reply = (function (msg) {
const isDiscord = (Object.values(client.CFG.DISCORD_LINK).indexOf(chan) !== -1);
let ping = "";
if (client.CFG.PING_CHANNELS.has(chan) && !client.CFG.PING_EXCLUDED_COMMANDS.has(command.name)) {
ping = user + ", ";
}
client.BAN_EVASION_FLAGS[chan] = !client.BAN_EVASION_FLAGS[chan];
msg = ping
+ msg + " " | + (client.BAN_EVASION_FLAGS[chan] ? client.CFG.BAN_EVASION_CHARACTER : "");
if (Utils.globalCheck(msg, client.CFG.GLOBAL_BANPHRASES)) {
for (const phrase of client.CFG.GLOBAL_BANPHRASES) {
msg = msg.replace(new RegExp(phrase, "gi"), "[REDACTED]");
}
}
this._reply("send", Utils.safeWrap(msg, msgLimit));
(isDiscord) && setTimeout(() =>
client.DiscordClient.send(user, " used " + client.CFG.COMMAND_PREFIX + cmd + ": " + msg, chan, evt.tags),
500
);
}).bind(evt);
}
if (args.join(" ").length > 400) {
evt.reply(":z message too long.");
console.log("CMD REQUEST FAILED - MESSAGE TOO LONG", args.join(" ").length);
}
else if (command.blacklist && command.blacklist.some(i => i === chan)) {
evt.reply("This command cannot be executed in this channel.");
console.log("CMD REQUEST FAILED - CHANNEL BLACKLISTED");
}
else if (command.whitelist && !command.whitelist.some(i => i === chan)) {
evt.reply("This command cannot be executed in this channel.");
console.log("CMD REQUEST FAILED - CHANNEL NOT WHITELISTED");
}
else if (typeof command.level !== "undefined" && (client.CFG.USER_LEVELS[user] || 0) < command.level) {
evt.reply("You don't have the sufficient level to execute that command.");
console.log("CMD REQUEST FAILED - NO USER LEVEL");
}
else if (command.whispers === WHISPERS_ONLY) {
evt.reply("This command is available via whispers only");
console.log("CMD REQUEST FAILED - COMMAND IS WHISPER ONLY");
}
else {
let result = null;
if (user !== "supinic") {
client.send("#supibot", `CMD | ${chan} | ${user} | ${client.CFG.COMMAND_PREFIX}${cmd} ${args.join(" ")}`);
}
try {
result = command.exec(user, args, evt);
}
catch (e) {
evt.reply("monkaS command execution failed!");
console.log("CMD REQUEST FAILED - INTERNAL ERROR");
console.log(e.toString());
return;
}
// Apply a cooldown, if the command has one. Skip if the command requested for no specific cooldown to be triggered - usually happens in failed invocations
if (result !== NO_COOLDOWN_TRIGGERED && command.cooldown) {
if (typeof client.CFG.CHANNEL_USER_COOLDOWNS[chan] === "undefined") {
client.USER_COOLDOWNS[user][command.name] = now + (command.cooldown * 1000);
}
else {
// Apply the larger cooldown: channel-specific or command-specific.
const cd = Math.max((command.cooldown * 1000), client.CFG.CHANNEL_USER_COOLDOWNS[chan]);
client.USER_COOLDOWNS[user][command.name] = now + cd;
}
}
}
});
client.on("data", (evt) => {
const skipRegex = /ERR_UNKNOWNCOMMAND|USERSTATE|PRIVMSG|JOIN|PART|MODE|PING|RPL*|CAP/gim;
if (skipRegex.test(evt.command)) {
return;
}
if (evt.command === "CLEARCHAT") {
const now = Date.now();
const targetUser = evt.trailing;
const targetChannel = evt.params;
const logsURL = (usr, chan) => `https://api.gempir.com/channel/${chan.replace(/#/, "")}/user/${usr}`;
// Time out
if (evt.string.indexOf("ban-duration") !== -1) {
client.USER_BAN_TIMERS[targetUser] = client.USER_BAN_TIMERS[targetUser] || 0;
const time = evt.string.match(/ban-duration=(\d+)/);
const length = Number(time[1]);
const filterLength = client.CFG.CHANNEL_BAN_THRESHOLD[targetChannel] || client.CFG.DEFAULT_BAN_THRESHOLD;
if (
(!client.CFG.USERS_ALWAYS_SHOW_BAN.has(targetUser)) // if the target is NOT a user who should always be shown,
&& (length < filterLength)// and if the timeout length is lower than the channel threshold, or the default threshold if the channel has none,
) {
return; // then do not log the timeout.
}
client.TIMEOUT_TIMER[targetChannel] = client.TIMEOUT_TIMER[targetChannel] || 1;
// Only log the message if it has not been repeated again in a while
if ((now - client.USER_BAN_TIMERS[targetUser]) > 5000) {
let logsLink = "";
if ((targetChannel === "#forsen" || targetChannel === "#nymn") && length >= 7200) {
logsLink = " | " + logsURL(targetUser, targetChannel);
}
// No pooling necessary if the time passed between two timeouts is long enough
if (now - client.TIMEOUT_TIMER[targetChannel] > client.CFG.TIMEOUT_POOLING_TIMEOUT) {
client.TIMEOUT_TIMER[targetChannel] = now;
client.send("#supibot", "BAN | " + targetChannel + " | " + targetUser + " | " + length + logsLink);
}
// If not, and the timeout pool object doesn't exist, create it
else if (!client.TIMEOUT_POOL[targetChannel]) {
client.TIMEOUT_POOL[targetChannel] = {
timeout: setTimeout(() => client.TIMEOUT_POOL[targetChannel].fn(), client.CFG.TIMEOUT_POOLING_TIMEOUT),
users: [targetUser],
lengths: [length],
fn: () => {
const obj = client.TIMEOUT_POOL[targetChannel];
const joined = obj.users.map((i, ind) => i + " " + obj.lengths[ind]).join(", ");
const msg = (joined.length <= 450)
? joined
: (obj.users.length + "x for a total of " + obj.lengths.reduce((acc, cur) => acc += cur) + " sec");
client.send("#supibot", (obj.users.length > 1 ? "GROUP " : "") + "BAN | " + targetChannel + " | " + msg);
client.TIMEOUT_POOL[targetChannel] = null;
}
};
}
// If not, and the timeout pool object exists, append the user and timeout to it and reset the timeout and the timer
else {
const pool = client.TIMEOUT_POOL[targetChannel];
clearTimeout(pool.timeout);
pool.users.push(targetUser);
pool.lengths.push(length);
pool.timeout = setTimeout(() => pool.fn(), client.CFG.TIMEOUT_POOLING_TIMEOUT);
}
client.USER_BAN_TIMERS[targetUser] = now;
client.TIMEOUT_TIMER[targetChannel] = now;
}
// If the timeout is very long (>2 hours), it is rarely automated. In that case, add a file log
if (length >= 7200) {
console.log(`LONG TIMEOUT [${new Date().simpleDateTime()}] (${targetChannel}) ${targetUser} (length: ${length})`);
}
}
// Permaban
else if (targetUser) {
client.PERMABAN_TIMER[targetChannel] = client.PERMABAN_TIMER[targetChannel] || 1;
if (now - client.PERMABAN_TIMER[targetChannel] > client.CFG.TIMEOUT_POOLING_TIMEOUT) {
client.PERMABAN_TIMER[targetChannel] = now;
client.send("#supibot", "PERMABAN | " + targetChannel + " | " + targetUser);
}
// If not, and the timeout pool object doesn't exist, create it
else if (!client.PERMABAN_POOL[targetChannel]) {
client.PERMABAN_POOL[targetChannel] = {
timeout: setTimeout(() => client.PERMABAN_POOL[targetChannel].fn(), client.CFG.TIMEOUT_POOLING_TIMEOUT),
users: [targetUser],
fn: () => {
const obj = client.PERMABAN_POOL[targetChannel];
const joined = obj.users.join(", ");
const msg = (joined.length <= 450) ? joined : (obj.users.length + "x");
client.send("#supibot", (obj.users.length > 1 ? "GROUP " : "") + "PERMABAN | " + targetChannel + " | " + msg);
client.PERMABAN_POOL[targetChannel] = null;
}
};
}
else {
const pool = client.PERMABAN_POOL[targetChannel];
clearTimeout(pool.timeout);
pool.users.push(targetUser);
pool.timeout = setTimeout(() => pool.fn(), client.CFG.TIMEOUT_POOLING_TIMEOUT);
}
client.USER_BAN_TIMERS[targetUser] = now;
client.PERMABAN_TIMER[targetChannel] = now;
// Always log to file
console.log(`PERMABAN [${new Date().simpleDateTime()}] (${targetChannel}) ${targetUser}`);
}
// Clear chat
else {
// client.send("#supibot", "CLR | " + evt.params);
}
}
else if (evt.command === "RECONNECT") {
RESTART.exec(null, null, evt, true);
}
else if (evt.params === "#supinic") {
SUPINIC_CHANNEL.data(evt.command, evt.string, evt);
}
else if (evt.command === "HOSTTARGET") {
const params = evt.trailing.split(" ");
if (params[0] === "-") {
return;
}
client.send("#supibot", "HOST | FROM " + evt.params.substr(1) + " | TO " + params[0] + " | VIEWERS " + params[1]);
}
else if (evt.command === "USERNOTICE") {
const type = evt.string.replace(/.*;msg-id=(.*?);.*/, "$1");
const user = evt.string.replace(/.*;display-name=(.*?);.*/, "$1");
const channel = evt.string.replace(/.* (#.*?)/, "$1");
const now = "[" + new Date().fullDateTime() + "]";
const PLANS = {
1000: "$5",
2000: "$10",
3000: "$25",
Prime: "Prime"
};
if (client.CFG.STEALTH_CHANNELS.has(channel.replace(/ :.*/, ""))) {
return;
}
switch (type) {
case "sub":
case "resub": {
const months = evt.string.replace(/.*;msg-param-months=(.*?);.*/, "$1");
const plan = evt.string.replace(/.*;msg-param-sub-plan=(.*?);.*/, "$1");
client.send("#supibot", type.toUpperCase() + " | " + channel + " | " + months + "m | " + " TIER " + PLANS[plan] + " | " + user);
console.log(type.toUpperCase(), now, "(" + channel + ")", months + "m | " + " TIER " + PLANS[plan] + " | " + user);
break;
}
case "giftpaidupgrade": {
const gifter = evt.string.replace(/.*msg-param-sender-name=(.*?);.*/, "$1");
client.send("#supibot", "SUBTEMBER | " + channel + " | " + user + " CONTINUES GIFT FROM " + gifter);
console.log("SUBTEMBER", now, "(" + channel + ")", user + " CONTINUES GIFT FROM " + gifter);
break;
}
case "subgift": {
const recipient = evt.string.replace(/.*msg-param-recipient-display-name=(.*?);.*/, "$1");
const months = evt.string.replace(/.*;msg-param-months=(.*?);.*/, "$1");
const plan = evt.string.replace(/.*;msg-param-sub-plan=(.*?);.*/, "$1");
console.log("GIFTSUB", now, "(" + channel + ")", "FROM " + user + " TO " + recipient + " | " + months + "m | " + " TIER " + PLANS[plan]);
break;
}
case "submysterygift": {
const count = evt.string.replace(/.*msg-param-mass-gift-count=(.*?);.*/, "$1");
client.send("#supibot", "MASS GIFTSUB | FROM " + user + " | " + count + "x | " + channel);
console.log("MASS GIFTSUB ", now, "(" + channel + ")", "FROM " + user + " | " + count + "x");
break;
}
default: console.log("UNRECOGNIZED SUB EVENT", now, evt.command, "|", evt.trailing, "|", evt.string);
}
}
else {
const now = "[" + new Date().fullDateTime() + "]";
console.log(evt.command, now, evt.trailing, "|", evt.string);
}
if (evt.command !== "WHISPER") {
return;
}
let msg = evt.trailing;
let user = evt.prefix.split("!")[0];
console.log("WHISPER", user, msg, msg.indexOf("$"));
if (msg.indexOf("$") === 0) {
const cmdString = msg.split(" ")[0].split("$")[1];
const args = msg.split(" ").slice(1);
const now = Date.now();
// Skip banned users
if (client.CFG.USER_LEVELS[user] <= -1e6) {
return;
}
client.USER_COOLDOWNS[user] = client.USER_COOLDOWNS[user] || {};
// Change the reply function so that instead of replying to the event (in whispers, we don't have any channel available),
// the bot whispers the user via #supinic channel
evt.reply = (function (msg) {
client.send(client.CFG.CHAN.SUPINIC, ".w " + user + " " + msg);
}).bind(evt);
// Return if global cooldown did not pass yet. Does not apply to supermods
if (client.CFG.USER_LEVELS[user] < 1e6 && now <= client.USER_COOLDOWNS[user].whispers) {
return;
}
const command = COMMANDS.find(i => i.name === cmdString && i.whisper === WHISPERS_ONLY || i.whisper === WHISPERS_AND_CHATS);
if (command) {
client.USER_COOLDOWNS[user].whispers = now + client.CFG.DEFAULT_GLOBAL_COOLDOWN;
if (typeof command.level !== "undefined" && (client.CFG.USER_LEVELS[user] || 0) < command.level) {
evt.reply("You need a level of " + command.level + " to execute that command.");
return;
}
else {
try {
// Always notify @Supinic that someone whispered the bot
client.send("#supibot", ".w supinic " + user + ": " + msg);
command.exec(user, args, evt);
}
catch (e) {
evt.reply("monkaS command execution failed!");
console.log("WHISPER ERROR CAUGHT!\n", e);
}
}
return;
}
}
// Log all non-command whispers. Also, notify @Supinic about them
if (user !== "supinic") {
client.send("#supibot", ".w supinic " + user + " said: " + msg);
}
});
client.on("error", (err, evt) => {
console.log("IRC error!", err, evt);
client.restartFn("CONNECTION LOST");
});
// client.AKYLUS_RAFFLE = setInterval(() => client.send("#akylus_", "!raffle 10k 600"), 27e5);
process.on("beforeExit", () => {
client.CytubeClient.destroy();
client.DiscordClient.destroy();
fs.writeFileSync("config.json", JSON.stringify(client.CONFIG, null, 2));
});
client.APPLE_RAFFLE_ENABLED = false;
client.APPLE_RAFFLE_INTERVAL = setInterval(() => {
(client.APPLE_RAFFLE_ENABLED) && client.send("#appledcs", "!multiraffle 10000 600");
}, 3600000);
})(); | random_line_split | |
bot.js | (function () {
"use strict";
const fs = require("fs");
const request = require("request");
require("/code/keys/load-keys.js")();
const Utils = require("/code/global-modules/utils.js");
let CytubeConstructor = require("./my-modules/cytube.js")(Utils);
const DiscordConstructor = require("./my-modules/discord.js");
let CONFIG = JSON.parse(fs.readFileSync("config.json"));
const LINK_REGEX = /(https?:\/\/(?:www\.|(?!www))[ @#a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9]\.[^\s]{2,})/gi;
const WHISPERS_ONLY = Symbol.for("WHISPERS_ONLY");
const WHISPERS_AND_CHATS = Symbol.for("WHISPERS_AND_CHATS");
const NO_COOLDOWN_TRIGGERED = Symbol.for("NO_COOLDOWN");
const IRC = require("coffea");
let client = IRC({
host: CONFIG.CHAT_SERVER_URL,
port: CONFIG.CHAT_SERVER_PORT,
ssl: false,
nick: process.env.TWITCH_USERNAME,
username: process.env.TWITCH_USERNAME,
pass: process.env.TWITCH_OAUTH,
throttling: CONFIG.CLIENT_DEFAULT_THROTTLE,
prefix: CONFIG.COMMAND_PREFIX
});
client.CFG = CONFIG;
CONFIG = null;
client.DISCORD_LINK_COOLDOWNS = {};
client.USER_BAN_TIMERS = {};
client.USER_AFK_DATA = {};
client.USER_COOLDOWNS = {};
client.GLOBAL_COOLDOWNS = {};
client.BAN_EVASION_FLAGS = {};
client.TIMEOUT_POOL = {};
client.PERMABAN_POOL = {};
client.TIMEOUT_TIMER = {};
client.PERMABAN_TIMER = {};
client.HARD_RESET_TIMESTAMP = Number(process.argv[2]) || Date.now();
client.SOFT_RESET_TIMESTAMP = Date.now();
client.HYDRATION_MESSAGE = {};
client.latestTriviaAnswer = null;
let COMMANDS = require("./my-modules/commands.js")(Utils, client, fs);
let SUPINIC_CHANNEL = require("./my-modules/supinic.js")(client);
const DEBUG = COMMANDS.find(i => i.name === "__debug__");
const RESTART = COMMANDS.find(i => i.name === "restart");
const AFK = COMMANDS.find(i => i.name === "afk");
const checkAFK = async (user, chan, evt) => {
if (!client.USER_AFK_DATA[user]) {
return;
}
const data = client.USER_AFK_DATA[user];
if (chan && client.CFG.PAJLADIFIED_CHANNELS.has(chan)) {
evt.reply = pajladify(user, chan, 150, {name: "afk"}).bind(evt);
}
(!data.silent) && setTimeout(() => {
// console.log(user + " is no longer AFK: " + data.text + " (" + Utils.ago(data.date) + ")");
const ago = (new Date(data.date).valueOf() === 0) ? "unknown time" : Utils.ago(data.date);
evt.reply(user + " is no longer AFK: " + data.text + " (" + ago + ")");
}, 1000);
COMMANDS.unsetAFK(data.id).then(() => { client.USER_AFK_DATA[user] = null; });
};
const pajladify = (user, chan, msgLimit, command = {}) => (async function (msg) {
client.BAN_EVASION_FLAGS[chan] = !client.BAN_EVASION_FLAGS[chan];
msg += " " + (client.BAN_EVASION_FLAGS[chan] ? client.CFG.BAN_EVASION_CHARACTER : "");
if (client.CFG.NO_LINK_CHANNELS.has(chan)) {
msg = msg.replace(LINK_REGEX, "[LINK]"); // replace all links with some text
}
const now = new Date();
if (chan === "#forsen") {
msg = msg
.replace(/[ČĘÊÏÑŇŚ]/ig, char => char.normalize("NFD").replace(/[\u0300-\u036f]/g, "")) // replaces all "dangerous" local characters with safe ones
.replace(/poggers/ig, "[POOGERS]") // replace all poggers with something more palatable
.replace(/(PogU)/g, "Pog U") // replace all PogU with Pog U - for better readability
.replace(/twitch\.tv/gi, "[LINK]");
if (now.getDate() === 8 && now.getMonth() === 1 && now.getFullYear() === 2019) {
msg = msg.toLowerCase();
}
}
else if (chan === "#nymn") {
msg = msg
.replace(/https?:\/\/t\.co/g, "twitter") // replace twitter short links
.replace(/\u05E4/g, "9"); // replace "dangerous" hebrew characters
}
let finalMessage = "";
try {
const userBanned = await Utils.pajladaCheck(user, chan, client.CFG);
let fixedUser = (!userBanned) ? user : "[illegal name]";
let ping = "";
if (client.CFG.PING_CHANNELS.has(chan) && !client.CFG.PING_EXCLUDED_COMMANDS.has(command.name)) {
ping = fixedUser + ", ";
}
msg = Utils.safeWrap(ping + msg, msgLimit);
const msgBanned = await Utils.pajladaCheck(msg, chan, client.CFG);
(msgBanned) && console.log("COMMAND REQUEST FAILED - BANNED PHRASE", msg, msgBanned);
finalMessage = (!msgBanned) ? msg : msgBanned.reply;
}
catch (e) {
console.log("COMMAND REQUEST FAILED - NO API AVAILABLE");
console.log(e);
finalMessage = "Cannot comply, pajbot API failed to reply monkaS";
}
if (now.getDate() === 8 && now.getMonth() === 1) {
if (chan === "#nymn") finalMessage = finalMessage.toUpperCase();
if (chan === "#forsen") finalMessage = finalMessage.toLowerCase();
}
this._reply("send", finalMessage);
});
client.checkAFK = checkAFK;
client.restartFn = (msg) => setTimeout(() => RESTART(null, null, null, msg), 5000);
client.reloadModule = (type, evt) => {
switch (type) {
case "commands":
COMMANDS.destroy();
COMMANDS = null;
delete require.cache[require.resolve("./my-modules/commands.js")];
COMMANDS = require("./my-modules/commands.js")(Utils, client, fs);
client.CytubeClient.commands = COMMANDS;
client.DiscordClient.commands = COMMANDS;
break;
case "cytube":
client.CytubeClient.destroy();
client.CytubeClient = null;
delete require.cache[require.resolve("./my-modules/cytube.js")];
CytubeConstructor = require("./my-modules/cytube.js")(Utils);
client.CytubeClient = new CytubeConstructor(client, COMMANDS);
break;
default: return false;
}
evt.reply("Done.");
return true;
};
client.DiscordClient = new DiscordConstructor(client, Utils, process.env.DISCORD_BOT_TOKEN, client.CFG.DISCORD_LINK, COMMANDS);
client.CytubeClient = new CytubeConstructor(client, COMMANDS);
client.on("motd", () => {
client.join(client.CFG.JOIN_CHANNELS);
client.capReq(":twitch.tv/tags twitch.tv/commands twitch.tv/membership");
console.log("Bot: Ready!");
client.send("#supibot", "@Supinic I'm back MrDestructoid");
});
client.on("message", (evt) => {
const user = evt.user.getNick().toLowerCase();
const chan = evt.channel.getName();
const msg = evt.message;
const now = Date.now();
if (chan === "#supinic") {
SUPINIC_CHANNEL.message(
user,
msg,
client.CFG.USER_LEVELS[user] <= -1e6,
client.CFG.USER_LEVELS[user] < 1e6 && (client.GLOBAL_COOLDOWNS[chan] && now <= client.GLOBAL_COOLDOWNS[chan])
);
}
// Skip banned users
if (client.CFG.USER_LEVELS[user] <= -1e6) {
return;
}
// Declare AFK people as non AFK - silently, if necessary
checkAFK(user, chan, evt);
// If it's a stealth channel, skip everything
if (client.CFG.STEALTH_CHANNELS.indexOf(chan) !== -1) {
return;
}
// Mirror messages to discord, if it's a linked channel
if (chan === client.CFG.CHAN.CEREBOT && client.CFG.DISCORD_LINK_ENABLED) {
client.DiscordClient && client.DiscordClient.send(user, msg, chan, evt.tags);
}
// Return if global cooldown did not pass. Does not apply to supermods
if (client.CFG.USER_LEVELS[user] < 1e6 && (client.GLOBAL_COOLDOWNS[chan] && now <= client.GLOBAL_COOLDOWNS[chan])) {
return;
}
if (msg.indexOf("$debug") === 0 && client.CFG.USER_LEVELS[user] >= DEBUG.level) {
DEBUG.exec(user, msg.split("$debug")[1].split(" "), evt);
}
else if (msg === "bot" || msg.indexOf("!afk") === 0) {
let silent = false;
if (msg === "bot") {
evt.reply("smol bot made by @supinic supiniL my commands start with $ - try $help for a list of commands");
}
else if (msg.indexOf("!afk") === 0) {
silent = true;
AFK.exec(user, msg.split(" ").splice(1), evt, true);
}
if (!silent) {
client.GLOBAL_COOLDOWNS[chan] = client.GLOBAL_COOLDOWNS[chan] || 0;
client.GLOBAL_COOLDOWNS[chan] = now + (client.CFG.CHANNEL_GLOBAL_COOLDOWNS[chan] || client.CFG.DEFAULT_GLOBAL_COOLDOWN);
}
}
else if (chan === "#forsen" && (user === "forsenai" || user === "snusbot")) {
if (
(user === "forsenai" && msg.indexOf("forsenThink") !== -1) ||
(user === "snusbot" && msg.indexOf("question/hint/clue") !== -1)
) {
let query = msg
.replace(" forsenThink", "")
.replace(/.*clue is(.*)" OMGScoots(.*)/, "$1")
.replace(/ /g, "+");
let url = "http://www.j-archive.com/search.php?submit=Search&search=" + query;
request(url, (err, data, body) => {
let parsedData = body.match(/class="search_correct_response">(.*?)<\/span>/);
let answer = (parsedData && Utils.removeHTML(parsedData[1])) || null;
if (answer) {
client.latestTriviaAnswer = answer;
client.AUTO_TRIVIA && evt.reply(answer);
// console.log("[" + new Date().simpleDateTime() + "] Answer: ", answer);
}
else {
client.latestTriviaAnswer = "eShrug idk kev";
// console.log("idk");
}
});
}
}
else if (chan === "#forsen" && user === "gazatu2" && msg.has("question:")) {
const question = (msg.match(/question: (.*)/) || [])[1];
COMMANDS.autoGazatu(question)
.then(answer => {
if (client.AUTO_GAZ && answer) {
evt.reply(answer);
}
// console.log(`GAZATU TRIVIA [${answer || "<no answer found>"}] <- ${question}`);
})
.catch(err => console.log("[GAZATU TRIVIA ERROR] ", err));
}
});
client.on("command", (evt) => {
if (!evt.channel) {
console.log("An event with no channel?", evt);
return;
}
const args = (evt.args || []).map(i => i.replace(new RegExp(client.CFG.BAN_EVASION_CHARACTER, "g"), "").trim());
const cmd = evt.cmd.toLowerCase(); // @todo remove this, it is just temporary
const chan = evt.channel.getName().toLowerCase();
const user = evt.user.getNick().toLowerCase();
const now = Date.now();
client.CFG.USER_LEVELS[user] = client.CFG.USER_LEVELS[user] || 1;
const command = COMMANDS.find(i =>
cmd === i.name.toLowerCase() || (Array.isArray(i.aliases) && i.aliases.some(j => cmd === j.toLowerCase()))
);
if (!command) {
return;
}
// Skip own commands, if that would ever happen for some reason.
if (user === "supibot") return;
console.log(`CMD REQUEST (${chan}) [${new Date().simpleDateTime()}] <${user}>: ${client.CFG.COMMAND_PREFIX}${cmd} ${(args && args.join(" ")) || ""}`);
// Skip banned users
if (client.CFG.USER_LEVELS[user] <= -1e6) {
console.log("CMD REQUEST FAILED - BANNED");
return;
}
// Declare AFK people as non AFK - silently, if necessary
// checkAFK(user, chan, evt);
// If it's a stealth channel, skip everything
if (client.CFG.STEALTH_CHANNELS.indexOf(chan) !== -1) {
console.log("CMD REQUEST FAILED - STEALTH CHANNEL");
return;
}
// Skip if global cooldown hasn't passed yet. Doesn't apply to supermods.
// Also doesn't apply to read-only commands, those never reply - no global cooldown is needed.
if (!command.readOnly && client.CFG.USER_LEVELS[user] < 1e6 && now <= client.GLOBAL_COOLDOWNS[chan]) {
console.log("CMD REQUEST FAILED - GLOBAL COOLDOWN", (client.GLOBAL_COOLDOWNS[chan] - now));
return;
}
client.USER_COOLDOWNS[user] = client.USER_COOLDOWNS[user] || {};
// Skip execution if the user cooldown isn't expired
if (client.CFG.USER_LEVELS[user] < 1e6 && now <= client.USER_COOLDOWNS[user][command.name]) {
const time = (client.USER_COOLDOWNS[user][command.name] - now) / 1000;
client.send("#supibot",
".w " + user + " " +
"Your cooldown for " + client.CFG.COMMAND_PREFIX + cmd + " " +
"has not expired yet: " + time + " seconds remaining."
);
console.log("CMD REQUEST FAILED - USER COOLDOWN", (client.USER_COOLDOWNS[user][command.name] - now));
return;
}
// Set the global cooldown in all cases
client.GLOBAL_COOLDOWNS[chan] = now + (client.CFG.CHANNEL_GLOBAL_COOLDOWNS[chan] || client.CFG.DEFAULT_GLOBAL_COOLDOWN);
const msgLimit = client.CFG.CHANNEL_MSG_LIMIT[chan] || client.CFG.DEFAULT_MSG_LIMIT || 450;
// If it's a protected channel, pajbot-check it. This is done by overwriting the reply function with a call to the snusbot API, and checking its result
if (client.CFG.PAJLADIFIED_CHANNELS.indexOf(chan) !== -1) {
evt.reply = pajladify(user, chan, msgLimit, command).bind(evt);
}
// If it isn't, modify the reply function so that we always send the ban-evasion character and do some basic banphrase checking.
else {
evt.reply = (function (msg) {
const isDiscord = (Object.values(client.CFG.DISCORD_LINK).indexOf(chan) !== -1);
let ping = "";
if (client.CFG.PING_CHANNELS.has(chan) && !client.CFG.PING_EXCLUDED_COMMANDS.has(command.name)) {
ping = user + ", ";
}
client.BAN_EVASION_FLAGS[chan] = !client.BAN_EVASION_FLAGS[chan];
msg = ping
+ msg + " "
+ (client.BAN_EVASION_FLAGS[chan] ? client.CFG.BAN_EVASION_CHARACTER : "");
if (Utils.globalCheck(msg, client.CFG.GLOBAL_BANPHRASES)) {
for (const phrase of client.CFG.GLOBAL_BANPHRASES) {
msg = msg.replace(new RegExp(phrase, "gi"), "[REDACTED]");
}
}
this._reply("send", Utils.safeWrap(msg, msgLimit));
(isDiscord) && setTimeout(() =>
client.DiscordClient.send(user, " used " + client.CFG.COMMAND_PREFIX + cmd + ": " + msg, chan, evt.tags),
500
);
}).bind(evt);
}
if (args.join(" ").length > 400) {
ev | if (command.blacklist && command.blacklist.some(i => i === chan)) {
evt.reply("This command cannot be executed in this channel.");
console.log("CMD REQUEST FAILED - CHANNEL BLACKLISTED");
}
else if (command.whitelist && !command.whitelist.some(i => i === chan)) {
evt.reply("This command cannot be executed in this channel.");
console.log("CMD REQUEST FAILED - CHANNEL NOT WHITELISTED");
}
else if (typeof command.level !== "undefined" && (client.CFG.USER_LEVELS[user] || 0) < command.level) {
evt.reply("You don't have the sufficient level to execute that command.");
console.log("CMD REQUEST FAILED - NO USER LEVEL");
}
else if (command.whispers === WHISPERS_ONLY) {
evt.reply("This command is available via whispers only");
console.log("CMD REQUEST FAILED - COMMAND IS WHISPER ONLY");
}
else {
let result = null;
if (user !== "supinic") {
client.send("#supibot", `CMD | ${chan} | ${user} | ${client.CFG.COMMAND_PREFIX}${cmd} ${args.join(" ")}`);
}
try {
result = command.exec(user, args, evt);
}
catch (e) {
evt.reply("monkaS command execution failed!");
console.log("CMD REQUEST FAILED - INTERNAL ERROR");
console.log(e.toString());
return;
}
// Apply a cooldown, if the command has one. Skip if the command requested for no specific cooldown to be triggered - usually happens in failed invocations
if (result !== NO_COOLDOWN_TRIGGERED && command.cooldown) {
if (typeof client.CFG.CHANNEL_USER_COOLDOWNS[chan] === "undefined") {
client.USER_COOLDOWNS[user][command.name] = now + (command.cooldown * 1000);
}
else {
// Apply the larger cooldown: channel-specific or command-specific.
const cd = Math.max((command.cooldown * 1000), client.CFG.CHANNEL_USER_COOLDOWNS[chan]);
client.USER_COOLDOWNS[user][command.name] = now + cd;
}
}
}
});
client.on("data", (evt) => {
const skipRegex = /ERR_UNKNOWNCOMMAND|USERSTATE|PRIVMSG|JOIN|PART|MODE|PING|RPL*|CAP/gim;
if (skipRegex.test(evt.command)) {
return;
}
if (evt.command === "CLEARCHAT") {
const now = Date.now();
const targetUser = evt.trailing;
const targetChannel = evt.params;
const logsURL = (usr, chan) => `https://api.gempir.com/channel/${chan.replace(/#/, "")}/user/${usr}`;
// Time out
if (evt.string.indexOf("ban-duration") !== -1) {
client.USER_BAN_TIMERS[targetUser] = client.USER_BAN_TIMERS[targetUser] || 0;
const time = evt.string.match(/ban-duration=(\d+)/);
const length = Number(time[1]);
const filterLength = client.CFG.CHANNEL_BAN_THRESHOLD[targetChannel] || client.CFG.DEFAULT_BAN_THRESHOLD;
if (
(!client.CFG.USERS_ALWAYS_SHOW_BAN.has(targetUser)) // if the target is NOT a user who should always be shown,
&& (length < filterLength)// and if the timeout length is lower than the channel threshold, or the default threshold if the channel has none,
) {
return; // then do not log the timeout.
}
client.TIMEOUT_TIMER[targetChannel] = client.TIMEOUT_TIMER[targetChannel] || 1;
// Only log the message if it has not been repeated again in a while
if ((now - client.USER_BAN_TIMERS[targetUser]) > 5000) {
let logsLink = "";
if ((targetChannel === "#forsen" || targetChannel === "#nymn") && length >= 7200) {
logsLink = " | " + logsURL(targetUser, targetChannel);
}
// No pooling necessary if the time passed between two timeouts is long enough
if (now - client.TIMEOUT_TIMER[targetChannel] > client.CFG.TIMEOUT_POOLING_TIMEOUT) {
client.TIMEOUT_TIMER[targetChannel] = now;
client.send("#supibot", "BAN | " + targetChannel + " | " + targetUser + " | " + length + logsLink);
}
// If not, and the timeout pool object doesn't exist, create it
else if (!client.TIMEOUT_POOL[targetChannel]) {
client.TIMEOUT_POOL[targetChannel] = {
timeout: setTimeout(() => client.TIMEOUT_POOL[targetChannel].fn(), client.CFG.TIMEOUT_POOLING_TIMEOUT),
users: [targetUser],
lengths: [length],
fn: () => {
const obj = client.TIMEOUT_POOL[targetChannel];
const joined = obj.users.map((i, ind) => i + " " + obj.lengths[ind]).join(", ");
const msg = (joined.length <= 450)
? joined
: (obj.users.length + "x for a total of " + obj.lengths.reduce((acc, cur) => acc += cur) + " sec");
client.send("#supibot", (obj.users.length > 1 ? "GROUP " : "") + "BAN | " + targetChannel + " | " + msg);
client.TIMEOUT_POOL[targetChannel] = null;
}
};
}
// If not, and the timeout pool object exists, append the user and timeout to it and reset the timeout and the timer
else {
const pool = client.TIMEOUT_POOL[targetChannel];
clearTimeout(pool.timeout);
pool.users.push(targetUser);
pool.lengths.push(length);
pool.timeout = setTimeout(() => pool.fn(), client.CFG.TIMEOUT_POOLING_TIMEOUT);
}
client.USER_BAN_TIMERS[targetUser] = now;
client.TIMEOUT_TIMER[targetChannel] = now;
}
// If the timeout is very long (>2 hours), it is rarely automated. In that case, add a file log
if (length >= 7200) {
console.log(`LONG TIMEOUT [${new Date().simpleDateTime()}] (${targetChannel}) ${targetUser} (length: ${length})`);
}
}
// Permaban
else if (targetUser) {
client.PERMABAN_TIMER[targetChannel] = client.PERMABAN_TIMER[targetChannel] || 1;
if (now - client.PERMABAN_TIMER[targetChannel] > client.CFG.TIMEOUT_POOLING_TIMEOUT) {
client.PERMABAN_TIMER[targetChannel] = now;
client.send("#supibot", "PERMABAN | " + targetChannel + " | " + targetUser);
}
// If not, and the timeout pool object doesn't exist, create it
else if (!client.PERMABAN_POOL[targetChannel]) {
client.PERMABAN_POOL[targetChannel] = {
timeout: setTimeout(() => client.PERMABAN_POOL[targetChannel].fn(), client.CFG.TIMEOUT_POOLING_TIMEOUT),
users: [targetUser],
fn: () => {
const obj = client.PERMABAN_POOL[targetChannel];
const joined = obj.users.join(", ");
const msg = (joined.length <= 450) ? joined : (obj.users.length + "x");
client.send("#supibot", (obj.users.length > 1 ? "GROUP " : "") + "PERMABAN | " + targetChannel + " | " + msg);
client.PERMABAN_POOL[targetChannel] = null;
}
};
}
else {
const pool = client.PERMABAN_POOL[targetChannel];
clearTimeout(pool.timeout);
pool.users.push(targetUser);
pool.timeout = setTimeout(() => pool.fn(), client.CFG.TIMEOUT_POOLING_TIMEOUT);
}
client.USER_BAN_TIMERS[targetUser] = now;
client.PERMABAN_TIMER[targetChannel] = now;
// Always log to file
console.log(`PERMABAN [${new Date().simpleDateTime()}] (${targetChannel}) ${targetUser}`);
}
// Clear chat
else {
// client.send("#supibot", "CLR | " + evt.params);
}
}
else if (evt.command === "RECONNECT") {
RESTART.exec(null, null, evt, true);
}
else if (evt.params === "#supinic") {
SUPINIC_CHANNEL.data(evt.command, evt.string, evt);
}
else if (evt.command === "HOSTTARGET") {
const params = evt.trailing.split(" ");
if (params[0] === "-") {
return;
}
client.send("#supibot", "HOST | FROM " + evt.params.substr(1) + " | TO " + params[0] + " | VIEWERS " + params[1]);
}
else if (evt.command === "USERNOTICE") {
const type = evt.string.replace(/.*;msg-id=(.*?);.*/, "$1");
const user = evt.string.replace(/.*;display-name=(.*?);.*/, "$1");
const channel = evt.string.replace(/.* (#.*?)/, "$1");
const now = "[" + new Date().fullDateTime() + "]";
const PLANS = {
1000: "$5",
2000: "$10",
3000: "$25",
Prime: "Prime"
};
if (client.CFG.STEALTH_CHANNELS.has(channel.replace(/ :.*/, ""))) {
return;
}
switch (type) {
case "sub":
case "resub": {
const months = evt.string.replace(/.*;msg-param-months=(.*?);.*/, "$1");
const plan = evt.string.replace(/.*;msg-param-sub-plan=(.*?);.*/, "$1");
client.send("#supibot", type.toUpperCase() + " | " + channel + " | " + months + "m | " + " TIER " + PLANS[plan] + " | " + user);
console.log(type.toUpperCase(), now, "(" + channel + ")", months + "m | " + " TIER " + PLANS[plan] + " | " + user);
break;
}
case "giftpaidupgrade": {
const gifter = evt.string.replace(/.*msg-param-sender-name=(.*?);.*/, "$1");
client.send("#supibot", "SUBTEMBER | " + channel + " | " + user + " CONTINUES GIFT FROM " + gifter);
console.log("SUBTEMBER", now, "(" + channel + ")", user + " CONTINUES GIFT FROM " + gifter);
break;
}
case "subgift": {
const recipient = evt.string.replace(/.*msg-param-recipient-display-name=(.*?);.*/, "$1");
const months = evt.string.replace(/.*;msg-param-months=(.*?);.*/, "$1");
const plan = evt.string.replace(/.*;msg-param-sub-plan=(.*?);.*/, "$1");
console.log("GIFTSUB", now, "(" + channel + ")", "FROM " + user + " TO " + recipient + " | " + months + "m | " + " TIER " + PLANS[plan]);
break;
}
case "submysterygift": {
const count = evt.string.replace(/.*msg-param-mass-gift-count=(.*?);.*/, "$1");
client.send("#supibot", "MASS GIFTSUB | FROM " + user + " | " + count + "x | " + channel);
console.log("MASS GIFTSUB ", now, "(" + channel + ")", "FROM " + user + " | " + count + "x");
break;
}
default: console.log("UNRECOGNIZED SUB EVENT", now, evt.command, "|", evt.trailing, "|", evt.string);
}
}
else {
const now = "[" + new Date().fullDateTime() + "]";
console.log(evt.command, now, evt.trailing, "|", evt.string);
}
if (evt.command !== "WHISPER") {
return;
}
let msg = evt.trailing;
let user = evt.prefix.split("!")[0];
console.log("WHISPER", user, msg, msg.indexOf("$"));
if (msg.indexOf("$") === 0) {
const cmdString = msg.split(" ")[0].split("$")[1];
const args = msg.split(" ").slice(1);
const now = Date.now();
// Skip banned users
if (client.CFG.USER_LEVELS[user] <= -1e6) {
return;
}
client.USER_COOLDOWNS[user] = client.USER_COOLDOWNS[user] || {};
// Change the reply function so that instead of replying to the event (in whispers, we don't have any channel available),
// the bot whispers the user via #supinic channel
evt.reply = (function (msg) {
client.send(client.CFG.CHAN.SUPINIC, ".w " + user + " " + msg);
}).bind(evt);
// Return if global cooldown did not pass yet. Does not apply to supermods
if (client.CFG.USER_LEVELS[user] < 1e6 && now <= client.USER_COOLDOWNS[user].whispers) {
return;
}
const command = COMMANDS.find(i => i.name === cmdString && i.whisper === WHISPERS_ONLY || i.whisper === WHISPERS_AND_CHATS);
if (command) {
client.USER_COOLDOWNS[user].whispers = now + client.CFG.DEFAULT_GLOBAL_COOLDOWN;
if (typeof command.level !== "undefined" && (client.CFG.USER_LEVELS[user] || 0) < command.level) {
evt.reply("You need a level of " + command.level + " to execute that command.");
return;
}
else {
try {
// Always notify @Supinic that someone whispered the bot
client.send("#supibot", ".w supinic " + user + ": " + msg);
command.exec(user, args, evt);
}
catch (e) {
evt.reply("monkaS command execution failed!");
console.log("WHISPER ERROR CAUGHT!\n", e);
}
}
return;
}
}
// Log all non-command whispers. Also, notify @Supinic about them
if (user !== "supinic") {
client.send("#supibot", ".w supinic " + user + " said: " + msg);
}
});
client.on("error", (err, evt) => {
console.log("IRC error!", err, evt);
client.restartFn("CONNECTION LOST");
});
// client.AKYLUS_RAFFLE = setInterval(() => client.send("#akylus_", "!raffle 10k 600"), 27e5);
process.on("beforeExit", () => {
client.CytubeClient.destroy();
client.DiscordClient.destroy();
fs.writeFileSync("config.json", JSON.stringify(client.CONFIG, null, 2));
});
client.APPLE_RAFFLE_ENABLED = false;
client.APPLE_RAFFLE_INTERVAL = setInterval(() => {
(client.APPLE_RAFFLE_ENABLED) && client.send("#appledcs", "!multiraffle 10000 600");
}, 3600000);
})(); | t.reply(":z message too long.");
console.log("CMD REQUEST FAILED - MESSAGE TOO LONG", args.join(" ").length);
}
else | conditional_block |
jpt_location.py | import dataclasses
import time
from typing import Optional, List, Tuple
import jpt
import numpy as np
import pybullet
import tf
import pycram.designators.location_designator
import pycram.task
from pycram.costmaps import OccupancyCostmap, plot_grid
from pycram.plan_failures import PlanFailure
class JPTCostmapLocation(pycram.designators.location_designator.CostmapLocation):
"""Costmap Locations using Joint Probability Trees (JPTs).
JPT costmaps are trained to model the dependency with a robot position relative to the object, the robots type,
the objects type, the robot torso height, and the grasp parameters.
Solutions to the problem definitions are chosen in such a way that the success probability is highest.
"""
@dataclasses.dataclass
class Location(pycram.designators.location_designator.LocationDesignatorDescription.Location):
pose: Tuple[List[float], List[float]]
reachable_arm: str
torso_height: float
grasp: str
def __init__(self, target, reachable_for=None, reachable_arm=None,
model: Optional[jpt.trees.JPT] = None, path: Optional[str] = None, resolver=None):
"""
Create a JPT Costmap
:param target: The target object
:param reachable_for: The robot to grab the object with
:param reachable_arm: The arm to use
:param model: The JPT model as a loaded tree in memory, either model or path must be set
:param path: The path to the JPT model, either model or path must be set
"""
super().__init__(target, reachable_for, None, reachable_arm, resolver)
# check if arguments are plausible
if (not model and not path) or (model and path):
raise ValueError("Either model or path must be set.")
# set model
if model:
self.model = model
# load model from path
if path:
self.model = jpt.trees.JPT.load(path)
# initialize member for visualized objects
self.visual_ids: List[int] = []
def evidence_from_occupancy_costmap(self) -> List[jpt.variables.LabelAssignment]:
"""
Create a list of boxes that can be used as evidences for a jpt. The list of boxes describe areas where the
robot can stand.
:return: List of evidences describing the found boxes
"""
# create Occupancy costmap for the target object
position, orientation = self.target.get_position_and_orientation()
position = list(position)
position[-1] = 0
ocm = OccupancyCostmap(distance_to_obstacle=0.3, from_ros=False, size=200, resolution=0.02,
origin=(position, orientation))
# ocm.visualize()
# working on a copy of the costmap, since found rectangles are deleted
map = np.copy(ocm.map)
# initialize result
queries = []
origin = np.array([ocm.height/2, ocm.width/2])
# for every index pair (i, j) in the occupancy map
for i in range(0, map.shape[0]):
for j in range(0, map.shape[1]):
# if this index has not been used yet
if map[i][j] > 0:
# get consecutive box
width = ocm._find_consectuive_line((i, j), map)
height = ocm._find_max_box_height((i, j), width, map)
# mark box as used
map[i:i+height, j:j+width] = 0
# calculate to coordinates relative to the objects pose
pose = np.array([i, j])
lower_corner = (pose - origin) * ocm.resolution
upper_corner = (pose - origin + np.array([height, width])) * ocm.resolution
rectangle = np.array([lower_corner, upper_corner]).T
# transform to jpt query
query = self.model.bind({"x": list(rectangle[0]), "y": list(rectangle[1])})
queries.append(query)
return queries
def create_evidence(self, use_success=True) -> jpt.variables.LabelAssignment:
"""
Create evidence usable for JPTs where type and status are set if wanted.
:param use_success: Rather to set success or not
:return: The usable label-assignment
"""
evidence = dict()
evidence["type"] = {self.target.type}
if use_success:
evidence["status"] = {"SUCCEEDED"}
return self.model.bind(evidence)
def sample(self, amount: int = 1) -> np.ndarray:
"""
Sample from the locations that fit the CostMap and are not occupied.
:param amount: The amount of samples to draw
:return: A numpy array containing the samples drawn from the tree.
"""
evidence = self.create_evidence()
locations = self.evidence_from_occupancy_costmap()
solutions = []
for location in locations:
for variable, value in evidence.items():
location[variable] = value
for leaf in self.model.apply(location):
if leaf.probability(location) == 0:
continue
altered_leaf = leaf.conditional_leaf(location)
success_probability = altered_leaf.probability(location)
_, mpe_state = altered_leaf.mpe(self.model.minimal_distances)
location["grasp"] = mpe_state["grasp"]
location["arm"] = mpe_state["arm"]
location["relative torso height"] = mpe_state["relative torso height"]
location["x"] = mpe_state["x"]
location["y"] = mpe_state["y"]
solutions.append((location, success_probability, leaf.prior))
solutions = sorted(solutions, key=lambda x: x[1], reverse=True)
best_solution = solutions[0]
conditional_model = self.model.conditional_jpt(best_solution[0])
# conditional_model.plot(plotvars=conditional_model.variables)
return conditional_model.sample(amount)
def | (self, sample: np.ndarray) -> Location:
"""
Convert a numpy array sampled from the JPT to a costmap-location
:param sample: The drawn sample
:return: The usable costmap-location
"""
sample_dict = {variable.name: value for variable, value in zip(self.model.variables, sample)}
target_x, target_y, target_z = self.target.pose
pose = [target_x + sample_dict["x"], target_y + sample_dict["y"], 0]
angle = np.arctan2(pose[1] - target_y, pose[0] - target_x) + np.pi
orientation = list(tf.transformations.quaternion_from_euler(0, 0, angle, axes="sxyz"))
torso_height = np.clip(target_z - sample_dict["relative torso height"], 0, 0.33)
result = self.Location((pose, orientation), sample_dict["arm"], torso_height, sample_dict["grasp"])
return result
def __iter__(self):
samples = self.sample(200)
for sample in samples:
yield self.sample_to_location(sample)
def visualize(self):
"""
Plot the possible areas to stand in the BulletWorld. The opacity is the probability of success.
"""
evidence = self.create_evidence(use_success=False)
conditional_model = self.model.conditional_jpt(evidence)
for leaf in conditional_model.leaves.values():
success = leaf.distributions["status"].p({"SUCCEEDED"})
if success == 0:
continue
x_intervals = leaf.distributions["x"].cdf.intervals
y_intervals = leaf.distributions["y"].cdf.intervals
x_range = np.array([x_intervals[0].upper, x_intervals[-1].lower])
y_range = np.array([y_intervals[0].upper, y_intervals[-1].lower])
center = np.array([sum(x_range) / 2, sum(y_range) / 2])
visual = pybullet.createVisualShape(pybullet.GEOM_BOX,
halfExtents=[(x_range[1] - x_range[0]) / 2,
(y_range[1] - y_range[0]) / 2, 0.001],
rgbaColor=[1, 0, 0, success],
visualFramePosition=[*center, 0])
self.visual_ids.append(visual)
for id_list in np.array_split(np.array(self.visual_ids), np.ceil(len(self.visual_ids) / 127)):
# Dummy paramater since these are needed to spawn visual shapes as a multibody.
link_poses = [[0, 0, 0] for c in id_list]
link_orientations = [[0, 0, 0, 1] for c in id_list]
link_masses = [1.0 for c in id_list]
link_parent = [0 for c in id_list]
link_joints = [pybullet.JOINT_FIXED for c in id_list]
link_collision = [-1 for c in id_list]
link_joint_axis = [[1, 0, 0] for c in id_list]
# The position at which the multibody will be spawned. Offset such that
# the origin referes to the centre of the costmap.
origin_pose = self.target.get_position_and_orientation()
base_position = list(origin_pose[0])
base_position[2] = 0
map_obj = pybullet.createMultiBody(baseVisualShapeIndex=-1, linkVisualShapeIndices=id_list,
basePosition=base_position, baseOrientation=origin_pose[1],
linkPositions=link_poses,
linkMasses=link_masses, linkOrientations=link_orientations,
linkInertialFramePositions=link_poses,
linkInertialFrameOrientations=link_orientations,
linkParentIndices=link_parent,
linkJointTypes=link_joints, linkJointAxis=link_joint_axis,
linkCollisionShapeIndices=link_collision)
self.visual_ids.append(map_obj)
def close_visualization(self) -> None:
"""
Close all plotted objects.
"""
for id in self.visual_ids:
pybullet.removeBody(id)
self.visual_ids = []
| sample_to_location | identifier_name |
jpt_location.py | import dataclasses
import time
from typing import Optional, List, Tuple
import jpt
import numpy as np
import pybullet
import tf
import pycram.designators.location_designator
import pycram.task
from pycram.costmaps import OccupancyCostmap, plot_grid
from pycram.plan_failures import PlanFailure
class JPTCostmapLocation(pycram.designators.location_designator.CostmapLocation):
"""Costmap Locations using Joint Probability Trees (JPTs).
JPT costmaps are trained to model the dependency with a robot position relative to the object, the robots type,
the objects type, the robot torso height, and the grasp parameters.
Solutions to the problem definitions are chosen in such a way that the success probability is highest.
"""
@dataclasses.dataclass
class Location(pycram.designators.location_designator.LocationDesignatorDescription.Location):
pose: Tuple[List[float], List[float]]
reachable_arm: str
torso_height: float
grasp: str
def __init__(self, target, reachable_for=None, reachable_arm=None,
model: Optional[jpt.trees.JPT] = None, path: Optional[str] = None, resolver=None):
"""
Create a JPT Costmap
:param target: The target object
:param reachable_for: The robot to grab the object with
:param reachable_arm: The arm to use
:param model: The JPT model as a loaded tree in memory, either model or path must be set
:param path: The path to the JPT model, either model or path must be set
"""
super().__init__(target, reachable_for, None, reachable_arm, resolver)
# check if arguments are plausible
if (not model and not path) or (model and path):
raise ValueError("Either model or path must be set.")
# set model
if model:
|
# load model from path
if path:
self.model = jpt.trees.JPT.load(path)
# initialize member for visualized objects
self.visual_ids: List[int] = []
def evidence_from_occupancy_costmap(self) -> List[jpt.variables.LabelAssignment]:
"""
Create a list of boxes that can be used as evidences for a jpt. The list of boxes describe areas where the
robot can stand.
:return: List of evidences describing the found boxes
"""
# create Occupancy costmap for the target object
position, orientation = self.target.get_position_and_orientation()
position = list(position)
position[-1] = 0
ocm = OccupancyCostmap(distance_to_obstacle=0.3, from_ros=False, size=200, resolution=0.02,
origin=(position, orientation))
# ocm.visualize()
# working on a copy of the costmap, since found rectangles are deleted
map = np.copy(ocm.map)
# initialize result
queries = []
origin = np.array([ocm.height/2, ocm.width/2])
# for every index pair (i, j) in the occupancy map
for i in range(0, map.shape[0]):
for j in range(0, map.shape[1]):
# if this index has not been used yet
if map[i][j] > 0:
# get consecutive box
width = ocm._find_consectuive_line((i, j), map)
height = ocm._find_max_box_height((i, j), width, map)
# mark box as used
map[i:i+height, j:j+width] = 0
# calculate to coordinates relative to the objects pose
pose = np.array([i, j])
lower_corner = (pose - origin) * ocm.resolution
upper_corner = (pose - origin + np.array([height, width])) * ocm.resolution
rectangle = np.array([lower_corner, upper_corner]).T
# transform to jpt query
query = self.model.bind({"x": list(rectangle[0]), "y": list(rectangle[1])})
queries.append(query)
return queries
def create_evidence(self, use_success=True) -> jpt.variables.LabelAssignment:
"""
Create evidence usable for JPTs where type and status are set if wanted.
:param use_success: Rather to set success or not
:return: The usable label-assignment
"""
evidence = dict()
evidence["type"] = {self.target.type}
if use_success:
evidence["status"] = {"SUCCEEDED"}
return self.model.bind(evidence)
def sample(self, amount: int = 1) -> np.ndarray:
"""
Sample from the locations that fit the CostMap and are not occupied.
:param amount: The amount of samples to draw
:return: A numpy array containing the samples drawn from the tree.
"""
evidence = self.create_evidence()
locations = self.evidence_from_occupancy_costmap()
solutions = []
for location in locations:
for variable, value in evidence.items():
location[variable] = value
for leaf in self.model.apply(location):
if leaf.probability(location) == 0:
continue
altered_leaf = leaf.conditional_leaf(location)
success_probability = altered_leaf.probability(location)
_, mpe_state = altered_leaf.mpe(self.model.minimal_distances)
location["grasp"] = mpe_state["grasp"]
location["arm"] = mpe_state["arm"]
location["relative torso height"] = mpe_state["relative torso height"]
location["x"] = mpe_state["x"]
location["y"] = mpe_state["y"]
solutions.append((location, success_probability, leaf.prior))
solutions = sorted(solutions, key=lambda x: x[1], reverse=True)
best_solution = solutions[0]
conditional_model = self.model.conditional_jpt(best_solution[0])
# conditional_model.plot(plotvars=conditional_model.variables)
return conditional_model.sample(amount)
def sample_to_location(self, sample: np.ndarray) -> Location:
"""
Convert a numpy array sampled from the JPT to a costmap-location
:param sample: The drawn sample
:return: The usable costmap-location
"""
sample_dict = {variable.name: value for variable, value in zip(self.model.variables, sample)}
target_x, target_y, target_z = self.target.pose
pose = [target_x + sample_dict["x"], target_y + sample_dict["y"], 0]
angle = np.arctan2(pose[1] - target_y, pose[0] - target_x) + np.pi
orientation = list(tf.transformations.quaternion_from_euler(0, 0, angle, axes="sxyz"))
torso_height = np.clip(target_z - sample_dict["relative torso height"], 0, 0.33)
result = self.Location((pose, orientation), sample_dict["arm"], torso_height, sample_dict["grasp"])
return result
def __iter__(self):
samples = self.sample(200)
for sample in samples:
yield self.sample_to_location(sample)
def visualize(self):
"""
Plot the possible areas to stand in the BulletWorld. The opacity is the probability of success.
"""
evidence = self.create_evidence(use_success=False)
conditional_model = self.model.conditional_jpt(evidence)
for leaf in conditional_model.leaves.values():
success = leaf.distributions["status"].p({"SUCCEEDED"})
if success == 0:
continue
x_intervals = leaf.distributions["x"].cdf.intervals
y_intervals = leaf.distributions["y"].cdf.intervals
x_range = np.array([x_intervals[0].upper, x_intervals[-1].lower])
y_range = np.array([y_intervals[0].upper, y_intervals[-1].lower])
center = np.array([sum(x_range) / 2, sum(y_range) / 2])
visual = pybullet.createVisualShape(pybullet.GEOM_BOX,
halfExtents=[(x_range[1] - x_range[0]) / 2,
(y_range[1] - y_range[0]) / 2, 0.001],
rgbaColor=[1, 0, 0, success],
visualFramePosition=[*center, 0])
self.visual_ids.append(visual)
for id_list in np.array_split(np.array(self.visual_ids), np.ceil(len(self.visual_ids) / 127)):
# Dummy paramater since these are needed to spawn visual shapes as a multibody.
link_poses = [[0, 0, 0] for c in id_list]
link_orientations = [[0, 0, 0, 1] for c in id_list]
link_masses = [1.0 for c in id_list]
link_parent = [0 for c in id_list]
link_joints = [pybullet.JOINT_FIXED for c in id_list]
link_collision = [-1 for c in id_list]
link_joint_axis = [[1, 0, 0] for c in id_list]
# The position at which the multibody will be spawned. Offset such that
# the origin referes to the centre of the costmap.
origin_pose = self.target.get_position_and_orientation()
base_position = list(origin_pose[0])
base_position[2] = 0
map_obj = pybullet.createMultiBody(baseVisualShapeIndex=-1, linkVisualShapeIndices=id_list,
basePosition=base_position, baseOrientation=origin_pose[1],
linkPositions=link_poses,
linkMasses=link_masses, linkOrientations=link_orientations,
linkInertialFramePositions=link_poses,
linkInertialFrameOrientations=link_orientations,
linkParentIndices=link_parent,
linkJointTypes=link_joints, linkJointAxis=link_joint_axis,
linkCollisionShapeIndices=link_collision)
self.visual_ids.append(map_obj)
def close_visualization(self) -> None:
"""
Close all plotted objects.
"""
for id in self.visual_ids:
pybullet.removeBody(id)
self.visual_ids = []
| self.model = model | conditional_block |
jpt_location.py | import dataclasses
import time
from typing import Optional, List, Tuple
import jpt
import numpy as np
import pybullet
import tf
import pycram.designators.location_designator
import pycram.task
from pycram.costmaps import OccupancyCostmap, plot_grid
from pycram.plan_failures import PlanFailure
class JPTCostmapLocation(pycram.designators.location_designator.CostmapLocation):
"""Costmap Locations using Joint Probability Trees (JPTs).
JPT costmaps are trained to model the dependency with a robot position relative to the object, the robots type,
the objects type, the robot torso height, and the grasp parameters.
Solutions to the problem definitions are chosen in such a way that the success probability is highest.
"""
@dataclasses.dataclass
class Location(pycram.designators.location_designator.LocationDesignatorDescription.Location):
pose: Tuple[List[float], List[float]]
reachable_arm: str
torso_height: float
grasp: str
def __init__(self, target, reachable_for=None, reachable_arm=None,
model: Optional[jpt.trees.JPT] = None, path: Optional[str] = None, resolver=None):
"""
Create a JPT Costmap
:param target: The target object
:param reachable_for: The robot to grab the object with
:param reachable_arm: The arm to use
:param model: The JPT model as a loaded tree in memory, either model or path must be set
:param path: The path to the JPT model, either model or path must be set
"""
super().__init__(target, reachable_for, None, reachable_arm, resolver)
# check if arguments are plausible
if (not model and not path) or (model and path):
raise ValueError("Either model or path must be set.")
# set model
if model:
self.model = model
# load model from path
if path:
self.model = jpt.trees.JPT.load(path)
# initialize member for visualized objects
self.visual_ids: List[int] = []
def evidence_from_occupancy_costmap(self) -> List[jpt.variables.LabelAssignment]:
"""
Create a list of boxes that can be used as evidences for a jpt. The list of boxes describe areas where the
robot can stand.
:return: List of evidences describing the found boxes
"""
# create Occupancy costmap for the target object
position, orientation = self.target.get_position_and_orientation()
position = list(position)
position[-1] = 0
ocm = OccupancyCostmap(distance_to_obstacle=0.3, from_ros=False, size=200, resolution=0.02,
origin=(position, orientation))
# ocm.visualize()
# working on a copy of the costmap, since found rectangles are deleted
map = np.copy(ocm.map)
# initialize result
queries = []
origin = np.array([ocm.height/2, ocm.width/2])
# for every index pair (i, j) in the occupancy map
for i in range(0, map.shape[0]):
for j in range(0, map.shape[1]):
# if this index has not been used yet
if map[i][j] > 0:
# get consecutive box
width = ocm._find_consectuive_line((i, j), map)
height = ocm._find_max_box_height((i, j), width, map)
# mark box as used
map[i:i+height, j:j+width] = 0
# calculate to coordinates relative to the objects pose
pose = np.array([i, j])
lower_corner = (pose - origin) * ocm.resolution
upper_corner = (pose - origin + np.array([height, width])) * ocm.resolution
rectangle = np.array([lower_corner, upper_corner]).T
# transform to jpt query
query = self.model.bind({"x": list(rectangle[0]), "y": list(rectangle[1])})
queries.append(query)
return queries
def create_evidence(self, use_success=True) -> jpt.variables.LabelAssignment:
|
def sample(self, amount: int = 1) -> np.ndarray:
"""
Sample from the locations that fit the CostMap and are not occupied.
:param amount: The amount of samples to draw
:return: A numpy array containing the samples drawn from the tree.
"""
evidence = self.create_evidence()
locations = self.evidence_from_occupancy_costmap()
solutions = []
for location in locations:
for variable, value in evidence.items():
location[variable] = value
for leaf in self.model.apply(location):
if leaf.probability(location) == 0:
continue
altered_leaf = leaf.conditional_leaf(location)
success_probability = altered_leaf.probability(location)
_, mpe_state = altered_leaf.mpe(self.model.minimal_distances)
location["grasp"] = mpe_state["grasp"]
location["arm"] = mpe_state["arm"]
location["relative torso height"] = mpe_state["relative torso height"]
location["x"] = mpe_state["x"]
location["y"] = mpe_state["y"]
solutions.append((location, success_probability, leaf.prior))
solutions = sorted(solutions, key=lambda x: x[1], reverse=True)
best_solution = solutions[0]
conditional_model = self.model.conditional_jpt(best_solution[0])
# conditional_model.plot(plotvars=conditional_model.variables)
return conditional_model.sample(amount)
def sample_to_location(self, sample: np.ndarray) -> Location:
"""
Convert a numpy array sampled from the JPT to a costmap-location
:param sample: The drawn sample
:return: The usable costmap-location
"""
sample_dict = {variable.name: value for variable, value in zip(self.model.variables, sample)}
target_x, target_y, target_z = self.target.pose
pose = [target_x + sample_dict["x"], target_y + sample_dict["y"], 0]
angle = np.arctan2(pose[1] - target_y, pose[0] - target_x) + np.pi
orientation = list(tf.transformations.quaternion_from_euler(0, 0, angle, axes="sxyz"))
torso_height = np.clip(target_z - sample_dict["relative torso height"], 0, 0.33)
result = self.Location((pose, orientation), sample_dict["arm"], torso_height, sample_dict["grasp"])
return result
def __iter__(self):
samples = self.sample(200)
for sample in samples:
yield self.sample_to_location(sample)
def visualize(self):
"""
Plot the possible areas to stand in the BulletWorld. The opacity is the probability of success.
"""
evidence = self.create_evidence(use_success=False)
conditional_model = self.model.conditional_jpt(evidence)
for leaf in conditional_model.leaves.values():
success = leaf.distributions["status"].p({"SUCCEEDED"})
if success == 0:
continue
x_intervals = leaf.distributions["x"].cdf.intervals
y_intervals = leaf.distributions["y"].cdf.intervals
x_range = np.array([x_intervals[0].upper, x_intervals[-1].lower])
y_range = np.array([y_intervals[0].upper, y_intervals[-1].lower])
center = np.array([sum(x_range) / 2, sum(y_range) / 2])
visual = pybullet.createVisualShape(pybullet.GEOM_BOX,
halfExtents=[(x_range[1] - x_range[0]) / 2,
(y_range[1] - y_range[0]) / 2, 0.001],
rgbaColor=[1, 0, 0, success],
visualFramePosition=[*center, 0])
self.visual_ids.append(visual)
for id_list in np.array_split(np.array(self.visual_ids), np.ceil(len(self.visual_ids) / 127)):
# Dummy paramater since these are needed to spawn visual shapes as a multibody.
link_poses = [[0, 0, 0] for c in id_list]
link_orientations = [[0, 0, 0, 1] for c in id_list]
link_masses = [1.0 for c in id_list]
link_parent = [0 for c in id_list]
link_joints = [pybullet.JOINT_FIXED for c in id_list]
link_collision = [-1 for c in id_list]
link_joint_axis = [[1, 0, 0] for c in id_list]
# The position at which the multibody will be spawned. Offset such that
# the origin referes to the centre of the costmap.
origin_pose = self.target.get_position_and_orientation()
base_position = list(origin_pose[0])
base_position[2] = 0
map_obj = pybullet.createMultiBody(baseVisualShapeIndex=-1, linkVisualShapeIndices=id_list,
basePosition=base_position, baseOrientation=origin_pose[1],
linkPositions=link_poses,
linkMasses=link_masses, linkOrientations=link_orientations,
linkInertialFramePositions=link_poses,
linkInertialFrameOrientations=link_orientations,
linkParentIndices=link_parent,
linkJointTypes=link_joints, linkJointAxis=link_joint_axis,
linkCollisionShapeIndices=link_collision)
self.visual_ids.append(map_obj)
def close_visualization(self) -> None:
"""
Close all plotted objects.
"""
for id in self.visual_ids:
pybullet.removeBody(id)
self.visual_ids = []
| """
Create evidence usable for JPTs where type and status are set if wanted.
:param use_success: Rather to set success or not
:return: The usable label-assignment
"""
evidence = dict()
evidence["type"] = {self.target.type}
if use_success:
evidence["status"] = {"SUCCEEDED"}
return self.model.bind(evidence) | identifier_body |
jpt_location.py | import dataclasses
import time
from typing import Optional, List, Tuple
import jpt | import pycram.designators.location_designator
import pycram.task
from pycram.costmaps import OccupancyCostmap, plot_grid
from pycram.plan_failures import PlanFailure
class JPTCostmapLocation(pycram.designators.location_designator.CostmapLocation):
"""Costmap Locations using Joint Probability Trees (JPTs).
JPT costmaps are trained to model the dependency with a robot position relative to the object, the robots type,
the objects type, the robot torso height, and the grasp parameters.
Solutions to the problem definitions are chosen in such a way that the success probability is highest.
"""
@dataclasses.dataclass
class Location(pycram.designators.location_designator.LocationDesignatorDescription.Location):
pose: Tuple[List[float], List[float]]
reachable_arm: str
torso_height: float
grasp: str
def __init__(self, target, reachable_for=None, reachable_arm=None,
model: Optional[jpt.trees.JPT] = None, path: Optional[str] = None, resolver=None):
"""
Create a JPT Costmap
:param target: The target object
:param reachable_for: The robot to grab the object with
:param reachable_arm: The arm to use
:param model: The JPT model as a loaded tree in memory, either model or path must be set
:param path: The path to the JPT model, either model or path must be set
"""
super().__init__(target, reachable_for, None, reachable_arm, resolver)
# check if arguments are plausible
if (not model and not path) or (model and path):
raise ValueError("Either model or path must be set.")
# set model
if model:
self.model = model
# load model from path
if path:
self.model = jpt.trees.JPT.load(path)
# initialize member for visualized objects
self.visual_ids: List[int] = []
def evidence_from_occupancy_costmap(self) -> List[jpt.variables.LabelAssignment]:
"""
Create a list of boxes that can be used as evidences for a jpt. The list of boxes describe areas where the
robot can stand.
:return: List of evidences describing the found boxes
"""
# create Occupancy costmap for the target object
position, orientation = self.target.get_position_and_orientation()
position = list(position)
position[-1] = 0
ocm = OccupancyCostmap(distance_to_obstacle=0.3, from_ros=False, size=200, resolution=0.02,
origin=(position, orientation))
# ocm.visualize()
# working on a copy of the costmap, since found rectangles are deleted
map = np.copy(ocm.map)
# initialize result
queries = []
origin = np.array([ocm.height/2, ocm.width/2])
# for every index pair (i, j) in the occupancy map
for i in range(0, map.shape[0]):
for j in range(0, map.shape[1]):
# if this index has not been used yet
if map[i][j] > 0:
# get consecutive box
width = ocm._find_consectuive_line((i, j), map)
height = ocm._find_max_box_height((i, j), width, map)
# mark box as used
map[i:i+height, j:j+width] = 0
# calculate to coordinates relative to the objects pose
pose = np.array([i, j])
lower_corner = (pose - origin) * ocm.resolution
upper_corner = (pose - origin + np.array([height, width])) * ocm.resolution
rectangle = np.array([lower_corner, upper_corner]).T
# transform to jpt query
query = self.model.bind({"x": list(rectangle[0]), "y": list(rectangle[1])})
queries.append(query)
return queries
def create_evidence(self, use_success=True) -> jpt.variables.LabelAssignment:
"""
Create evidence usable for JPTs where type and status are set if wanted.
:param use_success: Rather to set success or not
:return: The usable label-assignment
"""
evidence = dict()
evidence["type"] = {self.target.type}
if use_success:
evidence["status"] = {"SUCCEEDED"}
return self.model.bind(evidence)
def sample(self, amount: int = 1) -> np.ndarray:
"""
Sample from the locations that fit the CostMap and are not occupied.
:param amount: The amount of samples to draw
:return: A numpy array containing the samples drawn from the tree.
"""
evidence = self.create_evidence()
locations = self.evidence_from_occupancy_costmap()
solutions = []
for location in locations:
for variable, value in evidence.items():
location[variable] = value
for leaf in self.model.apply(location):
if leaf.probability(location) == 0:
continue
altered_leaf = leaf.conditional_leaf(location)
success_probability = altered_leaf.probability(location)
_, mpe_state = altered_leaf.mpe(self.model.minimal_distances)
location["grasp"] = mpe_state["grasp"]
location["arm"] = mpe_state["arm"]
location["relative torso height"] = mpe_state["relative torso height"]
location["x"] = mpe_state["x"]
location["y"] = mpe_state["y"]
solutions.append((location, success_probability, leaf.prior))
solutions = sorted(solutions, key=lambda x: x[1], reverse=True)
best_solution = solutions[0]
conditional_model = self.model.conditional_jpt(best_solution[0])
# conditional_model.plot(plotvars=conditional_model.variables)
return conditional_model.sample(amount)
def sample_to_location(self, sample: np.ndarray) -> Location:
"""
Convert a numpy array sampled from the JPT to a costmap-location
:param sample: The drawn sample
:return: The usable costmap-location
"""
sample_dict = {variable.name: value for variable, value in zip(self.model.variables, sample)}
target_x, target_y, target_z = self.target.pose
pose = [target_x + sample_dict["x"], target_y + sample_dict["y"], 0]
angle = np.arctan2(pose[1] - target_y, pose[0] - target_x) + np.pi
orientation = list(tf.transformations.quaternion_from_euler(0, 0, angle, axes="sxyz"))
torso_height = np.clip(target_z - sample_dict["relative torso height"], 0, 0.33)
result = self.Location((pose, orientation), sample_dict["arm"], torso_height, sample_dict["grasp"])
return result
def __iter__(self):
samples = self.sample(200)
for sample in samples:
yield self.sample_to_location(sample)
def visualize(self):
"""
Plot the possible areas to stand in the BulletWorld. The opacity is the probability of success.
"""
evidence = self.create_evidence(use_success=False)
conditional_model = self.model.conditional_jpt(evidence)
for leaf in conditional_model.leaves.values():
success = leaf.distributions["status"].p({"SUCCEEDED"})
if success == 0:
continue
x_intervals = leaf.distributions["x"].cdf.intervals
y_intervals = leaf.distributions["y"].cdf.intervals
x_range = np.array([x_intervals[0].upper, x_intervals[-1].lower])
y_range = np.array([y_intervals[0].upper, y_intervals[-1].lower])
center = np.array([sum(x_range) / 2, sum(y_range) / 2])
visual = pybullet.createVisualShape(pybullet.GEOM_BOX,
halfExtents=[(x_range[1] - x_range[0]) / 2,
(y_range[1] - y_range[0]) / 2, 0.001],
rgbaColor=[1, 0, 0, success],
visualFramePosition=[*center, 0])
self.visual_ids.append(visual)
for id_list in np.array_split(np.array(self.visual_ids), np.ceil(len(self.visual_ids) / 127)):
# Dummy paramater since these are needed to spawn visual shapes as a multibody.
link_poses = [[0, 0, 0] for c in id_list]
link_orientations = [[0, 0, 0, 1] for c in id_list]
link_masses = [1.0 for c in id_list]
link_parent = [0 for c in id_list]
link_joints = [pybullet.JOINT_FIXED for c in id_list]
link_collision = [-1 for c in id_list]
link_joint_axis = [[1, 0, 0] for c in id_list]
# The position at which the multibody will be spawned. Offset such that
# the origin referes to the centre of the costmap.
origin_pose = self.target.get_position_and_orientation()
base_position = list(origin_pose[0])
base_position[2] = 0
map_obj = pybullet.createMultiBody(baseVisualShapeIndex=-1, linkVisualShapeIndices=id_list,
basePosition=base_position, baseOrientation=origin_pose[1],
linkPositions=link_poses,
linkMasses=link_masses, linkOrientations=link_orientations,
linkInertialFramePositions=link_poses,
linkInertialFrameOrientations=link_orientations,
linkParentIndices=link_parent,
linkJointTypes=link_joints, linkJointAxis=link_joint_axis,
linkCollisionShapeIndices=link_collision)
self.visual_ids.append(map_obj)
def close_visualization(self) -> None:
"""
Close all plotted objects.
"""
for id in self.visual_ids:
pybullet.removeBody(id)
self.visual_ids = [] | import numpy as np
import pybullet
import tf
| random_line_split |
main.go | package core
import (
"context"
"crypto/tls"
"fmt"
genesisUploader "github.com/MinterTeam/explorer-genesis-uploader/core"
genesisEnv "github.com/MinterTeam/explorer-genesis-uploader/env"
"github.com/MinterTeam/minter-explorer-api/v2/coins"
"github.com/MinterTeam/minter-explorer-extender/v2/address"
"github.com/MinterTeam/minter-explorer-extender/v2/balance"
"github.com/MinterTeam/minter-explorer-extender/v2/block"
"github.com/MinterTeam/minter-explorer-extender/v2/broadcast"
"github.com/MinterTeam/minter-explorer-extender/v2/coin"
"github.com/MinterTeam/minter-explorer-extender/v2/env"
"github.com/MinterTeam/minter-explorer-extender/v2/events"
"github.com/MinterTeam/minter-explorer-extender/v2/liquidity_pool"
"github.com/MinterTeam/minter-explorer-extender/v2/metrics"
"github.com/MinterTeam/minter-explorer-extender/v2/models"
"github.com/MinterTeam/minter-explorer-extender/v2/orderbook"
"github.com/MinterTeam/minter-explorer-extender/v2/transaction"
"github.com/MinterTeam/minter-explorer-extender/v2/validator"
"github.com/MinterTeam/minter-explorer-tools/v4/helpers"
"github.com/MinterTeam/minter-go-sdk/v2/api/grpc_client"
"github.com/MinterTeam/node-grpc-gateway/api_pb"
"github.com/go-pg/pg/v10"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/status"
"math"
"os"
"regexp"
"strings"
"time"
)
const ChasingModDiff = 121
var Version string
type Extender struct {
Metrics *metrics.Metrics
env *env.ExtenderEnvironment
nodeApi *grpc_client.Client
blockService *block.Service
addressService *address.Service
blockRepository *block.Repository
validatorService *validator.Service
validatorRepository *validator.Repository
transactionService *transaction.Service
eventService *events.Service
balanceService *balance.Service
coinService *coin.Service
broadcastService *broadcast.Service
orderBookService *orderbook.Service
chasingMode bool
startBlockHeight uint64
currentNodeHeight uint64
lastLPSnapshotHeight uint64
log *logrus.Entry
lpSnapshotChannel chan *api_pb.BlockResponse
lpWorkerChannel chan *api_pb.BlockResponse
orderBookChannel chan *api_pb.BlockResponse
}
type ExtenderElapsedTime struct {
Height uint64
GettingBlock time.Duration
GettingEvents time.Duration
HandleCoinsFromTransactions time.Duration
HandleAddressesFromResponses time.Duration
HandleBlockResponse time.Duration
Total time.Duration
}
type eventHook struct {
beforeTime time.Time
log *logrus.Logger
}
func (eh eventHook) BeforeQuery(ctx context.Context, event *pg.QueryEvent) (context.Context, error) {
if event.Stash == nil {
event.Stash = make(map[interface{}]interface{})
}
event.Stash["query_time"] = time.Now()
return ctx, nil
}
func (eh eventHook) AfterQuery(ctx context.Context, event *pg.QueryEvent) error {
critical := time.Millisecond * 500
result := time.Duration(0)
if event.Stash != nil {
if v, ok := event.Stash["query_time"]; ok {
result = time.Now().Sub(v.(time.Time))
}
}
if result > critical {
bigQueryLog, err := os.OpenFile("big_query.log", os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
eh.log.Error("error opening file: %v", err)
}
// don't forget to close it
defer bigQueryLog.Close()
eh.log.SetReportCaller(false)
eh.log.SetFormatter(&logrus.JSONFormatter{})
eh.log.SetOutput(bigQueryLog)
q, err := event.UnformattedQuery()
if err != nil {
eh.log.Error(err)
}
r := regexp.MustCompile("\\s+")
replace := r.ReplaceAllString(fmt.Sprintf("%v", string(q)), " ")
eh.log.WithFields(logrus.Fields{
"query": strings.TrimSpace(replace),
"time": fmt.Sprintf("%s", result),
}).Error("DB query time is too height")
}
return nil
}
func NewExtender(env *env.ExtenderEnvironment) *Extender {
//Init Logger
logger := logrus.New()
logger.SetFormatter(&logrus.JSONFormatter{})
logger.SetOutput(os.Stdout)
logger.SetReportCaller(true)
if env.Debug {
logger.SetFormatter(&logrus.TextFormatter{
DisableColors: false,
FullTimestamp: true,
})
} else {
logger.SetFormatter(&logrus.JSONFormatter{})
logger.SetLevel(logrus.WarnLevel)
}
contextLogger := logger.WithFields(logrus.Fields{
"version": Version,
"app": "Minter Explorer Extender",
})
//Init DB
pgOptions := &pg.Options{
Addr: fmt.Sprintf("%s:%s", env.DbHost, env.DbPort),
User: env.DbUser,
Password: env.DbPassword,
Database: env.DbName,
}
if os.Getenv("POSTGRES_SSL_ENABLED") == "true" {
pgOptions.TLSConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
//hookImpl := eventHook{
// log: logrus.New(),
// beforeTime: time.Now(),
//}
db := pg.Connect(pgOptions)
//db.AddQueryHook(hookImpl)
uploader := genesisUploader.New(genesisEnv.Config{
Debug: false,
PostgresHost: env.DbHost,
PostgresPort: env.DbPort,
PostgresDB: env.DbName,
PostgresUser: env.DbUser,
PostgresPassword: env.DbPassword,
PostgresSSLEnabled: os.Getenv("POSTGRES_SSL_ENABLED") == "true",
MinterBaseCoin: env.BaseCoin,
NodeGrpc: env.NodeApi,
AddressChunkSize: uint64(env.AddrChunkSize),
CoinsChunkSize: 1000,
BalanceChunkSize: 10000,
StakeChunkSize: uint64(env.StakeChunkSize),
ValidatorChunkSize: uint64(env.StakeChunkSize),
})
err := uploader.Do()
if err != nil {
logger.Warn(err)
}
//api
nodeApi, err := grpc_client.New(env.NodeApi)
if err != nil {
panic(err)
}
nodeStatus, err := nodeApi.Status()
if err != nil {
panic(err)
}
// Repositories
blockRepository := block.NewRepository(db)
validatorRepository := validator.NewRepository(db, contextLogger)
transactionRepository := transaction.NewRepository(db)
addressRepository := address.NewRepository(db)
coinRepository := coin.NewRepository(db)
eventsRepository := events.NewRepository(db)
balanceRepository := balance.NewRepository(db)
liquidityPoolRepository := liquidity_pool.NewRepository(db)
orderbookRepository := orderbook.NewRepository(db)
coins.GlobalRepository = coins.NewRepository(db) //temporary solution
// Services
addressService := address.NewService(env, addressRepository, contextLogger)
broadcastService := broadcast.NewService(env, addressRepository, coinRepository, nodeApi, contextLogger)
balanceService := balance.NewService(env, balanceRepository, nodeApi, addressService, coinRepository, broadcastService, contextLogger)
coinService := coin.NewService(env, nodeApi, coinRepository, addressRepository, contextLogger)
validatorService := validator.NewService(env, nodeApi, validatorRepository, addressRepository, coinRepository, contextLogger)
eventService := events.NewService(env, eventsRepository, validatorRepository, addressRepository, coinRepository, coinService, blockRepository, orderbookRepository, balanceRepository, broadcastService, contextLogger, nodeStatus.InitialHeight+1)
orderBookService := orderbook.NewService(db, addressRepository, liquidityPoolRepository, contextLogger)
return &Extender{
Metrics: metrics.New(),
env: env,
nodeApi: nodeApi,
blockService: block.NewBlockService(blockRepository, validatorRepository, broadcastService),
eventService: eventService,
blockRepository: blockRepository,
validatorService: validatorService,
transactionService: transaction.NewService(env, transactionRepository, addressRepository, validatorRepository, coinRepository, coinService, broadcastService, contextLogger, validatorService.GetUnbondSaverJobChannel(), liquidityPoolRepository, validatorService.GetMoveStakeJobChannel()),
addressService: addressService,
validatorRepository: validatorRepository,
balanceService: balanceService,
coinService: coinService,
broadcastService: broadcastService,
orderBookService: orderBookService,
chasingMode: false,
currentNodeHeight: 0,
startBlockHeight: nodeStatus.InitialHeight + 1,
log: contextLogger,
lpSnapshotChannel: make(chan *api_pb.BlockResponse),
lpWorkerChannel: make(chan *api_pb.BlockResponse),
orderBookChannel: make(chan *api_pb.BlockResponse),
}
}
func (ext *Extender) GetInfo() {
fmt.Printf("%s v%s\n", "Minter Explorer Extender", Version)
}
func (ext *Extender) Run() {
//check connections to node
_, err := ext.nodeApi.Status()
if err == nil {
err = ext.blockRepository.DeleteLastBlockData()
}
if err != nil {
ext.log.Fatal(err)
}
var height uint64
// ----- Workers -----
ext.runWorkers()
lastExplorerBlock, err := ext.blockRepository.GetLastFromDB()
if err != nil && err != pg.ErrNoRows {
ext.log.Fatal(err)
}
if lastExplorerBlock != nil {
height = lastExplorerBlock.ID + 1
ext.blockService.SetBlockCache(lastExplorerBlock)
} else {
height = ext.startBlockHeight
}
for {
eet := ExtenderElapsedTime{
Height: height,
GettingBlock: 0,
GettingEvents: 0,
HandleCoinsFromTransactions: 0,
HandleAddressesFromResponses: 0,
HandleBlockResponse: 0,
Total: 0,
}
start := time.Now()
//ext.findOutChasingMode(height)
//Pulling block data
countStart := time.Now()
blockResponse, err := ext.nodeApi.BlockExtended(height, true, true)
if err != nil {
grpcErr, ok := status.FromError(err)
if !ok {
ext.log.Error(err)
time.Sleep(2 * time.Second)
continue
}
if grpcErr.Message() == "Block not found" || grpcErr.Message() == "Block results not found" {
time.Sleep(2 * time.Second)
continue
}
ext.log.Fatal(err)
}
eet.GettingBlock = time.Since(countStart)
countStart = time.Now()
ext.handleCoinsFromTransactions(blockResponse)
eet.HandleCoinsFromTransactions = time.Since(countStart)
countStart = time.Now()
ext.handleAddressesFromResponses(blockResponse)
eet.HandleAddressesFromResponses = time.Since(countStart)
countStart = time.Now()
ext.handleBlockResponse(blockResponse)
eet.HandleBlockResponse = time.Since(countStart)
ext.balanceService.UpdateChannel() <- blockResponse
go ext.handleEventResponse(height, blockResponse)
if len(blockResponse.Transactions) > 0 {
ext.orderBookChannel <- blockResponse
}
//ext.validatorService.GetUpdateStakesJobChannel() <- height
ext.validatorService.GetUpdateValidatorsJobChannel() <- height
ext.validatorService.GetClearJobChannel() <- height
eet.Total = time.Since(start)
ext.printSpentTimeLog(eet)
height++
}
}
func (ext *Extender) runWorkers() {
// Addresses
for w := 1; w <= ext.env.WrkSaveAddressesCount; w++ {
go ext.addressService.SaveAddressesWorker(ext.addressService.GetSaveAddressesJobChannel())
}
// Transactions
for w := 1; w <= ext.env.WrkSaveTxsCount; w++ {
go ext.transactionService.SaveTransactionsWorker(ext.transactionService.GetSaveTxJobChannel())
}
for w := 1; w <= ext.env.WrkSaveTxsOutputCount; w++ {
go ext.transactionService.SaveTransactionsOutputWorker(ext.transactionService.GetSaveTxsOutputJobChannel())
}
for w := 1; w <= ext.env.WrkSaveInvTxsCount; w++ {
go ext.transactionService.SaveInvalidTransactionsWorker(ext.transactionService.GetSaveInvalidTxsJobChannel())
}
go ext.transactionService.UpdateTxsIndexWorker()
// Validators
for w := 1; w <= ext.env.WrkSaveValidatorTxsCount; w++ {
go ext.transactionService.SaveTxValidatorWorker(ext.transactionService.GetSaveTxValidatorJobChannel())
}
go ext.validatorService.UpdateValidatorsWorker(ext.validatorService.GetUpdateValidatorsJobChannel())
//go ext.validatorService.UpdateStakesWorker(ext.validatorService.GetUpdateStakesJobChannel())
// Events
for w := 1; w <= ext.env.WrkSaveRewardsCount; w++ {
go ext.eventService.SaveRewardsWorker(ext.eventService.GetSaveRewardsJobChannel())
}
for w := 1; w <= ext.env.WrkSaveSlashesCount; w++ {
go ext.eventService.SaveSlashesWorker(ext.eventService.GetSaveSlashesJobChannel())
}
// Balances
go ext.balanceService.BalanceManager()
//Coins
go ext.coinService.UpdateCoinsInfoFromTxsWorker(ext.coinService.GetUpdateCoinsFromTxsJobChannel())
go ext.coinService.UpdateCoinsInfoFromCoinsMap(ext.coinService.GetUpdateCoinsFromCoinsMapJobChannel())
go ext.coinService.UpdateHubInfoWorker()
//Unbonds
go ext.validatorService.UnbondSaverWorker(ext.validatorService.GetUnbondSaverJobChannel())
//Move Stake
go ext.validatorService.MoveStakeWorker(ext.validatorService.GetMoveStakeJobChannel())
go ext.validatorService.ClearMoveStakeAndUnbondWorker(ext.validatorService.GetClearJobChannel())
//OrderBook
go ext.orderBookService.OrderBookWorker(ext.orderBookChannel)
go ext.orderBookService.UpdateOrderBookWorker(ext.orderBookService.UpdateOrderChannel())
//Broadcast
go ext.broadcastService.Manager()
}
func (ext *Extender) handleAddressesFromResponses(blockResponse *api_pb.BlockResponse) {
err := ext.addressService.SaveAddressesFromResponses(blockResponse)
if err != nil {
ext.log.Panic(err)
}
}
func (ext *Extender) handleBlockResponse(response *api_pb.BlockResponse) {
// Save validators if not exist
err := ext.validatorService.HandleBlockResponse(response)
if err != nil {
ext.log.Panic(err)
}
// Save block
err = ext.blockService.HandleBlockResponse(response)
if err != nil {
ext.log.Panic(err)
}
ext.linkBlockValidator(response)
//first block don't have validators
if response.TransactionCount > 0 {
ext.handleTransactions(response)
}
}
func (ext *Extender) handleCoinsFromTransactions(block *api_pb.BlockResponse) {
if len(block.Transactions) == 0 {
return
}
err := ext.coinService.HandleCoinsFromBlock(block)
if err != nil {
ext.log.Fatal(err)
}
}
func (ext *Extender) handleTransactions(response *api_pb.BlockResponse) {
chunksCount := int(math.Ceil(float64(len(response.Transactions)) / float64(ext.env.TxChunkSize)))
for i := 0; i < chunksCount; i++ {
start := ext.env.TxChunkSize * i
end := start + ext.env.TxChunkSize
if end > len(response.Transactions) {
end = len(response.Transactions)
}
layout := "2006-01-02T15:04:05Z"
blockTime, err := time.Parse(layout, response.Time)
if err != nil {
ext.log.Panic(err)
}
ext.saveTransactions(response.Height, blockTime, response.Transactions[start:end])
}
}
func (ext *Extender) handleEventResponse(blockHeight uint64, response *api_pb.BlockResponse) {
if len(response.Events) > 0 {
//Save events
err := ext.eventService.HandleEventResponse(blockHeight, response)
if err != nil {
ext.log.Fatal(err)
}
}
}
func (ext *Extender) linkBlockValidator(response *api_pb.BlockResponse) |
func (ext *Extender) saveTransactions(blockHeight uint64, blockCreatedAt time.Time, transactions []*api_pb.TransactionResponse) {
// Save transactions
err := ext.transactionService.HandleTransactionsFromBlockResponse(blockHeight, blockCreatedAt, transactions)
if err != nil {
ext.log.Panic(err)
}
}
func (ext *Extender) getNodeLastBlockId() (uint64, error) {
statusResponse, err := ext.nodeApi.Status()
if err != nil {
ext.log.Error(err)
return 0, err
}
return statusResponse.LatestBlockHeight, err
}
func (ext *Extender) findOutChasingMode(height uint64) {
var err error
if ext.currentNodeHeight == 0 {
ext.currentNodeHeight, err = ext.getNodeLastBlockId()
if err != nil {
ext.log.Fatal(err)
}
}
isChasingMode := ext.currentNodeHeight-height > ChasingModDiff
if ext.chasingMode && !isChasingMode {
ext.currentNodeHeight, err = ext.getNodeLastBlockId()
if err != nil {
ext.log.Fatal(err)
}
ext.chasingMode = ext.currentNodeHeight-height > ChasingModDiff
}
ext.broadcastService.SetChasingMode(ext.chasingMode)
ext.balanceService.SetChasingMode(ext.chasingMode)
//ext.liquidityPoolService.SetChasingMode(ext.chasingMode)
}
func (ext *Extender) printSpentTimeLog(eet ExtenderElapsedTime) {
critical := 7 * time.Second
if eet.Total > critical {
ext.log.WithFields(logrus.Fields{
"getting block time": eet.GettingBlock,
"getting events time": eet.GettingEvents,
"handle addresses": eet.HandleAddressesFromResponses,
"handle coins": eet.HandleCoinsFromTransactions,
"handle block": eet.HandleBlockResponse,
"block": eet.Height,
"time": fmt.Sprintf("%s", eet.Total),
}).Warning("Processing time is too height")
}
ext.log.WithFields(logrus.Fields{
"getting block time": eet.GettingBlock,
"getting events time": eet.GettingEvents,
"handle addresses": eet.HandleAddressesFromResponses,
"handle coins": eet.HandleCoinsFromTransactions,
"handle block": eet.HandleBlockResponse,
}).Info(fmt.Sprintf("Block: %d Processing time: %s", eet.Height, eet.Total))
}
| {
if response.Height == 1 {
return
}
var links []*models.BlockValidator
for _, v := range response.Validators {
vId, err := ext.validatorRepository.FindIdByPk(helpers.RemovePrefix(v.PublicKey))
if err != nil {
ext.log.Error(err)
}
helpers.HandleError(err)
link := models.BlockValidator{
ValidatorID: uint64(vId),
BlockID: response.Height,
Signed: v.Signed,
}
links = append(links, &link)
}
err := ext.blockRepository.LinkWithValidators(links)
if err != nil {
ext.log.Fatal(err)
}
} | identifier_body |
main.go | package core
import (
"context"
"crypto/tls"
"fmt"
genesisUploader "github.com/MinterTeam/explorer-genesis-uploader/core"
genesisEnv "github.com/MinterTeam/explorer-genesis-uploader/env"
"github.com/MinterTeam/minter-explorer-api/v2/coins"
"github.com/MinterTeam/minter-explorer-extender/v2/address"
"github.com/MinterTeam/minter-explorer-extender/v2/balance"
"github.com/MinterTeam/minter-explorer-extender/v2/block"
"github.com/MinterTeam/minter-explorer-extender/v2/broadcast"
"github.com/MinterTeam/minter-explorer-extender/v2/coin"
"github.com/MinterTeam/minter-explorer-extender/v2/env"
"github.com/MinterTeam/minter-explorer-extender/v2/events"
"github.com/MinterTeam/minter-explorer-extender/v2/liquidity_pool"
"github.com/MinterTeam/minter-explorer-extender/v2/metrics"
"github.com/MinterTeam/minter-explorer-extender/v2/models"
"github.com/MinterTeam/minter-explorer-extender/v2/orderbook"
"github.com/MinterTeam/minter-explorer-extender/v2/transaction"
"github.com/MinterTeam/minter-explorer-extender/v2/validator"
"github.com/MinterTeam/minter-explorer-tools/v4/helpers"
"github.com/MinterTeam/minter-go-sdk/v2/api/grpc_client"
"github.com/MinterTeam/node-grpc-gateway/api_pb"
"github.com/go-pg/pg/v10"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/status"
"math"
"os"
"regexp"
"strings"
"time"
)
const ChasingModDiff = 121
var Version string
type Extender struct {
Metrics *metrics.Metrics
env *env.ExtenderEnvironment
nodeApi *grpc_client.Client
blockService *block.Service
addressService *address.Service
blockRepository *block.Repository
validatorService *validator.Service
validatorRepository *validator.Repository
transactionService *transaction.Service
eventService *events.Service
balanceService *balance.Service
coinService *coin.Service
broadcastService *broadcast.Service
orderBookService *orderbook.Service
chasingMode bool
startBlockHeight uint64
currentNodeHeight uint64
lastLPSnapshotHeight uint64
log *logrus.Entry
lpSnapshotChannel chan *api_pb.BlockResponse
lpWorkerChannel chan *api_pb.BlockResponse
orderBookChannel chan *api_pb.BlockResponse
}
type ExtenderElapsedTime struct {
Height uint64
GettingBlock time.Duration
GettingEvents time.Duration
HandleCoinsFromTransactions time.Duration
HandleAddressesFromResponses time.Duration
HandleBlockResponse time.Duration
Total time.Duration
}
type eventHook struct {
beforeTime time.Time
log *logrus.Logger
}
func (eh eventHook) BeforeQuery(ctx context.Context, event *pg.QueryEvent) (context.Context, error) {
if event.Stash == nil {
event.Stash = make(map[interface{}]interface{})
}
event.Stash["query_time"] = time.Now()
return ctx, nil
}
func (eh eventHook) AfterQuery(ctx context.Context, event *pg.QueryEvent) error {
critical := time.Millisecond * 500
result := time.Duration(0)
if event.Stash != nil {
if v, ok := event.Stash["query_time"]; ok {
result = time.Now().Sub(v.(time.Time))
}
} | if result > critical {
bigQueryLog, err := os.OpenFile("big_query.log", os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
eh.log.Error("error opening file: %v", err)
}
// don't forget to close it
defer bigQueryLog.Close()
eh.log.SetReportCaller(false)
eh.log.SetFormatter(&logrus.JSONFormatter{})
eh.log.SetOutput(bigQueryLog)
q, err := event.UnformattedQuery()
if err != nil {
eh.log.Error(err)
}
r := regexp.MustCompile("\\s+")
replace := r.ReplaceAllString(fmt.Sprintf("%v", string(q)), " ")
eh.log.WithFields(logrus.Fields{
"query": strings.TrimSpace(replace),
"time": fmt.Sprintf("%s", result),
}).Error("DB query time is too height")
}
return nil
}
func NewExtender(env *env.ExtenderEnvironment) *Extender {
//Init Logger
logger := logrus.New()
logger.SetFormatter(&logrus.JSONFormatter{})
logger.SetOutput(os.Stdout)
logger.SetReportCaller(true)
if env.Debug {
logger.SetFormatter(&logrus.TextFormatter{
DisableColors: false,
FullTimestamp: true,
})
} else {
logger.SetFormatter(&logrus.JSONFormatter{})
logger.SetLevel(logrus.WarnLevel)
}
contextLogger := logger.WithFields(logrus.Fields{
"version": Version,
"app": "Minter Explorer Extender",
})
//Init DB
pgOptions := &pg.Options{
Addr: fmt.Sprintf("%s:%s", env.DbHost, env.DbPort),
User: env.DbUser,
Password: env.DbPassword,
Database: env.DbName,
}
if os.Getenv("POSTGRES_SSL_ENABLED") == "true" {
pgOptions.TLSConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
//hookImpl := eventHook{
// log: logrus.New(),
// beforeTime: time.Now(),
//}
db := pg.Connect(pgOptions)
//db.AddQueryHook(hookImpl)
uploader := genesisUploader.New(genesisEnv.Config{
Debug: false,
PostgresHost: env.DbHost,
PostgresPort: env.DbPort,
PostgresDB: env.DbName,
PostgresUser: env.DbUser,
PostgresPassword: env.DbPassword,
PostgresSSLEnabled: os.Getenv("POSTGRES_SSL_ENABLED") == "true",
MinterBaseCoin: env.BaseCoin,
NodeGrpc: env.NodeApi,
AddressChunkSize: uint64(env.AddrChunkSize),
CoinsChunkSize: 1000,
BalanceChunkSize: 10000,
StakeChunkSize: uint64(env.StakeChunkSize),
ValidatorChunkSize: uint64(env.StakeChunkSize),
})
err := uploader.Do()
if err != nil {
logger.Warn(err)
}
//api
nodeApi, err := grpc_client.New(env.NodeApi)
if err != nil {
panic(err)
}
nodeStatus, err := nodeApi.Status()
if err != nil {
panic(err)
}
// Repositories
blockRepository := block.NewRepository(db)
validatorRepository := validator.NewRepository(db, contextLogger)
transactionRepository := transaction.NewRepository(db)
addressRepository := address.NewRepository(db)
coinRepository := coin.NewRepository(db)
eventsRepository := events.NewRepository(db)
balanceRepository := balance.NewRepository(db)
liquidityPoolRepository := liquidity_pool.NewRepository(db)
orderbookRepository := orderbook.NewRepository(db)
coins.GlobalRepository = coins.NewRepository(db) //temporary solution
// Services
addressService := address.NewService(env, addressRepository, contextLogger)
broadcastService := broadcast.NewService(env, addressRepository, coinRepository, nodeApi, contextLogger)
balanceService := balance.NewService(env, balanceRepository, nodeApi, addressService, coinRepository, broadcastService, contextLogger)
coinService := coin.NewService(env, nodeApi, coinRepository, addressRepository, contextLogger)
validatorService := validator.NewService(env, nodeApi, validatorRepository, addressRepository, coinRepository, contextLogger)
eventService := events.NewService(env, eventsRepository, validatorRepository, addressRepository, coinRepository, coinService, blockRepository, orderbookRepository, balanceRepository, broadcastService, contextLogger, nodeStatus.InitialHeight+1)
orderBookService := orderbook.NewService(db, addressRepository, liquidityPoolRepository, contextLogger)
return &Extender{
Metrics: metrics.New(),
env: env,
nodeApi: nodeApi,
blockService: block.NewBlockService(blockRepository, validatorRepository, broadcastService),
eventService: eventService,
blockRepository: blockRepository,
validatorService: validatorService,
transactionService: transaction.NewService(env, transactionRepository, addressRepository, validatorRepository, coinRepository, coinService, broadcastService, contextLogger, validatorService.GetUnbondSaverJobChannel(), liquidityPoolRepository, validatorService.GetMoveStakeJobChannel()),
addressService: addressService,
validatorRepository: validatorRepository,
balanceService: balanceService,
coinService: coinService,
broadcastService: broadcastService,
orderBookService: orderBookService,
chasingMode: false,
currentNodeHeight: 0,
startBlockHeight: nodeStatus.InitialHeight + 1,
log: contextLogger,
lpSnapshotChannel: make(chan *api_pb.BlockResponse),
lpWorkerChannel: make(chan *api_pb.BlockResponse),
orderBookChannel: make(chan *api_pb.BlockResponse),
}
}
func (ext *Extender) GetInfo() {
fmt.Printf("%s v%s\n", "Minter Explorer Extender", Version)
}
func (ext *Extender) Run() {
//check connections to node
_, err := ext.nodeApi.Status()
if err == nil {
err = ext.blockRepository.DeleteLastBlockData()
}
if err != nil {
ext.log.Fatal(err)
}
var height uint64
// ----- Workers -----
ext.runWorkers()
lastExplorerBlock, err := ext.blockRepository.GetLastFromDB()
if err != nil && err != pg.ErrNoRows {
ext.log.Fatal(err)
}
if lastExplorerBlock != nil {
height = lastExplorerBlock.ID + 1
ext.blockService.SetBlockCache(lastExplorerBlock)
} else {
height = ext.startBlockHeight
}
for {
eet := ExtenderElapsedTime{
Height: height,
GettingBlock: 0,
GettingEvents: 0,
HandleCoinsFromTransactions: 0,
HandleAddressesFromResponses: 0,
HandleBlockResponse: 0,
Total: 0,
}
start := time.Now()
//ext.findOutChasingMode(height)
//Pulling block data
countStart := time.Now()
blockResponse, err := ext.nodeApi.BlockExtended(height, true, true)
if err != nil {
grpcErr, ok := status.FromError(err)
if !ok {
ext.log.Error(err)
time.Sleep(2 * time.Second)
continue
}
if grpcErr.Message() == "Block not found" || grpcErr.Message() == "Block results not found" {
time.Sleep(2 * time.Second)
continue
}
ext.log.Fatal(err)
}
eet.GettingBlock = time.Since(countStart)
countStart = time.Now()
ext.handleCoinsFromTransactions(blockResponse)
eet.HandleCoinsFromTransactions = time.Since(countStart)
countStart = time.Now()
ext.handleAddressesFromResponses(blockResponse)
eet.HandleAddressesFromResponses = time.Since(countStart)
countStart = time.Now()
ext.handleBlockResponse(blockResponse)
eet.HandleBlockResponse = time.Since(countStart)
ext.balanceService.UpdateChannel() <- blockResponse
go ext.handleEventResponse(height, blockResponse)
if len(blockResponse.Transactions) > 0 {
ext.orderBookChannel <- blockResponse
}
//ext.validatorService.GetUpdateStakesJobChannel() <- height
ext.validatorService.GetUpdateValidatorsJobChannel() <- height
ext.validatorService.GetClearJobChannel() <- height
eet.Total = time.Since(start)
ext.printSpentTimeLog(eet)
height++
}
}
func (ext *Extender) runWorkers() {
// Addresses
for w := 1; w <= ext.env.WrkSaveAddressesCount; w++ {
go ext.addressService.SaveAddressesWorker(ext.addressService.GetSaveAddressesJobChannel())
}
// Transactions
for w := 1; w <= ext.env.WrkSaveTxsCount; w++ {
go ext.transactionService.SaveTransactionsWorker(ext.transactionService.GetSaveTxJobChannel())
}
for w := 1; w <= ext.env.WrkSaveTxsOutputCount; w++ {
go ext.transactionService.SaveTransactionsOutputWorker(ext.transactionService.GetSaveTxsOutputJobChannel())
}
for w := 1; w <= ext.env.WrkSaveInvTxsCount; w++ {
go ext.transactionService.SaveInvalidTransactionsWorker(ext.transactionService.GetSaveInvalidTxsJobChannel())
}
go ext.transactionService.UpdateTxsIndexWorker()
// Validators
for w := 1; w <= ext.env.WrkSaveValidatorTxsCount; w++ {
go ext.transactionService.SaveTxValidatorWorker(ext.transactionService.GetSaveTxValidatorJobChannel())
}
go ext.validatorService.UpdateValidatorsWorker(ext.validatorService.GetUpdateValidatorsJobChannel())
//go ext.validatorService.UpdateStakesWorker(ext.validatorService.GetUpdateStakesJobChannel())
// Events
for w := 1; w <= ext.env.WrkSaveRewardsCount; w++ {
go ext.eventService.SaveRewardsWorker(ext.eventService.GetSaveRewardsJobChannel())
}
for w := 1; w <= ext.env.WrkSaveSlashesCount; w++ {
go ext.eventService.SaveSlashesWorker(ext.eventService.GetSaveSlashesJobChannel())
}
// Balances
go ext.balanceService.BalanceManager()
//Coins
go ext.coinService.UpdateCoinsInfoFromTxsWorker(ext.coinService.GetUpdateCoinsFromTxsJobChannel())
go ext.coinService.UpdateCoinsInfoFromCoinsMap(ext.coinService.GetUpdateCoinsFromCoinsMapJobChannel())
go ext.coinService.UpdateHubInfoWorker()
//Unbonds
go ext.validatorService.UnbondSaverWorker(ext.validatorService.GetUnbondSaverJobChannel())
//Move Stake
go ext.validatorService.MoveStakeWorker(ext.validatorService.GetMoveStakeJobChannel())
go ext.validatorService.ClearMoveStakeAndUnbondWorker(ext.validatorService.GetClearJobChannel())
//OrderBook
go ext.orderBookService.OrderBookWorker(ext.orderBookChannel)
go ext.orderBookService.UpdateOrderBookWorker(ext.orderBookService.UpdateOrderChannel())
//Broadcast
go ext.broadcastService.Manager()
}
func (ext *Extender) handleAddressesFromResponses(blockResponse *api_pb.BlockResponse) {
err := ext.addressService.SaveAddressesFromResponses(blockResponse)
if err != nil {
ext.log.Panic(err)
}
}
func (ext *Extender) handleBlockResponse(response *api_pb.BlockResponse) {
// Save validators if not exist
err := ext.validatorService.HandleBlockResponse(response)
if err != nil {
ext.log.Panic(err)
}
// Save block
err = ext.blockService.HandleBlockResponse(response)
if err != nil {
ext.log.Panic(err)
}
ext.linkBlockValidator(response)
//first block don't have validators
if response.TransactionCount > 0 {
ext.handleTransactions(response)
}
}
func (ext *Extender) handleCoinsFromTransactions(block *api_pb.BlockResponse) {
if len(block.Transactions) == 0 {
return
}
err := ext.coinService.HandleCoinsFromBlock(block)
if err != nil {
ext.log.Fatal(err)
}
}
func (ext *Extender) handleTransactions(response *api_pb.BlockResponse) {
chunksCount := int(math.Ceil(float64(len(response.Transactions)) / float64(ext.env.TxChunkSize)))
for i := 0; i < chunksCount; i++ {
start := ext.env.TxChunkSize * i
end := start + ext.env.TxChunkSize
if end > len(response.Transactions) {
end = len(response.Transactions)
}
layout := "2006-01-02T15:04:05Z"
blockTime, err := time.Parse(layout, response.Time)
if err != nil {
ext.log.Panic(err)
}
ext.saveTransactions(response.Height, blockTime, response.Transactions[start:end])
}
}
func (ext *Extender) handleEventResponse(blockHeight uint64, response *api_pb.BlockResponse) {
if len(response.Events) > 0 {
//Save events
err := ext.eventService.HandleEventResponse(blockHeight, response)
if err != nil {
ext.log.Fatal(err)
}
}
}
func (ext *Extender) linkBlockValidator(response *api_pb.BlockResponse) {
if response.Height == 1 {
return
}
var links []*models.BlockValidator
for _, v := range response.Validators {
vId, err := ext.validatorRepository.FindIdByPk(helpers.RemovePrefix(v.PublicKey))
if err != nil {
ext.log.Error(err)
}
helpers.HandleError(err)
link := models.BlockValidator{
ValidatorID: uint64(vId),
BlockID: response.Height,
Signed: v.Signed,
}
links = append(links, &link)
}
err := ext.blockRepository.LinkWithValidators(links)
if err != nil {
ext.log.Fatal(err)
}
}
func (ext *Extender) saveTransactions(blockHeight uint64, blockCreatedAt time.Time, transactions []*api_pb.TransactionResponse) {
// Save transactions
err := ext.transactionService.HandleTransactionsFromBlockResponse(blockHeight, blockCreatedAt, transactions)
if err != nil {
ext.log.Panic(err)
}
}
func (ext *Extender) getNodeLastBlockId() (uint64, error) {
statusResponse, err := ext.nodeApi.Status()
if err != nil {
ext.log.Error(err)
return 0, err
}
return statusResponse.LatestBlockHeight, err
}
func (ext *Extender) findOutChasingMode(height uint64) {
var err error
if ext.currentNodeHeight == 0 {
ext.currentNodeHeight, err = ext.getNodeLastBlockId()
if err != nil {
ext.log.Fatal(err)
}
}
isChasingMode := ext.currentNodeHeight-height > ChasingModDiff
if ext.chasingMode && !isChasingMode {
ext.currentNodeHeight, err = ext.getNodeLastBlockId()
if err != nil {
ext.log.Fatal(err)
}
ext.chasingMode = ext.currentNodeHeight-height > ChasingModDiff
}
ext.broadcastService.SetChasingMode(ext.chasingMode)
ext.balanceService.SetChasingMode(ext.chasingMode)
//ext.liquidityPoolService.SetChasingMode(ext.chasingMode)
}
func (ext *Extender) printSpentTimeLog(eet ExtenderElapsedTime) {
critical := 7 * time.Second
if eet.Total > critical {
ext.log.WithFields(logrus.Fields{
"getting block time": eet.GettingBlock,
"getting events time": eet.GettingEvents,
"handle addresses": eet.HandleAddressesFromResponses,
"handle coins": eet.HandleCoinsFromTransactions,
"handle block": eet.HandleBlockResponse,
"block": eet.Height,
"time": fmt.Sprintf("%s", eet.Total),
}).Warning("Processing time is too height")
}
ext.log.WithFields(logrus.Fields{
"getting block time": eet.GettingBlock,
"getting events time": eet.GettingEvents,
"handle addresses": eet.HandleAddressesFromResponses,
"handle coins": eet.HandleCoinsFromTransactions,
"handle block": eet.HandleBlockResponse,
}).Info(fmt.Sprintf("Block: %d Processing time: %s", eet.Height, eet.Total))
} | random_line_split | |
main.go | package core
import (
"context"
"crypto/tls"
"fmt"
genesisUploader "github.com/MinterTeam/explorer-genesis-uploader/core"
genesisEnv "github.com/MinterTeam/explorer-genesis-uploader/env"
"github.com/MinterTeam/minter-explorer-api/v2/coins"
"github.com/MinterTeam/minter-explorer-extender/v2/address"
"github.com/MinterTeam/minter-explorer-extender/v2/balance"
"github.com/MinterTeam/minter-explorer-extender/v2/block"
"github.com/MinterTeam/minter-explorer-extender/v2/broadcast"
"github.com/MinterTeam/minter-explorer-extender/v2/coin"
"github.com/MinterTeam/minter-explorer-extender/v2/env"
"github.com/MinterTeam/minter-explorer-extender/v2/events"
"github.com/MinterTeam/minter-explorer-extender/v2/liquidity_pool"
"github.com/MinterTeam/minter-explorer-extender/v2/metrics"
"github.com/MinterTeam/minter-explorer-extender/v2/models"
"github.com/MinterTeam/minter-explorer-extender/v2/orderbook"
"github.com/MinterTeam/minter-explorer-extender/v2/transaction"
"github.com/MinterTeam/minter-explorer-extender/v2/validator"
"github.com/MinterTeam/minter-explorer-tools/v4/helpers"
"github.com/MinterTeam/minter-go-sdk/v2/api/grpc_client"
"github.com/MinterTeam/node-grpc-gateway/api_pb"
"github.com/go-pg/pg/v10"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/status"
"math"
"os"
"regexp"
"strings"
"time"
)
const ChasingModDiff = 121
var Version string
type Extender struct {
Metrics *metrics.Metrics
env *env.ExtenderEnvironment
nodeApi *grpc_client.Client
blockService *block.Service
addressService *address.Service
blockRepository *block.Repository
validatorService *validator.Service
validatorRepository *validator.Repository
transactionService *transaction.Service
eventService *events.Service
balanceService *balance.Service
coinService *coin.Service
broadcastService *broadcast.Service
orderBookService *orderbook.Service
chasingMode bool
startBlockHeight uint64
currentNodeHeight uint64
lastLPSnapshotHeight uint64
log *logrus.Entry
lpSnapshotChannel chan *api_pb.BlockResponse
lpWorkerChannel chan *api_pb.BlockResponse
orderBookChannel chan *api_pb.BlockResponse
}
type ExtenderElapsedTime struct {
Height uint64
GettingBlock time.Duration
GettingEvents time.Duration
HandleCoinsFromTransactions time.Duration
HandleAddressesFromResponses time.Duration
HandleBlockResponse time.Duration
Total time.Duration
}
type eventHook struct {
beforeTime time.Time
log *logrus.Logger
}
func (eh eventHook) BeforeQuery(ctx context.Context, event *pg.QueryEvent) (context.Context, error) {
if event.Stash == nil {
event.Stash = make(map[interface{}]interface{})
}
event.Stash["query_time"] = time.Now()
return ctx, nil
}
func (eh eventHook) AfterQuery(ctx context.Context, event *pg.QueryEvent) error {
critical := time.Millisecond * 500
result := time.Duration(0)
if event.Stash != nil {
if v, ok := event.Stash["query_time"]; ok {
result = time.Now().Sub(v.(time.Time))
}
}
if result > critical {
bigQueryLog, err := os.OpenFile("big_query.log", os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
eh.log.Error("error opening file: %v", err)
}
// don't forget to close it
defer bigQueryLog.Close()
eh.log.SetReportCaller(false)
eh.log.SetFormatter(&logrus.JSONFormatter{})
eh.log.SetOutput(bigQueryLog)
q, err := event.UnformattedQuery()
if err != nil {
eh.log.Error(err)
}
r := regexp.MustCompile("\\s+")
replace := r.ReplaceAllString(fmt.Sprintf("%v", string(q)), " ")
eh.log.WithFields(logrus.Fields{
"query": strings.TrimSpace(replace),
"time": fmt.Sprintf("%s", result),
}).Error("DB query time is too height")
}
return nil
}
func NewExtender(env *env.ExtenderEnvironment) *Extender {
//Init Logger
logger := logrus.New()
logger.SetFormatter(&logrus.JSONFormatter{})
logger.SetOutput(os.Stdout)
logger.SetReportCaller(true)
if env.Debug {
logger.SetFormatter(&logrus.TextFormatter{
DisableColors: false,
FullTimestamp: true,
})
} else {
logger.SetFormatter(&logrus.JSONFormatter{})
logger.SetLevel(logrus.WarnLevel)
}
contextLogger := logger.WithFields(logrus.Fields{
"version": Version,
"app": "Minter Explorer Extender",
})
//Init DB
pgOptions := &pg.Options{
Addr: fmt.Sprintf("%s:%s", env.DbHost, env.DbPort),
User: env.DbUser,
Password: env.DbPassword,
Database: env.DbName,
}
if os.Getenv("POSTGRES_SSL_ENABLED") == "true" {
pgOptions.TLSConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
//hookImpl := eventHook{
// log: logrus.New(),
// beforeTime: time.Now(),
//}
db := pg.Connect(pgOptions)
//db.AddQueryHook(hookImpl)
uploader := genesisUploader.New(genesisEnv.Config{
Debug: false,
PostgresHost: env.DbHost,
PostgresPort: env.DbPort,
PostgresDB: env.DbName,
PostgresUser: env.DbUser,
PostgresPassword: env.DbPassword,
PostgresSSLEnabled: os.Getenv("POSTGRES_SSL_ENABLED") == "true",
MinterBaseCoin: env.BaseCoin,
NodeGrpc: env.NodeApi,
AddressChunkSize: uint64(env.AddrChunkSize),
CoinsChunkSize: 1000,
BalanceChunkSize: 10000,
StakeChunkSize: uint64(env.StakeChunkSize),
ValidatorChunkSize: uint64(env.StakeChunkSize),
})
err := uploader.Do()
if err != nil {
logger.Warn(err)
}
//api
nodeApi, err := grpc_client.New(env.NodeApi)
if err != nil {
panic(err)
}
nodeStatus, err := nodeApi.Status()
if err != nil {
panic(err)
}
// Repositories
blockRepository := block.NewRepository(db)
validatorRepository := validator.NewRepository(db, contextLogger)
transactionRepository := transaction.NewRepository(db)
addressRepository := address.NewRepository(db)
coinRepository := coin.NewRepository(db)
eventsRepository := events.NewRepository(db)
balanceRepository := balance.NewRepository(db)
liquidityPoolRepository := liquidity_pool.NewRepository(db)
orderbookRepository := orderbook.NewRepository(db)
coins.GlobalRepository = coins.NewRepository(db) //temporary solution
// Services
addressService := address.NewService(env, addressRepository, contextLogger)
broadcastService := broadcast.NewService(env, addressRepository, coinRepository, nodeApi, contextLogger)
balanceService := balance.NewService(env, balanceRepository, nodeApi, addressService, coinRepository, broadcastService, contextLogger)
coinService := coin.NewService(env, nodeApi, coinRepository, addressRepository, contextLogger)
validatorService := validator.NewService(env, nodeApi, validatorRepository, addressRepository, coinRepository, contextLogger)
eventService := events.NewService(env, eventsRepository, validatorRepository, addressRepository, coinRepository, coinService, blockRepository, orderbookRepository, balanceRepository, broadcastService, contextLogger, nodeStatus.InitialHeight+1)
orderBookService := orderbook.NewService(db, addressRepository, liquidityPoolRepository, contextLogger)
return &Extender{
Metrics: metrics.New(),
env: env,
nodeApi: nodeApi,
blockService: block.NewBlockService(blockRepository, validatorRepository, broadcastService),
eventService: eventService,
blockRepository: blockRepository,
validatorService: validatorService,
transactionService: transaction.NewService(env, transactionRepository, addressRepository, validatorRepository, coinRepository, coinService, broadcastService, contextLogger, validatorService.GetUnbondSaverJobChannel(), liquidityPoolRepository, validatorService.GetMoveStakeJobChannel()),
addressService: addressService,
validatorRepository: validatorRepository,
balanceService: balanceService,
coinService: coinService,
broadcastService: broadcastService,
orderBookService: orderBookService,
chasingMode: false,
currentNodeHeight: 0,
startBlockHeight: nodeStatus.InitialHeight + 1,
log: contextLogger,
lpSnapshotChannel: make(chan *api_pb.BlockResponse),
lpWorkerChannel: make(chan *api_pb.BlockResponse),
orderBookChannel: make(chan *api_pb.BlockResponse),
}
}
func (ext *Extender) GetInfo() {
fmt.Printf("%s v%s\n", "Minter Explorer Extender", Version)
}
func (ext *Extender) Run() {
//check connections to node
_, err := ext.nodeApi.Status()
if err == nil {
err = ext.blockRepository.DeleteLastBlockData()
}
if err != nil {
ext.log.Fatal(err)
}
var height uint64
// ----- Workers -----
ext.runWorkers()
lastExplorerBlock, err := ext.blockRepository.GetLastFromDB()
if err != nil && err != pg.ErrNoRows {
ext.log.Fatal(err)
}
if lastExplorerBlock != nil {
height = lastExplorerBlock.ID + 1
ext.blockService.SetBlockCache(lastExplorerBlock)
} else {
height = ext.startBlockHeight
}
for {
eet := ExtenderElapsedTime{
Height: height,
GettingBlock: 0,
GettingEvents: 0,
HandleCoinsFromTransactions: 0,
HandleAddressesFromResponses: 0,
HandleBlockResponse: 0,
Total: 0,
}
start := time.Now()
//ext.findOutChasingMode(height)
//Pulling block data
countStart := time.Now()
blockResponse, err := ext.nodeApi.BlockExtended(height, true, true)
if err != nil {
grpcErr, ok := status.FromError(err)
if !ok {
ext.log.Error(err)
time.Sleep(2 * time.Second)
continue
}
if grpcErr.Message() == "Block not found" || grpcErr.Message() == "Block results not found" {
time.Sleep(2 * time.Second)
continue
}
ext.log.Fatal(err)
}
eet.GettingBlock = time.Since(countStart)
countStart = time.Now()
ext.handleCoinsFromTransactions(blockResponse)
eet.HandleCoinsFromTransactions = time.Since(countStart)
countStart = time.Now()
ext.handleAddressesFromResponses(blockResponse)
eet.HandleAddressesFromResponses = time.Since(countStart)
countStart = time.Now()
ext.handleBlockResponse(blockResponse)
eet.HandleBlockResponse = time.Since(countStart)
ext.balanceService.UpdateChannel() <- blockResponse
go ext.handleEventResponse(height, blockResponse)
if len(blockResponse.Transactions) > 0 {
ext.orderBookChannel <- blockResponse
}
//ext.validatorService.GetUpdateStakesJobChannel() <- height
ext.validatorService.GetUpdateValidatorsJobChannel() <- height
ext.validatorService.GetClearJobChannel() <- height
eet.Total = time.Since(start)
ext.printSpentTimeLog(eet)
height++
}
}
func (ext *Extender) runWorkers() {
// Addresses
for w := 1; w <= ext.env.WrkSaveAddressesCount; w++ {
go ext.addressService.SaveAddressesWorker(ext.addressService.GetSaveAddressesJobChannel())
}
// Transactions
for w := 1; w <= ext.env.WrkSaveTxsCount; w++ {
go ext.transactionService.SaveTransactionsWorker(ext.transactionService.GetSaveTxJobChannel())
}
for w := 1; w <= ext.env.WrkSaveTxsOutputCount; w++ {
go ext.transactionService.SaveTransactionsOutputWorker(ext.transactionService.GetSaveTxsOutputJobChannel())
}
for w := 1; w <= ext.env.WrkSaveInvTxsCount; w++ {
go ext.transactionService.SaveInvalidTransactionsWorker(ext.transactionService.GetSaveInvalidTxsJobChannel())
}
go ext.transactionService.UpdateTxsIndexWorker()
// Validators
for w := 1; w <= ext.env.WrkSaveValidatorTxsCount; w++ {
go ext.transactionService.SaveTxValidatorWorker(ext.transactionService.GetSaveTxValidatorJobChannel())
}
go ext.validatorService.UpdateValidatorsWorker(ext.validatorService.GetUpdateValidatorsJobChannel())
//go ext.validatorService.UpdateStakesWorker(ext.validatorService.GetUpdateStakesJobChannel())
// Events
for w := 1; w <= ext.env.WrkSaveRewardsCount; w++ {
go ext.eventService.SaveRewardsWorker(ext.eventService.GetSaveRewardsJobChannel())
}
for w := 1; w <= ext.env.WrkSaveSlashesCount; w++ {
go ext.eventService.SaveSlashesWorker(ext.eventService.GetSaveSlashesJobChannel())
}
// Balances
go ext.balanceService.BalanceManager()
//Coins
go ext.coinService.UpdateCoinsInfoFromTxsWorker(ext.coinService.GetUpdateCoinsFromTxsJobChannel())
go ext.coinService.UpdateCoinsInfoFromCoinsMap(ext.coinService.GetUpdateCoinsFromCoinsMapJobChannel())
go ext.coinService.UpdateHubInfoWorker()
//Unbonds
go ext.validatorService.UnbondSaverWorker(ext.validatorService.GetUnbondSaverJobChannel())
//Move Stake
go ext.validatorService.MoveStakeWorker(ext.validatorService.GetMoveStakeJobChannel())
go ext.validatorService.ClearMoveStakeAndUnbondWorker(ext.validatorService.GetClearJobChannel())
//OrderBook
go ext.orderBookService.OrderBookWorker(ext.orderBookChannel)
go ext.orderBookService.UpdateOrderBookWorker(ext.orderBookService.UpdateOrderChannel())
//Broadcast
go ext.broadcastService.Manager()
}
func (ext *Extender) handleAddressesFromResponses(blockResponse *api_pb.BlockResponse) {
err := ext.addressService.SaveAddressesFromResponses(blockResponse)
if err != nil {
ext.log.Panic(err)
}
}
func (ext *Extender) | (response *api_pb.BlockResponse) {
// Save validators if not exist
err := ext.validatorService.HandleBlockResponse(response)
if err != nil {
ext.log.Panic(err)
}
// Save block
err = ext.blockService.HandleBlockResponse(response)
if err != nil {
ext.log.Panic(err)
}
ext.linkBlockValidator(response)
//first block don't have validators
if response.TransactionCount > 0 {
ext.handleTransactions(response)
}
}
func (ext *Extender) handleCoinsFromTransactions(block *api_pb.BlockResponse) {
if len(block.Transactions) == 0 {
return
}
err := ext.coinService.HandleCoinsFromBlock(block)
if err != nil {
ext.log.Fatal(err)
}
}
func (ext *Extender) handleTransactions(response *api_pb.BlockResponse) {
chunksCount := int(math.Ceil(float64(len(response.Transactions)) / float64(ext.env.TxChunkSize)))
for i := 0; i < chunksCount; i++ {
start := ext.env.TxChunkSize * i
end := start + ext.env.TxChunkSize
if end > len(response.Transactions) {
end = len(response.Transactions)
}
layout := "2006-01-02T15:04:05Z"
blockTime, err := time.Parse(layout, response.Time)
if err != nil {
ext.log.Panic(err)
}
ext.saveTransactions(response.Height, blockTime, response.Transactions[start:end])
}
}
func (ext *Extender) handleEventResponse(blockHeight uint64, response *api_pb.BlockResponse) {
if len(response.Events) > 0 {
//Save events
err := ext.eventService.HandleEventResponse(blockHeight, response)
if err != nil {
ext.log.Fatal(err)
}
}
}
func (ext *Extender) linkBlockValidator(response *api_pb.BlockResponse) {
if response.Height == 1 {
return
}
var links []*models.BlockValidator
for _, v := range response.Validators {
vId, err := ext.validatorRepository.FindIdByPk(helpers.RemovePrefix(v.PublicKey))
if err != nil {
ext.log.Error(err)
}
helpers.HandleError(err)
link := models.BlockValidator{
ValidatorID: uint64(vId),
BlockID: response.Height,
Signed: v.Signed,
}
links = append(links, &link)
}
err := ext.blockRepository.LinkWithValidators(links)
if err != nil {
ext.log.Fatal(err)
}
}
func (ext *Extender) saveTransactions(blockHeight uint64, blockCreatedAt time.Time, transactions []*api_pb.TransactionResponse) {
// Save transactions
err := ext.transactionService.HandleTransactionsFromBlockResponse(blockHeight, blockCreatedAt, transactions)
if err != nil {
ext.log.Panic(err)
}
}
func (ext *Extender) getNodeLastBlockId() (uint64, error) {
statusResponse, err := ext.nodeApi.Status()
if err != nil {
ext.log.Error(err)
return 0, err
}
return statusResponse.LatestBlockHeight, err
}
func (ext *Extender) findOutChasingMode(height uint64) {
var err error
if ext.currentNodeHeight == 0 {
ext.currentNodeHeight, err = ext.getNodeLastBlockId()
if err != nil {
ext.log.Fatal(err)
}
}
isChasingMode := ext.currentNodeHeight-height > ChasingModDiff
if ext.chasingMode && !isChasingMode {
ext.currentNodeHeight, err = ext.getNodeLastBlockId()
if err != nil {
ext.log.Fatal(err)
}
ext.chasingMode = ext.currentNodeHeight-height > ChasingModDiff
}
ext.broadcastService.SetChasingMode(ext.chasingMode)
ext.balanceService.SetChasingMode(ext.chasingMode)
//ext.liquidityPoolService.SetChasingMode(ext.chasingMode)
}
func (ext *Extender) printSpentTimeLog(eet ExtenderElapsedTime) {
critical := 7 * time.Second
if eet.Total > critical {
ext.log.WithFields(logrus.Fields{
"getting block time": eet.GettingBlock,
"getting events time": eet.GettingEvents,
"handle addresses": eet.HandleAddressesFromResponses,
"handle coins": eet.HandleCoinsFromTransactions,
"handle block": eet.HandleBlockResponse,
"block": eet.Height,
"time": fmt.Sprintf("%s", eet.Total),
}).Warning("Processing time is too height")
}
ext.log.WithFields(logrus.Fields{
"getting block time": eet.GettingBlock,
"getting events time": eet.GettingEvents,
"handle addresses": eet.HandleAddressesFromResponses,
"handle coins": eet.HandleCoinsFromTransactions,
"handle block": eet.HandleBlockResponse,
}).Info(fmt.Sprintf("Block: %d Processing time: %s", eet.Height, eet.Total))
}
| handleBlockResponse | identifier_name |
main.go | package core
import (
"context"
"crypto/tls"
"fmt"
genesisUploader "github.com/MinterTeam/explorer-genesis-uploader/core"
genesisEnv "github.com/MinterTeam/explorer-genesis-uploader/env"
"github.com/MinterTeam/minter-explorer-api/v2/coins"
"github.com/MinterTeam/minter-explorer-extender/v2/address"
"github.com/MinterTeam/minter-explorer-extender/v2/balance"
"github.com/MinterTeam/minter-explorer-extender/v2/block"
"github.com/MinterTeam/minter-explorer-extender/v2/broadcast"
"github.com/MinterTeam/minter-explorer-extender/v2/coin"
"github.com/MinterTeam/minter-explorer-extender/v2/env"
"github.com/MinterTeam/minter-explorer-extender/v2/events"
"github.com/MinterTeam/minter-explorer-extender/v2/liquidity_pool"
"github.com/MinterTeam/minter-explorer-extender/v2/metrics"
"github.com/MinterTeam/minter-explorer-extender/v2/models"
"github.com/MinterTeam/minter-explorer-extender/v2/orderbook"
"github.com/MinterTeam/minter-explorer-extender/v2/transaction"
"github.com/MinterTeam/minter-explorer-extender/v2/validator"
"github.com/MinterTeam/minter-explorer-tools/v4/helpers"
"github.com/MinterTeam/minter-go-sdk/v2/api/grpc_client"
"github.com/MinterTeam/node-grpc-gateway/api_pb"
"github.com/go-pg/pg/v10"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/status"
"math"
"os"
"regexp"
"strings"
"time"
)
const ChasingModDiff = 121
var Version string
type Extender struct {
Metrics *metrics.Metrics
env *env.ExtenderEnvironment
nodeApi *grpc_client.Client
blockService *block.Service
addressService *address.Service
blockRepository *block.Repository
validatorService *validator.Service
validatorRepository *validator.Repository
transactionService *transaction.Service
eventService *events.Service
balanceService *balance.Service
coinService *coin.Service
broadcastService *broadcast.Service
orderBookService *orderbook.Service
chasingMode bool
startBlockHeight uint64
currentNodeHeight uint64
lastLPSnapshotHeight uint64
log *logrus.Entry
lpSnapshotChannel chan *api_pb.BlockResponse
lpWorkerChannel chan *api_pb.BlockResponse
orderBookChannel chan *api_pb.BlockResponse
}
type ExtenderElapsedTime struct {
Height uint64
GettingBlock time.Duration
GettingEvents time.Duration
HandleCoinsFromTransactions time.Duration
HandleAddressesFromResponses time.Duration
HandleBlockResponse time.Duration
Total time.Duration
}
type eventHook struct {
beforeTime time.Time
log *logrus.Logger
}
func (eh eventHook) BeforeQuery(ctx context.Context, event *pg.QueryEvent) (context.Context, error) {
if event.Stash == nil {
event.Stash = make(map[interface{}]interface{})
}
event.Stash["query_time"] = time.Now()
return ctx, nil
}
func (eh eventHook) AfterQuery(ctx context.Context, event *pg.QueryEvent) error {
critical := time.Millisecond * 500
result := time.Duration(0)
if event.Stash != nil {
if v, ok := event.Stash["query_time"]; ok {
result = time.Now().Sub(v.(time.Time))
}
}
if result > critical {
bigQueryLog, err := os.OpenFile("big_query.log", os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
eh.log.Error("error opening file: %v", err)
}
// don't forget to close it
defer bigQueryLog.Close()
eh.log.SetReportCaller(false)
eh.log.SetFormatter(&logrus.JSONFormatter{})
eh.log.SetOutput(bigQueryLog)
q, err := event.UnformattedQuery()
if err != nil {
eh.log.Error(err)
}
r := regexp.MustCompile("\\s+")
replace := r.ReplaceAllString(fmt.Sprintf("%v", string(q)), " ")
eh.log.WithFields(logrus.Fields{
"query": strings.TrimSpace(replace),
"time": fmt.Sprintf("%s", result),
}).Error("DB query time is too height")
}
return nil
}
func NewExtender(env *env.ExtenderEnvironment) *Extender {
//Init Logger
logger := logrus.New()
logger.SetFormatter(&logrus.JSONFormatter{})
logger.SetOutput(os.Stdout)
logger.SetReportCaller(true)
if env.Debug {
logger.SetFormatter(&logrus.TextFormatter{
DisableColors: false,
FullTimestamp: true,
})
} else {
logger.SetFormatter(&logrus.JSONFormatter{})
logger.SetLevel(logrus.WarnLevel)
}
contextLogger := logger.WithFields(logrus.Fields{
"version": Version,
"app": "Minter Explorer Extender",
})
//Init DB
pgOptions := &pg.Options{
Addr: fmt.Sprintf("%s:%s", env.DbHost, env.DbPort),
User: env.DbUser,
Password: env.DbPassword,
Database: env.DbName,
}
if os.Getenv("POSTGRES_SSL_ENABLED") == "true" {
pgOptions.TLSConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
//hookImpl := eventHook{
// log: logrus.New(),
// beforeTime: time.Now(),
//}
db := pg.Connect(pgOptions)
//db.AddQueryHook(hookImpl)
uploader := genesisUploader.New(genesisEnv.Config{
Debug: false,
PostgresHost: env.DbHost,
PostgresPort: env.DbPort,
PostgresDB: env.DbName,
PostgresUser: env.DbUser,
PostgresPassword: env.DbPassword,
PostgresSSLEnabled: os.Getenv("POSTGRES_SSL_ENABLED") == "true",
MinterBaseCoin: env.BaseCoin,
NodeGrpc: env.NodeApi,
AddressChunkSize: uint64(env.AddrChunkSize),
CoinsChunkSize: 1000,
BalanceChunkSize: 10000,
StakeChunkSize: uint64(env.StakeChunkSize),
ValidatorChunkSize: uint64(env.StakeChunkSize),
})
err := uploader.Do()
if err != nil {
logger.Warn(err)
}
//api
nodeApi, err := grpc_client.New(env.NodeApi)
if err != nil {
panic(err)
}
nodeStatus, err := nodeApi.Status()
if err != nil {
panic(err)
}
// Repositories
blockRepository := block.NewRepository(db)
validatorRepository := validator.NewRepository(db, contextLogger)
transactionRepository := transaction.NewRepository(db)
addressRepository := address.NewRepository(db)
coinRepository := coin.NewRepository(db)
eventsRepository := events.NewRepository(db)
balanceRepository := balance.NewRepository(db)
liquidityPoolRepository := liquidity_pool.NewRepository(db)
orderbookRepository := orderbook.NewRepository(db)
coins.GlobalRepository = coins.NewRepository(db) //temporary solution
// Services
addressService := address.NewService(env, addressRepository, contextLogger)
broadcastService := broadcast.NewService(env, addressRepository, coinRepository, nodeApi, contextLogger)
balanceService := balance.NewService(env, balanceRepository, nodeApi, addressService, coinRepository, broadcastService, contextLogger)
coinService := coin.NewService(env, nodeApi, coinRepository, addressRepository, contextLogger)
validatorService := validator.NewService(env, nodeApi, validatorRepository, addressRepository, coinRepository, contextLogger)
eventService := events.NewService(env, eventsRepository, validatorRepository, addressRepository, coinRepository, coinService, blockRepository, orderbookRepository, balanceRepository, broadcastService, contextLogger, nodeStatus.InitialHeight+1)
orderBookService := orderbook.NewService(db, addressRepository, liquidityPoolRepository, contextLogger)
return &Extender{
Metrics: metrics.New(),
env: env,
nodeApi: nodeApi,
blockService: block.NewBlockService(blockRepository, validatorRepository, broadcastService),
eventService: eventService,
blockRepository: blockRepository,
validatorService: validatorService,
transactionService: transaction.NewService(env, transactionRepository, addressRepository, validatorRepository, coinRepository, coinService, broadcastService, contextLogger, validatorService.GetUnbondSaverJobChannel(), liquidityPoolRepository, validatorService.GetMoveStakeJobChannel()),
addressService: addressService,
validatorRepository: validatorRepository,
balanceService: balanceService,
coinService: coinService,
broadcastService: broadcastService,
orderBookService: orderBookService,
chasingMode: false,
currentNodeHeight: 0,
startBlockHeight: nodeStatus.InitialHeight + 1,
log: contextLogger,
lpSnapshotChannel: make(chan *api_pb.BlockResponse),
lpWorkerChannel: make(chan *api_pb.BlockResponse),
orderBookChannel: make(chan *api_pb.BlockResponse),
}
}
func (ext *Extender) GetInfo() {
fmt.Printf("%s v%s\n", "Minter Explorer Extender", Version)
}
func (ext *Extender) Run() {
//check connections to node
_, err := ext.nodeApi.Status()
if err == nil {
err = ext.blockRepository.DeleteLastBlockData()
}
if err != nil {
ext.log.Fatal(err)
}
var height uint64
// ----- Workers -----
ext.runWorkers()
lastExplorerBlock, err := ext.blockRepository.GetLastFromDB()
if err != nil && err != pg.ErrNoRows {
ext.log.Fatal(err)
}
if lastExplorerBlock != nil {
height = lastExplorerBlock.ID + 1
ext.blockService.SetBlockCache(lastExplorerBlock)
} else {
height = ext.startBlockHeight
}
for {
eet := ExtenderElapsedTime{
Height: height,
GettingBlock: 0,
GettingEvents: 0,
HandleCoinsFromTransactions: 0,
HandleAddressesFromResponses: 0,
HandleBlockResponse: 0,
Total: 0,
}
start := time.Now()
//ext.findOutChasingMode(height)
//Pulling block data
countStart := time.Now()
blockResponse, err := ext.nodeApi.BlockExtended(height, true, true)
if err != nil {
grpcErr, ok := status.FromError(err)
if !ok {
ext.log.Error(err)
time.Sleep(2 * time.Second)
continue
}
if grpcErr.Message() == "Block not found" || grpcErr.Message() == "Block results not found" {
time.Sleep(2 * time.Second)
continue
}
ext.log.Fatal(err)
}
eet.GettingBlock = time.Since(countStart)
countStart = time.Now()
ext.handleCoinsFromTransactions(blockResponse)
eet.HandleCoinsFromTransactions = time.Since(countStart)
countStart = time.Now()
ext.handleAddressesFromResponses(blockResponse)
eet.HandleAddressesFromResponses = time.Since(countStart)
countStart = time.Now()
ext.handleBlockResponse(blockResponse)
eet.HandleBlockResponse = time.Since(countStart)
ext.balanceService.UpdateChannel() <- blockResponse
go ext.handleEventResponse(height, blockResponse)
if len(blockResponse.Transactions) > 0 {
ext.orderBookChannel <- blockResponse
}
//ext.validatorService.GetUpdateStakesJobChannel() <- height
ext.validatorService.GetUpdateValidatorsJobChannel() <- height
ext.validatorService.GetClearJobChannel() <- height
eet.Total = time.Since(start)
ext.printSpentTimeLog(eet)
height++
}
}
func (ext *Extender) runWorkers() {
// Addresses
for w := 1; w <= ext.env.WrkSaveAddressesCount; w++ {
go ext.addressService.SaveAddressesWorker(ext.addressService.GetSaveAddressesJobChannel())
}
// Transactions
for w := 1; w <= ext.env.WrkSaveTxsCount; w++ {
go ext.transactionService.SaveTransactionsWorker(ext.transactionService.GetSaveTxJobChannel())
}
for w := 1; w <= ext.env.WrkSaveTxsOutputCount; w++ {
go ext.transactionService.SaveTransactionsOutputWorker(ext.transactionService.GetSaveTxsOutputJobChannel())
}
for w := 1; w <= ext.env.WrkSaveInvTxsCount; w++ {
go ext.transactionService.SaveInvalidTransactionsWorker(ext.transactionService.GetSaveInvalidTxsJobChannel())
}
go ext.transactionService.UpdateTxsIndexWorker()
// Validators
for w := 1; w <= ext.env.WrkSaveValidatorTxsCount; w++ {
go ext.transactionService.SaveTxValidatorWorker(ext.transactionService.GetSaveTxValidatorJobChannel())
}
go ext.validatorService.UpdateValidatorsWorker(ext.validatorService.GetUpdateValidatorsJobChannel())
//go ext.validatorService.UpdateStakesWorker(ext.validatorService.GetUpdateStakesJobChannel())
// Events
for w := 1; w <= ext.env.WrkSaveRewardsCount; w++ {
go ext.eventService.SaveRewardsWorker(ext.eventService.GetSaveRewardsJobChannel())
}
for w := 1; w <= ext.env.WrkSaveSlashesCount; w++ {
go ext.eventService.SaveSlashesWorker(ext.eventService.GetSaveSlashesJobChannel())
}
// Balances
go ext.balanceService.BalanceManager()
//Coins
go ext.coinService.UpdateCoinsInfoFromTxsWorker(ext.coinService.GetUpdateCoinsFromTxsJobChannel())
go ext.coinService.UpdateCoinsInfoFromCoinsMap(ext.coinService.GetUpdateCoinsFromCoinsMapJobChannel())
go ext.coinService.UpdateHubInfoWorker()
//Unbonds
go ext.validatorService.UnbondSaverWorker(ext.validatorService.GetUnbondSaverJobChannel())
//Move Stake
go ext.validatorService.MoveStakeWorker(ext.validatorService.GetMoveStakeJobChannel())
go ext.validatorService.ClearMoveStakeAndUnbondWorker(ext.validatorService.GetClearJobChannel())
//OrderBook
go ext.orderBookService.OrderBookWorker(ext.orderBookChannel)
go ext.orderBookService.UpdateOrderBookWorker(ext.orderBookService.UpdateOrderChannel())
//Broadcast
go ext.broadcastService.Manager()
}
func (ext *Extender) handleAddressesFromResponses(blockResponse *api_pb.BlockResponse) {
err := ext.addressService.SaveAddressesFromResponses(blockResponse)
if err != nil {
ext.log.Panic(err)
}
}
func (ext *Extender) handleBlockResponse(response *api_pb.BlockResponse) {
// Save validators if not exist
err := ext.validatorService.HandleBlockResponse(response)
if err != nil {
ext.log.Panic(err)
}
// Save block
err = ext.blockService.HandleBlockResponse(response)
if err != nil {
ext.log.Panic(err)
}
ext.linkBlockValidator(response)
//first block don't have validators
if response.TransactionCount > 0 {
ext.handleTransactions(response)
}
}
func (ext *Extender) handleCoinsFromTransactions(block *api_pb.BlockResponse) {
if len(block.Transactions) == 0 {
return
}
err := ext.coinService.HandleCoinsFromBlock(block)
if err != nil {
ext.log.Fatal(err)
}
}
func (ext *Extender) handleTransactions(response *api_pb.BlockResponse) {
chunksCount := int(math.Ceil(float64(len(response.Transactions)) / float64(ext.env.TxChunkSize)))
for i := 0; i < chunksCount; i++ |
}
func (ext *Extender) handleEventResponse(blockHeight uint64, response *api_pb.BlockResponse) {
if len(response.Events) > 0 {
//Save events
err := ext.eventService.HandleEventResponse(blockHeight, response)
if err != nil {
ext.log.Fatal(err)
}
}
}
func (ext *Extender) linkBlockValidator(response *api_pb.BlockResponse) {
if response.Height == 1 {
return
}
var links []*models.BlockValidator
for _, v := range response.Validators {
vId, err := ext.validatorRepository.FindIdByPk(helpers.RemovePrefix(v.PublicKey))
if err != nil {
ext.log.Error(err)
}
helpers.HandleError(err)
link := models.BlockValidator{
ValidatorID: uint64(vId),
BlockID: response.Height,
Signed: v.Signed,
}
links = append(links, &link)
}
err := ext.blockRepository.LinkWithValidators(links)
if err != nil {
ext.log.Fatal(err)
}
}
func (ext *Extender) saveTransactions(blockHeight uint64, blockCreatedAt time.Time, transactions []*api_pb.TransactionResponse) {
// Save transactions
err := ext.transactionService.HandleTransactionsFromBlockResponse(blockHeight, blockCreatedAt, transactions)
if err != nil {
ext.log.Panic(err)
}
}
func (ext *Extender) getNodeLastBlockId() (uint64, error) {
statusResponse, err := ext.nodeApi.Status()
if err != nil {
ext.log.Error(err)
return 0, err
}
return statusResponse.LatestBlockHeight, err
}
func (ext *Extender) findOutChasingMode(height uint64) {
var err error
if ext.currentNodeHeight == 0 {
ext.currentNodeHeight, err = ext.getNodeLastBlockId()
if err != nil {
ext.log.Fatal(err)
}
}
isChasingMode := ext.currentNodeHeight-height > ChasingModDiff
if ext.chasingMode && !isChasingMode {
ext.currentNodeHeight, err = ext.getNodeLastBlockId()
if err != nil {
ext.log.Fatal(err)
}
ext.chasingMode = ext.currentNodeHeight-height > ChasingModDiff
}
ext.broadcastService.SetChasingMode(ext.chasingMode)
ext.balanceService.SetChasingMode(ext.chasingMode)
//ext.liquidityPoolService.SetChasingMode(ext.chasingMode)
}
func (ext *Extender) printSpentTimeLog(eet ExtenderElapsedTime) {
critical := 7 * time.Second
if eet.Total > critical {
ext.log.WithFields(logrus.Fields{
"getting block time": eet.GettingBlock,
"getting events time": eet.GettingEvents,
"handle addresses": eet.HandleAddressesFromResponses,
"handle coins": eet.HandleCoinsFromTransactions,
"handle block": eet.HandleBlockResponse,
"block": eet.Height,
"time": fmt.Sprintf("%s", eet.Total),
}).Warning("Processing time is too height")
}
ext.log.WithFields(logrus.Fields{
"getting block time": eet.GettingBlock,
"getting events time": eet.GettingEvents,
"handle addresses": eet.HandleAddressesFromResponses,
"handle coins": eet.HandleCoinsFromTransactions,
"handle block": eet.HandleBlockResponse,
}).Info(fmt.Sprintf("Block: %d Processing time: %s", eet.Height, eet.Total))
}
| {
start := ext.env.TxChunkSize * i
end := start + ext.env.TxChunkSize
if end > len(response.Transactions) {
end = len(response.Transactions)
}
layout := "2006-01-02T15:04:05Z"
blockTime, err := time.Parse(layout, response.Time)
if err != nil {
ext.log.Panic(err)
}
ext.saveTransactions(response.Height, blockTime, response.Transactions[start:end])
} | conditional_block |
utils.py | import numbers
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from numpy import asarray
import numpy as np
from numpy.lib.stride_tricks import as_strided
from skimage import feature
from skimage.filters import threshold_otsu
from sklearn.utils import check_random_state, check_array
from torch import sqrt
from torch.utils.data import DataLoader, ConcatDataset, Dataset
from torchvision.datasets import ImageFolder
import matplotlib.pyplot as plt
from tqdm import tqdm
def train_val_split(data, train_ratio=0.9):
train_size = int(train_ratio * len(data))
train_data = data[:train_size]
val_data = data[train_size:]
return train_data, val_data
def binary(img):
gray_img = img.convert('L')
otsu = threshold_otsu(asarray(gray_img))
binary_img = gray_img.point(lambda x: 255 if x < otsu else 0, '1')
return binary_img
class Binary(object):
def __call__(self, img):
return binary(img)
def squeeze_weights(m):
m.weight.data = m.weight.data.sum(dim=1)[:, None]
m.in_channels = 1
def change_out_features(m, classes):
m.out_features = classes
return m
def init_weights(m):
if type(m) == nn.Linear:
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def dataset_mean_and_std(train_path, test_path):
# Dataset should be a folder which follows
# ImageFolder format with pages in each label folder
transform = transforms.Compose([
transforms.ToTensor(),
])
train_data = ImageFolder(train_path,
transform=transform)
test_data = ImageFolder(test_path,
transform=transform)
data = ConcatDataset([train_data, test_data])
loader = DataLoader(data, batch_size=1)
n = 0
m = 0.0
var = 0.0
with tqdm(total=len(loader)) as pbar:
for data in loader:
batch = data[0]
# Rearrange batch to be the shape of [B, C, W * H]
batch = batch.view(batch.size(0), batch.size(1), -1)
# Update total number of images
n += batch.size(0)
# Compute mean and std here
m += batch.mean(2).sum(0)
var += batch.var(2).sum(0)
pbar.update(1)
m /= n
var /= n
s = sqrt(var)
print(m)
print(s)
return m, s
def _extract_patches(arr, patch_shape=8, extraction_step=1):
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
def extract_patches_2d(image, patch_size, max_patches=None,
random_state=None, stride=1, th=2000):
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
if isinstance(stride, numbers.Number):
|
else:
s_h, s_w = stride
step = (s_h, s_w, n_colors)
extracted_patches = _extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=step)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint((i_h - p_h + 1) // s_h, size=n_patches)
j_s = rng.randint((i_w - p_w + 1) // s_w, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
patches = patches.reshape((n_patches, p_h, p_w))
# return clean_patches(patches, th)
return patches
def _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches=None):
if isinstance(stride, numbers.Number):
s_h = stride
s_w = stride
else:
s_h, s_w = stride
n_h = (i_h - p_h) // s_h + 1
n_w = (i_w - p_w) // s_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, numbers.Integral)
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, numbers.Integral)
and max_patches >= all_patches):
return all_patches
elif (isinstance(max_patches, numbers.Real)
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def clean_patches(patches, th=2000):
indices = []
for i, patch in enumerate(patches):
if patch.shape[-1] == 3:
patch = patch / 255
num_features = feature.canny(patch.mean(axis=2), sigma=2).sum()
else:
num_features = feature.canny(patch, sigma=2).sum()
if num_features > th:
indices.append(i)
return patches[indices]
def get_labels_and_class_counts(labels_list):
'''
Calculates the counts of all unique classes.
'''
labels = np.array(labels_list)
_, class_counts = np.unique(labels, return_counts=True)
return labels, class_counts
def plot_class_distributions(class_names, train_class_counts,
test_class_counts, validation_class_counts):
'''
Plots the class distributions for the training and test set asa barplot.
'''
f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharey=True, figsize=(15, 6))
ax1.bar(class_names, train_class_counts)
ax1.set_title('Training dataset distribution')
ax1.set_xlabel('Classes')
ax1.set_ylabel('Class counts')
ax2.bar(class_names, test_class_counts)
ax2.set_title('Test dataset distribution')
ax2.set_xlabel('Classes')
ax2.set_ylabel('Class counts')
ax3.bar(class_names, validation_class_counts)
ax3.set_title('Validation dataset distribution')
ax3.set_xlabel('Classes')
ax3.set_ylabel('Class counts')
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
"""Samples elements randomly from a given list of indices for imbalanced dataset
Arguments:
indices (list, optional): a list of indices
num_samples (int, optional): number of samples to draw
callback_get_label func: a callback-like function which takes two arguments - dataset and index
"""
def __init__(self, dataset, indices=None, num_samples=None, callback_get_label=None):
# if indices is not provided,
# all elements in the dataset will be considered
self.indices = list(range(len(dataset))) \
if indices is None else indices
# define custom callback
self.callback_get_label = callback_get_label
# if num_samples is not provided,
# draw `len(indices)` samples in each iteration
self.num_samples = len(self.indices) \
if num_samples is None else num_samples
# distribution of classes in the dataset
label_to_count = {}
for idx in self.indices:
label = self._get_label(dataset, idx)
if label in label_to_count:
label_to_count[label] += 1
else:
label_to_count[label] = 1
# weight for each sample
weights = [1.0 / label_to_count[self._get_label(dataset, idx)]
for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def _get_label(self, dataset, idx):
if isinstance(dataset, torchvision.datasets.MNIST):
return dataset.train_labels[idx].item()
elif isinstance(dataset, torchvision.datasets.ImageFolder):
return dataset.imgs[idx][1]
elif isinstance(dataset, torch.utils.data.Subset):
return dataset.dataset.imgs[idx][1]
elif self.callback_get_label:
return self.callback_get_label(dataset, idx)
elif isinstance(dataset, BinColorDataset):
return dataset.dataset.imgs[idx][1]
else:
raise NotImplementedError
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(
self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
class BinColorDataset(Dataset):
def __init__(self, dataset, col_transform=None, bin_transform=None):
self.dataset = dataset
self.col_transform = col_transform
self.bin_transform = bin_transform
def __getitem__(self, index):
x1, y1 = self.dataset[index]
if self.bin_transform:
x2 = self.bin_transform(x1)
if self.col_transform:
x1 = self.col_transform(x1)
return x1, x2, y1
def __len__(self):
return len(self.dataset)
if __name__ == '__main__':
# transform = transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize([0.7993, 0.7404, 0.6438], [0.1168, 0.1198, 0.1186]), # icdar17 norm
# ])
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.9706, 0.9706, 0.9706], [0.1448, 0.1448, 0.1448]), # firemaker norm
])
train_path = '/home/akshay/PycharmProjects/TFG/datasets/firemaker-500/train'
val_path = '/home/akshay/PycharmProjects/TFG/datasets/firemaker-500/validation'
test_path = '/home/akshay/PycharmProjects/TFG/datasets/firemaker-500/test'
train_data = ImageFolder(train_path, transform=transform)
val_data = ImageFolder(val_path, transform=transform)
test_data = ImageFolder(test_path, transform=transform)
labels, c1 = get_labels_and_class_counts(train_data.targets)
labels1, c2 = get_labels_and_class_counts(test_data.targets)
labels2, c3 = get_labels_and_class_counts(val_data.targets)
plot_class_distributions(train_data.classes, c1, c2, c3)
| step = stride
s_h = stride
s_w = stride | conditional_block |
utils.py | import numbers
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from numpy import asarray
import numpy as np
from numpy.lib.stride_tricks import as_strided
from skimage import feature
from skimage.filters import threshold_otsu
from sklearn.utils import check_random_state, check_array
from torch import sqrt
from torch.utils.data import DataLoader, ConcatDataset, Dataset
from torchvision.datasets import ImageFolder
import matplotlib.pyplot as plt
from tqdm import tqdm
def train_val_split(data, train_ratio=0.9):
train_size = int(train_ratio * len(data))
train_data = data[:train_size]
val_data = data[train_size:]
return train_data, val_data
def binary(img):
gray_img = img.convert('L')
otsu = threshold_otsu(asarray(gray_img))
binary_img = gray_img.point(lambda x: 255 if x < otsu else 0, '1')
return binary_img
class Binary(object):
def __call__(self, img):
return binary(img)
def squeeze_weights(m):
m.weight.data = m.weight.data.sum(dim=1)[:, None]
m.in_channels = 1
def change_out_features(m, classes):
m.out_features = classes
return m
def init_weights(m):
if type(m) == nn.Linear:
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def dataset_mean_and_std(train_path, test_path):
# Dataset should be a folder which follows
# ImageFolder format with pages in each label folder
transform = transforms.Compose([
transforms.ToTensor(),
])
train_data = ImageFolder(train_path,
transform=transform)
test_data = ImageFolder(test_path,
transform=transform)
data = ConcatDataset([train_data, test_data])
loader = DataLoader(data, batch_size=1)
n = 0
m = 0.0
var = 0.0
with tqdm(total=len(loader)) as pbar:
for data in loader:
batch = data[0]
# Rearrange batch to be the shape of [B, C, W * H]
batch = batch.view(batch.size(0), batch.size(1), -1)
# Update total number of images
n += batch.size(0)
# Compute mean and std here
m += batch.mean(2).sum(0)
var += batch.var(2).sum(0)
pbar.update(1)
m /= n
var /= n
s = sqrt(var)
print(m)
print(s)
return m, s
def _extract_patches(arr, patch_shape=8, extraction_step=1):
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
def extract_patches_2d(image, patch_size, max_patches=None,
random_state=None, stride=1, th=2000):
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
if isinstance(stride, numbers.Number):
step = stride
s_h = stride
s_w = stride
else:
s_h, s_w = stride
step = (s_h, s_w, n_colors)
extracted_patches = _extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=step)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint((i_h - p_h + 1) // s_h, size=n_patches)
j_s = rng.randint((i_w - p_w + 1) // s_w, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
patches = patches.reshape((n_patches, p_h, p_w))
# return clean_patches(patches, th)
return patches
def _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches=None):
|
def clean_patches(patches, th=2000):
indices = []
for i, patch in enumerate(patches):
if patch.shape[-1] == 3:
patch = patch / 255
num_features = feature.canny(patch.mean(axis=2), sigma=2).sum()
else:
num_features = feature.canny(patch, sigma=2).sum()
if num_features > th:
indices.append(i)
return patches[indices]
def get_labels_and_class_counts(labels_list):
'''
Calculates the counts of all unique classes.
'''
labels = np.array(labels_list)
_, class_counts = np.unique(labels, return_counts=True)
return labels, class_counts
def plot_class_distributions(class_names, train_class_counts,
test_class_counts, validation_class_counts):
'''
Plots the class distributions for the training and test set asa barplot.
'''
f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharey=True, figsize=(15, 6))
ax1.bar(class_names, train_class_counts)
ax1.set_title('Training dataset distribution')
ax1.set_xlabel('Classes')
ax1.set_ylabel('Class counts')
ax2.bar(class_names, test_class_counts)
ax2.set_title('Test dataset distribution')
ax2.set_xlabel('Classes')
ax2.set_ylabel('Class counts')
ax3.bar(class_names, validation_class_counts)
ax3.set_title('Validation dataset distribution')
ax3.set_xlabel('Classes')
ax3.set_ylabel('Class counts')
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
"""Samples elements randomly from a given list of indices for imbalanced dataset
Arguments:
indices (list, optional): a list of indices
num_samples (int, optional): number of samples to draw
callback_get_label func: a callback-like function which takes two arguments - dataset and index
"""
def __init__(self, dataset, indices=None, num_samples=None, callback_get_label=None):
# if indices is not provided,
# all elements in the dataset will be considered
self.indices = list(range(len(dataset))) \
if indices is None else indices
# define custom callback
self.callback_get_label = callback_get_label
# if num_samples is not provided,
# draw `len(indices)` samples in each iteration
self.num_samples = len(self.indices) \
if num_samples is None else num_samples
# distribution of classes in the dataset
label_to_count = {}
for idx in self.indices:
label = self._get_label(dataset, idx)
if label in label_to_count:
label_to_count[label] += 1
else:
label_to_count[label] = 1
# weight for each sample
weights = [1.0 / label_to_count[self._get_label(dataset, idx)]
for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def _get_label(self, dataset, idx):
if isinstance(dataset, torchvision.datasets.MNIST):
return dataset.train_labels[idx].item()
elif isinstance(dataset, torchvision.datasets.ImageFolder):
return dataset.imgs[idx][1]
elif isinstance(dataset, torch.utils.data.Subset):
return dataset.dataset.imgs[idx][1]
elif self.callback_get_label:
return self.callback_get_label(dataset, idx)
elif isinstance(dataset, BinColorDataset):
return dataset.dataset.imgs[idx][1]
else:
raise NotImplementedError
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(
self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
class BinColorDataset(Dataset):
def __init__(self, dataset, col_transform=None, bin_transform=None):
self.dataset = dataset
self.col_transform = col_transform
self.bin_transform = bin_transform
def __getitem__(self, index):
x1, y1 = self.dataset[index]
if self.bin_transform:
x2 = self.bin_transform(x1)
if self.col_transform:
x1 = self.col_transform(x1)
return x1, x2, y1
def __len__(self):
return len(self.dataset)
if __name__ == '__main__':
# transform = transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize([0.7993, 0.7404, 0.6438], [0.1168, 0.1198, 0.1186]), # icdar17 norm
# ])
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.9706, 0.9706, 0.9706], [0.1448, 0.1448, 0.1448]), # firemaker norm
])
train_path = '/home/akshay/PycharmProjects/TFG/datasets/firemaker-500/train'
val_path = '/home/akshay/PycharmProjects/TFG/datasets/firemaker-500/validation'
test_path = '/home/akshay/PycharmProjects/TFG/datasets/firemaker-500/test'
train_data = ImageFolder(train_path, transform=transform)
val_data = ImageFolder(val_path, transform=transform)
test_data = ImageFolder(test_path, transform=transform)
labels, c1 = get_labels_and_class_counts(train_data.targets)
labels1, c2 = get_labels_and_class_counts(test_data.targets)
labels2, c3 = get_labels_and_class_counts(val_data.targets)
plot_class_distributions(train_data.classes, c1, c2, c3)
| if isinstance(stride, numbers.Number):
s_h = stride
s_w = stride
else:
s_h, s_w = stride
n_h = (i_h - p_h) // s_h + 1
n_w = (i_w - p_w) // s_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, numbers.Integral)
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, numbers.Integral)
and max_patches >= all_patches):
return all_patches
elif (isinstance(max_patches, numbers.Real)
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches | identifier_body |
utils.py | import numbers
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from numpy import asarray
import numpy as np
from numpy.lib.stride_tricks import as_strided
from skimage import feature
from skimage.filters import threshold_otsu
from sklearn.utils import check_random_state, check_array
from torch import sqrt
from torch.utils.data import DataLoader, ConcatDataset, Dataset
from torchvision.datasets import ImageFolder
import matplotlib.pyplot as plt
from tqdm import tqdm
def train_val_split(data, train_ratio=0.9):
train_size = int(train_ratio * len(data))
train_data = data[:train_size]
val_data = data[train_size:]
return train_data, val_data
def binary(img):
gray_img = img.convert('L')
otsu = threshold_otsu(asarray(gray_img))
binary_img = gray_img.point(lambda x: 255 if x < otsu else 0, '1')
return binary_img
class Binary(object):
def __call__(self, img):
return binary(img)
def squeeze_weights(m):
m.weight.data = m.weight.data.sum(dim=1)[:, None]
m.in_channels = 1
def change_out_features(m, classes):
m.out_features = classes
return m
def init_weights(m):
if type(m) == nn.Linear:
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def dataset_mean_and_std(train_path, test_path):
# Dataset should be a folder which follows
# ImageFolder format with pages in each label folder
transform = transforms.Compose([
transforms.ToTensor(),
])
train_data = ImageFolder(train_path,
transform=transform)
test_data = ImageFolder(test_path,
transform=transform)
data = ConcatDataset([train_data, test_data])
loader = DataLoader(data, batch_size=1)
n = 0
m = 0.0
var = 0.0
with tqdm(total=len(loader)) as pbar:
for data in loader:
batch = data[0]
# Rearrange batch to be the shape of [B, C, W * H]
batch = batch.view(batch.size(0), batch.size(1), -1)
# Update total number of images
n += batch.size(0)
# Compute mean and std here
m += batch.mean(2).sum(0)
var += batch.var(2).sum(0)
pbar.update(1)
m /= n
var /= n
s = sqrt(var)
print(m)
print(s)
return m, s
def _extract_patches(arr, patch_shape=8, extraction_step=1):
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
def extract_patches_2d(image, patch_size, max_patches=None,
random_state=None, stride=1, th=2000):
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
if isinstance(stride, numbers.Number):
step = stride
s_h = stride
s_w = stride
else:
s_h, s_w = stride
step = (s_h, s_w, n_colors)
extracted_patches = _extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=step)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint((i_h - p_h + 1) // s_h, size=n_patches)
j_s = rng.randint((i_w - p_w + 1) // s_w, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else: | patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
patches = patches.reshape((n_patches, p_h, p_w))
# return clean_patches(patches, th)
return patches
def _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches=None):
if isinstance(stride, numbers.Number):
s_h = stride
s_w = stride
else:
s_h, s_w = stride
n_h = (i_h - p_h) // s_h + 1
n_w = (i_w - p_w) // s_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, numbers.Integral)
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, numbers.Integral)
and max_patches >= all_patches):
return all_patches
elif (isinstance(max_patches, numbers.Real)
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def clean_patches(patches, th=2000):
indices = []
for i, patch in enumerate(patches):
if patch.shape[-1] == 3:
patch = patch / 255
num_features = feature.canny(patch.mean(axis=2), sigma=2).sum()
else:
num_features = feature.canny(patch, sigma=2).sum()
if num_features > th:
indices.append(i)
return patches[indices]
def get_labels_and_class_counts(labels_list):
'''
Calculates the counts of all unique classes.
'''
labels = np.array(labels_list)
_, class_counts = np.unique(labels, return_counts=True)
return labels, class_counts
def plot_class_distributions(class_names, train_class_counts,
test_class_counts, validation_class_counts):
'''
Plots the class distributions for the training and test set asa barplot.
'''
f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharey=True, figsize=(15, 6))
ax1.bar(class_names, train_class_counts)
ax1.set_title('Training dataset distribution')
ax1.set_xlabel('Classes')
ax1.set_ylabel('Class counts')
ax2.bar(class_names, test_class_counts)
ax2.set_title('Test dataset distribution')
ax2.set_xlabel('Classes')
ax2.set_ylabel('Class counts')
ax3.bar(class_names, validation_class_counts)
ax3.set_title('Validation dataset distribution')
ax3.set_xlabel('Classes')
ax3.set_ylabel('Class counts')
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
"""Samples elements randomly from a given list of indices for imbalanced dataset
Arguments:
indices (list, optional): a list of indices
num_samples (int, optional): number of samples to draw
callback_get_label func: a callback-like function which takes two arguments - dataset and index
"""
def __init__(self, dataset, indices=None, num_samples=None, callback_get_label=None):
# if indices is not provided,
# all elements in the dataset will be considered
self.indices = list(range(len(dataset))) \
if indices is None else indices
# define custom callback
self.callback_get_label = callback_get_label
# if num_samples is not provided,
# draw `len(indices)` samples in each iteration
self.num_samples = len(self.indices) \
if num_samples is None else num_samples
# distribution of classes in the dataset
label_to_count = {}
for idx in self.indices:
label = self._get_label(dataset, idx)
if label in label_to_count:
label_to_count[label] += 1
else:
label_to_count[label] = 1
# weight for each sample
weights = [1.0 / label_to_count[self._get_label(dataset, idx)]
for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def _get_label(self, dataset, idx):
if isinstance(dataset, torchvision.datasets.MNIST):
return dataset.train_labels[idx].item()
elif isinstance(dataset, torchvision.datasets.ImageFolder):
return dataset.imgs[idx][1]
elif isinstance(dataset, torch.utils.data.Subset):
return dataset.dataset.imgs[idx][1]
elif self.callback_get_label:
return self.callback_get_label(dataset, idx)
elif isinstance(dataset, BinColorDataset):
return dataset.dataset.imgs[idx][1]
else:
raise NotImplementedError
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(
self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
class BinColorDataset(Dataset):
def __init__(self, dataset, col_transform=None, bin_transform=None):
self.dataset = dataset
self.col_transform = col_transform
self.bin_transform = bin_transform
def __getitem__(self, index):
x1, y1 = self.dataset[index]
if self.bin_transform:
x2 = self.bin_transform(x1)
if self.col_transform:
x1 = self.col_transform(x1)
return x1, x2, y1
def __len__(self):
return len(self.dataset)
if __name__ == '__main__':
# transform = transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize([0.7993, 0.7404, 0.6438], [0.1168, 0.1198, 0.1186]), # icdar17 norm
# ])
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.9706, 0.9706, 0.9706], [0.1448, 0.1448, 0.1448]), # firemaker norm
])
train_path = '/home/akshay/PycharmProjects/TFG/datasets/firemaker-500/train'
val_path = '/home/akshay/PycharmProjects/TFG/datasets/firemaker-500/validation'
test_path = '/home/akshay/PycharmProjects/TFG/datasets/firemaker-500/test'
train_data = ImageFolder(train_path, transform=transform)
val_data = ImageFolder(val_path, transform=transform)
test_data = ImageFolder(test_path, transform=transform)
labels, c1 = get_labels_and_class_counts(train_data.targets)
labels1, c2 = get_labels_and_class_counts(test_data.targets)
labels2, c3 = get_labels_and_class_counts(val_data.targets)
plot_class_distributions(train_data.classes, c1, c2, c3) | random_line_split | |
utils.py | import numbers
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from numpy import asarray
import numpy as np
from numpy.lib.stride_tricks import as_strided
from skimage import feature
from skimage.filters import threshold_otsu
from sklearn.utils import check_random_state, check_array
from torch import sqrt
from torch.utils.data import DataLoader, ConcatDataset, Dataset
from torchvision.datasets import ImageFolder
import matplotlib.pyplot as plt
from tqdm import tqdm
def train_val_split(data, train_ratio=0.9):
train_size = int(train_ratio * len(data))
train_data = data[:train_size]
val_data = data[train_size:]
return train_data, val_data
def binary(img):
gray_img = img.convert('L')
otsu = threshold_otsu(asarray(gray_img))
binary_img = gray_img.point(lambda x: 255 if x < otsu else 0, '1')
return binary_img
class Binary(object):
def __call__(self, img):
return binary(img)
def squeeze_weights(m):
m.weight.data = m.weight.data.sum(dim=1)[:, None]
m.in_channels = 1
def change_out_features(m, classes):
m.out_features = classes
return m
def init_weights(m):
if type(m) == nn.Linear:
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def dataset_mean_and_std(train_path, test_path):
# Dataset should be a folder which follows
# ImageFolder format with pages in each label folder
transform = transforms.Compose([
transforms.ToTensor(),
])
train_data = ImageFolder(train_path,
transform=transform)
test_data = ImageFolder(test_path,
transform=transform)
data = ConcatDataset([train_data, test_data])
loader = DataLoader(data, batch_size=1)
n = 0
m = 0.0
var = 0.0
with tqdm(total=len(loader)) as pbar:
for data in loader:
batch = data[0]
# Rearrange batch to be the shape of [B, C, W * H]
batch = batch.view(batch.size(0), batch.size(1), -1)
# Update total number of images
n += batch.size(0)
# Compute mean and std here
m += batch.mean(2).sum(0)
var += batch.var(2).sum(0)
pbar.update(1)
m /= n
var /= n
s = sqrt(var)
print(m)
print(s)
return m, s
def _extract_patches(arr, patch_shape=8, extraction_step=1):
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
def extract_patches_2d(image, patch_size, max_patches=None,
random_state=None, stride=1, th=2000):
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
if isinstance(stride, numbers.Number):
step = stride
s_h = stride
s_w = stride
else:
s_h, s_w = stride
step = (s_h, s_w, n_colors)
extracted_patches = _extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=step)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint((i_h - p_h + 1) // s_h, size=n_patches)
j_s = rng.randint((i_w - p_w + 1) // s_w, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
patches = patches.reshape((n_patches, p_h, p_w))
# return clean_patches(patches, th)
return patches
def _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches=None):
if isinstance(stride, numbers.Number):
s_h = stride
s_w = stride
else:
s_h, s_w = stride
n_h = (i_h - p_h) // s_h + 1
n_w = (i_w - p_w) // s_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, numbers.Integral)
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, numbers.Integral)
and max_patches >= all_patches):
return all_patches
elif (isinstance(max_patches, numbers.Real)
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def clean_patches(patches, th=2000):
indices = []
for i, patch in enumerate(patches):
if patch.shape[-1] == 3:
patch = patch / 255
num_features = feature.canny(patch.mean(axis=2), sigma=2).sum()
else:
num_features = feature.canny(patch, sigma=2).sum()
if num_features > th:
indices.append(i)
return patches[indices]
def get_labels_and_class_counts(labels_list):
'''
Calculates the counts of all unique classes.
'''
labels = np.array(labels_list)
_, class_counts = np.unique(labels, return_counts=True)
return labels, class_counts
def plot_class_distributions(class_names, train_class_counts,
test_class_counts, validation_class_counts):
'''
Plots the class distributions for the training and test set asa barplot.
'''
f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharey=True, figsize=(15, 6))
ax1.bar(class_names, train_class_counts)
ax1.set_title('Training dataset distribution')
ax1.set_xlabel('Classes')
ax1.set_ylabel('Class counts')
ax2.bar(class_names, test_class_counts)
ax2.set_title('Test dataset distribution')
ax2.set_xlabel('Classes')
ax2.set_ylabel('Class counts')
ax3.bar(class_names, validation_class_counts)
ax3.set_title('Validation dataset distribution')
ax3.set_xlabel('Classes')
ax3.set_ylabel('Class counts')
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
"""Samples elements randomly from a given list of indices for imbalanced dataset
Arguments:
indices (list, optional): a list of indices
num_samples (int, optional): number of samples to draw
callback_get_label func: a callback-like function which takes two arguments - dataset and index
"""
def __init__(self, dataset, indices=None, num_samples=None, callback_get_label=None):
# if indices is not provided,
# all elements in the dataset will be considered
self.indices = list(range(len(dataset))) \
if indices is None else indices
# define custom callback
self.callback_get_label = callback_get_label
# if num_samples is not provided,
# draw `len(indices)` samples in each iteration
self.num_samples = len(self.indices) \
if num_samples is None else num_samples
# distribution of classes in the dataset
label_to_count = {}
for idx in self.indices:
label = self._get_label(dataset, idx)
if label in label_to_count:
label_to_count[label] += 1
else:
label_to_count[label] = 1
# weight for each sample
weights = [1.0 / label_to_count[self._get_label(dataset, idx)]
for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def _get_label(self, dataset, idx):
if isinstance(dataset, torchvision.datasets.MNIST):
return dataset.train_labels[idx].item()
elif isinstance(dataset, torchvision.datasets.ImageFolder):
return dataset.imgs[idx][1]
elif isinstance(dataset, torch.utils.data.Subset):
return dataset.dataset.imgs[idx][1]
elif self.callback_get_label:
return self.callback_get_label(dataset, idx)
elif isinstance(dataset, BinColorDataset):
return dataset.dataset.imgs[idx][1]
else:
raise NotImplementedError
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(
self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
class | (Dataset):
def __init__(self, dataset, col_transform=None, bin_transform=None):
self.dataset = dataset
self.col_transform = col_transform
self.bin_transform = bin_transform
def __getitem__(self, index):
x1, y1 = self.dataset[index]
if self.bin_transform:
x2 = self.bin_transform(x1)
if self.col_transform:
x1 = self.col_transform(x1)
return x1, x2, y1
def __len__(self):
return len(self.dataset)
if __name__ == '__main__':
# transform = transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize([0.7993, 0.7404, 0.6438], [0.1168, 0.1198, 0.1186]), # icdar17 norm
# ])
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.9706, 0.9706, 0.9706], [0.1448, 0.1448, 0.1448]), # firemaker norm
])
train_path = '/home/akshay/PycharmProjects/TFG/datasets/firemaker-500/train'
val_path = '/home/akshay/PycharmProjects/TFG/datasets/firemaker-500/validation'
test_path = '/home/akshay/PycharmProjects/TFG/datasets/firemaker-500/test'
train_data = ImageFolder(train_path, transform=transform)
val_data = ImageFolder(val_path, transform=transform)
test_data = ImageFolder(test_path, transform=transform)
labels, c1 = get_labels_and_class_counts(train_data.targets)
labels1, c2 = get_labels_and_class_counts(test_data.targets)
labels2, c3 = get_labels_and_class_counts(val_data.targets)
plot_class_distributions(train_data.classes, c1, c2, c3)
| BinColorDataset | identifier_name |
reaper-rush.rs | #[macro_use]
extern crate clap;
use rand::prelude::*;
use rust_sc2::prelude::*;
use std::{cmp::Ordering, collections::HashSet};
#[bot]
#[derive(Default)]
struct ReaperRushAI {
reapers_retreat: HashSet<u64>,
last_loop_distributed: u32,
}
impl Player for ReaperRushAI {
fn on_start(&mut self) -> SC2Result<()> {
if let Some(townhall) = self.units.my.townhalls.first() {
// Setting rallypoint for command center
townhall.smart(Target::Pos(self.start_center), false);
// Ordering scv on initial 50 minerals
townhall.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
// Splitting workers to closest mineral crystals
for u in &self.units.my.workers {
if let Some(mineral) = self.units.mineral_fields.closest(u) {
u.gather(mineral.tag(), false);
}
}
Ok(())
}
fn on_step(&mut self, _iteration: usize) -> SC2Result<()> {
self.distribute_workers();
self.build();
self.train();
self.execute_micro();
Ok(())
}
fn get_player_settings(&self) -> PlayerSettings {
PlayerSettings::new(Race::Terran).with_name("RustyReapers")
}
}
impl ReaperRushAI {
const DISTRIBUTION_DELAY: u32 = 8;
fn distribute_workers(&mut self) {
if self.units.my.workers.is_empty() {
return;
}
let mut idle_workers = self.units.my.workers.idle();
// Check distribution delay if there aren't any idle workers
let game_loop = self.state.observation.game_loop();
let last_loop = &mut self.last_loop_distributed;
if idle_workers.is_empty() && *last_loop + Self::DISTRIBUTION_DELAY > game_loop {
return;
}
*last_loop = game_loop;
// Distribute
let mineral_fields = &self.units.mineral_fields;
if mineral_fields.is_empty() {
return;
}
let bases = self.units.my.townhalls.ready();
if bases.is_empty() {
return;
}
let mut deficit_minings = Units::new();
let mut deficit_geysers = Units::new();
// Distributing mineral workers
for base in &bases {
match base.assigned_harvesters().cmp(&base.ideal_harvesters()) {
Ordering::Less => (0..(base.ideal_harvesters().unwrap()
- base.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_minings.push(base.clone());
}),
Ordering::Greater => {
let local_minerals = mineral_fields
.iter()
.closer(11.0, base)
.map(|m| m.tag())
.collect::<Vec<u64>>();
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
local_minerals.contains(&target_tag)
|| (u.is_carrying_minerals() && target_tag == base.tag())
})
})
.iter()
.take(
(base.assigned_harvesters().unwrap() - base.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
}
}
// Distributing gas workers
self.units
.my
.gas_buildings
.iter()
.ready()
.filter(|g| g.vespene_contents().map_or(false, |vespene| vespene > 0))
.for_each(
|gas| match gas.assigned_harvesters().cmp(&gas.ideal_harvesters()) {
Ordering::Less => (0..(gas.ideal_harvesters().unwrap()
- gas.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_geysers.push(gas.clone());
}),
Ordering::Greater => {
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
target_tag == gas.tag()
|| (u.is_carrying_vespene()
&& target_tag == bases.closest(gas).unwrap().tag())
})
})
.iter()
.take(
(gas.assigned_harvesters().unwrap() - gas.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
},
);
// Distributing idle workers
let minerals_near_base = if idle_workers.len() > deficit_minings.len() + deficit_geysers.len() {
let minerals = mineral_fields.filter(|m| bases.iter().any(|base| base.is_closer(11.0, *m)));
if minerals.is_empty() {
None
} else {
Some(minerals)
}
} else {
None
};
for u in &idle_workers {
if let Some(closest) = deficit_geysers.closest(u) {
let tag = closest.tag();
deficit_geysers.remove(tag);
u.gather(tag, false);
} else if let Some(closest) = deficit_minings.closest(u) {
u.gather(
mineral_fields
.closer(11.0, closest)
.max(|m| m.mineral_contents().unwrap_or(0))
.unwrap()
.tag(),
false,
);
let tag = closest.tag();
deficit_minings.remove(tag);
} else if u.is_idle() {
if let Some(mineral) = minerals_near_base.as_ref().and_then(|ms| ms.closest(u)) {
u.gather(mineral.tag(), false);
}
}
}
}
fn get_builder(&self, pos: Point2, mineral_tags: &[u64]) -> Option<&Unit> {
self.units
.my
.workers
.iter()
.filter(|u| {
!(u.is_constructing()
|| u.is_returning() || u.is_carrying_resource()
|| (u.is_gathering() && u.target_tag().map_or(true, |tag| !mineral_tags.contains(&tag))))
})
.closest(pos)
}
fn build(&mut self) {
if self.minerals < 75 {
return;
}
let mineral_tags = self
.units
.mineral_fields
.iter()
.map(|u| u.tag())
.collect::<Vec<u64>>();
let main_base = self.start_location.towards(self.game_info.map_center, 8.0);
if self.counter().count(UnitTypeId::Refinery) < 2
&& self.counter().ordered().count(UnitTypeId::Refinery) == 0
&& self.can_afford(UnitTypeId::Refinery, false)
{
let start_location = self.start_location;
if let Some(geyser) = self.find_gas_placement(start_location) {
if let Some(builder) = self.get_builder(geyser.position(), &mineral_tags) {
builder.build_gas(geyser.tag(), false);
self.subtract_resources(UnitTypeId::Refinery, false);
}
}
}
if self.supply_left < 3
&& self.supply_cap < 200
&& self.counter().ordered().count(UnitTypeId::SupplyDepot) == 0
&& self.can_afford(UnitTypeId::SupplyDepot, false)
{
if let Some(location) =
self.find_placement(UnitTypeId::SupplyDepot, main_base, Default::default())
{
if let Some(builder) = self.get_builder(location, &mineral_tags) { | }
}
if self.counter().all().count(UnitTypeId::Barracks) < 4
&& self.can_afford(UnitTypeId::Barracks, false)
{
if let Some(location) = self.find_placement(
UnitTypeId::Barracks,
main_base,
PlacementOptions {
step: 4,
..Default::default()
},
) {
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::Barracks, location, false);
self.subtract_resources(UnitTypeId::Barracks, false);
}
}
}
}
fn train(&mut self) {
if self.minerals < 50 || self.supply_left == 0 {
return;
}
if self.supply_workers < 22 && self.can_afford(UnitTypeId::SCV, true) {
if let Some(cc) = self
.units
.my
.townhalls
.iter()
.find(|u| u.is_ready() && u.is_almost_idle())
{
cc.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
}
if self.can_afford(UnitTypeId::Reaper, true) {
if let Some(barracks) = self
.units
.my
.structures
.iter()
.find(|u| u.type_id() == UnitTypeId::Barracks && u.is_ready() && u.is_almost_idle())
{
barracks.train(UnitTypeId::Reaper, false);
self.subtract_resources(UnitTypeId::Reaper, true);
}
}
}
fn throw_mine(&self, reaper: &Unit, target: &Unit) -> bool {
if reaper.has_ability(AbilityId::KD8ChargeKD8Charge)
&& reaper.in_ability_cast_range(AbilityId::KD8ChargeKD8Charge, target, 0.0)
{
reaper.command(
AbilityId::KD8ChargeKD8Charge,
Target::Pos(target.position()),
false,
);
true
} else {
false
}
}
fn execute_micro(&mut self) {
// Lower ready depots
self.units
.my
.structures
.iter()
.of_type(UnitTypeId::SupplyDepot)
.ready()
.for_each(|s| s.use_ability(AbilityId::MorphSupplyDepotLower, false));
// Reapers micro
let reapers = self.units.my.units.of_type(UnitTypeId::Reaper);
if reapers.is_empty() {
return;
}
let targets = {
let ground_targets = self.units.enemy.all.ground();
let ground_attackers = ground_targets.filter(|e| e.can_attack_ground());
if ground_attackers.is_empty() {
ground_targets
} else {
ground_attackers
}
};
for u in &reapers {
let is_retreating = self.reapers_retreat.contains(&u.tag());
if is_retreating {
if u.health_percentage().unwrap() > 0.75 {
self.reapers_retreat.remove(&u.tag());
}
} else if u.health_percentage().unwrap() < 0.5 {
self.reapers_retreat.insert(u.tag());
}
match targets.closest(u) {
Some(closest) => {
if self.throw_mine(u, closest) {
return;
}
if is_retreating || u.on_cooldown() {
match targets
.iter()
.filter(|t| t.in_range(u, t.speed() + if is_retreating { 2.0 } else { 0.5 }))
.closest(u)
{
Some(closest_attacker) => {
let flee_position = {
let pos = u.position().towards(closest_attacker.position(), -u.speed());
if self.is_pathable(pos) {
pos
} else {
*u.position()
.neighbors8()
.iter()
.filter(|p| self.is_pathable(**p))
.furthest(closest_attacker)
.unwrap_or(&self.start_location)
}
};
u.move_to(Target::Pos(flee_position), false);
}
None => {
if !(is_retreating || u.in_range(&closest, 0.0)) {
u.move_to(Target::Pos(closest.position()), false);
}
}
}
} else {
match targets.iter().in_range_of(u, 0.0).min_by_key(|t| t.hits()) {
Some(target) => u.attack(Target::Tag(target.tag()), false),
None => u.move_to(Target::Pos(closest.position()), false),
}
}
}
None => {
let pos = if is_retreating {
u.position()
} else {
self.enemy_start
};
u.move_to(Target::Pos(pos), false);
}
}
}
}
}
fn main() -> SC2Result<()> {
let app = clap_app!(RustyReapers =>
(version: crate_version!())
(author: crate_authors!())
(@arg ladder_server: --LadderServer +takes_value)
(@arg opponent_id: --OpponentId +takes_value)
(@arg host_port: --GamePort +takes_value)
(@arg player_port: --StartPort +takes_value)
(@arg game_step: -s --step
+takes_value
default_value("2")
"Sets game step for bot"
)
(@subcommand local =>
(about: "Runs local game vs Computer")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race
+takes_value
"Sets opponent race"
)
(@arg difficulty: -d --difficulty
+takes_value
"Sets opponent diffuculty"
)
(@arg ai_build: --("ai-build")
+takes_value
"Sets opponent build"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
(@arg realtime: --realtime "Enables realtime mode")
)
(@subcommand human =>
(about: "Runs game Human vs Bot")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race *
+takes_value
"Sets human race"
)
(@arg name: --name
+takes_value
"Sets human name"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
)
)
.get_matches();
let game_step = match app.value_of("game_step") {
Some("0") => panic!("game_step must be X >= 1"),
Some(step) => step.parse::<u32>().expect("Can't parse game_step"),
None => unreachable!(),
};
let mut bot = ReaperRushAI::default();
bot.set_game_step(game_step);
const LADDER_MAPS: &[&str] = &[
"DeathauraLE",
"EternalEmpireLE",
"EverDreamLE",
"GoldenWallLE",
"IceandChromeLE",
"PillarsofGoldLE",
"SubmarineLE",
];
let mut rng = thread_rng();
match app.subcommand() {
("local", Some(sub)) => run_vs_computer(
&mut bot,
Computer::new(
sub.value_of("race").map_or(Race::Random, |race| {
race.parse().expect("Can't parse computer race")
}),
sub.value_of("difficulty")
.map_or(Difficulty::VeryEasy, |difficulty| {
difficulty.parse().expect("Can't parse computer difficulty")
}),
sub.value_of("ai_build")
.map(|ai_build| ai_build.parse().expect("Can't parse computer build")),
),
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: sub.is_present("realtime"),
save_replay_as: sub.value_of("save_replay"),
},
),
("human", Some(sub)) => run_vs_human(
&mut bot,
PlayerSettings {
race: sub
.value_of("race")
.unwrap()
.parse()
.expect("Can't parse human race"),
name: sub.value_of("name"),
..Default::default()
},
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: true,
save_replay_as: sub.value_of("save_replay"),
},
),
_ => run_ladder_game(
&mut bot,
app.value_of("ladder_server").unwrap_or("127.0.0.1"),
app.value_of("host_port").expect("GamePort must be specified"),
app.value_of("player_port")
.expect("StartPort must be specified")
.parse()
.expect("Can't parse StartPort"),
app.value_of("opponent_id"),
),
}
} | builder.build(UnitTypeId::SupplyDepot, location, false);
self.subtract_resources(UnitTypeId::SupplyDepot, false);
return;
} | random_line_split |
reaper-rush.rs | #[macro_use]
extern crate clap;
use rand::prelude::*;
use rust_sc2::prelude::*;
use std::{cmp::Ordering, collections::HashSet};
#[bot]
#[derive(Default)]
struct ReaperRushAI {
reapers_retreat: HashSet<u64>,
last_loop_distributed: u32,
}
impl Player for ReaperRushAI {
fn on_start(&mut self) -> SC2Result<()> {
if let Some(townhall) = self.units.my.townhalls.first() {
// Setting rallypoint for command center
townhall.smart(Target::Pos(self.start_center), false);
// Ordering scv on initial 50 minerals
townhall.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
// Splitting workers to closest mineral crystals
for u in &self.units.my.workers {
if let Some(mineral) = self.units.mineral_fields.closest(u) {
u.gather(mineral.tag(), false);
}
}
Ok(())
}
fn on_step(&mut self, _iteration: usize) -> SC2Result<()> {
self.distribute_workers();
self.build();
self.train();
self.execute_micro();
Ok(())
}
fn get_player_settings(&self) -> PlayerSettings {
PlayerSettings::new(Race::Terran).with_name("RustyReapers")
}
}
impl ReaperRushAI {
const DISTRIBUTION_DELAY: u32 = 8;
fn distribute_workers(&mut self) {
if self.units.my.workers.is_empty() {
return;
}
let mut idle_workers = self.units.my.workers.idle();
// Check distribution delay if there aren't any idle workers
let game_loop = self.state.observation.game_loop();
let last_loop = &mut self.last_loop_distributed;
if idle_workers.is_empty() && *last_loop + Self::DISTRIBUTION_DELAY > game_loop {
return;
}
*last_loop = game_loop;
// Distribute
let mineral_fields = &self.units.mineral_fields;
if mineral_fields.is_empty() {
return;
}
let bases = self.units.my.townhalls.ready();
if bases.is_empty() {
return;
}
let mut deficit_minings = Units::new();
let mut deficit_geysers = Units::new();
// Distributing mineral workers
for base in &bases {
match base.assigned_harvesters().cmp(&base.ideal_harvesters()) {
Ordering::Less => (0..(base.ideal_harvesters().unwrap()
- base.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_minings.push(base.clone());
}),
Ordering::Greater => {
let local_minerals = mineral_fields
.iter()
.closer(11.0, base)
.map(|m| m.tag())
.collect::<Vec<u64>>();
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
local_minerals.contains(&target_tag)
|| (u.is_carrying_minerals() && target_tag == base.tag())
})
})
.iter()
.take(
(base.assigned_harvesters().unwrap() - base.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
}
}
// Distributing gas workers
self.units
.my
.gas_buildings
.iter()
.ready()
.filter(|g| g.vespene_contents().map_or(false, |vespene| vespene > 0))
.for_each(
|gas| match gas.assigned_harvesters().cmp(&gas.ideal_harvesters()) {
Ordering::Less => (0..(gas.ideal_harvesters().unwrap()
- gas.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_geysers.push(gas.clone());
}),
Ordering::Greater => {
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
target_tag == gas.tag()
|| (u.is_carrying_vespene()
&& target_tag == bases.closest(gas).unwrap().tag())
})
})
.iter()
.take(
(gas.assigned_harvesters().unwrap() - gas.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
},
);
// Distributing idle workers
let minerals_near_base = if idle_workers.len() > deficit_minings.len() + deficit_geysers.len() {
let minerals = mineral_fields.filter(|m| bases.iter().any(|base| base.is_closer(11.0, *m)));
if minerals.is_empty() {
None
} else {
Some(minerals)
}
} else {
None
};
for u in &idle_workers {
if let Some(closest) = deficit_geysers.closest(u) {
let tag = closest.tag();
deficit_geysers.remove(tag);
u.gather(tag, false);
} else if let Some(closest) = deficit_minings.closest(u) {
u.gather(
mineral_fields
.closer(11.0, closest)
.max(|m| m.mineral_contents().unwrap_or(0))
.unwrap()
.tag(),
false,
);
let tag = closest.tag();
deficit_minings.remove(tag);
} else if u.is_idle() {
if let Some(mineral) = minerals_near_base.as_ref().and_then(|ms| ms.closest(u)) {
u.gather(mineral.tag(), false);
}
}
}
}
fn get_builder(&self, pos: Point2, mineral_tags: &[u64]) -> Option<&Unit> {
self.units
.my
.workers
.iter()
.filter(|u| {
!(u.is_constructing()
|| u.is_returning() || u.is_carrying_resource()
|| (u.is_gathering() && u.target_tag().map_or(true, |tag| !mineral_tags.contains(&tag))))
})
.closest(pos)
}
fn build(&mut self) {
if self.minerals < 75 {
return;
}
let mineral_tags = self
.units
.mineral_fields
.iter()
.map(|u| u.tag())
.collect::<Vec<u64>>();
let main_base = self.start_location.towards(self.game_info.map_center, 8.0);
if self.counter().count(UnitTypeId::Refinery) < 2
&& self.counter().ordered().count(UnitTypeId::Refinery) == 0
&& self.can_afford(UnitTypeId::Refinery, false)
{
let start_location = self.start_location;
if let Some(geyser) = self.find_gas_placement(start_location) {
if let Some(builder) = self.get_builder(geyser.position(), &mineral_tags) {
builder.build_gas(geyser.tag(), false);
self.subtract_resources(UnitTypeId::Refinery, false);
}
}
}
if self.supply_left < 3
&& self.supply_cap < 200
&& self.counter().ordered().count(UnitTypeId::SupplyDepot) == 0
&& self.can_afford(UnitTypeId::SupplyDepot, false)
{
if let Some(location) =
self.find_placement(UnitTypeId::SupplyDepot, main_base, Default::default())
{
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::SupplyDepot, location, false);
self.subtract_resources(UnitTypeId::SupplyDepot, false);
return;
}
}
}
if self.counter().all().count(UnitTypeId::Barracks) < 4
&& self.can_afford(UnitTypeId::Barracks, false)
{
if let Some(location) = self.find_placement(
UnitTypeId::Barracks,
main_base,
PlacementOptions {
step: 4,
..Default::default()
},
) {
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::Barracks, location, false);
self.subtract_resources(UnitTypeId::Barracks, false);
}
}
}
}
fn | (&mut self) {
if self.minerals < 50 || self.supply_left == 0 {
return;
}
if self.supply_workers < 22 && self.can_afford(UnitTypeId::SCV, true) {
if let Some(cc) = self
.units
.my
.townhalls
.iter()
.find(|u| u.is_ready() && u.is_almost_idle())
{
cc.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
}
if self.can_afford(UnitTypeId::Reaper, true) {
if let Some(barracks) = self
.units
.my
.structures
.iter()
.find(|u| u.type_id() == UnitTypeId::Barracks && u.is_ready() && u.is_almost_idle())
{
barracks.train(UnitTypeId::Reaper, false);
self.subtract_resources(UnitTypeId::Reaper, true);
}
}
}
fn throw_mine(&self, reaper: &Unit, target: &Unit) -> bool {
if reaper.has_ability(AbilityId::KD8ChargeKD8Charge)
&& reaper.in_ability_cast_range(AbilityId::KD8ChargeKD8Charge, target, 0.0)
{
reaper.command(
AbilityId::KD8ChargeKD8Charge,
Target::Pos(target.position()),
false,
);
true
} else {
false
}
}
fn execute_micro(&mut self) {
// Lower ready depots
self.units
.my
.structures
.iter()
.of_type(UnitTypeId::SupplyDepot)
.ready()
.for_each(|s| s.use_ability(AbilityId::MorphSupplyDepotLower, false));
// Reapers micro
let reapers = self.units.my.units.of_type(UnitTypeId::Reaper);
if reapers.is_empty() {
return;
}
let targets = {
let ground_targets = self.units.enemy.all.ground();
let ground_attackers = ground_targets.filter(|e| e.can_attack_ground());
if ground_attackers.is_empty() {
ground_targets
} else {
ground_attackers
}
};
for u in &reapers {
let is_retreating = self.reapers_retreat.contains(&u.tag());
if is_retreating {
if u.health_percentage().unwrap() > 0.75 {
self.reapers_retreat.remove(&u.tag());
}
} else if u.health_percentage().unwrap() < 0.5 {
self.reapers_retreat.insert(u.tag());
}
match targets.closest(u) {
Some(closest) => {
if self.throw_mine(u, closest) {
return;
}
if is_retreating || u.on_cooldown() {
match targets
.iter()
.filter(|t| t.in_range(u, t.speed() + if is_retreating { 2.0 } else { 0.5 }))
.closest(u)
{
Some(closest_attacker) => {
let flee_position = {
let pos = u.position().towards(closest_attacker.position(), -u.speed());
if self.is_pathable(pos) {
pos
} else {
*u.position()
.neighbors8()
.iter()
.filter(|p| self.is_pathable(**p))
.furthest(closest_attacker)
.unwrap_or(&self.start_location)
}
};
u.move_to(Target::Pos(flee_position), false);
}
None => {
if !(is_retreating || u.in_range(&closest, 0.0)) {
u.move_to(Target::Pos(closest.position()), false);
}
}
}
} else {
match targets.iter().in_range_of(u, 0.0).min_by_key(|t| t.hits()) {
Some(target) => u.attack(Target::Tag(target.tag()), false),
None => u.move_to(Target::Pos(closest.position()), false),
}
}
}
None => {
let pos = if is_retreating {
u.position()
} else {
self.enemy_start
};
u.move_to(Target::Pos(pos), false);
}
}
}
}
}
fn main() -> SC2Result<()> {
let app = clap_app!(RustyReapers =>
(version: crate_version!())
(author: crate_authors!())
(@arg ladder_server: --LadderServer +takes_value)
(@arg opponent_id: --OpponentId +takes_value)
(@arg host_port: --GamePort +takes_value)
(@arg player_port: --StartPort +takes_value)
(@arg game_step: -s --step
+takes_value
default_value("2")
"Sets game step for bot"
)
(@subcommand local =>
(about: "Runs local game vs Computer")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race
+takes_value
"Sets opponent race"
)
(@arg difficulty: -d --difficulty
+takes_value
"Sets opponent diffuculty"
)
(@arg ai_build: --("ai-build")
+takes_value
"Sets opponent build"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
(@arg realtime: --realtime "Enables realtime mode")
)
(@subcommand human =>
(about: "Runs game Human vs Bot")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race *
+takes_value
"Sets human race"
)
(@arg name: --name
+takes_value
"Sets human name"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
)
)
.get_matches();
let game_step = match app.value_of("game_step") {
Some("0") => panic!("game_step must be X >= 1"),
Some(step) => step.parse::<u32>().expect("Can't parse game_step"),
None => unreachable!(),
};
let mut bot = ReaperRushAI::default();
bot.set_game_step(game_step);
const LADDER_MAPS: &[&str] = &[
"DeathauraLE",
"EternalEmpireLE",
"EverDreamLE",
"GoldenWallLE",
"IceandChromeLE",
"PillarsofGoldLE",
"SubmarineLE",
];
let mut rng = thread_rng();
match app.subcommand() {
("local", Some(sub)) => run_vs_computer(
&mut bot,
Computer::new(
sub.value_of("race").map_or(Race::Random, |race| {
race.parse().expect("Can't parse computer race")
}),
sub.value_of("difficulty")
.map_or(Difficulty::VeryEasy, |difficulty| {
difficulty.parse().expect("Can't parse computer difficulty")
}),
sub.value_of("ai_build")
.map(|ai_build| ai_build.parse().expect("Can't parse computer build")),
),
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: sub.is_present("realtime"),
save_replay_as: sub.value_of("save_replay"),
},
),
("human", Some(sub)) => run_vs_human(
&mut bot,
PlayerSettings {
race: sub
.value_of("race")
.unwrap()
.parse()
.expect("Can't parse human race"),
name: sub.value_of("name"),
..Default::default()
},
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: true,
save_replay_as: sub.value_of("save_replay"),
},
),
_ => run_ladder_game(
&mut bot,
app.value_of("ladder_server").unwrap_or("127.0.0.1"),
app.value_of("host_port").expect("GamePort must be specified"),
app.value_of("player_port")
.expect("StartPort must be specified")
.parse()
.expect("Can't parse StartPort"),
app.value_of("opponent_id"),
),
}
}
| train | identifier_name |
reaper-rush.rs | #[macro_use]
extern crate clap;
use rand::prelude::*;
use rust_sc2::prelude::*;
use std::{cmp::Ordering, collections::HashSet};
#[bot]
#[derive(Default)]
struct ReaperRushAI {
reapers_retreat: HashSet<u64>,
last_loop_distributed: u32,
}
impl Player for ReaperRushAI {
fn on_start(&mut self) -> SC2Result<()> {
if let Some(townhall) = self.units.my.townhalls.first() {
// Setting rallypoint for command center
townhall.smart(Target::Pos(self.start_center), false);
// Ordering scv on initial 50 minerals
townhall.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
// Splitting workers to closest mineral crystals
for u in &self.units.my.workers {
if let Some(mineral) = self.units.mineral_fields.closest(u) {
u.gather(mineral.tag(), false);
}
}
Ok(())
}
fn on_step(&mut self, _iteration: usize) -> SC2Result<()> {
self.distribute_workers();
self.build();
self.train();
self.execute_micro();
Ok(())
}
fn get_player_settings(&self) -> PlayerSettings {
PlayerSettings::new(Race::Terran).with_name("RustyReapers")
}
}
impl ReaperRushAI {
const DISTRIBUTION_DELAY: u32 = 8;
fn distribute_workers(&mut self) {
if self.units.my.workers.is_empty() {
return;
}
let mut idle_workers = self.units.my.workers.idle();
// Check distribution delay if there aren't any idle workers
let game_loop = self.state.observation.game_loop();
let last_loop = &mut self.last_loop_distributed;
if idle_workers.is_empty() && *last_loop + Self::DISTRIBUTION_DELAY > game_loop {
return;
}
*last_loop = game_loop;
// Distribute
let mineral_fields = &self.units.mineral_fields;
if mineral_fields.is_empty() {
return;
}
let bases = self.units.my.townhalls.ready();
if bases.is_empty() {
return;
}
let mut deficit_minings = Units::new();
let mut deficit_geysers = Units::new();
// Distributing mineral workers
for base in &bases {
match base.assigned_harvesters().cmp(&base.ideal_harvesters()) {
Ordering::Less => (0..(base.ideal_harvesters().unwrap()
- base.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_minings.push(base.clone());
}),
Ordering::Greater => {
let local_minerals = mineral_fields
.iter()
.closer(11.0, base)
.map(|m| m.tag())
.collect::<Vec<u64>>();
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
local_minerals.contains(&target_tag)
|| (u.is_carrying_minerals() && target_tag == base.tag())
})
})
.iter()
.take(
(base.assigned_harvesters().unwrap() - base.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
}
}
// Distributing gas workers
self.units
.my
.gas_buildings
.iter()
.ready()
.filter(|g| g.vespene_contents().map_or(false, |vespene| vespene > 0))
.for_each(
|gas| match gas.assigned_harvesters().cmp(&gas.ideal_harvesters()) {
Ordering::Less => (0..(gas.ideal_harvesters().unwrap()
- gas.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_geysers.push(gas.clone());
}),
Ordering::Greater => {
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
target_tag == gas.tag()
|| (u.is_carrying_vespene()
&& target_tag == bases.closest(gas).unwrap().tag())
})
})
.iter()
.take(
(gas.assigned_harvesters().unwrap() - gas.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
},
);
// Distributing idle workers
let minerals_near_base = if idle_workers.len() > deficit_minings.len() + deficit_geysers.len() | else {
None
};
for u in &idle_workers {
if let Some(closest) = deficit_geysers.closest(u) {
let tag = closest.tag();
deficit_geysers.remove(tag);
u.gather(tag, false);
} else if let Some(closest) = deficit_minings.closest(u) {
u.gather(
mineral_fields
.closer(11.0, closest)
.max(|m| m.mineral_contents().unwrap_or(0))
.unwrap()
.tag(),
false,
);
let tag = closest.tag();
deficit_minings.remove(tag);
} else if u.is_idle() {
if let Some(mineral) = minerals_near_base.as_ref().and_then(|ms| ms.closest(u)) {
u.gather(mineral.tag(), false);
}
}
}
}
fn get_builder(&self, pos: Point2, mineral_tags: &[u64]) -> Option<&Unit> {
self.units
.my
.workers
.iter()
.filter(|u| {
!(u.is_constructing()
|| u.is_returning() || u.is_carrying_resource()
|| (u.is_gathering() && u.target_tag().map_or(true, |tag| !mineral_tags.contains(&tag))))
})
.closest(pos)
}
fn build(&mut self) {
if self.minerals < 75 {
return;
}
let mineral_tags = self
.units
.mineral_fields
.iter()
.map(|u| u.tag())
.collect::<Vec<u64>>();
let main_base = self.start_location.towards(self.game_info.map_center, 8.0);
if self.counter().count(UnitTypeId::Refinery) < 2
&& self.counter().ordered().count(UnitTypeId::Refinery) == 0
&& self.can_afford(UnitTypeId::Refinery, false)
{
let start_location = self.start_location;
if let Some(geyser) = self.find_gas_placement(start_location) {
if let Some(builder) = self.get_builder(geyser.position(), &mineral_tags) {
builder.build_gas(geyser.tag(), false);
self.subtract_resources(UnitTypeId::Refinery, false);
}
}
}
if self.supply_left < 3
&& self.supply_cap < 200
&& self.counter().ordered().count(UnitTypeId::SupplyDepot) == 0
&& self.can_afford(UnitTypeId::SupplyDepot, false)
{
if let Some(location) =
self.find_placement(UnitTypeId::SupplyDepot, main_base, Default::default())
{
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::SupplyDepot, location, false);
self.subtract_resources(UnitTypeId::SupplyDepot, false);
return;
}
}
}
if self.counter().all().count(UnitTypeId::Barracks) < 4
&& self.can_afford(UnitTypeId::Barracks, false)
{
if let Some(location) = self.find_placement(
UnitTypeId::Barracks,
main_base,
PlacementOptions {
step: 4,
..Default::default()
},
) {
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::Barracks, location, false);
self.subtract_resources(UnitTypeId::Barracks, false);
}
}
}
}
fn train(&mut self) {
if self.minerals < 50 || self.supply_left == 0 {
return;
}
if self.supply_workers < 22 && self.can_afford(UnitTypeId::SCV, true) {
if let Some(cc) = self
.units
.my
.townhalls
.iter()
.find(|u| u.is_ready() && u.is_almost_idle())
{
cc.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
}
if self.can_afford(UnitTypeId::Reaper, true) {
if let Some(barracks) = self
.units
.my
.structures
.iter()
.find(|u| u.type_id() == UnitTypeId::Barracks && u.is_ready() && u.is_almost_idle())
{
barracks.train(UnitTypeId::Reaper, false);
self.subtract_resources(UnitTypeId::Reaper, true);
}
}
}
fn throw_mine(&self, reaper: &Unit, target: &Unit) -> bool {
if reaper.has_ability(AbilityId::KD8ChargeKD8Charge)
&& reaper.in_ability_cast_range(AbilityId::KD8ChargeKD8Charge, target, 0.0)
{
reaper.command(
AbilityId::KD8ChargeKD8Charge,
Target::Pos(target.position()),
false,
);
true
} else {
false
}
}
fn execute_micro(&mut self) {
// Lower ready depots
self.units
.my
.structures
.iter()
.of_type(UnitTypeId::SupplyDepot)
.ready()
.for_each(|s| s.use_ability(AbilityId::MorphSupplyDepotLower, false));
// Reapers micro
let reapers = self.units.my.units.of_type(UnitTypeId::Reaper);
if reapers.is_empty() {
return;
}
let targets = {
let ground_targets = self.units.enemy.all.ground();
let ground_attackers = ground_targets.filter(|e| e.can_attack_ground());
if ground_attackers.is_empty() {
ground_targets
} else {
ground_attackers
}
};
for u in &reapers {
let is_retreating = self.reapers_retreat.contains(&u.tag());
if is_retreating {
if u.health_percentage().unwrap() > 0.75 {
self.reapers_retreat.remove(&u.tag());
}
} else if u.health_percentage().unwrap() < 0.5 {
self.reapers_retreat.insert(u.tag());
}
match targets.closest(u) {
Some(closest) => {
if self.throw_mine(u, closest) {
return;
}
if is_retreating || u.on_cooldown() {
match targets
.iter()
.filter(|t| t.in_range(u, t.speed() + if is_retreating { 2.0 } else { 0.5 }))
.closest(u)
{
Some(closest_attacker) => {
let flee_position = {
let pos = u.position().towards(closest_attacker.position(), -u.speed());
if self.is_pathable(pos) {
pos
} else {
*u.position()
.neighbors8()
.iter()
.filter(|p| self.is_pathable(**p))
.furthest(closest_attacker)
.unwrap_or(&self.start_location)
}
};
u.move_to(Target::Pos(flee_position), false);
}
None => {
if !(is_retreating || u.in_range(&closest, 0.0)) {
u.move_to(Target::Pos(closest.position()), false);
}
}
}
} else {
match targets.iter().in_range_of(u, 0.0).min_by_key(|t| t.hits()) {
Some(target) => u.attack(Target::Tag(target.tag()), false),
None => u.move_to(Target::Pos(closest.position()), false),
}
}
}
None => {
let pos = if is_retreating {
u.position()
} else {
self.enemy_start
};
u.move_to(Target::Pos(pos), false);
}
}
}
}
}
fn main() -> SC2Result<()> {
let app = clap_app!(RustyReapers =>
(version: crate_version!())
(author: crate_authors!())
(@arg ladder_server: --LadderServer +takes_value)
(@arg opponent_id: --OpponentId +takes_value)
(@arg host_port: --GamePort +takes_value)
(@arg player_port: --StartPort +takes_value)
(@arg game_step: -s --step
+takes_value
default_value("2")
"Sets game step for bot"
)
(@subcommand local =>
(about: "Runs local game vs Computer")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race
+takes_value
"Sets opponent race"
)
(@arg difficulty: -d --difficulty
+takes_value
"Sets opponent diffuculty"
)
(@arg ai_build: --("ai-build")
+takes_value
"Sets opponent build"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
(@arg realtime: --realtime "Enables realtime mode")
)
(@subcommand human =>
(about: "Runs game Human vs Bot")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race *
+takes_value
"Sets human race"
)
(@arg name: --name
+takes_value
"Sets human name"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
)
)
.get_matches();
let game_step = match app.value_of("game_step") {
Some("0") => panic!("game_step must be X >= 1"),
Some(step) => step.parse::<u32>().expect("Can't parse game_step"),
None => unreachable!(),
};
let mut bot = ReaperRushAI::default();
bot.set_game_step(game_step);
const LADDER_MAPS: &[&str] = &[
"DeathauraLE",
"EternalEmpireLE",
"EverDreamLE",
"GoldenWallLE",
"IceandChromeLE",
"PillarsofGoldLE",
"SubmarineLE",
];
let mut rng = thread_rng();
match app.subcommand() {
("local", Some(sub)) => run_vs_computer(
&mut bot,
Computer::new(
sub.value_of("race").map_or(Race::Random, |race| {
race.parse().expect("Can't parse computer race")
}),
sub.value_of("difficulty")
.map_or(Difficulty::VeryEasy, |difficulty| {
difficulty.parse().expect("Can't parse computer difficulty")
}),
sub.value_of("ai_build")
.map(|ai_build| ai_build.parse().expect("Can't parse computer build")),
),
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: sub.is_present("realtime"),
save_replay_as: sub.value_of("save_replay"),
},
),
("human", Some(sub)) => run_vs_human(
&mut bot,
PlayerSettings {
race: sub
.value_of("race")
.unwrap()
.parse()
.expect("Can't parse human race"),
name: sub.value_of("name"),
..Default::default()
},
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: true,
save_replay_as: sub.value_of("save_replay"),
},
),
_ => run_ladder_game(
&mut bot,
app.value_of("ladder_server").unwrap_or("127.0.0.1"),
app.value_of("host_port").expect("GamePort must be specified"),
app.value_of("player_port")
.expect("StartPort must be specified")
.parse()
.expect("Can't parse StartPort"),
app.value_of("opponent_id"),
),
}
}
| {
let minerals = mineral_fields.filter(|m| bases.iter().any(|base| base.is_closer(11.0, *m)));
if minerals.is_empty() {
None
} else {
Some(minerals)
}
} | conditional_block |
reaper-rush.rs | #[macro_use]
extern crate clap;
use rand::prelude::*;
use rust_sc2::prelude::*;
use std::{cmp::Ordering, collections::HashSet};
#[bot]
#[derive(Default)]
struct ReaperRushAI {
reapers_retreat: HashSet<u64>,
last_loop_distributed: u32,
}
impl Player for ReaperRushAI {
fn on_start(&mut self) -> SC2Result<()> {
if let Some(townhall) = self.units.my.townhalls.first() {
// Setting rallypoint for command center
townhall.smart(Target::Pos(self.start_center), false);
// Ordering scv on initial 50 minerals
townhall.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
// Splitting workers to closest mineral crystals
for u in &self.units.my.workers {
if let Some(mineral) = self.units.mineral_fields.closest(u) {
u.gather(mineral.tag(), false);
}
}
Ok(())
}
fn on_step(&mut self, _iteration: usize) -> SC2Result<()> {
self.distribute_workers();
self.build();
self.train();
self.execute_micro();
Ok(())
}
fn get_player_settings(&self) -> PlayerSettings {
PlayerSettings::new(Race::Terran).with_name("RustyReapers")
}
}
impl ReaperRushAI {
const DISTRIBUTION_DELAY: u32 = 8;
fn distribute_workers(&mut self) {
if self.units.my.workers.is_empty() {
return;
}
let mut idle_workers = self.units.my.workers.idle();
// Check distribution delay if there aren't any idle workers
let game_loop = self.state.observation.game_loop();
let last_loop = &mut self.last_loop_distributed;
if idle_workers.is_empty() && *last_loop + Self::DISTRIBUTION_DELAY > game_loop {
return;
}
*last_loop = game_loop;
// Distribute
let mineral_fields = &self.units.mineral_fields;
if mineral_fields.is_empty() {
return;
}
let bases = self.units.my.townhalls.ready();
if bases.is_empty() {
return;
}
let mut deficit_minings = Units::new();
let mut deficit_geysers = Units::new();
// Distributing mineral workers
for base in &bases {
match base.assigned_harvesters().cmp(&base.ideal_harvesters()) {
Ordering::Less => (0..(base.ideal_harvesters().unwrap()
- base.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_minings.push(base.clone());
}),
Ordering::Greater => {
let local_minerals = mineral_fields
.iter()
.closer(11.0, base)
.map(|m| m.tag())
.collect::<Vec<u64>>();
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
local_minerals.contains(&target_tag)
|| (u.is_carrying_minerals() && target_tag == base.tag())
})
})
.iter()
.take(
(base.assigned_harvesters().unwrap() - base.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
}
}
// Distributing gas workers
self.units
.my
.gas_buildings
.iter()
.ready()
.filter(|g| g.vespene_contents().map_or(false, |vespene| vespene > 0))
.for_each(
|gas| match gas.assigned_harvesters().cmp(&gas.ideal_harvesters()) {
Ordering::Less => (0..(gas.ideal_harvesters().unwrap()
- gas.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_geysers.push(gas.clone());
}),
Ordering::Greater => {
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
target_tag == gas.tag()
|| (u.is_carrying_vespene()
&& target_tag == bases.closest(gas).unwrap().tag())
})
})
.iter()
.take(
(gas.assigned_harvesters().unwrap() - gas.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
},
);
// Distributing idle workers
let minerals_near_base = if idle_workers.len() > deficit_minings.len() + deficit_geysers.len() {
let minerals = mineral_fields.filter(|m| bases.iter().any(|base| base.is_closer(11.0, *m)));
if minerals.is_empty() {
None
} else {
Some(minerals)
}
} else {
None
};
for u in &idle_workers {
if let Some(closest) = deficit_geysers.closest(u) {
let tag = closest.tag();
deficit_geysers.remove(tag);
u.gather(tag, false);
} else if let Some(closest) = deficit_minings.closest(u) {
u.gather(
mineral_fields
.closer(11.0, closest)
.max(|m| m.mineral_contents().unwrap_or(0))
.unwrap()
.tag(),
false,
);
let tag = closest.tag();
deficit_minings.remove(tag);
} else if u.is_idle() {
if let Some(mineral) = minerals_near_base.as_ref().and_then(|ms| ms.closest(u)) {
u.gather(mineral.tag(), false);
}
}
}
}
fn get_builder(&self, pos: Point2, mineral_tags: &[u64]) -> Option<&Unit> {
self.units
.my
.workers
.iter()
.filter(|u| {
!(u.is_constructing()
|| u.is_returning() || u.is_carrying_resource()
|| (u.is_gathering() && u.target_tag().map_or(true, |tag| !mineral_tags.contains(&tag))))
})
.closest(pos)
}
fn build(&mut self) {
if self.minerals < 75 {
return;
}
let mineral_tags = self
.units
.mineral_fields
.iter()
.map(|u| u.tag())
.collect::<Vec<u64>>();
let main_base = self.start_location.towards(self.game_info.map_center, 8.0);
if self.counter().count(UnitTypeId::Refinery) < 2
&& self.counter().ordered().count(UnitTypeId::Refinery) == 0
&& self.can_afford(UnitTypeId::Refinery, false)
{
let start_location = self.start_location;
if let Some(geyser) = self.find_gas_placement(start_location) {
if let Some(builder) = self.get_builder(geyser.position(), &mineral_tags) {
builder.build_gas(geyser.tag(), false);
self.subtract_resources(UnitTypeId::Refinery, false);
}
}
}
if self.supply_left < 3
&& self.supply_cap < 200
&& self.counter().ordered().count(UnitTypeId::SupplyDepot) == 0
&& self.can_afford(UnitTypeId::SupplyDepot, false)
{
if let Some(location) =
self.find_placement(UnitTypeId::SupplyDepot, main_base, Default::default())
{
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::SupplyDepot, location, false);
self.subtract_resources(UnitTypeId::SupplyDepot, false);
return;
}
}
}
if self.counter().all().count(UnitTypeId::Barracks) < 4
&& self.can_afford(UnitTypeId::Barracks, false)
{
if let Some(location) = self.find_placement(
UnitTypeId::Barracks,
main_base,
PlacementOptions {
step: 4,
..Default::default()
},
) {
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::Barracks, location, false);
self.subtract_resources(UnitTypeId::Barracks, false);
}
}
}
}
fn train(&mut self) |
fn throw_mine(&self, reaper: &Unit, target: &Unit) -> bool {
if reaper.has_ability(AbilityId::KD8ChargeKD8Charge)
&& reaper.in_ability_cast_range(AbilityId::KD8ChargeKD8Charge, target, 0.0)
{
reaper.command(
AbilityId::KD8ChargeKD8Charge,
Target::Pos(target.position()),
false,
);
true
} else {
false
}
}
fn execute_micro(&mut self) {
// Lower ready depots
self.units
.my
.structures
.iter()
.of_type(UnitTypeId::SupplyDepot)
.ready()
.for_each(|s| s.use_ability(AbilityId::MorphSupplyDepotLower, false));
// Reapers micro
let reapers = self.units.my.units.of_type(UnitTypeId::Reaper);
if reapers.is_empty() {
return;
}
let targets = {
let ground_targets = self.units.enemy.all.ground();
let ground_attackers = ground_targets.filter(|e| e.can_attack_ground());
if ground_attackers.is_empty() {
ground_targets
} else {
ground_attackers
}
};
for u in &reapers {
let is_retreating = self.reapers_retreat.contains(&u.tag());
if is_retreating {
if u.health_percentage().unwrap() > 0.75 {
self.reapers_retreat.remove(&u.tag());
}
} else if u.health_percentage().unwrap() < 0.5 {
self.reapers_retreat.insert(u.tag());
}
match targets.closest(u) {
Some(closest) => {
if self.throw_mine(u, closest) {
return;
}
if is_retreating || u.on_cooldown() {
match targets
.iter()
.filter(|t| t.in_range(u, t.speed() + if is_retreating { 2.0 } else { 0.5 }))
.closest(u)
{
Some(closest_attacker) => {
let flee_position = {
let pos = u.position().towards(closest_attacker.position(), -u.speed());
if self.is_pathable(pos) {
pos
} else {
*u.position()
.neighbors8()
.iter()
.filter(|p| self.is_pathable(**p))
.furthest(closest_attacker)
.unwrap_or(&self.start_location)
}
};
u.move_to(Target::Pos(flee_position), false);
}
None => {
if !(is_retreating || u.in_range(&closest, 0.0)) {
u.move_to(Target::Pos(closest.position()), false);
}
}
}
} else {
match targets.iter().in_range_of(u, 0.0).min_by_key(|t| t.hits()) {
Some(target) => u.attack(Target::Tag(target.tag()), false),
None => u.move_to(Target::Pos(closest.position()), false),
}
}
}
None => {
let pos = if is_retreating {
u.position()
} else {
self.enemy_start
};
u.move_to(Target::Pos(pos), false);
}
}
}
}
}
fn main() -> SC2Result<()> {
let app = clap_app!(RustyReapers =>
(version: crate_version!())
(author: crate_authors!())
(@arg ladder_server: --LadderServer +takes_value)
(@arg opponent_id: --OpponentId +takes_value)
(@arg host_port: --GamePort +takes_value)
(@arg player_port: --StartPort +takes_value)
(@arg game_step: -s --step
+takes_value
default_value("2")
"Sets game step for bot"
)
(@subcommand local =>
(about: "Runs local game vs Computer")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race
+takes_value
"Sets opponent race"
)
(@arg difficulty: -d --difficulty
+takes_value
"Sets opponent diffuculty"
)
(@arg ai_build: --("ai-build")
+takes_value
"Sets opponent build"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
(@arg realtime: --realtime "Enables realtime mode")
)
(@subcommand human =>
(about: "Runs game Human vs Bot")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race *
+takes_value
"Sets human race"
)
(@arg name: --name
+takes_value
"Sets human name"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
)
)
.get_matches();
let game_step = match app.value_of("game_step") {
Some("0") => panic!("game_step must be X >= 1"),
Some(step) => step.parse::<u32>().expect("Can't parse game_step"),
None => unreachable!(),
};
let mut bot = ReaperRushAI::default();
bot.set_game_step(game_step);
const LADDER_MAPS: &[&str] = &[
"DeathauraLE",
"EternalEmpireLE",
"EverDreamLE",
"GoldenWallLE",
"IceandChromeLE",
"PillarsofGoldLE",
"SubmarineLE",
];
let mut rng = thread_rng();
match app.subcommand() {
("local", Some(sub)) => run_vs_computer(
&mut bot,
Computer::new(
sub.value_of("race").map_or(Race::Random, |race| {
race.parse().expect("Can't parse computer race")
}),
sub.value_of("difficulty")
.map_or(Difficulty::VeryEasy, |difficulty| {
difficulty.parse().expect("Can't parse computer difficulty")
}),
sub.value_of("ai_build")
.map(|ai_build| ai_build.parse().expect("Can't parse computer build")),
),
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: sub.is_present("realtime"),
save_replay_as: sub.value_of("save_replay"),
},
),
("human", Some(sub)) => run_vs_human(
&mut bot,
PlayerSettings {
race: sub
.value_of("race")
.unwrap()
.parse()
.expect("Can't parse human race"),
name: sub.value_of("name"),
..Default::default()
},
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: true,
save_replay_as: sub.value_of("save_replay"),
},
),
_ => run_ladder_game(
&mut bot,
app.value_of("ladder_server").unwrap_or("127.0.0.1"),
app.value_of("host_port").expect("GamePort must be specified"),
app.value_of("player_port")
.expect("StartPort must be specified")
.parse()
.expect("Can't parse StartPort"),
app.value_of("opponent_id"),
),
}
}
| {
if self.minerals < 50 || self.supply_left == 0 {
return;
}
if self.supply_workers < 22 && self.can_afford(UnitTypeId::SCV, true) {
if let Some(cc) = self
.units
.my
.townhalls
.iter()
.find(|u| u.is_ready() && u.is_almost_idle())
{
cc.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
}
if self.can_afford(UnitTypeId::Reaper, true) {
if let Some(barracks) = self
.units
.my
.structures
.iter()
.find(|u| u.type_id() == UnitTypeId::Barracks && u.is_ready() && u.is_almost_idle())
{
barracks.train(UnitTypeId::Reaper, false);
self.subtract_resources(UnitTypeId::Reaper, true);
}
}
} | identifier_body |
p_test.go | package p
import (
"strings"
"testing"
"github.com/lSimul/php2go/lang"
"github.com/lSimul/php2go/p/test"
"github.com/z7zmey/php-parser/node"
"github.com/z7zmey/php-parser/node/expr"
"github.com/z7zmey/php-parser/node/name"
"github.com/z7zmey/php-parser/node/stmt"
"github.com/z7zmey/php-parser/php7"
)
func TestP(t *testing.T) {
t.Run("helper functions", helpers)
t.Run("basic set", functionDef)
t.Run("binary operations", testBinaryOp)
t.Run("unary operations", unaryOp)
t.Run("statements", testStatements)
t.Run("text comparison of the main function", testMain)
}
func helpers(t *testing.T) {
t.Helper()
parser := parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
}
functions := []struct {
source *name.Name
expected string
}{
{test.Name("f"), "f"},
{test.Name("function"), "function"},
{test.Name("func"), "func1"},
}
for _, f := range functions {
if name := parser.constructName(f.source, true); name != f.expected {
t.Errorf("'%s' expected, '%s' found.\n", f.expected, name)
}
}
variables := []struct {
source *expr.Variable
expected string
}{
{test.Variable("f"), "f"},
{test.Variable("func"), "func1"},
{test.Variable("function"), "function"},
}
for _, v := range variables {
if name := parser.identifierName(v.source); name != v.expected {
t.Errorf("'%s' expected, '%s' found.\n", v.expected, name)
}
}
nop := test.Nop()
if l := nodeList(nop); l[0] != nop {
t.Error("Nothing should happen to passed node.")
}
if l := nodeList(nil); len(l) != 0 {
t.Error("Nil cannot create non-empty statement list.")
}
list := test.List([]node.Node{nop})
if l := nodeList(list); l[0] != nop {
t.Error("Nothing should happen to the nodes passed in the node list.")
}
}
func functionDef(t *testing.T) {
t.Helper()
parser := fileParser{
parser: &parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
},
}
// This tests which name and return type will
// be used. lang.NewFunc(string) is tested
// elsewhere.
f, _ := parser.funcDef(nil)
if f != nil {
t.Error("From nil nothing can be created.")
}
funcDefs := []struct {
f *stmt.Function
name string
ret string
}{
{test.Func("f"), "f", lang.Void},
{test.Func("function"), "function", lang.Void},
{test.Func("func"), "func1", lang.Void},
}
for _, f := range funcDefs {
def, _ := parser.funcDef(f.f)
if def.Name != f.name {
t.Errorf("'%s' expected, '%s' found.\n", f.name, def.Name)
}
if !def.Return.Equal(f.ret) {
t.Errorf("'%s' expected, '%s' found.\n", f.name, def.Return)
}
}
// f = mainDef(parser.file, false)
// if f.Name != "main" {
// t.Errorf("'%s' expected, '%s' found.\n", "main", f.Name)
// }
// if !f.Return.Equal(lang.Void) {
// t.Errorf("'%s' expected, '%s' found.\n", lang.Void, f.Return)
// }
// It used to be empty string, but because
// funcDef translates the function name,
// it had to be changed to something
// meaningful.
placeholderFunction := test.Func("placeholderFunction")
returnTypes := []struct {
typ *name.Name
expected string
}{
{test.Name("void"), lang.Void},
{test.Name("int"), lang.Int},
{test.Name("string"), lang.String},
}
for _, rt := range returnTypes {
placeholderFunction.ReturnType = rt.typ
f, _ := parser.funcDef(placeholderFunction)
if !f.Return.Equal(rt.expected) {
t.Errorf("'%s' expected, '%s' found.\n", rt.expected, f.Return)
}
}
}
func testBinaryOp(t *testing.T) {
t.Helper()
left := test.Int("1")
right := test.Int("2")
cases := []struct {
op string
ret string
}{
{"+", lang.Int},
{"-", lang.Int},
{"*", lang.Int},
{"<", lang.Bool},
{"<=", lang.Bool},
{">=", lang.Bool},
{">", lang.Bool},
{"==", lang.Bool},
}
parser := fileParser{parser: &parser{}}
for _, c := range cases {
expr := parser.expression(nil, test.BinaryOp(left, c.op, right))
op, ok := expr.(*lang.BinaryOp)
if !ok {
t.Fatal("Expected binary operation, something else found.")
}
if op.Operation != c.op {
t.Errorf("'%s' expected, '%s' found.", c.op, op.Operation)
}
if !op.Type().Equal(c.ret) {
t.Errorf("'%s' expected, '%s' found.", c.ret, op.Type())
}
}
}
func unaryOp(t *testing.T) {
t.Helper()
parent := lang.NewCode(nil)
parser := fileParser{parser: &parser{}}
for _, n := range []node.Node{
test.Plus(test.String(`"test"`)),
test.Plus(test.String(`""`)),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Str); !ok {
t.Error("lang.Str expected.")
}
if typ := e.Type(); !typ.Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Int("0")),
test.Plus(test.Int("2")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Number); !ok {
t.Error("lang.Number expected.")
}
if typ := e.Type(); !typ.Equal(lang.Int) {
t.Errorf("'int' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Float("0")),
test.Plus(test.Float("1.0")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Float); !ok {
t.Error("lang.Float expected.")
}
if typ := e.Type(); !typ.Equal(lang.Float64) {
t.Errorf("'float' expected, '%s' found.", typ)
}
}
for _, c := range []struct {
n node.Node
t string
}{
{test.Minus(test.String(`"test"`)), lang.String},
{test.Minus(test.String(`""`)), lang.String},
{test.Minus(test.Int("0")), lang.Int},
{test.Minus(test.Int("2")), lang.Int},
{test.Minus(test.Float("0")), lang.Float64},
{test.Minus(test.Float("1.0")), lang.Float64},
} {
e := parser.expression(parent, c.n)
u, ok := e.(*lang.UnaryMinus)
if !ok {
t.Fatal("lang.UnaryMinus expected.")
}
if u.Parent() != parent {
t.Error("Parent not set.")
}
if u.Expr.Parent() != u {
t.Error("Parent not set.")
}
if typ := u.Type(); !typ.Equal(c.t) {
t.Errorf("'%s' expected, '%s' found.", c.t, typ)
}
}
}
func testStatements(t *testing.T) {
t.Helper()
gc := lang.NewGlobalContext()
funcs := NewFunc(gc)
parser := fileParser{
parser: &parser{
gc: gc,
funcs: funcs,
},
}
parser.file = lang.NewFile(gc, "dummy", false, true)
parser.funcs = &FileFunc{funcs, parser.file}
b := lang.NewCode(nil)
html := test.HTML("<html></html>")
parser.createFunction(b, []node.Node{html})
if len(b.Statements) != 1 {
t.Fatal("Wrong amount of statements in the block.")
}
h, ok := b.Statements[0].(*lang.FunctionCall)
if !ok {
t.Fatal("That one statement should be function call.")
}
if h.Parent() != b {
t.Error("Parent not set.")
}
if !h.Return.Equal(lang.Void) {
t.Errorf("'void' expected, '%s' found.", h.Return)
}
if h.Name != "fmt.Print" {
t.Errorf("'fmt.Print' expected, '%s' found.", h.Name)
}
if len(h.Args) != 1 {
t.Fatal("'fmt.Print' should have only one argument.")
}
a, ok := h.Args[0].(*lang.Str)
if !ok {
t.Fatal("That one argument should be string.")
}
if a.Parent() != h {
t.Error("Parent not set.")
}
if a.Value != "`<html></html>`" {
t.Errorf("'`<html></html>`' expected, '%s' found", a.Value)
}
if !a.Type().Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", a.Type())
}
}
func testMain(tt *testing.T) { | tests := []struct {
source []byte
expected string
}{
// Sandbox
{
source: []byte(`<?php
function fc() {
$a = 1 + 2;
}
`),
expected: `func fc() {
a := 1 + 2
}`,
},
// examples/04.php
{
source: []byte(`<?php
function fc() {
$a = 2 + 3 + 4 * 2;
echo $a * $a;
}
`),
expected: `func fc() {
a := 2 + 3 + 4 * 2
fmt.Print(a * a)
}`,
},
// examples/05.php
{
source: []byte(`<?php
function fc() {
{
{
$a = "0";
// Added to compile it in Go. This var is not used.
echo $a;
}
$a = 1;
echo $a;
}
}
`),
expected: `func fc() {
{
{
a := "0"
fmt.Print(a)
}
a := 1
fmt.Print(a)
}
}`,
},
// examples/06.php
{
source: []byte(`<?php
function fc() {
{
$a = 0;
}
$a++;
echo $a;
}
`),
expected: `func fc() {
var a int
{
a = 0
}
a++
fmt.Print(a)
}`,
},
// examples/07.php
{
source: []byte(`<?php
function fc() {
$a = 0;
{
$a = "1";
echo $a;
}
echo $a;
$a = 2;
echo $a;
}
`),
expected: `func fc() {
var a interface{}
a = 0
{
a = "1"
fmt.Print(a.(string))
}
fmt.Print(a.(string))
a = 2
fmt.Print(a.(int))
}`,
},
}
for _, t := range tests {
parser := parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
}
out := parser.Run(parsePHP(t.source), "dummy", false)
for _, f := range out.Files {
if f.Name == "fc" {
main := f.String()
compare(tt, t.expected, main)
}
}
}
}
func parsePHP(source []byte) *node.Root {
parser := php7.NewParser(source, "")
parser.Parse()
return parser.GetRootNode().(*node.Root)
}
func compare(t *testing.T, ref, out string) {
r := strings.Split(ref, "\n")
o := strings.Split(out, "\n")
i, j := 0, 0
for i < len(r) && j < len(o) {
c := true
s1 := strings.TrimLeft(r[i], "\t")
if s1 == "" {
i++
c = false
}
s2 := strings.TrimLeft(o[j], "\t")
if s2 == "" {
j++
c = false
}
if !c {
continue
}
if s1 != s2 {
t.Errorf("Line %d:\nExpected:\n%s\nFound:\n%s\n", i, s1, s2)
}
i++
j++
}
for i < len(r) {
s := strings.TrimLeft(r[i], "\t")
if s != "" {
t.Errorf("Whole string was not parsed")
return
}
i++
}
for j < len(o) {
s := strings.TrimLeft(o[j], "\t")
if s != "" {
t.Errorf("Whole string was not parsed")
return
}
j++
}
} | tt.Helper()
| random_line_split |
p_test.go | package p
import (
"strings"
"testing"
"github.com/lSimul/php2go/lang"
"github.com/lSimul/php2go/p/test"
"github.com/z7zmey/php-parser/node"
"github.com/z7zmey/php-parser/node/expr"
"github.com/z7zmey/php-parser/node/name"
"github.com/z7zmey/php-parser/node/stmt"
"github.com/z7zmey/php-parser/php7"
)
func TestP(t *testing.T) {
t.Run("helper functions", helpers)
t.Run("basic set", functionDef)
t.Run("binary operations", testBinaryOp)
t.Run("unary operations", unaryOp)
t.Run("statements", testStatements)
t.Run("text comparison of the main function", testMain)
}
func helpers(t *testing.T) {
t.Helper()
parser := parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
}
functions := []struct {
source *name.Name
expected string
}{
{test.Name("f"), "f"},
{test.Name("function"), "function"},
{test.Name("func"), "func1"},
}
for _, f := range functions {
if name := parser.constructName(f.source, true); name != f.expected {
t.Errorf("'%s' expected, '%s' found.\n", f.expected, name)
}
}
variables := []struct {
source *expr.Variable
expected string
}{
{test.Variable("f"), "f"},
{test.Variable("func"), "func1"},
{test.Variable("function"), "function"},
}
for _, v := range variables {
if name := parser.identifierName(v.source); name != v.expected {
t.Errorf("'%s' expected, '%s' found.\n", v.expected, name)
}
}
nop := test.Nop()
if l := nodeList(nop); l[0] != nop {
t.Error("Nothing should happen to passed node.")
}
if l := nodeList(nil); len(l) != 0 {
t.Error("Nil cannot create non-empty statement list.")
}
list := test.List([]node.Node{nop})
if l := nodeList(list); l[0] != nop {
t.Error("Nothing should happen to the nodes passed in the node list.")
}
}
func functionDef(t *testing.T) {
t.Helper()
parser := fileParser{
parser: &parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
},
}
// This tests which name and return type will
// be used. lang.NewFunc(string) is tested
// elsewhere.
f, _ := parser.funcDef(nil)
if f != nil {
t.Error("From nil nothing can be created.")
}
funcDefs := []struct {
f *stmt.Function
name string
ret string
}{
{test.Func("f"), "f", lang.Void},
{test.Func("function"), "function", lang.Void},
{test.Func("func"), "func1", lang.Void},
}
for _, f := range funcDefs {
def, _ := parser.funcDef(f.f)
if def.Name != f.name {
t.Errorf("'%s' expected, '%s' found.\n", f.name, def.Name)
}
if !def.Return.Equal(f.ret) {
t.Errorf("'%s' expected, '%s' found.\n", f.name, def.Return)
}
}
// f = mainDef(parser.file, false)
// if f.Name != "main" {
// t.Errorf("'%s' expected, '%s' found.\n", "main", f.Name)
// }
// if !f.Return.Equal(lang.Void) {
// t.Errorf("'%s' expected, '%s' found.\n", lang.Void, f.Return)
// }
// It used to be empty string, but because
// funcDef translates the function name,
// it had to be changed to something
// meaningful.
placeholderFunction := test.Func("placeholderFunction")
returnTypes := []struct {
typ *name.Name
expected string
}{
{test.Name("void"), lang.Void},
{test.Name("int"), lang.Int},
{test.Name("string"), lang.String},
}
for _, rt := range returnTypes {
placeholderFunction.ReturnType = rt.typ
f, _ := parser.funcDef(placeholderFunction)
if !f.Return.Equal(rt.expected) {
t.Errorf("'%s' expected, '%s' found.\n", rt.expected, f.Return)
}
}
}
func testBinaryOp(t *testing.T) {
t.Helper()
left := test.Int("1")
right := test.Int("2")
cases := []struct {
op string
ret string
}{
{"+", lang.Int},
{"-", lang.Int},
{"*", lang.Int},
{"<", lang.Bool},
{"<=", lang.Bool},
{">=", lang.Bool},
{">", lang.Bool},
{"==", lang.Bool},
}
parser := fileParser{parser: &parser{}}
for _, c := range cases |
}
func unaryOp(t *testing.T) {
t.Helper()
parent := lang.NewCode(nil)
parser := fileParser{parser: &parser{}}
for _, n := range []node.Node{
test.Plus(test.String(`"test"`)),
test.Plus(test.String(`""`)),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Str); !ok {
t.Error("lang.Str expected.")
}
if typ := e.Type(); !typ.Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Int("0")),
test.Plus(test.Int("2")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Number); !ok {
t.Error("lang.Number expected.")
}
if typ := e.Type(); !typ.Equal(lang.Int) {
t.Errorf("'int' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Float("0")),
test.Plus(test.Float("1.0")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Float); !ok {
t.Error("lang.Float expected.")
}
if typ := e.Type(); !typ.Equal(lang.Float64) {
t.Errorf("'float' expected, '%s' found.", typ)
}
}
for _, c := range []struct {
n node.Node
t string
}{
{test.Minus(test.String(`"test"`)), lang.String},
{test.Minus(test.String(`""`)), lang.String},
{test.Minus(test.Int("0")), lang.Int},
{test.Minus(test.Int("2")), lang.Int},
{test.Minus(test.Float("0")), lang.Float64},
{test.Minus(test.Float("1.0")), lang.Float64},
} {
e := parser.expression(parent, c.n)
u, ok := e.(*lang.UnaryMinus)
if !ok {
t.Fatal("lang.UnaryMinus expected.")
}
if u.Parent() != parent {
t.Error("Parent not set.")
}
if u.Expr.Parent() != u {
t.Error("Parent not set.")
}
if typ := u.Type(); !typ.Equal(c.t) {
t.Errorf("'%s' expected, '%s' found.", c.t, typ)
}
}
}
func testStatements(t *testing.T) {
t.Helper()
gc := lang.NewGlobalContext()
funcs := NewFunc(gc)
parser := fileParser{
parser: &parser{
gc: gc,
funcs: funcs,
},
}
parser.file = lang.NewFile(gc, "dummy", false, true)
parser.funcs = &FileFunc{funcs, parser.file}
b := lang.NewCode(nil)
html := test.HTML("<html></html>")
parser.createFunction(b, []node.Node{html})
if len(b.Statements) != 1 {
t.Fatal("Wrong amount of statements in the block.")
}
h, ok := b.Statements[0].(*lang.FunctionCall)
if !ok {
t.Fatal("That one statement should be function call.")
}
if h.Parent() != b {
t.Error("Parent not set.")
}
if !h.Return.Equal(lang.Void) {
t.Errorf("'void' expected, '%s' found.", h.Return)
}
if h.Name != "fmt.Print" {
t.Errorf("'fmt.Print' expected, '%s' found.", h.Name)
}
if len(h.Args) != 1 {
t.Fatal("'fmt.Print' should have only one argument.")
}
a, ok := h.Args[0].(*lang.Str)
if !ok {
t.Fatal("That one argument should be string.")
}
if a.Parent() != h {
t.Error("Parent not set.")
}
if a.Value != "`<html></html>`" {
t.Errorf("'`<html></html>`' expected, '%s' found", a.Value)
}
if !a.Type().Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", a.Type())
}
}
func testMain(tt *testing.T) {
tt.Helper()
tests := []struct {
source []byte
expected string
}{
// Sandbox
{
source: []byte(`<?php
function fc() {
$a = 1 + 2;
}
`),
expected: `func fc() {
a := 1 + 2
}`,
},
// examples/04.php
{
source: []byte(`<?php
function fc() {
$a = 2 + 3 + 4 * 2;
echo $a * $a;
}
`),
expected: `func fc() {
a := 2 + 3 + 4 * 2
fmt.Print(a * a)
}`,
},
// examples/05.php
{
source: []byte(`<?php
function fc() {
{
{
$a = "0";
// Added to compile it in Go. This var is not used.
echo $a;
}
$a = 1;
echo $a;
}
}
`),
expected: `func fc() {
{
{
a := "0"
fmt.Print(a)
}
a := 1
fmt.Print(a)
}
}`,
},
// examples/06.php
{
source: []byte(`<?php
function fc() {
{
$a = 0;
}
$a++;
echo $a;
}
`),
expected: `func fc() {
var a int
{
a = 0
}
a++
fmt.Print(a)
}`,
},
// examples/07.php
{
source: []byte(`<?php
function fc() {
$a = 0;
{
$a = "1";
echo $a;
}
echo $a;
$a = 2;
echo $a;
}
`),
expected: `func fc() {
var a interface{}
a = 0
{
a = "1"
fmt.Print(a.(string))
}
fmt.Print(a.(string))
a = 2
fmt.Print(a.(int))
}`,
},
}
for _, t := range tests {
parser := parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
}
out := parser.Run(parsePHP(t.source), "dummy", false)
for _, f := range out.Files {
if f.Name == "fc" {
main := f.String()
compare(tt, t.expected, main)
}
}
}
}
func parsePHP(source []byte) *node.Root {
parser := php7.NewParser(source, "")
parser.Parse()
return parser.GetRootNode().(*node.Root)
}
func compare(t *testing.T, ref, out string) {
r := strings.Split(ref, "\n")
o := strings.Split(out, "\n")
i, j := 0, 0
for i < len(r) && j < len(o) {
c := true
s1 := strings.TrimLeft(r[i], "\t")
if s1 == "" {
i++
c = false
}
s2 := strings.TrimLeft(o[j], "\t")
if s2 == "" {
j++
c = false
}
if !c {
continue
}
if s1 != s2 {
t.Errorf("Line %d:\nExpected:\n%s\nFound:\n%s\n", i, s1, s2)
}
i++
j++
}
for i < len(r) {
s := strings.TrimLeft(r[i], "\t")
if s != "" {
t.Errorf("Whole string was not parsed")
return
}
i++
}
for j < len(o) {
s := strings.TrimLeft(o[j], "\t")
if s != "" {
t.Errorf("Whole string was not parsed")
return
}
j++
}
}
| {
expr := parser.expression(nil, test.BinaryOp(left, c.op, right))
op, ok := expr.(*lang.BinaryOp)
if !ok {
t.Fatal("Expected binary operation, something else found.")
}
if op.Operation != c.op {
t.Errorf("'%s' expected, '%s' found.", c.op, op.Operation)
}
if !op.Type().Equal(c.ret) {
t.Errorf("'%s' expected, '%s' found.", c.ret, op.Type())
}
} | conditional_block |
p_test.go | package p
import (
"strings"
"testing"
"github.com/lSimul/php2go/lang"
"github.com/lSimul/php2go/p/test"
"github.com/z7zmey/php-parser/node"
"github.com/z7zmey/php-parser/node/expr"
"github.com/z7zmey/php-parser/node/name"
"github.com/z7zmey/php-parser/node/stmt"
"github.com/z7zmey/php-parser/php7"
)
func TestP(t *testing.T) {
t.Run("helper functions", helpers)
t.Run("basic set", functionDef)
t.Run("binary operations", testBinaryOp)
t.Run("unary operations", unaryOp)
t.Run("statements", testStatements)
t.Run("text comparison of the main function", testMain)
}
func helpers(t *testing.T) {
t.Helper()
parser := parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
}
functions := []struct {
source *name.Name
expected string
}{
{test.Name("f"), "f"},
{test.Name("function"), "function"},
{test.Name("func"), "func1"},
}
for _, f := range functions {
if name := parser.constructName(f.source, true); name != f.expected {
t.Errorf("'%s' expected, '%s' found.\n", f.expected, name)
}
}
variables := []struct {
source *expr.Variable
expected string
}{
{test.Variable("f"), "f"},
{test.Variable("func"), "func1"},
{test.Variable("function"), "function"},
}
for _, v := range variables {
if name := parser.identifierName(v.source); name != v.expected {
t.Errorf("'%s' expected, '%s' found.\n", v.expected, name)
}
}
nop := test.Nop()
if l := nodeList(nop); l[0] != nop {
t.Error("Nothing should happen to passed node.")
}
if l := nodeList(nil); len(l) != 0 {
t.Error("Nil cannot create non-empty statement list.")
}
list := test.List([]node.Node{nop})
if l := nodeList(list); l[0] != nop {
t.Error("Nothing should happen to the nodes passed in the node list.")
}
}
func functionDef(t *testing.T) {
t.Helper()
parser := fileParser{
parser: &parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
},
}
// This tests which name and return type will
// be used. lang.NewFunc(string) is tested
// elsewhere.
f, _ := parser.funcDef(nil)
if f != nil {
t.Error("From nil nothing can be created.")
}
funcDefs := []struct {
f *stmt.Function
name string
ret string
}{
{test.Func("f"), "f", lang.Void},
{test.Func("function"), "function", lang.Void},
{test.Func("func"), "func1", lang.Void},
}
for _, f := range funcDefs {
def, _ := parser.funcDef(f.f)
if def.Name != f.name {
t.Errorf("'%s' expected, '%s' found.\n", f.name, def.Name)
}
if !def.Return.Equal(f.ret) {
t.Errorf("'%s' expected, '%s' found.\n", f.name, def.Return)
}
}
// f = mainDef(parser.file, false)
// if f.Name != "main" {
// t.Errorf("'%s' expected, '%s' found.\n", "main", f.Name)
// }
// if !f.Return.Equal(lang.Void) {
// t.Errorf("'%s' expected, '%s' found.\n", lang.Void, f.Return)
// }
// It used to be empty string, but because
// funcDef translates the function name,
// it had to be changed to something
// meaningful.
placeholderFunction := test.Func("placeholderFunction")
returnTypes := []struct {
typ *name.Name
expected string
}{
{test.Name("void"), lang.Void},
{test.Name("int"), lang.Int},
{test.Name("string"), lang.String},
}
for _, rt := range returnTypes {
placeholderFunction.ReturnType = rt.typ
f, _ := parser.funcDef(placeholderFunction)
if !f.Return.Equal(rt.expected) {
t.Errorf("'%s' expected, '%s' found.\n", rt.expected, f.Return)
}
}
}
func testBinaryOp(t *testing.T) {
t.Helper()
left := test.Int("1")
right := test.Int("2")
cases := []struct {
op string
ret string
}{
{"+", lang.Int},
{"-", lang.Int},
{"*", lang.Int},
{"<", lang.Bool},
{"<=", lang.Bool},
{">=", lang.Bool},
{">", lang.Bool},
{"==", lang.Bool},
}
parser := fileParser{parser: &parser{}}
for _, c := range cases {
expr := parser.expression(nil, test.BinaryOp(left, c.op, right))
op, ok := expr.(*lang.BinaryOp)
if !ok {
t.Fatal("Expected binary operation, something else found.")
}
if op.Operation != c.op {
t.Errorf("'%s' expected, '%s' found.", c.op, op.Operation)
}
if !op.Type().Equal(c.ret) {
t.Errorf("'%s' expected, '%s' found.", c.ret, op.Type())
}
}
}
func unaryOp(t *testing.T) {
t.Helper()
parent := lang.NewCode(nil)
parser := fileParser{parser: &parser{}}
for _, n := range []node.Node{
test.Plus(test.String(`"test"`)),
test.Plus(test.String(`""`)),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Str); !ok {
t.Error("lang.Str expected.")
}
if typ := e.Type(); !typ.Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Int("0")),
test.Plus(test.Int("2")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Number); !ok {
t.Error("lang.Number expected.")
}
if typ := e.Type(); !typ.Equal(lang.Int) {
t.Errorf("'int' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Float("0")),
test.Plus(test.Float("1.0")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Float); !ok {
t.Error("lang.Float expected.")
}
if typ := e.Type(); !typ.Equal(lang.Float64) {
t.Errorf("'float' expected, '%s' found.", typ)
}
}
for _, c := range []struct {
n node.Node
t string
}{
{test.Minus(test.String(`"test"`)), lang.String},
{test.Minus(test.String(`""`)), lang.String},
{test.Minus(test.Int("0")), lang.Int},
{test.Minus(test.Int("2")), lang.Int},
{test.Minus(test.Float("0")), lang.Float64},
{test.Minus(test.Float("1.0")), lang.Float64},
} {
e := parser.expression(parent, c.n)
u, ok := e.(*lang.UnaryMinus)
if !ok {
t.Fatal("lang.UnaryMinus expected.")
}
if u.Parent() != parent {
t.Error("Parent not set.")
}
if u.Expr.Parent() != u {
t.Error("Parent not set.")
}
if typ := u.Type(); !typ.Equal(c.t) {
t.Errorf("'%s' expected, '%s' found.", c.t, typ)
}
}
}
func testStatements(t *testing.T) {
t.Helper()
gc := lang.NewGlobalContext()
funcs := NewFunc(gc)
parser := fileParser{
parser: &parser{
gc: gc,
funcs: funcs,
},
}
parser.file = lang.NewFile(gc, "dummy", false, true)
parser.funcs = &FileFunc{funcs, parser.file}
b := lang.NewCode(nil)
html := test.HTML("<html></html>")
parser.createFunction(b, []node.Node{html})
if len(b.Statements) != 1 {
t.Fatal("Wrong amount of statements in the block.")
}
h, ok := b.Statements[0].(*lang.FunctionCall)
if !ok {
t.Fatal("That one statement should be function call.")
}
if h.Parent() != b {
t.Error("Parent not set.")
}
if !h.Return.Equal(lang.Void) {
t.Errorf("'void' expected, '%s' found.", h.Return)
}
if h.Name != "fmt.Print" {
t.Errorf("'fmt.Print' expected, '%s' found.", h.Name)
}
if len(h.Args) != 1 {
t.Fatal("'fmt.Print' should have only one argument.")
}
a, ok := h.Args[0].(*lang.Str)
if !ok {
t.Fatal("That one argument should be string.")
}
if a.Parent() != h {
t.Error("Parent not set.")
}
if a.Value != "`<html></html>`" {
t.Errorf("'`<html></html>`' expected, '%s' found", a.Value)
}
if !a.Type().Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", a.Type())
}
}
func | (tt *testing.T) {
tt.Helper()
tests := []struct {
source []byte
expected string
}{
// Sandbox
{
source: []byte(`<?php
function fc() {
$a = 1 + 2;
}
`),
expected: `func fc() {
a := 1 + 2
}`,
},
// examples/04.php
{
source: []byte(`<?php
function fc() {
$a = 2 + 3 + 4 * 2;
echo $a * $a;
}
`),
expected: `func fc() {
a := 2 + 3 + 4 * 2
fmt.Print(a * a)
}`,
},
// examples/05.php
{
source: []byte(`<?php
function fc() {
{
{
$a = "0";
// Added to compile it in Go. This var is not used.
echo $a;
}
$a = 1;
echo $a;
}
}
`),
expected: `func fc() {
{
{
a := "0"
fmt.Print(a)
}
a := 1
fmt.Print(a)
}
}`,
},
// examples/06.php
{
source: []byte(`<?php
function fc() {
{
$a = 0;
}
$a++;
echo $a;
}
`),
expected: `func fc() {
var a int
{
a = 0
}
a++
fmt.Print(a)
}`,
},
// examples/07.php
{
source: []byte(`<?php
function fc() {
$a = 0;
{
$a = "1";
echo $a;
}
echo $a;
$a = 2;
echo $a;
}
`),
expected: `func fc() {
var a interface{}
a = 0
{
a = "1"
fmt.Print(a.(string))
}
fmt.Print(a.(string))
a = 2
fmt.Print(a.(int))
}`,
},
}
for _, t := range tests {
parser := parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
}
out := parser.Run(parsePHP(t.source), "dummy", false)
for _, f := range out.Files {
if f.Name == "fc" {
main := f.String()
compare(tt, t.expected, main)
}
}
}
}
func parsePHP(source []byte) *node.Root {
parser := php7.NewParser(source, "")
parser.Parse()
return parser.GetRootNode().(*node.Root)
}
func compare(t *testing.T, ref, out string) {
r := strings.Split(ref, "\n")
o := strings.Split(out, "\n")
i, j := 0, 0
for i < len(r) && j < len(o) {
c := true
s1 := strings.TrimLeft(r[i], "\t")
if s1 == "" {
i++
c = false
}
s2 := strings.TrimLeft(o[j], "\t")
if s2 == "" {
j++
c = false
}
if !c {
continue
}
if s1 != s2 {
t.Errorf("Line %d:\nExpected:\n%s\nFound:\n%s\n", i, s1, s2)
}
i++
j++
}
for i < len(r) {
s := strings.TrimLeft(r[i], "\t")
if s != "" {
t.Errorf("Whole string was not parsed")
return
}
i++
}
for j < len(o) {
s := strings.TrimLeft(o[j], "\t")
if s != "" {
t.Errorf("Whole string was not parsed")
return
}
j++
}
}
| testMain | identifier_name |
p_test.go | package p
import (
"strings"
"testing"
"github.com/lSimul/php2go/lang"
"github.com/lSimul/php2go/p/test"
"github.com/z7zmey/php-parser/node"
"github.com/z7zmey/php-parser/node/expr"
"github.com/z7zmey/php-parser/node/name"
"github.com/z7zmey/php-parser/node/stmt"
"github.com/z7zmey/php-parser/php7"
)
func TestP(t *testing.T) {
t.Run("helper functions", helpers)
t.Run("basic set", functionDef)
t.Run("binary operations", testBinaryOp)
t.Run("unary operations", unaryOp)
t.Run("statements", testStatements)
t.Run("text comparison of the main function", testMain)
}
func helpers(t *testing.T) {
t.Helper()
parser := parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
}
functions := []struct {
source *name.Name
expected string
}{
{test.Name("f"), "f"},
{test.Name("function"), "function"},
{test.Name("func"), "func1"},
}
for _, f := range functions {
if name := parser.constructName(f.source, true); name != f.expected {
t.Errorf("'%s' expected, '%s' found.\n", f.expected, name)
}
}
variables := []struct {
source *expr.Variable
expected string
}{
{test.Variable("f"), "f"},
{test.Variable("func"), "func1"},
{test.Variable("function"), "function"},
}
for _, v := range variables {
if name := parser.identifierName(v.source); name != v.expected {
t.Errorf("'%s' expected, '%s' found.\n", v.expected, name)
}
}
nop := test.Nop()
if l := nodeList(nop); l[0] != nop {
t.Error("Nothing should happen to passed node.")
}
if l := nodeList(nil); len(l) != 0 {
t.Error("Nil cannot create non-empty statement list.")
}
list := test.List([]node.Node{nop})
if l := nodeList(list); l[0] != nop {
t.Error("Nothing should happen to the nodes passed in the node list.")
}
}
func functionDef(t *testing.T) {
t.Helper()
parser := fileParser{
parser: &parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
},
}
// This tests which name and return type will
// be used. lang.NewFunc(string) is tested
// elsewhere.
f, _ := parser.funcDef(nil)
if f != nil {
t.Error("From nil nothing can be created.")
}
funcDefs := []struct {
f *stmt.Function
name string
ret string
}{
{test.Func("f"), "f", lang.Void},
{test.Func("function"), "function", lang.Void},
{test.Func("func"), "func1", lang.Void},
}
for _, f := range funcDefs {
def, _ := parser.funcDef(f.f)
if def.Name != f.name {
t.Errorf("'%s' expected, '%s' found.\n", f.name, def.Name)
}
if !def.Return.Equal(f.ret) {
t.Errorf("'%s' expected, '%s' found.\n", f.name, def.Return)
}
}
// f = mainDef(parser.file, false)
// if f.Name != "main" {
// t.Errorf("'%s' expected, '%s' found.\n", "main", f.Name)
// }
// if !f.Return.Equal(lang.Void) {
// t.Errorf("'%s' expected, '%s' found.\n", lang.Void, f.Return)
// }
// It used to be empty string, but because
// funcDef translates the function name,
// it had to be changed to something
// meaningful.
placeholderFunction := test.Func("placeholderFunction")
returnTypes := []struct {
typ *name.Name
expected string
}{
{test.Name("void"), lang.Void},
{test.Name("int"), lang.Int},
{test.Name("string"), lang.String},
}
for _, rt := range returnTypes {
placeholderFunction.ReturnType = rt.typ
f, _ := parser.funcDef(placeholderFunction)
if !f.Return.Equal(rt.expected) {
t.Errorf("'%s' expected, '%s' found.\n", rt.expected, f.Return)
}
}
}
func testBinaryOp(t *testing.T) {
t.Helper()
left := test.Int("1")
right := test.Int("2")
cases := []struct {
op string
ret string
}{
{"+", lang.Int},
{"-", lang.Int},
{"*", lang.Int},
{"<", lang.Bool},
{"<=", lang.Bool},
{">=", lang.Bool},
{">", lang.Bool},
{"==", lang.Bool},
}
parser := fileParser{parser: &parser{}}
for _, c := range cases {
expr := parser.expression(nil, test.BinaryOp(left, c.op, right))
op, ok := expr.(*lang.BinaryOp)
if !ok {
t.Fatal("Expected binary operation, something else found.")
}
if op.Operation != c.op {
t.Errorf("'%s' expected, '%s' found.", c.op, op.Operation)
}
if !op.Type().Equal(c.ret) {
t.Errorf("'%s' expected, '%s' found.", c.ret, op.Type())
}
}
}
func unaryOp(t *testing.T) {
t.Helper()
parent := lang.NewCode(nil)
parser := fileParser{parser: &parser{}}
for _, n := range []node.Node{
test.Plus(test.String(`"test"`)),
test.Plus(test.String(`""`)),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Str); !ok {
t.Error("lang.Str expected.")
}
if typ := e.Type(); !typ.Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Int("0")),
test.Plus(test.Int("2")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Number); !ok {
t.Error("lang.Number expected.")
}
if typ := e.Type(); !typ.Equal(lang.Int) {
t.Errorf("'int' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Float("0")),
test.Plus(test.Float("1.0")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Float); !ok {
t.Error("lang.Float expected.")
}
if typ := e.Type(); !typ.Equal(lang.Float64) {
t.Errorf("'float' expected, '%s' found.", typ)
}
}
for _, c := range []struct {
n node.Node
t string
}{
{test.Minus(test.String(`"test"`)), lang.String},
{test.Minus(test.String(`""`)), lang.String},
{test.Minus(test.Int("0")), lang.Int},
{test.Minus(test.Int("2")), lang.Int},
{test.Minus(test.Float("0")), lang.Float64},
{test.Minus(test.Float("1.0")), lang.Float64},
} {
e := parser.expression(parent, c.n)
u, ok := e.(*lang.UnaryMinus)
if !ok {
t.Fatal("lang.UnaryMinus expected.")
}
if u.Parent() != parent {
t.Error("Parent not set.")
}
if u.Expr.Parent() != u {
t.Error("Parent not set.")
}
if typ := u.Type(); !typ.Equal(c.t) {
t.Errorf("'%s' expected, '%s' found.", c.t, typ)
}
}
}
func testStatements(t *testing.T) {
t.Helper()
gc := lang.NewGlobalContext()
funcs := NewFunc(gc)
parser := fileParser{
parser: &parser{
gc: gc,
funcs: funcs,
},
}
parser.file = lang.NewFile(gc, "dummy", false, true)
parser.funcs = &FileFunc{funcs, parser.file}
b := lang.NewCode(nil)
html := test.HTML("<html></html>")
parser.createFunction(b, []node.Node{html})
if len(b.Statements) != 1 {
t.Fatal("Wrong amount of statements in the block.")
}
h, ok := b.Statements[0].(*lang.FunctionCall)
if !ok {
t.Fatal("That one statement should be function call.")
}
if h.Parent() != b {
t.Error("Parent not set.")
}
if !h.Return.Equal(lang.Void) {
t.Errorf("'void' expected, '%s' found.", h.Return)
}
if h.Name != "fmt.Print" {
t.Errorf("'fmt.Print' expected, '%s' found.", h.Name)
}
if len(h.Args) != 1 {
t.Fatal("'fmt.Print' should have only one argument.")
}
a, ok := h.Args[0].(*lang.Str)
if !ok {
t.Fatal("That one argument should be string.")
}
if a.Parent() != h {
t.Error("Parent not set.")
}
if a.Value != "`<html></html>`" {
t.Errorf("'`<html></html>`' expected, '%s' found", a.Value)
}
if !a.Type().Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", a.Type())
}
}
func testMain(tt *testing.T) {
tt.Helper()
tests := []struct {
source []byte
expected string
}{
// Sandbox
{
source: []byte(`<?php
function fc() {
$a = 1 + 2;
}
`),
expected: `func fc() {
a := 1 + 2
}`,
},
// examples/04.php
{
source: []byte(`<?php
function fc() {
$a = 2 + 3 + 4 * 2;
echo $a * $a;
}
`),
expected: `func fc() {
a := 2 + 3 + 4 * 2
fmt.Print(a * a)
}`,
},
// examples/05.php
{
source: []byte(`<?php
function fc() {
{
{
$a = "0";
// Added to compile it in Go. This var is not used.
echo $a;
}
$a = 1;
echo $a;
}
}
`),
expected: `func fc() {
{
{
a := "0"
fmt.Print(a)
}
a := 1
fmt.Print(a)
}
}`,
},
// examples/06.php
{
source: []byte(`<?php
function fc() {
{
$a = 0;
}
$a++;
echo $a;
}
`),
expected: `func fc() {
var a int
{
a = 0
}
a++
fmt.Print(a)
}`,
},
// examples/07.php
{
source: []byte(`<?php
function fc() {
$a = 0;
{
$a = "1";
echo $a;
}
echo $a;
$a = 2;
echo $a;
}
`),
expected: `func fc() {
var a interface{}
a = 0
{
a = "1"
fmt.Print(a.(string))
}
fmt.Print(a.(string))
a = 2
fmt.Print(a.(int))
}`,
},
}
for _, t := range tests {
parser := parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
}
out := parser.Run(parsePHP(t.source), "dummy", false)
for _, f := range out.Files {
if f.Name == "fc" {
main := f.String()
compare(tt, t.expected, main)
}
}
}
}
func parsePHP(source []byte) *node.Root {
parser := php7.NewParser(source, "")
parser.Parse()
return parser.GetRootNode().(*node.Root)
}
func compare(t *testing.T, ref, out string) | {
r := strings.Split(ref, "\n")
o := strings.Split(out, "\n")
i, j := 0, 0
for i < len(r) && j < len(o) {
c := true
s1 := strings.TrimLeft(r[i], "\t")
if s1 == "" {
i++
c = false
}
s2 := strings.TrimLeft(o[j], "\t")
if s2 == "" {
j++
c = false
}
if !c {
continue
}
if s1 != s2 {
t.Errorf("Line %d:\nExpected:\n%s\nFound:\n%s\n", i, s1, s2)
}
i++
j++
}
for i < len(r) {
s := strings.TrimLeft(r[i], "\t")
if s != "" {
t.Errorf("Whole string was not parsed")
return
}
i++
}
for j < len(o) {
s := strings.TrimLeft(o[j], "\t")
if s != "" {
t.Errorf("Whole string was not parsed")
return
}
j++
}
} | identifier_body | |
repo_polling.go | package polling
import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"math/rand"
"regexp"
"strings"
"sync"
"time"
"github.com/ovh/cds/engine/api/application"
"github.com/ovh/cds/engine/api/cache"
"github.com/ovh/cds/engine/api/database"
"github.com/ovh/cds/engine/api/pipeline"
"github.com/ovh/cds/engine/api/poller"
"github.com/ovh/cds/engine/api/project"
"github.com/ovh/cds/engine/api/repositoriesmanager"
"github.com/ovh/cds/engine/log"
"github.com/ovh/cds/sdk"
)
//RunningPollers is the map of all runningPollers
var RunningPollers = struct {
Workers map[string]*Worker
mutex *sync.RWMutex
}{
Workers: map[string]*Worker{},
mutex: &sync.RWMutex{},
}
//Worker represent a goroutine for each project responsible of repo polling
type Worker struct {
ProjectKey string `json:"project"`
}
//NewWorker Initializes a new worker struct
func NewWorker(key string) *Worker {
return &Worker{key}
}
//WorkerExecution represents a worker execution for a poller instance
type WorkerExecution struct {
ID int64 `json:"id"`
Application string `json:"application"`
Pipeline string `json:"pipeline"`
Execution time.Time `json:"execution"`
Status string `json:"status"`
Events []sdk.VCSPushEvent `json:"events,omitempty"`
}
//Initialize all existing pollers (one poller per project)
func Initialize() {
for {
db := database.DB()
if db == nil {
time.Sleep(30 * time.Second)
continue
}
proj, err := project.LoadAllProjects(db)
if err != nil {
log.Critical("Polling> Unable to load projects: %s", err)
time.Sleep(30 * time.Second)
continue
}
for _, p := range proj {
if RunningPollers.Workers[p.Key] == nil {
w := NewWorker(p.Key)
RunningPollers.mutex.Lock()
RunningPollers.Workers[p.Key] = w
RunningPollers.mutex.Unlock()
var pollerhasStop = func() {
RunningPollers.mutex.Lock()
delete(RunningPollers.Workers, w.ProjectKey)
RunningPollers.mutex.Unlock()
}
ok, quit, err := w.Poll()
if err != nil {
log.Warning("Polling> Unable to lauch worker %s: %s", p.Key, err)
continue
}
if !ok {
pollerhasStop()
}
go func() {
<-quit
pollerhasStop()
}()
}
}
time.Sleep(1 * time.Minute)
}
}
//Poll initiate a poller
func (w *Worker) Poll() (bool, chan bool, error) {
//Check database connection
db := database.DB()
if db == nil {
return false, nil, errors.New("Database is unavailable")
}
pollers, err := poller.LoadEnabledPollers(db) |
var quit chan bool
var atLeastOne bool
for i := range pollers {
p := &pollers[i]
b, _ := repositoriesmanager.CheckApplicationIsAttached(db, p.Name, w.ProjectKey, p.Application.Name)
if !b || p.Application.RepositoriesManager == nil || p.Application.RepositoryFullname == "" {
continue
}
if !p.Application.RepositoriesManager.PollingSupported {
log.Info("Polling is not supported by %s\n", p.Name)
continue
}
log.Info("Starting poller on %s %s %s", p.Name, p.Application.Name, p.Pipeline.Name)
atLeastOne = true
quit = make(chan bool)
go w.poll(p.Application.RepositoriesManager, p.Application.ID, p.Pipeline.ID, quit)
time.Sleep(2 * time.Minute)
}
if !atLeastOne {
return false, nil, nil
}
return true, quit, nil
}
func (w *Worker) poll(rm *sdk.RepositoriesManager, appID, pipID int64, quit chan bool) {
delay := time.Duration(60.0)
r := rand.New(rand.NewSource(time.Now().UnixNano()))
var mayIWork string
log.Debug("Polling> Start on appID=%d, pipID=%d\n", appID, pipID)
for RunningPollers.Workers[w.ProjectKey] != nil {
//Check database connection
db := database.DB()
if db == nil {
time.Sleep(60 * time.Second)
continue
}
//Loading poller from database
p, err := poller.LoadPollerByApplicationAndPipeline(db, appID, pipID)
if err != nil {
log.Warning("Polling> Unable to load poller appID=%d pipID=%d: %s", appID, pipID, err)
break
}
//Check if poller is still enabled
if !p.Enabled {
log.Warning("Polling> Poller %s is disabled %s", p.Application.RepositoryFullname, err)
break
}
k := cache.Key("reposmanager", "polling", w.ProjectKey, p.Application.Name, p.Pipeline.Name, p.Name)
//Get fro mcache to know if someone is polling the repo
cache.Get(k, &mayIWork)
//If nobody is polling it
if mayIWork == "" {
log.Info("Polling> Polling repository %s for %s/%s\n", p.Application.RepositoryFullname, w.ProjectKey, p.Application.Name)
cache.SetWithTTL(k, "true", 300)
e := &WorkerExecution{
Status: "Running",
Execution: time.Now(),
}
if err := insertExecution(db, &p.Application, &p.Pipeline, e); err != nil {
log.Warning("Polling> Unable to save execution : %s", err)
}
//get the client for the repositories manager
client, err := repositoriesmanager.AuthorizedClient(db, w.ProjectKey, rm.Name)
if err != nil {
log.Warning("Polling> Unable to get client for %s %s : %s\n", w.ProjectKey, rm.Name, err)
break
}
var events []sdk.VCSPushEvent
events, delay, err = client.PushEvents(p.Application.RepositoryFullname, p.DateCreation)
s, err := triggerPipelines(db, w.ProjectKey, rm, p, events)
if err != nil {
log.Warning("Polling> Unable to trigger pipeline %s for repository %s\n", p.Pipeline.Name, p.Application.RepositoryFullname)
break
}
e.Status = fmt.Sprintf(s)
e.Events = events
if err := updateExecution(db, e); err != nil {
log.Warning("Polling> Unable to update execution : %s", err)
}
//Wait for the delay
time.Sleep(delay * time.Second)
cache.Delete(k)
}
//Wait for sometime between 0 and 10 seconds
time.Sleep(time.Duration(r.Float64()*10) * time.Second)
}
log.Debug("Polling> End\n")
quit <- true
}
func triggerPipelines(db *sql.DB, projectKey string, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, events []sdk.VCSPushEvent) (string, error) {
status := ""
for _, event := range events {
projectData, err := project.LoadProjectByPipelineID(db, poller.Pipeline.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project for pipeline %s: %s\n", poller.Pipeline.Name, err)
return "Error", err
}
projectsVar, err := project.GetAllVariableInProject(db, projectData.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project variable: %s\n", err)
return "Error", err
}
projectData.Variable = projectsVar
//begin a tx
tx, err := db.Begin()
if err != nil {
return "Error", err
}
ok, err := TriggerPipeline(tx, rm, poller, event, projectData)
if err != nil {
log.Warning("Polling.triggerPipelines> cannot trigger pipeline %d: %s\n", poller.Pipeline.ID, err)
tx.Rollback()
return "Error", err
}
// commit the tx
if err := tx.Commit(); err != nil {
log.Critical("Polling.triggerPipelines> Cannot commit tx; %s\n", err)
return "Error", err
}
if ok {
log.Debug("Polling.triggerPipelines> Triggered %s/%s/%s", projectKey, poller.Application.RepositoryFullname, event.Branch)
status = fmt.Sprintf("%s Pipeline %s triggered on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
} else {
log.Info("Polling.triggerPipelines> Did not trigger %s/%s/%s\n", projectKey, poller.Application.RepositoryFullname, event.Branch.ID)
status = fmt.Sprintf("%s Pipeline %s skipped on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
}
}
return status, nil
}
// TriggerPipeline linked to received hook
func TriggerPipeline(tx *sql.Tx, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, e sdk.VCSPushEvent, projectData *sdk.Project) (bool, error) {
client, err := repositoriesmanager.AuthorizedClient(tx, projectData.Key, rm.Name)
if err != nil {
return false, err
}
// Create pipeline args
var args []sdk.Parameter
args = append(args, sdk.Parameter{
Name: "git.branch",
Value: e.Branch.ID,
})
args = append(args, sdk.Parameter{
Name: "git.hash",
Value: e.Commit.Hash,
})
args = append(args, sdk.Parameter{
Name: "git.author",
Value: e.Commit.Author.Name,
})
args = append(args, sdk.Parameter{
Name: "git.repository",
Value: poller.Application.RepositoryFullname,
})
args = append(args, sdk.Parameter{
Name: "git.project",
Value: strings.Split(poller.Application.RepositoryFullname, "/")[0],
})
repo, _ := client.RepoByFullname(poller.Application.RepositoryFullname)
if repo.SSHCloneURL != "" {
args = append(args, sdk.Parameter{
Name: "git.url",
Value: repo.SSHCloneURL,
})
}
// Load pipeline Argument
parameters, err := pipeline.GetAllParametersInPipeline(tx, poller.Pipeline.ID)
if err != nil {
return false, err
}
poller.Pipeline.Parameter = parameters
applicationPipelineArgs, err := application.GetAllPipelineParam(tx, poller.Application.ID, poller.Pipeline.ID)
if err != nil {
return false, err
}
trigger := sdk.PipelineBuildTrigger{
ManualTrigger: false,
VCSChangesBranch: e.Branch.ID,
VCSChangesHash: e.Commit.Hash,
VCSChangesAuthor: e.Commit.Author.DisplayName,
}
// Get commit message to check if we have to skip the build
match, err := regexp.Match(".*\\[ci skip\\].*|.*\\[cd skip\\].*", []byte(e.Commit.Message))
if err != nil {
log.Warning("polling> Cannot check %s/%s for commit %s by %s : %s (%s)\n", projectData.Key, poller.Application.Name, trigger.VCSChangesHash, trigger.VCSChangesAuthor, e.Commit.Message, err)
}
if match {
log.Debug("polling> Skipping build of %s/%s for commit %s by %s\n", projectData.Key, poller.Application.Name, trigger.VCSChangesHash, trigger.VCSChangesAuthor)
return false, nil
}
if b, err := pipeline.BuildExists(tx, poller.Application.ID, poller.Pipeline.ID, sdk.DefaultEnv.ID, &trigger); err != nil || b {
if err != nil {
log.Warning("Polling> Error checking existing build : %s", err)
}
return false, nil
}
_, err = pipeline.InsertPipelineBuild(tx, projectData, &poller.Pipeline, &poller.Application, applicationPipelineArgs, args, &sdk.DefaultEnv, 0, trigger)
if err != nil {
return false, err
}
return true, nil
}
func insertExecution(db database.QueryExecuter, app *sdk.Application, pip *sdk.Pipeline, e *WorkerExecution) error {
query := `
insert into poller_execution (application_id, pipeline_id, execution_date, status, data)
values($1, $2, $3, $4, $5)
returning id
`
data, _ := json.Marshal(e.Events)
if err := db.QueryRow(query, app.ID, pip.ID, e.Execution, e.Status, data).Scan(&e.ID); err != nil {
return err
}
return nil
}
func updateExecution(db database.QueryExecuter, e *WorkerExecution) error {
query := `
update poller_execution set status = $2, data = $3 where id = $1
`
data, _ := json.Marshal(e.Events)
if _, err := db.Exec(query, e.ID, e.Status, data); err != nil {
return err
}
return nil
}
func deleteExecution(db database.QueryExecuter, e *WorkerExecution) error {
query := `
delete from poller_execution where id = $1
`
if _, err := db.Exec(query, e.ID); err != nil {
return err
}
return nil
}
//ExecutionCleaner is globale goroutine to remove all old polling traces
func ExecutionCleaner() {
for {
db := database.DB()
if db == nil {
time.Sleep(30 * time.Minute)
continue
}
execs, _ := LoadExecutions(db)
for i := range execs {
tenDaysAGo := time.Now().Add(-10 * 24 * time.Hour)
if execs[i].Execution.Before(tenDaysAGo) {
deleteExecution(db, &execs[i])
}
}
time.Sleep(1 * time.Hour)
}
}
//LoadExecutions returns all executions in database
func LoadExecutions(db database.QueryExecuter) ([]WorkerExecution, error) {
query := `
select poller_execution.id, application.name, pipeline.name, poller_execution.execution_date, poller_execution.status, poller_execution.data
from poller_execution, application, pipeline
where poller_execution.application_id = application.id
and poller_execution.pipeline_id = pipeline.id
order by poller_execution.execution_date desc
`
rows, err := db.Query(query)
if err != nil {
return nil, err
}
defer rows.Close()
var es []WorkerExecution
for rows.Next() {
var e WorkerExecution
var j sql.NullString
if err := rows.Scan(&e.ID, &e.Application, &e.Pipeline, &e.Execution, &e.Status, &j); err != nil {
return nil, err
}
if j.Valid {
b := []byte(j.String)
json.Unmarshal(b, &e.Events)
}
es = append(es, e)
}
return es, nil
} | if err != nil {
return false, nil, err
} | random_line_split |
repo_polling.go | package polling
import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"math/rand"
"regexp"
"strings"
"sync"
"time"
"github.com/ovh/cds/engine/api/application"
"github.com/ovh/cds/engine/api/cache"
"github.com/ovh/cds/engine/api/database"
"github.com/ovh/cds/engine/api/pipeline"
"github.com/ovh/cds/engine/api/poller"
"github.com/ovh/cds/engine/api/project"
"github.com/ovh/cds/engine/api/repositoriesmanager"
"github.com/ovh/cds/engine/log"
"github.com/ovh/cds/sdk"
)
//RunningPollers is the map of all runningPollers
var RunningPollers = struct {
Workers map[string]*Worker
mutex *sync.RWMutex
}{
Workers: map[string]*Worker{},
mutex: &sync.RWMutex{},
}
//Worker represent a goroutine for each project responsible of repo polling
type Worker struct {
ProjectKey string `json:"project"`
}
//NewWorker Initializes a new worker struct
func NewWorker(key string) *Worker {
return &Worker{key}
}
//WorkerExecution represents a worker execution for a poller instance
type WorkerExecution struct {
ID int64 `json:"id"`
Application string `json:"application"`
Pipeline string `json:"pipeline"`
Execution time.Time `json:"execution"`
Status string `json:"status"`
Events []sdk.VCSPushEvent `json:"events,omitempty"`
}
//Initialize all existing pollers (one poller per project)
func Initialize() {
for {
db := database.DB()
if db == nil {
time.Sleep(30 * time.Second)
continue
}
proj, err := project.LoadAllProjects(db)
if err != nil {
log.Critical("Polling> Unable to load projects: %s", err)
time.Sleep(30 * time.Second)
continue
}
for _, p := range proj {
if RunningPollers.Workers[p.Key] == nil {
w := NewWorker(p.Key)
RunningPollers.mutex.Lock()
RunningPollers.Workers[p.Key] = w
RunningPollers.mutex.Unlock()
var pollerhasStop = func() {
RunningPollers.mutex.Lock()
delete(RunningPollers.Workers, w.ProjectKey)
RunningPollers.mutex.Unlock()
}
ok, quit, err := w.Poll()
if err != nil {
log.Warning("Polling> Unable to lauch worker %s: %s", p.Key, err)
continue
}
if !ok {
pollerhasStop()
}
go func() {
<-quit
pollerhasStop()
}()
}
}
time.Sleep(1 * time.Minute)
}
}
//Poll initiate a poller
func (w *Worker) Poll() (bool, chan bool, error) {
//Check database connection
db := database.DB()
if db == nil {
return false, nil, errors.New("Database is unavailable")
}
pollers, err := poller.LoadEnabledPollers(db)
if err != nil {
return false, nil, err
}
var quit chan bool
var atLeastOne bool
for i := range pollers {
p := &pollers[i]
b, _ := repositoriesmanager.CheckApplicationIsAttached(db, p.Name, w.ProjectKey, p.Application.Name)
if !b || p.Application.RepositoriesManager == nil || p.Application.RepositoryFullname == "" {
continue
}
if !p.Application.RepositoriesManager.PollingSupported {
log.Info("Polling is not supported by %s\n", p.Name)
continue
}
log.Info("Starting poller on %s %s %s", p.Name, p.Application.Name, p.Pipeline.Name)
atLeastOne = true
quit = make(chan bool)
go w.poll(p.Application.RepositoriesManager, p.Application.ID, p.Pipeline.ID, quit)
time.Sleep(2 * time.Minute)
}
if !atLeastOne {
return false, nil, nil
}
return true, quit, nil
}
func (w *Worker) poll(rm *sdk.RepositoriesManager, appID, pipID int64, quit chan bool) {
delay := time.Duration(60.0)
r := rand.New(rand.NewSource(time.Now().UnixNano()))
var mayIWork string
log.Debug("Polling> Start on appID=%d, pipID=%d\n", appID, pipID)
for RunningPollers.Workers[w.ProjectKey] != nil {
//Check database connection
db := database.DB()
if db == nil {
time.Sleep(60 * time.Second)
continue
}
//Loading poller from database
p, err := poller.LoadPollerByApplicationAndPipeline(db, appID, pipID)
if err != nil {
log.Warning("Polling> Unable to load poller appID=%d pipID=%d: %s", appID, pipID, err)
break
}
//Check if poller is still enabled
if !p.Enabled {
log.Warning("Polling> Poller %s is disabled %s", p.Application.RepositoryFullname, err)
break
}
k := cache.Key("reposmanager", "polling", w.ProjectKey, p.Application.Name, p.Pipeline.Name, p.Name)
//Get fro mcache to know if someone is polling the repo
cache.Get(k, &mayIWork)
//If nobody is polling it
if mayIWork == "" {
log.Info("Polling> Polling repository %s for %s/%s\n", p.Application.RepositoryFullname, w.ProjectKey, p.Application.Name)
cache.SetWithTTL(k, "true", 300)
e := &WorkerExecution{
Status: "Running",
Execution: time.Now(),
}
if err := insertExecution(db, &p.Application, &p.Pipeline, e); err != nil {
log.Warning("Polling> Unable to save execution : %s", err)
}
//get the client for the repositories manager
client, err := repositoriesmanager.AuthorizedClient(db, w.ProjectKey, rm.Name)
if err != nil {
log.Warning("Polling> Unable to get client for %s %s : %s\n", w.ProjectKey, rm.Name, err)
break
}
var events []sdk.VCSPushEvent
events, delay, err = client.PushEvents(p.Application.RepositoryFullname, p.DateCreation)
s, err := triggerPipelines(db, w.ProjectKey, rm, p, events)
if err != nil {
log.Warning("Polling> Unable to trigger pipeline %s for repository %s\n", p.Pipeline.Name, p.Application.RepositoryFullname)
break
}
e.Status = fmt.Sprintf(s)
e.Events = events
if err := updateExecution(db, e); err != nil {
log.Warning("Polling> Unable to update execution : %s", err)
}
//Wait for the delay
time.Sleep(delay * time.Second)
cache.Delete(k)
}
//Wait for sometime between 0 and 10 seconds
time.Sleep(time.Duration(r.Float64()*10) * time.Second)
}
log.Debug("Polling> End\n")
quit <- true
}
func triggerPipelines(db *sql.DB, projectKey string, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, events []sdk.VCSPushEvent) (string, error) {
status := ""
for _, event := range events {
projectData, err := project.LoadProjectByPipelineID(db, poller.Pipeline.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project for pipeline %s: %s\n", poller.Pipeline.Name, err)
return "Error", err
}
projectsVar, err := project.GetAllVariableInProject(db, projectData.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project variable: %s\n", err)
return "Error", err
}
projectData.Variable = projectsVar
//begin a tx
tx, err := db.Begin()
if err != nil {
return "Error", err
}
ok, err := TriggerPipeline(tx, rm, poller, event, projectData)
if err != nil {
log.Warning("Polling.triggerPipelines> cannot trigger pipeline %d: %s\n", poller.Pipeline.ID, err)
tx.Rollback()
return "Error", err
}
// commit the tx
if err := tx.Commit(); err != nil {
log.Critical("Polling.triggerPipelines> Cannot commit tx; %s\n", err)
return "Error", err
}
if ok {
log.Debug("Polling.triggerPipelines> Triggered %s/%s/%s", projectKey, poller.Application.RepositoryFullname, event.Branch)
status = fmt.Sprintf("%s Pipeline %s triggered on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
} else {
log.Info("Polling.triggerPipelines> Did not trigger %s/%s/%s\n", projectKey, poller.Application.RepositoryFullname, event.Branch.ID)
status = fmt.Sprintf("%s Pipeline %s skipped on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
}
}
return status, nil
}
// TriggerPipeline linked to received hook
func TriggerPipeline(tx *sql.Tx, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, e sdk.VCSPushEvent, projectData *sdk.Project) (bool, error) {
client, err := repositoriesmanager.AuthorizedClient(tx, projectData.Key, rm.Name)
if err != nil {
return false, err
}
// Create pipeline args
var args []sdk.Parameter
args = append(args, sdk.Parameter{
Name: "git.branch",
Value: e.Branch.ID,
})
args = append(args, sdk.Parameter{
Name: "git.hash",
Value: e.Commit.Hash,
})
args = append(args, sdk.Parameter{
Name: "git.author",
Value: e.Commit.Author.Name,
})
args = append(args, sdk.Parameter{
Name: "git.repository",
Value: poller.Application.RepositoryFullname,
})
args = append(args, sdk.Parameter{
Name: "git.project",
Value: strings.Split(poller.Application.RepositoryFullname, "/")[0],
})
repo, _ := client.RepoByFullname(poller.Application.RepositoryFullname)
if repo.SSHCloneURL != "" {
args = append(args, sdk.Parameter{
Name: "git.url",
Value: repo.SSHCloneURL,
})
}
// Load pipeline Argument
parameters, err := pipeline.GetAllParametersInPipeline(tx, poller.Pipeline.ID)
if err != nil {
return false, err
}
poller.Pipeline.Parameter = parameters
applicationPipelineArgs, err := application.GetAllPipelineParam(tx, poller.Application.ID, poller.Pipeline.ID)
if err != nil {
return false, err
}
trigger := sdk.PipelineBuildTrigger{
ManualTrigger: false,
VCSChangesBranch: e.Branch.ID,
VCSChangesHash: e.Commit.Hash,
VCSChangesAuthor: e.Commit.Author.DisplayName,
}
// Get commit message to check if we have to skip the build
match, err := regexp.Match(".*\\[ci skip\\].*|.*\\[cd skip\\].*", []byte(e.Commit.Message))
if err != nil {
log.Warning("polling> Cannot check %s/%s for commit %s by %s : %s (%s)\n", projectData.Key, poller.Application.Name, trigger.VCSChangesHash, trigger.VCSChangesAuthor, e.Commit.Message, err)
}
if match {
log.Debug("polling> Skipping build of %s/%s for commit %s by %s\n", projectData.Key, poller.Application.Name, trigger.VCSChangesHash, trigger.VCSChangesAuthor)
return false, nil
}
if b, err := pipeline.BuildExists(tx, poller.Application.ID, poller.Pipeline.ID, sdk.DefaultEnv.ID, &trigger); err != nil || b {
if err != nil {
log.Warning("Polling> Error checking existing build : %s", err)
}
return false, nil
}
_, err = pipeline.InsertPipelineBuild(tx, projectData, &poller.Pipeline, &poller.Application, applicationPipelineArgs, args, &sdk.DefaultEnv, 0, trigger)
if err != nil {
return false, err
}
return true, nil
}
func insertExecution(db database.QueryExecuter, app *sdk.Application, pip *sdk.Pipeline, e *WorkerExecution) error {
query := `
insert into poller_execution (application_id, pipeline_id, execution_date, status, data)
values($1, $2, $3, $4, $5)
returning id
`
data, _ := json.Marshal(e.Events)
if err := db.QueryRow(query, app.ID, pip.ID, e.Execution, e.Status, data).Scan(&e.ID); err != nil {
return err
}
return nil
}
func updateExecution(db database.QueryExecuter, e *WorkerExecution) error {
query := `
update poller_execution set status = $2, data = $3 where id = $1
`
data, _ := json.Marshal(e.Events)
if _, err := db.Exec(query, e.ID, e.Status, data); err != nil {
return err
}
return nil
}
func deleteExecution(db database.QueryExecuter, e *WorkerExecution) error {
query := `
delete from poller_execution where id = $1
`
if _, err := db.Exec(query, e.ID); err != nil {
return err
}
return nil
}
//ExecutionCleaner is globale goroutine to remove all old polling traces
func ExecutionCleaner() {
for |
}
//LoadExecutions returns all executions in database
func LoadExecutions(db database.QueryExecuter) ([]WorkerExecution, error) {
query := `
select poller_execution.id, application.name, pipeline.name, poller_execution.execution_date, poller_execution.status, poller_execution.data
from poller_execution, application, pipeline
where poller_execution.application_id = application.id
and poller_execution.pipeline_id = pipeline.id
order by poller_execution.execution_date desc
`
rows, err := db.Query(query)
if err != nil {
return nil, err
}
defer rows.Close()
var es []WorkerExecution
for rows.Next() {
var e WorkerExecution
var j sql.NullString
if err := rows.Scan(&e.ID, &e.Application, &e.Pipeline, &e.Execution, &e.Status, &j); err != nil {
return nil, err
}
if j.Valid {
b := []byte(j.String)
json.Unmarshal(b, &e.Events)
}
es = append(es, e)
}
return es, nil
}
| {
db := database.DB()
if db == nil {
time.Sleep(30 * time.Minute)
continue
}
execs, _ := LoadExecutions(db)
for i := range execs {
tenDaysAGo := time.Now().Add(-10 * 24 * time.Hour)
if execs[i].Execution.Before(tenDaysAGo) {
deleteExecution(db, &execs[i])
}
}
time.Sleep(1 * time.Hour)
} | conditional_block |
repo_polling.go | package polling
import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"math/rand"
"regexp"
"strings"
"sync"
"time"
"github.com/ovh/cds/engine/api/application"
"github.com/ovh/cds/engine/api/cache"
"github.com/ovh/cds/engine/api/database"
"github.com/ovh/cds/engine/api/pipeline"
"github.com/ovh/cds/engine/api/poller"
"github.com/ovh/cds/engine/api/project"
"github.com/ovh/cds/engine/api/repositoriesmanager"
"github.com/ovh/cds/engine/log"
"github.com/ovh/cds/sdk"
)
//RunningPollers is the map of all runningPollers
var RunningPollers = struct {
Workers map[string]*Worker
mutex *sync.RWMutex
}{
Workers: map[string]*Worker{},
mutex: &sync.RWMutex{},
}
//Worker represent a goroutine for each project responsible of repo polling
type Worker struct {
ProjectKey string `json:"project"`
}
//NewWorker Initializes a new worker struct
func NewWorker(key string) *Worker {
return &Worker{key}
}
//WorkerExecution represents a worker execution for a poller instance
type WorkerExecution struct {
ID int64 `json:"id"`
Application string `json:"application"`
Pipeline string `json:"pipeline"`
Execution time.Time `json:"execution"`
Status string `json:"status"`
Events []sdk.VCSPushEvent `json:"events,omitempty"`
}
//Initialize all existing pollers (one poller per project)
func Initialize() {
for {
db := database.DB()
if db == nil {
time.Sleep(30 * time.Second)
continue
}
proj, err := project.LoadAllProjects(db)
if err != nil {
log.Critical("Polling> Unable to load projects: %s", err)
time.Sleep(30 * time.Second)
continue
}
for _, p := range proj {
if RunningPollers.Workers[p.Key] == nil {
w := NewWorker(p.Key)
RunningPollers.mutex.Lock()
RunningPollers.Workers[p.Key] = w
RunningPollers.mutex.Unlock()
var pollerhasStop = func() {
RunningPollers.mutex.Lock()
delete(RunningPollers.Workers, w.ProjectKey)
RunningPollers.mutex.Unlock()
}
ok, quit, err := w.Poll()
if err != nil {
log.Warning("Polling> Unable to lauch worker %s: %s", p.Key, err)
continue
}
if !ok {
pollerhasStop()
}
go func() {
<-quit
pollerhasStop()
}()
}
}
time.Sleep(1 * time.Minute)
}
}
//Poll initiate a poller
func (w *Worker) Poll() (bool, chan bool, error) {
//Check database connection
db := database.DB()
if db == nil {
return false, nil, errors.New("Database is unavailable")
}
pollers, err := poller.LoadEnabledPollers(db)
if err != nil {
return false, nil, err
}
var quit chan bool
var atLeastOne bool
for i := range pollers {
p := &pollers[i]
b, _ := repositoriesmanager.CheckApplicationIsAttached(db, p.Name, w.ProjectKey, p.Application.Name)
if !b || p.Application.RepositoriesManager == nil || p.Application.RepositoryFullname == "" {
continue
}
if !p.Application.RepositoriesManager.PollingSupported {
log.Info("Polling is not supported by %s\n", p.Name)
continue
}
log.Info("Starting poller on %s %s %s", p.Name, p.Application.Name, p.Pipeline.Name)
atLeastOne = true
quit = make(chan bool)
go w.poll(p.Application.RepositoriesManager, p.Application.ID, p.Pipeline.ID, quit)
time.Sleep(2 * time.Minute)
}
if !atLeastOne {
return false, nil, nil
}
return true, quit, nil
}
func (w *Worker) poll(rm *sdk.RepositoriesManager, appID, pipID int64, quit chan bool) {
delay := time.Duration(60.0)
r := rand.New(rand.NewSource(time.Now().UnixNano()))
var mayIWork string
log.Debug("Polling> Start on appID=%d, pipID=%d\n", appID, pipID)
for RunningPollers.Workers[w.ProjectKey] != nil {
//Check database connection
db := database.DB()
if db == nil {
time.Sleep(60 * time.Second)
continue
}
//Loading poller from database
p, err := poller.LoadPollerByApplicationAndPipeline(db, appID, pipID)
if err != nil {
log.Warning("Polling> Unable to load poller appID=%d pipID=%d: %s", appID, pipID, err)
break
}
//Check if poller is still enabled
if !p.Enabled {
log.Warning("Polling> Poller %s is disabled %s", p.Application.RepositoryFullname, err)
break
}
k := cache.Key("reposmanager", "polling", w.ProjectKey, p.Application.Name, p.Pipeline.Name, p.Name)
//Get fro mcache to know if someone is polling the repo
cache.Get(k, &mayIWork)
//If nobody is polling it
if mayIWork == "" {
log.Info("Polling> Polling repository %s for %s/%s\n", p.Application.RepositoryFullname, w.ProjectKey, p.Application.Name)
cache.SetWithTTL(k, "true", 300)
e := &WorkerExecution{
Status: "Running",
Execution: time.Now(),
}
if err := insertExecution(db, &p.Application, &p.Pipeline, e); err != nil {
log.Warning("Polling> Unable to save execution : %s", err)
}
//get the client for the repositories manager
client, err := repositoriesmanager.AuthorizedClient(db, w.ProjectKey, rm.Name)
if err != nil {
log.Warning("Polling> Unable to get client for %s %s : %s\n", w.ProjectKey, rm.Name, err)
break
}
var events []sdk.VCSPushEvent
events, delay, err = client.PushEvents(p.Application.RepositoryFullname, p.DateCreation)
s, err := triggerPipelines(db, w.ProjectKey, rm, p, events)
if err != nil {
log.Warning("Polling> Unable to trigger pipeline %s for repository %s\n", p.Pipeline.Name, p.Application.RepositoryFullname)
break
}
e.Status = fmt.Sprintf(s)
e.Events = events
if err := updateExecution(db, e); err != nil {
log.Warning("Polling> Unable to update execution : %s", err)
}
//Wait for the delay
time.Sleep(delay * time.Second)
cache.Delete(k)
}
//Wait for sometime between 0 and 10 seconds
time.Sleep(time.Duration(r.Float64()*10) * time.Second)
}
log.Debug("Polling> End\n")
quit <- true
}
func triggerPipelines(db *sql.DB, projectKey string, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, events []sdk.VCSPushEvent) (string, error) {
status := ""
for _, event := range events {
projectData, err := project.LoadProjectByPipelineID(db, poller.Pipeline.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project for pipeline %s: %s\n", poller.Pipeline.Name, err)
return "Error", err
}
projectsVar, err := project.GetAllVariableInProject(db, projectData.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project variable: %s\n", err)
return "Error", err
}
projectData.Variable = projectsVar
//begin a tx
tx, err := db.Begin()
if err != nil {
return "Error", err
}
ok, err := TriggerPipeline(tx, rm, poller, event, projectData)
if err != nil {
log.Warning("Polling.triggerPipelines> cannot trigger pipeline %d: %s\n", poller.Pipeline.ID, err)
tx.Rollback()
return "Error", err
}
// commit the tx
if err := tx.Commit(); err != nil {
log.Critical("Polling.triggerPipelines> Cannot commit tx; %s\n", err)
return "Error", err
}
if ok {
log.Debug("Polling.triggerPipelines> Triggered %s/%s/%s", projectKey, poller.Application.RepositoryFullname, event.Branch)
status = fmt.Sprintf("%s Pipeline %s triggered on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
} else {
log.Info("Polling.triggerPipelines> Did not trigger %s/%s/%s\n", projectKey, poller.Application.RepositoryFullname, event.Branch.ID)
status = fmt.Sprintf("%s Pipeline %s skipped on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
}
}
return status, nil
}
// TriggerPipeline linked to received hook
func TriggerPipeline(tx *sql.Tx, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, e sdk.VCSPushEvent, projectData *sdk.Project) (bool, error) {
client, err := repositoriesmanager.AuthorizedClient(tx, projectData.Key, rm.Name)
if err != nil {
return false, err
}
// Create pipeline args
var args []sdk.Parameter
args = append(args, sdk.Parameter{
Name: "git.branch",
Value: e.Branch.ID,
})
args = append(args, sdk.Parameter{
Name: "git.hash",
Value: e.Commit.Hash,
})
args = append(args, sdk.Parameter{
Name: "git.author",
Value: e.Commit.Author.Name,
})
args = append(args, sdk.Parameter{
Name: "git.repository",
Value: poller.Application.RepositoryFullname,
})
args = append(args, sdk.Parameter{
Name: "git.project",
Value: strings.Split(poller.Application.RepositoryFullname, "/")[0],
})
repo, _ := client.RepoByFullname(poller.Application.RepositoryFullname)
if repo.SSHCloneURL != "" {
args = append(args, sdk.Parameter{
Name: "git.url",
Value: repo.SSHCloneURL,
})
}
// Load pipeline Argument
parameters, err := pipeline.GetAllParametersInPipeline(tx, poller.Pipeline.ID)
if err != nil {
return false, err
}
poller.Pipeline.Parameter = parameters
applicationPipelineArgs, err := application.GetAllPipelineParam(tx, poller.Application.ID, poller.Pipeline.ID)
if err != nil {
return false, err
}
trigger := sdk.PipelineBuildTrigger{
ManualTrigger: false,
VCSChangesBranch: e.Branch.ID,
VCSChangesHash: e.Commit.Hash,
VCSChangesAuthor: e.Commit.Author.DisplayName,
}
// Get commit message to check if we have to skip the build
match, err := regexp.Match(".*\\[ci skip\\].*|.*\\[cd skip\\].*", []byte(e.Commit.Message))
if err != nil {
log.Warning("polling> Cannot check %s/%s for commit %s by %s : %s (%s)\n", projectData.Key, poller.Application.Name, trigger.VCSChangesHash, trigger.VCSChangesAuthor, e.Commit.Message, err)
}
if match {
log.Debug("polling> Skipping build of %s/%s for commit %s by %s\n", projectData.Key, poller.Application.Name, trigger.VCSChangesHash, trigger.VCSChangesAuthor)
return false, nil
}
if b, err := pipeline.BuildExists(tx, poller.Application.ID, poller.Pipeline.ID, sdk.DefaultEnv.ID, &trigger); err != nil || b {
if err != nil {
log.Warning("Polling> Error checking existing build : %s", err)
}
return false, nil
}
_, err = pipeline.InsertPipelineBuild(tx, projectData, &poller.Pipeline, &poller.Application, applicationPipelineArgs, args, &sdk.DefaultEnv, 0, trigger)
if err != nil {
return false, err
}
return true, nil
}
func insertExecution(db database.QueryExecuter, app *sdk.Application, pip *sdk.Pipeline, e *WorkerExecution) error {
query := `
insert into poller_execution (application_id, pipeline_id, execution_date, status, data)
values($1, $2, $3, $4, $5)
returning id
`
data, _ := json.Marshal(e.Events)
if err := db.QueryRow(query, app.ID, pip.ID, e.Execution, e.Status, data).Scan(&e.ID); err != nil {
return err
}
return nil
}
func updateExecution(db database.QueryExecuter, e *WorkerExecution) error |
func deleteExecution(db database.QueryExecuter, e *WorkerExecution) error {
query := `
delete from poller_execution where id = $1
`
if _, err := db.Exec(query, e.ID); err != nil {
return err
}
return nil
}
//ExecutionCleaner is globale goroutine to remove all old polling traces
func ExecutionCleaner() {
for {
db := database.DB()
if db == nil {
time.Sleep(30 * time.Minute)
continue
}
execs, _ := LoadExecutions(db)
for i := range execs {
tenDaysAGo := time.Now().Add(-10 * 24 * time.Hour)
if execs[i].Execution.Before(tenDaysAGo) {
deleteExecution(db, &execs[i])
}
}
time.Sleep(1 * time.Hour)
}
}
//LoadExecutions returns all executions in database
func LoadExecutions(db database.QueryExecuter) ([]WorkerExecution, error) {
query := `
select poller_execution.id, application.name, pipeline.name, poller_execution.execution_date, poller_execution.status, poller_execution.data
from poller_execution, application, pipeline
where poller_execution.application_id = application.id
and poller_execution.pipeline_id = pipeline.id
order by poller_execution.execution_date desc
`
rows, err := db.Query(query)
if err != nil {
return nil, err
}
defer rows.Close()
var es []WorkerExecution
for rows.Next() {
var e WorkerExecution
var j sql.NullString
if err := rows.Scan(&e.ID, &e.Application, &e.Pipeline, &e.Execution, &e.Status, &j); err != nil {
return nil, err
}
if j.Valid {
b := []byte(j.String)
json.Unmarshal(b, &e.Events)
}
es = append(es, e)
}
return es, nil
}
| {
query := `
update poller_execution set status = $2, data = $3 where id = $1
`
data, _ := json.Marshal(e.Events)
if _, err := db.Exec(query, e.ID, e.Status, data); err != nil {
return err
}
return nil
} | identifier_body |
repo_polling.go | package polling
import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"math/rand"
"regexp"
"strings"
"sync"
"time"
"github.com/ovh/cds/engine/api/application"
"github.com/ovh/cds/engine/api/cache"
"github.com/ovh/cds/engine/api/database"
"github.com/ovh/cds/engine/api/pipeline"
"github.com/ovh/cds/engine/api/poller"
"github.com/ovh/cds/engine/api/project"
"github.com/ovh/cds/engine/api/repositoriesmanager"
"github.com/ovh/cds/engine/log"
"github.com/ovh/cds/sdk"
)
//RunningPollers is the map of all runningPollers
var RunningPollers = struct {
Workers map[string]*Worker
mutex *sync.RWMutex
}{
Workers: map[string]*Worker{},
mutex: &sync.RWMutex{},
}
//Worker represent a goroutine for each project responsible of repo polling
type Worker struct {
ProjectKey string `json:"project"`
}
//NewWorker Initializes a new worker struct
func NewWorker(key string) *Worker {
return &Worker{key}
}
//WorkerExecution represents a worker execution for a poller instance
type WorkerExecution struct {
ID int64 `json:"id"`
Application string `json:"application"`
Pipeline string `json:"pipeline"`
Execution time.Time `json:"execution"`
Status string `json:"status"`
Events []sdk.VCSPushEvent `json:"events,omitempty"`
}
//Initialize all existing pollers (one poller per project)
func Initialize() {
for {
db := database.DB()
if db == nil {
time.Sleep(30 * time.Second)
continue
}
proj, err := project.LoadAllProjects(db)
if err != nil {
log.Critical("Polling> Unable to load projects: %s", err)
time.Sleep(30 * time.Second)
continue
}
for _, p := range proj {
if RunningPollers.Workers[p.Key] == nil {
w := NewWorker(p.Key)
RunningPollers.mutex.Lock()
RunningPollers.Workers[p.Key] = w
RunningPollers.mutex.Unlock()
var pollerhasStop = func() {
RunningPollers.mutex.Lock()
delete(RunningPollers.Workers, w.ProjectKey)
RunningPollers.mutex.Unlock()
}
ok, quit, err := w.Poll()
if err != nil {
log.Warning("Polling> Unable to lauch worker %s: %s", p.Key, err)
continue
}
if !ok {
pollerhasStop()
}
go func() {
<-quit
pollerhasStop()
}()
}
}
time.Sleep(1 * time.Minute)
}
}
//Poll initiate a poller
func (w *Worker) Poll() (bool, chan bool, error) {
//Check database connection
db := database.DB()
if db == nil {
return false, nil, errors.New("Database is unavailable")
}
pollers, err := poller.LoadEnabledPollers(db)
if err != nil {
return false, nil, err
}
var quit chan bool
var atLeastOne bool
for i := range pollers {
p := &pollers[i]
b, _ := repositoriesmanager.CheckApplicationIsAttached(db, p.Name, w.ProjectKey, p.Application.Name)
if !b || p.Application.RepositoriesManager == nil || p.Application.RepositoryFullname == "" {
continue
}
if !p.Application.RepositoriesManager.PollingSupported {
log.Info("Polling is not supported by %s\n", p.Name)
continue
}
log.Info("Starting poller on %s %s %s", p.Name, p.Application.Name, p.Pipeline.Name)
atLeastOne = true
quit = make(chan bool)
go w.poll(p.Application.RepositoriesManager, p.Application.ID, p.Pipeline.ID, quit)
time.Sleep(2 * time.Minute)
}
if !atLeastOne {
return false, nil, nil
}
return true, quit, nil
}
func (w *Worker) poll(rm *sdk.RepositoriesManager, appID, pipID int64, quit chan bool) {
delay := time.Duration(60.0)
r := rand.New(rand.NewSource(time.Now().UnixNano()))
var mayIWork string
log.Debug("Polling> Start on appID=%d, pipID=%d\n", appID, pipID)
for RunningPollers.Workers[w.ProjectKey] != nil {
//Check database connection
db := database.DB()
if db == nil {
time.Sleep(60 * time.Second)
continue
}
//Loading poller from database
p, err := poller.LoadPollerByApplicationAndPipeline(db, appID, pipID)
if err != nil {
log.Warning("Polling> Unable to load poller appID=%d pipID=%d: %s", appID, pipID, err)
break
}
//Check if poller is still enabled
if !p.Enabled {
log.Warning("Polling> Poller %s is disabled %s", p.Application.RepositoryFullname, err)
break
}
k := cache.Key("reposmanager", "polling", w.ProjectKey, p.Application.Name, p.Pipeline.Name, p.Name)
//Get fro mcache to know if someone is polling the repo
cache.Get(k, &mayIWork)
//If nobody is polling it
if mayIWork == "" {
log.Info("Polling> Polling repository %s for %s/%s\n", p.Application.RepositoryFullname, w.ProjectKey, p.Application.Name)
cache.SetWithTTL(k, "true", 300)
e := &WorkerExecution{
Status: "Running",
Execution: time.Now(),
}
if err := insertExecution(db, &p.Application, &p.Pipeline, e); err != nil {
log.Warning("Polling> Unable to save execution : %s", err)
}
//get the client for the repositories manager
client, err := repositoriesmanager.AuthorizedClient(db, w.ProjectKey, rm.Name)
if err != nil {
log.Warning("Polling> Unable to get client for %s %s : %s\n", w.ProjectKey, rm.Name, err)
break
}
var events []sdk.VCSPushEvent
events, delay, err = client.PushEvents(p.Application.RepositoryFullname, p.DateCreation)
s, err := triggerPipelines(db, w.ProjectKey, rm, p, events)
if err != nil {
log.Warning("Polling> Unable to trigger pipeline %s for repository %s\n", p.Pipeline.Name, p.Application.RepositoryFullname)
break
}
e.Status = fmt.Sprintf(s)
e.Events = events
if err := updateExecution(db, e); err != nil {
log.Warning("Polling> Unable to update execution : %s", err)
}
//Wait for the delay
time.Sleep(delay * time.Second)
cache.Delete(k)
}
//Wait for sometime between 0 and 10 seconds
time.Sleep(time.Duration(r.Float64()*10) * time.Second)
}
log.Debug("Polling> End\n")
quit <- true
}
func triggerPipelines(db *sql.DB, projectKey string, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, events []sdk.VCSPushEvent) (string, error) {
status := ""
for _, event := range events {
projectData, err := project.LoadProjectByPipelineID(db, poller.Pipeline.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project for pipeline %s: %s\n", poller.Pipeline.Name, err)
return "Error", err
}
projectsVar, err := project.GetAllVariableInProject(db, projectData.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project variable: %s\n", err)
return "Error", err
}
projectData.Variable = projectsVar
//begin a tx
tx, err := db.Begin()
if err != nil {
return "Error", err
}
ok, err := TriggerPipeline(tx, rm, poller, event, projectData)
if err != nil {
log.Warning("Polling.triggerPipelines> cannot trigger pipeline %d: %s\n", poller.Pipeline.ID, err)
tx.Rollback()
return "Error", err
}
// commit the tx
if err := tx.Commit(); err != nil {
log.Critical("Polling.triggerPipelines> Cannot commit tx; %s\n", err)
return "Error", err
}
if ok {
log.Debug("Polling.triggerPipelines> Triggered %s/%s/%s", projectKey, poller.Application.RepositoryFullname, event.Branch)
status = fmt.Sprintf("%s Pipeline %s triggered on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
} else {
log.Info("Polling.triggerPipelines> Did not trigger %s/%s/%s\n", projectKey, poller.Application.RepositoryFullname, event.Branch.ID)
status = fmt.Sprintf("%s Pipeline %s skipped on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
}
}
return status, nil
}
// TriggerPipeline linked to received hook
func | (tx *sql.Tx, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, e sdk.VCSPushEvent, projectData *sdk.Project) (bool, error) {
client, err := repositoriesmanager.AuthorizedClient(tx, projectData.Key, rm.Name)
if err != nil {
return false, err
}
// Create pipeline args
var args []sdk.Parameter
args = append(args, sdk.Parameter{
Name: "git.branch",
Value: e.Branch.ID,
})
args = append(args, sdk.Parameter{
Name: "git.hash",
Value: e.Commit.Hash,
})
args = append(args, sdk.Parameter{
Name: "git.author",
Value: e.Commit.Author.Name,
})
args = append(args, sdk.Parameter{
Name: "git.repository",
Value: poller.Application.RepositoryFullname,
})
args = append(args, sdk.Parameter{
Name: "git.project",
Value: strings.Split(poller.Application.RepositoryFullname, "/")[0],
})
repo, _ := client.RepoByFullname(poller.Application.RepositoryFullname)
if repo.SSHCloneURL != "" {
args = append(args, sdk.Parameter{
Name: "git.url",
Value: repo.SSHCloneURL,
})
}
// Load pipeline Argument
parameters, err := pipeline.GetAllParametersInPipeline(tx, poller.Pipeline.ID)
if err != nil {
return false, err
}
poller.Pipeline.Parameter = parameters
applicationPipelineArgs, err := application.GetAllPipelineParam(tx, poller.Application.ID, poller.Pipeline.ID)
if err != nil {
return false, err
}
trigger := sdk.PipelineBuildTrigger{
ManualTrigger: false,
VCSChangesBranch: e.Branch.ID,
VCSChangesHash: e.Commit.Hash,
VCSChangesAuthor: e.Commit.Author.DisplayName,
}
// Get commit message to check if we have to skip the build
match, err := regexp.Match(".*\\[ci skip\\].*|.*\\[cd skip\\].*", []byte(e.Commit.Message))
if err != nil {
log.Warning("polling> Cannot check %s/%s for commit %s by %s : %s (%s)\n", projectData.Key, poller.Application.Name, trigger.VCSChangesHash, trigger.VCSChangesAuthor, e.Commit.Message, err)
}
if match {
log.Debug("polling> Skipping build of %s/%s for commit %s by %s\n", projectData.Key, poller.Application.Name, trigger.VCSChangesHash, trigger.VCSChangesAuthor)
return false, nil
}
if b, err := pipeline.BuildExists(tx, poller.Application.ID, poller.Pipeline.ID, sdk.DefaultEnv.ID, &trigger); err != nil || b {
if err != nil {
log.Warning("Polling> Error checking existing build : %s", err)
}
return false, nil
}
_, err = pipeline.InsertPipelineBuild(tx, projectData, &poller.Pipeline, &poller.Application, applicationPipelineArgs, args, &sdk.DefaultEnv, 0, trigger)
if err != nil {
return false, err
}
return true, nil
}
func insertExecution(db database.QueryExecuter, app *sdk.Application, pip *sdk.Pipeline, e *WorkerExecution) error {
query := `
insert into poller_execution (application_id, pipeline_id, execution_date, status, data)
values($1, $2, $3, $4, $5)
returning id
`
data, _ := json.Marshal(e.Events)
if err := db.QueryRow(query, app.ID, pip.ID, e.Execution, e.Status, data).Scan(&e.ID); err != nil {
return err
}
return nil
}
func updateExecution(db database.QueryExecuter, e *WorkerExecution) error {
query := `
update poller_execution set status = $2, data = $3 where id = $1
`
data, _ := json.Marshal(e.Events)
if _, err := db.Exec(query, e.ID, e.Status, data); err != nil {
return err
}
return nil
}
func deleteExecution(db database.QueryExecuter, e *WorkerExecution) error {
query := `
delete from poller_execution where id = $1
`
if _, err := db.Exec(query, e.ID); err != nil {
return err
}
return nil
}
//ExecutionCleaner is globale goroutine to remove all old polling traces
func ExecutionCleaner() {
for {
db := database.DB()
if db == nil {
time.Sleep(30 * time.Minute)
continue
}
execs, _ := LoadExecutions(db)
for i := range execs {
tenDaysAGo := time.Now().Add(-10 * 24 * time.Hour)
if execs[i].Execution.Before(tenDaysAGo) {
deleteExecution(db, &execs[i])
}
}
time.Sleep(1 * time.Hour)
}
}
//LoadExecutions returns all executions in database
func LoadExecutions(db database.QueryExecuter) ([]WorkerExecution, error) {
query := `
select poller_execution.id, application.name, pipeline.name, poller_execution.execution_date, poller_execution.status, poller_execution.data
from poller_execution, application, pipeline
where poller_execution.application_id = application.id
and poller_execution.pipeline_id = pipeline.id
order by poller_execution.execution_date desc
`
rows, err := db.Query(query)
if err != nil {
return nil, err
}
defer rows.Close()
var es []WorkerExecution
for rows.Next() {
var e WorkerExecution
var j sql.NullString
if err := rows.Scan(&e.ID, &e.Application, &e.Pipeline, &e.Execution, &e.Status, &j); err != nil {
return nil, err
}
if j.Valid {
b := []byte(j.String)
json.Unmarshal(b, &e.Events)
}
es = append(es, e)
}
return es, nil
}
| TriggerPipeline | identifier_name |
keycap.js | import Vue from 'vue/dist/vue.js'
import KeyCustomizer from './components/KeyCustomizer.vue'
import KeySelector from './components/KeySelector.vue'
import NavBar from './components/NavBar.vue'
import KeycapProductInfo from './components/KeycapProductInfo'
Vue.config.productionTip = false
var customKey = new Vue({
el:'#app',
components:{KeyCustomizer, KeySelector, NavBar, KeycapProductInfo},
data: {
currentView:'topView',
selectedColor: {name:'white',color:'#ffffff'},
colors: [
{name:'black', color:'#1a1a1a'},
{name:'white',color:'#ffffff'},
{name:'gray', color:'#d0ccc0'},
{name:'dark-gray', color:'#96938e'},
{name:'graphite', color:'#60605b'},
{name:'charcoal', color:'#373534'},
{name:'pink', color:'#fbbbc9'},
{name:'red', color:'#c13828'},
{name:'maroon', color:'#5f3032'},
{name:'blue', color:'#5eb1e7'},
{name:'royal-blue', color:'#0046ad'},
{name:'navy', color:'#002e5f'},
{name:'mint', color:'#8ed7b0'},
{name:'green', color:'#1db63a'},
{name:'olive', color:'#53682b'},
{name:'yellow', color:'#f8d615'},
{name:'orange', color:'#f67f00'},
{name:'graybrown', color:'#766e54'},
{name:'brown', color:'#6f4c23'},
{name:'purple', color:'#ac97d8'},
{name:'aubergine', color:'#43165e'}
],
viewOptions: [
{name:'Top',value:'topView'},
{name:'Front',value:'frontView'},
{name:'Left side',value:'leftView'},
{name:'Right side',value:'rightView'},
{name:'Back',value:'backView'}
],
surfaces: {
topView:{preview:'',img:{x:'20',y:'16', width:'80', height:'80',value:'', filename:''},text:{x:'25',y:'15',value:''}},
frontView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80', value:'', filename:''},text:{x:'50',y:'120',value:''}},
leftView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80',value:'', filename:''},text:{x:'50',y:'120',value:''}},
rightView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80',value:'', filename:''},text:{x:'50',y:'120',value:''}},
backView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80',value:'', filename:''},text:{x:'50',y:'120',value:''}}
},
keys:{},
selectedKey:{
type:"square",
sides:{
"topView":{"body":{"x":0,"y":0,"width":39,"height":39,"rx":2,"ry":2},"face":{"x":5.5,"y":3,"width":28,"height":31,"rx":4,"ry":4},"viewbox":"0 0 39 39"},
"frontView":{"path":"M37 29.5C35.31 19.71 34.25 13.6 33.83 11.15C33.71 10.45 33.21 9.86 32.53 9.63C32.44 9.6 32.98 9.78 32.81 9.73C32.33 9.56 31.82 9.53 31.32 9.64C30.5 9.82 30.48 9.82 30.08 9.91C28.18 10.33 26.25 10.6 24.31 10.73C23.35 10.79 24.23 10.73 23.48 10.78C20.83 10.95 18.17 10.94 15.52 10.73C14.52 10.66 14.01 10.62 12.64 10.51C11.24 10.4 9.84 10.18 8.47 9.85C8.16 9.78 7.85 9.7 7.18 9.54C7 9.5 6.81 9.49 6.62 9.52C6.41 9.55 6.59 9.52 6.51 9.53C5.96 9.61 5.52 10.04 5.42 10.58C4.97 13.11 3.83 19.41 2 29.5L37 29.5Z", "restrict":{"height":17,"width":27,"x":6,"y":12}},
"leftView":{"path":"M2 29.89L37 30L30.75 10L3.77 14.33L2 29.89Z", "restrict":{"height":14,"width":26,"x":6,"y":15}},
"rightView":{"path":"M36.94 29.89L1.94 30L8.18 10L35.16 14.33L36.94 29.89Z", "restrict":{"height":14,"width":26,"x":8,"y":15}},
"backView":{"path":"M37 27.5C35.31 19.68 34.25 14.8 33.83 12.84C33.71 12.27 33.27 11.81 32.71 11.65C32.58 11.62 32.88 11.7 32.69 11.65C32.29 11.54 31.86 11.52 31.45 11.59C30.6 11.74 30.09 11.83 29.6 11.91C29.6 11.91 24.82 12.45 24.82 12.45C23.76 12.51 23.67 12.51 22.79 12.56C22.79 12.56 16.22 12.53 16.22 12.53C15.08 12.46 13.72 12.38 12.27 12.29C12.27 12.29 8.82 11.85 8.82 11.85C8.44 11.77 7.81 11.65 7.13 11.52C6.98 11.49 6.82 11.49 6.67 11.51C6.45 11.53 6.46 11.53 6.35 11.54C5.9 11.6 5.52 11.93 5.42 12.38C4.96 14.4 3.82 19.44 2 27.5L37 27.5Z", "restrict":{"height":14,"width":27,"x":6,"y":13}}
}
},
sides:{},
active:false,
mainStyle:{
pointerEvents:'none',
opacity:0.3
}
},
computed: {
transformText () {
return {transform: 'translate(' + this.surfaces[this.currentView].text.x + 'px,' + this.surfaces[this.currentView].text.y + 'px)',fontSize:'26px'}
},
transformImg () {
return {transform: 'translate(' + this.surfaces[this.currentView].img.x + 'px,' + this.surfaces[this.currentView].img.y + 'px)'}
}
},
mounted: function () {
var self = this;
fetch('https://us-central1-hotsguide-188315.cloudfunctions.net/function-1?board=keyboard-104&sides=true', {
headers: {
"Content-Type": "application/json; charset=utf-8",
}
})
.then(response => response.json())
.then(data => {
self.keys = data.keys;
self.sides = data.sides
})
.catch(error => console.error(error));
// $.ajax({
// url: 'https://us-central1-hotsguide-188315.cloudfunctions.net/function-1', //read comments in search.php for more information usage
// type: 'GET',
// data: {board: 'keyboard-61', sides:true},
// dataType: 'json',
// success: function(json) {
// self.keys = json.keys;
// self.sides = json.sides;
// }
// });
},
methods: {
addToCart: function() {
var self = this
this.generatePreviews().then(function (){
var key = {name:self.selectedKey.name, color:self.selectedColor.color,surfaces:self.surfaces, price:1000}
var cartItems = JSON.parse(sessionStorage.getItem('customKeycaps')) || []
console.log(cartItems)
cartItems.push(key)
sessionStorage.setItem('customKeycaps', JSON.stringify(cartItems))
var cartLink = document.getElementById('cartLink')
var itemsSpan = cartLink.getElementsByTagName('span')[0]
if (itemsSpan) {
itemsSpan.innerHTML = ' ('+cartItems.length+')'
} else {
itemsSpan = document.createElement('span')
itemsSpan.innerHTML = ' ('+cartItems.length+')'
itemsSpan.setAttribute('style','color:blue;')
cartLink.appendChild(itemsSpan)
}
})
},
setColor: function(color) {
document.querySelector('.keyPicker[data-color="'+this.selectedColor.name+'"]').style.removeProperty('border')
document.querySelector('.keyPicker[data-color="'+color.name+'"]').style.border = '2px solid red'
this.selectedColor = color
},
changeView: function(ev) {
console.log(ev)
document.activeElement.blur();
this.currentView = ev.target.selectedOptions[0].value
var fileInput = document.querySelector('input[type=file]')
//var textInput = document.querySelector('input[type=text]')
if (this.surfaces[this.currentView].img.filename == ''){
fileInput.value = ''
if(!/safari/i.test(navigator.userAgent)){
fileInput.type = ''
fileInput.type = 'file'
}
}
//textInput.value = this.surfaces[this.currentView].text.value
this.updateSnapPoints()
},
updateSnapPoints: function () {
Vue.nextTick(function () {
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
var imgWidth = document.querySelector('.moveableImg').width
var textWidth = document.querySelector('.moveableText').style.width
var textHeight = document.querySelector('.moveableText').style.height
interact('.moveableText').options.drag.snap.targets = calcSnapTargets(surface,textWidth,textHeight)
interact('.moveableText').options.drag.restrict.restriction = {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
}
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface,imgWidth,imgWidth)
interact('.moveableImg').options.drag.restrict.restriction = {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
}
})
},
changeKey: function(name) {
var prevKey = this.selectedKey.name
var color = '#ffffff'
var keyType = JSON.parse(JSON.stringify(this.keys[name].type))
if (!this.active) {
this.active = true
this.mainStyle.pointerEvents = 'auto'
this.mainStyle.opacity = 1
Vue.nextTick(function () {
addInteractive()
})
}
if (prevKey){
this.keys[prevKey].body.color = '#0f0f0f'
this.keys[prevKey].face.stroke = '#272727'
this.keys[prevKey].face.color = '#1a1a1a'
}
this.selectedKey.sides = this.sides[keyType]
this.selectedKey.type = keyType
this.selectedKey.name = name
this.keys[name].body.color = this.shadeBlend(-0.25, color)
this.keys[name].face.stroke = this.shadeBlend(0.065, color)
this.keys[name].face.color = color
//this.selectedKey.topView.viewbox = "0 0 " + this.selectedKey.topView.body.width + " " + this.selectedKey.topView.body.height
this.updateSnapPoints()
},
updateText: function(text) {
this.surfaces[this.currentView].text.value = text
surface = document.querySelector('.restrictRect').getBoundingClientRect()
var newWidth = document.querySelector('.moveableText').style.width
var newHeight = document.querySelector('.moveableText').style.height
interact('.moveableText').options.drag.snap.targets = calcSnapTargets(surface, newWidth, newHeight)
},
shadeBlend: function(p,c0,c1) {
var n=p<0?p*-1:p,u=Math.round,w=parseInt;
if(c0.length>7){
var f=c0.split(","),t=(c1?c1:p<0?"rgb(0,0,0)":"rgb(255,255,255)").split(","),R=w(f[0].slice(4)),G=w(f[1]),B=w(f[2]);
return "rgb("+(u((w(t[0].slice(4))-R)*n)+R)+","+(u((w(t[1])-G)*n)+G)+","+(u((w(t[2])-B)*n)+B)+")"
}else{
var f=w(c0.slice(1),16),t=w((c1?c1:p<0?"#000000":"#FFFFFF").slice(1),16),R1=f>>16,G1=f>>8&0x00FF,B1=f&0x0000FF;
return "#"+(0x1000000+(u(((t>>16)-R1)*n)+R1)*0x10000+(u(((t>>8&0x00FF)-G1)*n)+G1)*0x100+(u(((t&0x0000FF)-B1)*n)+B1)).toString(16).slice(1)
}
},
// ugly, must be a better way
generatePreviews: function() {
var svgns = "http://www.w3.org/2000/svg"
var allSides = []
for (var surface in this.surfaces) {
var self = this
var promise = new Promise(function(resolve, reject) {
var newSvg = document.createElementNS(svgns, "svg");
newSvg.setAttribute('width', 260)
newSvg.setAttribute('height', 210)
newSvg.setAttribute('preserveAspectRatio', 'xMidYMid meet')
newSvg.setAttribute('viewBox', self.selectedKey.sides.topView.viewbox)
if (surface == 'topView'){
var body = document.createElementNS(svgns, 'rect');
body.setAttribute('class','keySurface')
body.setAttribute('x', self.selectedKey.sides.topView.body.x)
body.setAttribute('y',self.selectedKey.sides.topView.body.y)
body.setAttribute('width',self.selectedKey.sides.topView.body.width)
body.setAttribute('height',self.selectedKey.sides.topView.body.height)
body.setAttribute('rx',self.selectedKey.sides.topView.body.rx)
body.setAttribute('ry',self.selectedKey.sides.topView.body.ry)
body.setAttribute('fill',self.shadeBlend(-0.2,self.selectedColor.color))
var face = document.createElementNS(svgns, 'rect');
face.setAttribute('class','restrictRect')
face.setAttribute('x', self.selectedKey.sides.topView.face.x)
face.setAttribute('y',self.selectedKey.sides.topView.face.y)
face.setAttribute('width',self.selectedKey.sides.topView.face.width)
face.setAttribute('height',self.selectedKey.sides.topView.face.height)
face.setAttribute('rx',self.selectedKey.sides.topView.face.rx)
face.setAttribute('ry',self.selectedKey.sides.topView.face.ry)
face.setAttribute('fill',self.selectedColor.color)
face.setAttribute('stroke',self.shadeBlend(0.065,self.selectedColor.color))
face.setAttribute('stroke-width','1px')
newSvg.appendChild(body)
newSvg.appendChild(face)
} else {
var path = document.createElementNS(svgns, 'path')
path.setAttribute('stroke-width','0.5px')
path.setAttribute('stroke',self.shadeBlend(-0.2,self.selectedColor.color))
path.setAttribute('fill',self.selectedColor.color)
path.setAttribute('class','keySurface')
path.setAttribute('d',self.selectedKey.sides[surface].path)
newSvg.appendChild(path)
}
if (self.surfaces[surface].img.value != '' || self.surfaces[surface].text.value != ''){
var img = new Image()
img.width = self.surfaces[surface].img.width
img.height = self.surfaces[surface].img.height
img.src = self.surfaces[surface].img.value
var thisSurface = surface
img.onload = function () {
var svg_xml = (new XMLSerializer()).serializeToString(newSvg);
canvg('canvas', svg_xml, {useCORS: true});
var context = canvas.getContext('2d');
console.log(img.width)
console.log(img.height)
context.drawImage(img, self.surfaces[thisSurface].img.x, self.surfaces[thisSurface].img.y, self.surfaces[thisSurface].img.width,self.surfaces[thisSurface].img.height);
self.surfaces[thisSurface].preview = canvas.toDataURL('image/png')
resolve()
}
//console.log(this.surfaces[surface].img)
//var img = document.getElementsByClassName('moveableImg')[0]
} else {
var svg_xml = (new XMLSerializer()).serializeToString(newSvg);
canvg('canvas', svg_xml, {useCORS: true});
self.surfaces[surface].preview = canvas.toDataURL('image/png')
resolve()
}
})
allSides.push(promise)
}
return Promise.all(allSides)
},
previewImg: function() {
var file = document.querySelector('input[type=file]').files[0]; //sames as here
var label = document.querySelector( '.inputfile' ).nextElementSibling
var target = this.surfaces[this.currentView].img
var reader = new FileReader();
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
var width = document.querySelector('.moveableImg').width
reader.onloadend = function () {
target.value = reader.result
Vue.nextTick(function () {
var image = document.querySelector('#keyImage')
image.onload = function() {
target.width = image.width
target.height = image.height
}
})
}
if (file) {
target.filename = file.name
reader.readAsDataURL(file); //reads the data as a URL
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface,width,width)
} else {
target.value = "";
}
}
}
}).$mount('#app')
function calcSnapTargets(surface, width, height){
var left = surface.left+(width / 2)
var center = surface.left + (surface.width / 2)
var right = surface.left + surface.width - (width / 2)
var top = surface.top + (height / 2)
var middle = surface.top + (surface.height / 2)
var bottom = surface.top + surface.height - (height / 2)
var snapTargets = [
{x:left, y:top},
{x:center,y:top},
{x:right,y:top},
{x:left, y:middle},
{x:center,y:middle},
{x:right,y:middle},
{x:left, y:bottom},
{x:center,y:bottom},
{x:right,y:bottom},
]
return snapTargets
}
function dragMoveListener (event) {
var target = event.target
if (target.className == "moveableImg"){
var node = customKey.surfaces[customKey.currentView].img
} else if (target.className == "moveableText") {
var node = customKey.surfaces[customKey.currentView].text
}
node.x = (parseFloat(node.x) || 0) + event.dx,
node.y = (parseFloat(node.y) || 0) + event.dy;
}
// this is used later in the resizing and gesture demos
window.dragMoveListener = dragMoveListener;
function addInteractive() |
var selectKey = function (e) {
var target = e.target;
if (target.className){
if (target.classList.contains('key-face')) {
customKey.changeKey(target.getAttribute('name'))
}
}
e.stopPropagation()
}
document.body.addEventListener('click', selectKey, false);
document.body.addEventListener('touch', selectKey, false); | {
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
interact('.moveableText')
.draggable({
onmove: window.dragMoveListener,
snap: {
targets: [{}],
range:20,
relativePoints: [
//{ x: 0 , y: 0 } // snap relative to the element's top-left,
{ x: 0.5, y: 0.5 } // to the center
// { x: 1 , y: 1 } // and to the bottom-right
]
},
restrict: {
restriction: {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
},
elementRect: { top: 0, left: 0, bottom: 1, right: 1 }
},
})
interact('.moveableImg')
.draggable({
onmove: window.dragMoveListener,
snap: {
targets: [{}],
range:10,
relativePoints: [
//{ x: 0 , y: 0 } // snap relative to the element's top-left,
{ x: 0.5, y: 0.5 } // to the center
// { x: 1 , y: 1 } // and to the bottom-right
]
},
restrict: {
restriction: {
x:surface.left,
y:surface.top,
width:surface.width,
height:surface.height
},
elementRect: { top: 0, left: 0, bottom: 1, right: 1 }
},
})
.resizable({
// resize from all edges and corners
edges: {right: '.resize-corner', bottom: '.resize-corner', left: false, top:false},
// keep the edges inside the parent
preserveAspectRatio:true,
restrictEdges: {
outer:{
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
},
},
// minimum size
restrictSize: {
min: { width: 50, height: 50 }
},
inertia: false,
})
.on('resizemove', function (event) {
var target = event.target
//target.style.height = event.rect.height + 'px'
if (event.rect.height < surface.height) {
target.style.width = event.rect.width + 'px'
} else {
target.style.width = ((target.clientWidth / target.clientHeight) * surface.height) + 'px'
}
customKey.surfaces[customKey.currentView].img.width = target.clientWidth
//ugly hack, can't figure out how to shrink div to image size properly. Tried css tricks eg inline-block, float, table,
customKey.surfaces[customKey.currentView].img.height = target.clientHeight - 5
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface, event.rect.width, event.rect.height)
//placeImage()
});
} | identifier_body |
keycap.js | import Vue from 'vue/dist/vue.js'
import KeyCustomizer from './components/KeyCustomizer.vue'
import KeySelector from './components/KeySelector.vue'
import NavBar from './components/NavBar.vue'
import KeycapProductInfo from './components/KeycapProductInfo'
Vue.config.productionTip = false
var customKey = new Vue({
el:'#app',
components:{KeyCustomizer, KeySelector, NavBar, KeycapProductInfo},
data: {
currentView:'topView',
selectedColor: {name:'white',color:'#ffffff'},
colors: [
{name:'black', color:'#1a1a1a'},
{name:'white',color:'#ffffff'},
{name:'gray', color:'#d0ccc0'},
{name:'dark-gray', color:'#96938e'},
{name:'graphite', color:'#60605b'},
{name:'charcoal', color:'#373534'},
{name:'pink', color:'#fbbbc9'},
{name:'red', color:'#c13828'},
{name:'maroon', color:'#5f3032'},
{name:'blue', color:'#5eb1e7'},
{name:'royal-blue', color:'#0046ad'},
{name:'navy', color:'#002e5f'},
{name:'mint', color:'#8ed7b0'},
{name:'green', color:'#1db63a'},
{name:'olive', color:'#53682b'},
{name:'yellow', color:'#f8d615'},
{name:'orange', color:'#f67f00'},
{name:'graybrown', color:'#766e54'},
{name:'brown', color:'#6f4c23'},
{name:'purple', color:'#ac97d8'},
{name:'aubergine', color:'#43165e'}
],
viewOptions: [
{name:'Top',value:'topView'},
{name:'Front',value:'frontView'},
{name:'Left side',value:'leftView'},
{name:'Right side',value:'rightView'},
{name:'Back',value:'backView'}
],
surfaces: {
topView:{preview:'',img:{x:'20',y:'16', width:'80', height:'80',value:'', filename:''},text:{x:'25',y:'15',value:''}},
frontView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80', value:'', filename:''},text:{x:'50',y:'120',value:''}},
leftView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80',value:'', filename:''},text:{x:'50',y:'120',value:''}},
rightView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80',value:'', filename:''},text:{x:'50',y:'120',value:''}},
backView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80',value:'', filename:''},text:{x:'50',y:'120',value:''}}
},
keys:{},
selectedKey:{
type:"square",
sides:{
"topView":{"body":{"x":0,"y":0,"width":39,"height":39,"rx":2,"ry":2},"face":{"x":5.5,"y":3,"width":28,"height":31,"rx":4,"ry":4},"viewbox":"0 0 39 39"},
"frontView":{"path":"M37 29.5C35.31 19.71 34.25 13.6 33.83 11.15C33.71 10.45 33.21 9.86 32.53 9.63C32.44 9.6 32.98 9.78 32.81 9.73C32.33 9.56 31.82 9.53 31.32 9.64C30.5 9.82 30.48 9.82 30.08 9.91C28.18 10.33 26.25 10.6 24.31 10.73C23.35 10.79 24.23 10.73 23.48 10.78C20.83 10.95 18.17 10.94 15.52 10.73C14.52 10.66 14.01 10.62 12.64 10.51C11.24 10.4 9.84 10.18 8.47 9.85C8.16 9.78 7.85 9.7 7.18 9.54C7 9.5 6.81 9.49 6.62 9.52C6.41 9.55 6.59 9.52 6.51 9.53C5.96 9.61 5.52 10.04 5.42 10.58C4.97 13.11 3.83 19.41 2 29.5L37 29.5Z", "restrict":{"height":17,"width":27,"x":6,"y":12}},
"leftView":{"path":"M2 29.89L37 30L30.75 10L3.77 14.33L2 29.89Z", "restrict":{"height":14,"width":26,"x":6,"y":15}},
"rightView":{"path":"M36.94 29.89L1.94 30L8.18 10L35.16 14.33L36.94 29.89Z", "restrict":{"height":14,"width":26,"x":8,"y":15}},
"backView":{"path":"M37 27.5C35.31 19.68 34.25 14.8 33.83 12.84C33.71 12.27 33.27 11.81 32.71 11.65C32.58 11.62 32.88 11.7 32.69 11.65C32.29 11.54 31.86 11.52 31.45 11.59C30.6 11.74 30.09 11.83 29.6 11.91C29.6 11.91 24.82 12.45 24.82 12.45C23.76 12.51 23.67 12.51 22.79 12.56C22.79 12.56 16.22 12.53 16.22 12.53C15.08 12.46 13.72 12.38 12.27 12.29C12.27 12.29 8.82 11.85 8.82 11.85C8.44 11.77 7.81 11.65 7.13 11.52C6.98 11.49 6.82 11.49 6.67 11.51C6.45 11.53 6.46 11.53 6.35 11.54C5.9 11.6 5.52 11.93 5.42 12.38C4.96 14.4 3.82 19.44 2 27.5L37 27.5Z", "restrict":{"height":14,"width":27,"x":6,"y":13}}
}
},
sides:{},
active:false,
mainStyle:{
pointerEvents:'none',
opacity:0.3
}
},
computed: {
transformText () {
return {transform: 'translate(' + this.surfaces[this.currentView].text.x + 'px,' + this.surfaces[this.currentView].text.y + 'px)',fontSize:'26px'}
},
| () {
return {transform: 'translate(' + this.surfaces[this.currentView].img.x + 'px,' + this.surfaces[this.currentView].img.y + 'px)'}
}
},
mounted: function () {
var self = this;
fetch('https://us-central1-hotsguide-188315.cloudfunctions.net/function-1?board=keyboard-104&sides=true', {
headers: {
"Content-Type": "application/json; charset=utf-8",
}
})
.then(response => response.json())
.then(data => {
self.keys = data.keys;
self.sides = data.sides
})
.catch(error => console.error(error));
// $.ajax({
// url: 'https://us-central1-hotsguide-188315.cloudfunctions.net/function-1', //read comments in search.php for more information usage
// type: 'GET',
// data: {board: 'keyboard-61', sides:true},
// dataType: 'json',
// success: function(json) {
// self.keys = json.keys;
// self.sides = json.sides;
// }
// });
},
methods: {
addToCart: function() {
var self = this
this.generatePreviews().then(function (){
var key = {name:self.selectedKey.name, color:self.selectedColor.color,surfaces:self.surfaces, price:1000}
var cartItems = JSON.parse(sessionStorage.getItem('customKeycaps')) || []
console.log(cartItems)
cartItems.push(key)
sessionStorage.setItem('customKeycaps', JSON.stringify(cartItems))
var cartLink = document.getElementById('cartLink')
var itemsSpan = cartLink.getElementsByTagName('span')[0]
if (itemsSpan) {
itemsSpan.innerHTML = ' ('+cartItems.length+')'
} else {
itemsSpan = document.createElement('span')
itemsSpan.innerHTML = ' ('+cartItems.length+')'
itemsSpan.setAttribute('style','color:blue;')
cartLink.appendChild(itemsSpan)
}
})
},
setColor: function(color) {
document.querySelector('.keyPicker[data-color="'+this.selectedColor.name+'"]').style.removeProperty('border')
document.querySelector('.keyPicker[data-color="'+color.name+'"]').style.border = '2px solid red'
this.selectedColor = color
},
changeView: function(ev) {
console.log(ev)
document.activeElement.blur();
this.currentView = ev.target.selectedOptions[0].value
var fileInput = document.querySelector('input[type=file]')
//var textInput = document.querySelector('input[type=text]')
if (this.surfaces[this.currentView].img.filename == ''){
fileInput.value = ''
if(!/safari/i.test(navigator.userAgent)){
fileInput.type = ''
fileInput.type = 'file'
}
}
//textInput.value = this.surfaces[this.currentView].text.value
this.updateSnapPoints()
},
updateSnapPoints: function () {
Vue.nextTick(function () {
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
var imgWidth = document.querySelector('.moveableImg').width
var textWidth = document.querySelector('.moveableText').style.width
var textHeight = document.querySelector('.moveableText').style.height
interact('.moveableText').options.drag.snap.targets = calcSnapTargets(surface,textWidth,textHeight)
interact('.moveableText').options.drag.restrict.restriction = {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
}
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface,imgWidth,imgWidth)
interact('.moveableImg').options.drag.restrict.restriction = {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
}
})
},
changeKey: function(name) {
var prevKey = this.selectedKey.name
var color = '#ffffff'
var keyType = JSON.parse(JSON.stringify(this.keys[name].type))
if (!this.active) {
this.active = true
this.mainStyle.pointerEvents = 'auto'
this.mainStyle.opacity = 1
Vue.nextTick(function () {
addInteractive()
})
}
if (prevKey){
this.keys[prevKey].body.color = '#0f0f0f'
this.keys[prevKey].face.stroke = '#272727'
this.keys[prevKey].face.color = '#1a1a1a'
}
this.selectedKey.sides = this.sides[keyType]
this.selectedKey.type = keyType
this.selectedKey.name = name
this.keys[name].body.color = this.shadeBlend(-0.25, color)
this.keys[name].face.stroke = this.shadeBlend(0.065, color)
this.keys[name].face.color = color
//this.selectedKey.topView.viewbox = "0 0 " + this.selectedKey.topView.body.width + " " + this.selectedKey.topView.body.height
this.updateSnapPoints()
},
updateText: function(text) {
this.surfaces[this.currentView].text.value = text
surface = document.querySelector('.restrictRect').getBoundingClientRect()
var newWidth = document.querySelector('.moveableText').style.width
var newHeight = document.querySelector('.moveableText').style.height
interact('.moveableText').options.drag.snap.targets = calcSnapTargets(surface, newWidth, newHeight)
},
shadeBlend: function(p,c0,c1) {
var n=p<0?p*-1:p,u=Math.round,w=parseInt;
if(c0.length>7){
var f=c0.split(","),t=(c1?c1:p<0?"rgb(0,0,0)":"rgb(255,255,255)").split(","),R=w(f[0].slice(4)),G=w(f[1]),B=w(f[2]);
return "rgb("+(u((w(t[0].slice(4))-R)*n)+R)+","+(u((w(t[1])-G)*n)+G)+","+(u((w(t[2])-B)*n)+B)+")"
}else{
var f=w(c0.slice(1),16),t=w((c1?c1:p<0?"#000000":"#FFFFFF").slice(1),16),R1=f>>16,G1=f>>8&0x00FF,B1=f&0x0000FF;
return "#"+(0x1000000+(u(((t>>16)-R1)*n)+R1)*0x10000+(u(((t>>8&0x00FF)-G1)*n)+G1)*0x100+(u(((t&0x0000FF)-B1)*n)+B1)).toString(16).slice(1)
}
},
// ugly, must be a better way
generatePreviews: function() {
var svgns = "http://www.w3.org/2000/svg"
var allSides = []
for (var surface in this.surfaces) {
var self = this
var promise = new Promise(function(resolve, reject) {
var newSvg = document.createElementNS(svgns, "svg");
newSvg.setAttribute('width', 260)
newSvg.setAttribute('height', 210)
newSvg.setAttribute('preserveAspectRatio', 'xMidYMid meet')
newSvg.setAttribute('viewBox', self.selectedKey.sides.topView.viewbox)
if (surface == 'topView'){
var body = document.createElementNS(svgns, 'rect');
body.setAttribute('class','keySurface')
body.setAttribute('x', self.selectedKey.sides.topView.body.x)
body.setAttribute('y',self.selectedKey.sides.topView.body.y)
body.setAttribute('width',self.selectedKey.sides.topView.body.width)
body.setAttribute('height',self.selectedKey.sides.topView.body.height)
body.setAttribute('rx',self.selectedKey.sides.topView.body.rx)
body.setAttribute('ry',self.selectedKey.sides.topView.body.ry)
body.setAttribute('fill',self.shadeBlend(-0.2,self.selectedColor.color))
var face = document.createElementNS(svgns, 'rect');
face.setAttribute('class','restrictRect')
face.setAttribute('x', self.selectedKey.sides.topView.face.x)
face.setAttribute('y',self.selectedKey.sides.topView.face.y)
face.setAttribute('width',self.selectedKey.sides.topView.face.width)
face.setAttribute('height',self.selectedKey.sides.topView.face.height)
face.setAttribute('rx',self.selectedKey.sides.topView.face.rx)
face.setAttribute('ry',self.selectedKey.sides.topView.face.ry)
face.setAttribute('fill',self.selectedColor.color)
face.setAttribute('stroke',self.shadeBlend(0.065,self.selectedColor.color))
face.setAttribute('stroke-width','1px')
newSvg.appendChild(body)
newSvg.appendChild(face)
} else {
var path = document.createElementNS(svgns, 'path')
path.setAttribute('stroke-width','0.5px')
path.setAttribute('stroke',self.shadeBlend(-0.2,self.selectedColor.color))
path.setAttribute('fill',self.selectedColor.color)
path.setAttribute('class','keySurface')
path.setAttribute('d',self.selectedKey.sides[surface].path)
newSvg.appendChild(path)
}
if (self.surfaces[surface].img.value != '' || self.surfaces[surface].text.value != ''){
var img = new Image()
img.width = self.surfaces[surface].img.width
img.height = self.surfaces[surface].img.height
img.src = self.surfaces[surface].img.value
var thisSurface = surface
img.onload = function () {
var svg_xml = (new XMLSerializer()).serializeToString(newSvg);
canvg('canvas', svg_xml, {useCORS: true});
var context = canvas.getContext('2d');
console.log(img.width)
console.log(img.height)
context.drawImage(img, self.surfaces[thisSurface].img.x, self.surfaces[thisSurface].img.y, self.surfaces[thisSurface].img.width,self.surfaces[thisSurface].img.height);
self.surfaces[thisSurface].preview = canvas.toDataURL('image/png')
resolve()
}
//console.log(this.surfaces[surface].img)
//var img = document.getElementsByClassName('moveableImg')[0]
} else {
var svg_xml = (new XMLSerializer()).serializeToString(newSvg);
canvg('canvas', svg_xml, {useCORS: true});
self.surfaces[surface].preview = canvas.toDataURL('image/png')
resolve()
}
})
allSides.push(promise)
}
return Promise.all(allSides)
},
previewImg: function() {
var file = document.querySelector('input[type=file]').files[0]; //sames as here
var label = document.querySelector( '.inputfile' ).nextElementSibling
var target = this.surfaces[this.currentView].img
var reader = new FileReader();
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
var width = document.querySelector('.moveableImg').width
reader.onloadend = function () {
target.value = reader.result
Vue.nextTick(function () {
var image = document.querySelector('#keyImage')
image.onload = function() {
target.width = image.width
target.height = image.height
}
})
}
if (file) {
target.filename = file.name
reader.readAsDataURL(file); //reads the data as a URL
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface,width,width)
} else {
target.value = "";
}
}
}
}).$mount('#app')
function calcSnapTargets(surface, width, height){
var left = surface.left+(width / 2)
var center = surface.left + (surface.width / 2)
var right = surface.left + surface.width - (width / 2)
var top = surface.top + (height / 2)
var middle = surface.top + (surface.height / 2)
var bottom = surface.top + surface.height - (height / 2)
var snapTargets = [
{x:left, y:top},
{x:center,y:top},
{x:right,y:top},
{x:left, y:middle},
{x:center,y:middle},
{x:right,y:middle},
{x:left, y:bottom},
{x:center,y:bottom},
{x:right,y:bottom},
]
return snapTargets
}
function dragMoveListener (event) {
var target = event.target
if (target.className == "moveableImg"){
var node = customKey.surfaces[customKey.currentView].img
} else if (target.className == "moveableText") {
var node = customKey.surfaces[customKey.currentView].text
}
node.x = (parseFloat(node.x) || 0) + event.dx,
node.y = (parseFloat(node.y) || 0) + event.dy;
}
// this is used later in the resizing and gesture demos
window.dragMoveListener = dragMoveListener;
function addInteractive() {
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
interact('.moveableText')
.draggable({
onmove: window.dragMoveListener,
snap: {
targets: [{}],
range:20,
relativePoints: [
//{ x: 0 , y: 0 } // snap relative to the element's top-left,
{ x: 0.5, y: 0.5 } // to the center
// { x: 1 , y: 1 } // and to the bottom-right
]
},
restrict: {
restriction: {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
},
elementRect: { top: 0, left: 0, bottom: 1, right: 1 }
},
})
interact('.moveableImg')
.draggable({
onmove: window.dragMoveListener,
snap: {
targets: [{}],
range:10,
relativePoints: [
//{ x: 0 , y: 0 } // snap relative to the element's top-left,
{ x: 0.5, y: 0.5 } // to the center
// { x: 1 , y: 1 } // and to the bottom-right
]
},
restrict: {
restriction: {
x:surface.left,
y:surface.top,
width:surface.width,
height:surface.height
},
elementRect: { top: 0, left: 0, bottom: 1, right: 1 }
},
})
.resizable({
// resize from all edges and corners
edges: {right: '.resize-corner', bottom: '.resize-corner', left: false, top:false},
// keep the edges inside the parent
preserveAspectRatio:true,
restrictEdges: {
outer:{
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
},
},
// minimum size
restrictSize: {
min: { width: 50, height: 50 }
},
inertia: false,
})
.on('resizemove', function (event) {
var target = event.target
//target.style.height = event.rect.height + 'px'
if (event.rect.height < surface.height) {
target.style.width = event.rect.width + 'px'
} else {
target.style.width = ((target.clientWidth / target.clientHeight) * surface.height) + 'px'
}
customKey.surfaces[customKey.currentView].img.width = target.clientWidth
//ugly hack, can't figure out how to shrink div to image size properly. Tried css tricks eg inline-block, float, table,
customKey.surfaces[customKey.currentView].img.height = target.clientHeight - 5
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface, event.rect.width, event.rect.height)
//placeImage()
});
}
var selectKey = function (e) {
var target = e.target;
if (target.className){
if (target.classList.contains('key-face')) {
customKey.changeKey(target.getAttribute('name'))
}
}
e.stopPropagation()
}
document.body.addEventListener('click', selectKey, false);
document.body.addEventListener('touch', selectKey, false); | transformImg | identifier_name |
keycap.js | import Vue from 'vue/dist/vue.js'
import KeyCustomizer from './components/KeyCustomizer.vue'
import KeySelector from './components/KeySelector.vue'
import NavBar from './components/NavBar.vue'
import KeycapProductInfo from './components/KeycapProductInfo'
Vue.config.productionTip = false
var customKey = new Vue({
el:'#app',
components:{KeyCustomizer, KeySelector, NavBar, KeycapProductInfo},
data: {
currentView:'topView',
selectedColor: {name:'white',color:'#ffffff'},
colors: [
{name:'black', color:'#1a1a1a'},
{name:'white',color:'#ffffff'},
{name:'gray', color:'#d0ccc0'},
{name:'dark-gray', color:'#96938e'},
{name:'graphite', color:'#60605b'},
{name:'charcoal', color:'#373534'},
{name:'pink', color:'#fbbbc9'},
{name:'red', color:'#c13828'},
{name:'maroon', color:'#5f3032'},
{name:'blue', color:'#5eb1e7'},
{name:'royal-blue', color:'#0046ad'},
{name:'navy', color:'#002e5f'},
{name:'mint', color:'#8ed7b0'},
{name:'green', color:'#1db63a'},
{name:'olive', color:'#53682b'},
{name:'yellow', color:'#f8d615'},
{name:'orange', color:'#f67f00'},
{name:'graybrown', color:'#766e54'},
{name:'brown', color:'#6f4c23'},
{name:'purple', color:'#ac97d8'},
{name:'aubergine', color:'#43165e'}
],
viewOptions: [
{name:'Top',value:'topView'},
{name:'Front',value:'frontView'},
{name:'Left side',value:'leftView'},
{name:'Right side',value:'rightView'},
{name:'Back',value:'backView'}
],
surfaces: {
topView:{preview:'',img:{x:'20',y:'16', width:'80', height:'80',value:'', filename:''},text:{x:'25',y:'15',value:''}},
frontView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80', value:'', filename:''},text:{x:'50',y:'120',value:''}},
leftView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80',value:'', filename:''},text:{x:'50',y:'120',value:''}},
rightView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80',value:'', filename:''},text:{x:'50',y:'120',value:''}},
backView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80',value:'', filename:''},text:{x:'50',y:'120',value:''}}
},
keys:{},
selectedKey:{
type:"square",
sides:{
"topView":{"body":{"x":0,"y":0,"width":39,"height":39,"rx":2,"ry":2},"face":{"x":5.5,"y":3,"width":28,"height":31,"rx":4,"ry":4},"viewbox":"0 0 39 39"},
"frontView":{"path":"M37 29.5C35.31 19.71 34.25 13.6 33.83 11.15C33.71 10.45 33.21 9.86 32.53 9.63C32.44 9.6 32.98 9.78 32.81 9.73C32.33 9.56 31.82 9.53 31.32 9.64C30.5 9.82 30.48 9.82 30.08 9.91C28.18 10.33 26.25 10.6 24.31 10.73C23.35 10.79 24.23 10.73 23.48 10.78C20.83 10.95 18.17 10.94 15.52 10.73C14.52 10.66 14.01 10.62 12.64 10.51C11.24 10.4 9.84 10.18 8.47 9.85C8.16 9.78 7.85 9.7 7.18 9.54C7 9.5 6.81 9.49 6.62 9.52C6.41 9.55 6.59 9.52 6.51 9.53C5.96 9.61 5.52 10.04 5.42 10.58C4.97 13.11 3.83 19.41 2 29.5L37 29.5Z", "restrict":{"height":17,"width":27,"x":6,"y":12}},
"leftView":{"path":"M2 29.89L37 30L30.75 10L3.77 14.33L2 29.89Z", "restrict":{"height":14,"width":26,"x":6,"y":15}},
"rightView":{"path":"M36.94 29.89L1.94 30L8.18 10L35.16 14.33L36.94 29.89Z", "restrict":{"height":14,"width":26,"x":8,"y":15}},
"backView":{"path":"M37 27.5C35.31 19.68 34.25 14.8 33.83 12.84C33.71 12.27 33.27 11.81 32.71 11.65C32.58 11.62 32.88 11.7 32.69 11.65C32.29 11.54 31.86 11.52 31.45 11.59C30.6 11.74 30.09 11.83 29.6 11.91C29.6 11.91 24.82 12.45 24.82 12.45C23.76 12.51 23.67 12.51 22.79 12.56C22.79 12.56 16.22 12.53 16.22 12.53C15.08 12.46 13.72 12.38 12.27 12.29C12.27 12.29 8.82 11.85 8.82 11.85C8.44 11.77 7.81 11.65 7.13 11.52C6.98 11.49 6.82 11.49 6.67 11.51C6.45 11.53 6.46 11.53 6.35 11.54C5.9 11.6 5.52 11.93 5.42 12.38C4.96 14.4 3.82 19.44 2 27.5L37 27.5Z", "restrict":{"height":14,"width":27,"x":6,"y":13}}
}
},
sides:{},
active:false,
mainStyle:{
pointerEvents:'none',
opacity:0.3
}
},
computed: {
transformText () {
return {transform: 'translate(' + this.surfaces[this.currentView].text.x + 'px,' + this.surfaces[this.currentView].text.y + 'px)',fontSize:'26px'}
},
transformImg () {
return {transform: 'translate(' + this.surfaces[this.currentView].img.x + 'px,' + this.surfaces[this.currentView].img.y + 'px)'}
}
},
mounted: function () {
var self = this;
fetch('https://us-central1-hotsguide-188315.cloudfunctions.net/function-1?board=keyboard-104&sides=true', {
headers: {
"Content-Type": "application/json; charset=utf-8",
}
})
.then(response => response.json())
.then(data => {
self.keys = data.keys;
self.sides = data.sides
})
.catch(error => console.error(error));
// $.ajax({
// url: 'https://us-central1-hotsguide-188315.cloudfunctions.net/function-1', //read comments in search.php for more information usage
// type: 'GET',
// data: {board: 'keyboard-61', sides:true},
// dataType: 'json',
// success: function(json) {
// self.keys = json.keys;
// self.sides = json.sides;
// }
// });
},
methods: {
addToCart: function() {
var self = this
this.generatePreviews().then(function (){
var key = {name:self.selectedKey.name, color:self.selectedColor.color,surfaces:self.surfaces, price:1000}
var cartItems = JSON.parse(sessionStorage.getItem('customKeycaps')) || []
console.log(cartItems)
cartItems.push(key)
sessionStorage.setItem('customKeycaps', JSON.stringify(cartItems))
var cartLink = document.getElementById('cartLink')
var itemsSpan = cartLink.getElementsByTagName('span')[0]
if (itemsSpan) {
itemsSpan.innerHTML = ' ('+cartItems.length+')'
} else {
itemsSpan = document.createElement('span')
itemsSpan.innerHTML = ' ('+cartItems.length+')'
itemsSpan.setAttribute('style','color:blue;')
cartLink.appendChild(itemsSpan)
}
})
},
setColor: function(color) {
document.querySelector('.keyPicker[data-color="'+this.selectedColor.name+'"]').style.removeProperty('border')
document.querySelector('.keyPicker[data-color="'+color.name+'"]').style.border = '2px solid red'
this.selectedColor = color
},
changeView: function(ev) {
console.log(ev)
document.activeElement.blur();
this.currentView = ev.target.selectedOptions[0].value
var fileInput = document.querySelector('input[type=file]')
//var textInput = document.querySelector('input[type=text]')
if (this.surfaces[this.currentView].img.filename == ''){
fileInput.value = ''
if(!/safari/i.test(navigator.userAgent)){
fileInput.type = ''
fileInput.type = 'file'
}
}
//textInput.value = this.surfaces[this.currentView].text.value
this.updateSnapPoints()
},
updateSnapPoints: function () {
Vue.nextTick(function () {
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
var imgWidth = document.querySelector('.moveableImg').width
var textWidth = document.querySelector('.moveableText').style.width
var textHeight = document.querySelector('.moveableText').style.height
interact('.moveableText').options.drag.snap.targets = calcSnapTargets(surface,textWidth,textHeight)
interact('.moveableText').options.drag.restrict.restriction = {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
}
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface,imgWidth,imgWidth)
interact('.moveableImg').options.drag.restrict.restriction = {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
}
})
},
changeKey: function(name) {
var prevKey = this.selectedKey.name
var color = '#ffffff'
var keyType = JSON.parse(JSON.stringify(this.keys[name].type))
if (!this.active) {
this.active = true
this.mainStyle.pointerEvents = 'auto'
this.mainStyle.opacity = 1
Vue.nextTick(function () {
addInteractive()
})
}
if (prevKey){
this.keys[prevKey].body.color = '#0f0f0f'
this.keys[prevKey].face.stroke = '#272727'
this.keys[prevKey].face.color = '#1a1a1a'
}
this.selectedKey.sides = this.sides[keyType]
this.selectedKey.type = keyType
this.selectedKey.name = name
this.keys[name].body.color = this.shadeBlend(-0.25, color)
this.keys[name].face.stroke = this.shadeBlend(0.065, color)
this.keys[name].face.color = color
//this.selectedKey.topView.viewbox = "0 0 " + this.selectedKey.topView.body.width + " " + this.selectedKey.topView.body.height
this.updateSnapPoints()
},
updateText: function(text) {
this.surfaces[this.currentView].text.value = text
surface = document.querySelector('.restrictRect').getBoundingClientRect()
var newWidth = document.querySelector('.moveableText').style.width
var newHeight = document.querySelector('.moveableText').style.height
interact('.moveableText').options.drag.snap.targets = calcSnapTargets(surface, newWidth, newHeight)
},
shadeBlend: function(p,c0,c1) {
var n=p<0?p*-1:p,u=Math.round,w=parseInt;
if(c0.length>7){
var f=c0.split(","),t=(c1?c1:p<0?"rgb(0,0,0)":"rgb(255,255,255)").split(","),R=w(f[0].slice(4)),G=w(f[1]),B=w(f[2]);
return "rgb("+(u((w(t[0].slice(4))-R)*n)+R)+","+(u((w(t[1])-G)*n)+G)+","+(u((w(t[2])-B)*n)+B)+")"
}else{
var f=w(c0.slice(1),16),t=w((c1?c1:p<0?"#000000":"#FFFFFF").slice(1),16),R1=f>>16,G1=f>>8&0x00FF,B1=f&0x0000FF;
return "#"+(0x1000000+(u(((t>>16)-R1)*n)+R1)*0x10000+(u(((t>>8&0x00FF)-G1)*n)+G1)*0x100+(u(((t&0x0000FF)-B1)*n)+B1)).toString(16).slice(1)
}
},
// ugly, must be a better way
generatePreviews: function() {
var svgns = "http://www.w3.org/2000/svg"
var allSides = []
for (var surface in this.surfaces) {
var self = this
var promise = new Promise(function(resolve, reject) {
var newSvg = document.createElementNS(svgns, "svg");
newSvg.setAttribute('width', 260)
newSvg.setAttribute('height', 210)
newSvg.setAttribute('preserveAspectRatio', 'xMidYMid meet')
newSvg.setAttribute('viewBox', self.selectedKey.sides.topView.viewbox)
if (surface == 'topView'){
var body = document.createElementNS(svgns, 'rect');
body.setAttribute('class','keySurface')
body.setAttribute('x', self.selectedKey.sides.topView.body.x)
body.setAttribute('y',self.selectedKey.sides.topView.body.y)
body.setAttribute('width',self.selectedKey.sides.topView.body.width)
body.setAttribute('height',self.selectedKey.sides.topView.body.height)
body.setAttribute('rx',self.selectedKey.sides.topView.body.rx)
body.setAttribute('ry',self.selectedKey.sides.topView.body.ry)
body.setAttribute('fill',self.shadeBlend(-0.2,self.selectedColor.color))
var face = document.createElementNS(svgns, 'rect');
face.setAttribute('class','restrictRect')
face.setAttribute('x', self.selectedKey.sides.topView.face.x)
face.setAttribute('y',self.selectedKey.sides.topView.face.y)
face.setAttribute('width',self.selectedKey.sides.topView.face.width)
face.setAttribute('height',self.selectedKey.sides.topView.face.height)
face.setAttribute('rx',self.selectedKey.sides.topView.face.rx)
face.setAttribute('ry',self.selectedKey.sides.topView.face.ry)
face.setAttribute('fill',self.selectedColor.color)
face.setAttribute('stroke',self.shadeBlend(0.065,self.selectedColor.color))
face.setAttribute('stroke-width','1px')
newSvg.appendChild(body)
newSvg.appendChild(face)
} else {
var path = document.createElementNS(svgns, 'path')
path.setAttribute('stroke-width','0.5px')
path.setAttribute('stroke',self.shadeBlend(-0.2,self.selectedColor.color))
path.setAttribute('fill',self.selectedColor.color)
path.setAttribute('class','keySurface')
path.setAttribute('d',self.selectedKey.sides[surface].path)
newSvg.appendChild(path)
}
if (self.surfaces[surface].img.value != '' || self.surfaces[surface].text.value != ''){
var img = new Image()
img.width = self.surfaces[surface].img.width
img.height = self.surfaces[surface].img.height
img.src = self.surfaces[surface].img.value | canvg('canvas', svg_xml, {useCORS: true});
var context = canvas.getContext('2d');
console.log(img.width)
console.log(img.height)
context.drawImage(img, self.surfaces[thisSurface].img.x, self.surfaces[thisSurface].img.y, self.surfaces[thisSurface].img.width,self.surfaces[thisSurface].img.height);
self.surfaces[thisSurface].preview = canvas.toDataURL('image/png')
resolve()
}
//console.log(this.surfaces[surface].img)
//var img = document.getElementsByClassName('moveableImg')[0]
} else {
var svg_xml = (new XMLSerializer()).serializeToString(newSvg);
canvg('canvas', svg_xml, {useCORS: true});
self.surfaces[surface].preview = canvas.toDataURL('image/png')
resolve()
}
})
allSides.push(promise)
}
return Promise.all(allSides)
},
previewImg: function() {
var file = document.querySelector('input[type=file]').files[0]; //sames as here
var label = document.querySelector( '.inputfile' ).nextElementSibling
var target = this.surfaces[this.currentView].img
var reader = new FileReader();
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
var width = document.querySelector('.moveableImg').width
reader.onloadend = function () {
target.value = reader.result
Vue.nextTick(function () {
var image = document.querySelector('#keyImage')
image.onload = function() {
target.width = image.width
target.height = image.height
}
})
}
if (file) {
target.filename = file.name
reader.readAsDataURL(file); //reads the data as a URL
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface,width,width)
} else {
target.value = "";
}
}
}
}).$mount('#app')
function calcSnapTargets(surface, width, height){
var left = surface.left+(width / 2)
var center = surface.left + (surface.width / 2)
var right = surface.left + surface.width - (width / 2)
var top = surface.top + (height / 2)
var middle = surface.top + (surface.height / 2)
var bottom = surface.top + surface.height - (height / 2)
var snapTargets = [
{x:left, y:top},
{x:center,y:top},
{x:right,y:top},
{x:left, y:middle},
{x:center,y:middle},
{x:right,y:middle},
{x:left, y:bottom},
{x:center,y:bottom},
{x:right,y:bottom},
]
return snapTargets
}
function dragMoveListener (event) {
var target = event.target
if (target.className == "moveableImg"){
var node = customKey.surfaces[customKey.currentView].img
} else if (target.className == "moveableText") {
var node = customKey.surfaces[customKey.currentView].text
}
node.x = (parseFloat(node.x) || 0) + event.dx,
node.y = (parseFloat(node.y) || 0) + event.dy;
}
// this is used later in the resizing and gesture demos
window.dragMoveListener = dragMoveListener;
function addInteractive() {
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
interact('.moveableText')
.draggable({
onmove: window.dragMoveListener,
snap: {
targets: [{}],
range:20,
relativePoints: [
//{ x: 0 , y: 0 } // snap relative to the element's top-left,
{ x: 0.5, y: 0.5 } // to the center
// { x: 1 , y: 1 } // and to the bottom-right
]
},
restrict: {
restriction: {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
},
elementRect: { top: 0, left: 0, bottom: 1, right: 1 }
},
})
interact('.moveableImg')
.draggable({
onmove: window.dragMoveListener,
snap: {
targets: [{}],
range:10,
relativePoints: [
//{ x: 0 , y: 0 } // snap relative to the element's top-left,
{ x: 0.5, y: 0.5 } // to the center
// { x: 1 , y: 1 } // and to the bottom-right
]
},
restrict: {
restriction: {
x:surface.left,
y:surface.top,
width:surface.width,
height:surface.height
},
elementRect: { top: 0, left: 0, bottom: 1, right: 1 }
},
})
.resizable({
// resize from all edges and corners
edges: {right: '.resize-corner', bottom: '.resize-corner', left: false, top:false},
// keep the edges inside the parent
preserveAspectRatio:true,
restrictEdges: {
outer:{
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
},
},
// minimum size
restrictSize: {
min: { width: 50, height: 50 }
},
inertia: false,
})
.on('resizemove', function (event) {
var target = event.target
//target.style.height = event.rect.height + 'px'
if (event.rect.height < surface.height) {
target.style.width = event.rect.width + 'px'
} else {
target.style.width = ((target.clientWidth / target.clientHeight) * surface.height) + 'px'
}
customKey.surfaces[customKey.currentView].img.width = target.clientWidth
//ugly hack, can't figure out how to shrink div to image size properly. Tried css tricks eg inline-block, float, table,
customKey.surfaces[customKey.currentView].img.height = target.clientHeight - 5
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface, event.rect.width, event.rect.height)
//placeImage()
});
}
var selectKey = function (e) {
var target = e.target;
if (target.className){
if (target.classList.contains('key-face')) {
customKey.changeKey(target.getAttribute('name'))
}
}
e.stopPropagation()
}
document.body.addEventListener('click', selectKey, false);
document.body.addEventListener('touch', selectKey, false); | var thisSurface = surface
img.onload = function () {
var svg_xml = (new XMLSerializer()).serializeToString(newSvg); | random_line_split |
keycap.js | import Vue from 'vue/dist/vue.js'
import KeyCustomizer from './components/KeyCustomizer.vue'
import KeySelector from './components/KeySelector.vue'
import NavBar from './components/NavBar.vue'
import KeycapProductInfo from './components/KeycapProductInfo'
Vue.config.productionTip = false
var customKey = new Vue({
el:'#app',
components:{KeyCustomizer, KeySelector, NavBar, KeycapProductInfo},
data: {
currentView:'topView',
selectedColor: {name:'white',color:'#ffffff'},
colors: [
{name:'black', color:'#1a1a1a'},
{name:'white',color:'#ffffff'},
{name:'gray', color:'#d0ccc0'},
{name:'dark-gray', color:'#96938e'},
{name:'graphite', color:'#60605b'},
{name:'charcoal', color:'#373534'},
{name:'pink', color:'#fbbbc9'},
{name:'red', color:'#c13828'},
{name:'maroon', color:'#5f3032'},
{name:'blue', color:'#5eb1e7'},
{name:'royal-blue', color:'#0046ad'},
{name:'navy', color:'#002e5f'},
{name:'mint', color:'#8ed7b0'},
{name:'green', color:'#1db63a'},
{name:'olive', color:'#53682b'},
{name:'yellow', color:'#f8d615'},
{name:'orange', color:'#f67f00'},
{name:'graybrown', color:'#766e54'},
{name:'brown', color:'#6f4c23'},
{name:'purple', color:'#ac97d8'},
{name:'aubergine', color:'#43165e'}
],
viewOptions: [
{name:'Top',value:'topView'},
{name:'Front',value:'frontView'},
{name:'Left side',value:'leftView'},
{name:'Right side',value:'rightView'},
{name:'Back',value:'backView'}
],
surfaces: {
topView:{preview:'',img:{x:'20',y:'16', width:'80', height:'80',value:'', filename:''},text:{x:'25',y:'15',value:''}},
frontView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80', value:'', filename:''},text:{x:'50',y:'120',value:''}},
leftView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80',value:'', filename:''},text:{x:'50',y:'120',value:''}},
rightView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80',value:'', filename:''},text:{x:'50',y:'120',value:''}},
backView:{preview:'',img:{x:'50',y:'120',width:'80', height:'80',value:'', filename:''},text:{x:'50',y:'120',value:''}}
},
keys:{},
selectedKey:{
type:"square",
sides:{
"topView":{"body":{"x":0,"y":0,"width":39,"height":39,"rx":2,"ry":2},"face":{"x":5.5,"y":3,"width":28,"height":31,"rx":4,"ry":4},"viewbox":"0 0 39 39"},
"frontView":{"path":"M37 29.5C35.31 19.71 34.25 13.6 33.83 11.15C33.71 10.45 33.21 9.86 32.53 9.63C32.44 9.6 32.98 9.78 32.81 9.73C32.33 9.56 31.82 9.53 31.32 9.64C30.5 9.82 30.48 9.82 30.08 9.91C28.18 10.33 26.25 10.6 24.31 10.73C23.35 10.79 24.23 10.73 23.48 10.78C20.83 10.95 18.17 10.94 15.52 10.73C14.52 10.66 14.01 10.62 12.64 10.51C11.24 10.4 9.84 10.18 8.47 9.85C8.16 9.78 7.85 9.7 7.18 9.54C7 9.5 6.81 9.49 6.62 9.52C6.41 9.55 6.59 9.52 6.51 9.53C5.96 9.61 5.52 10.04 5.42 10.58C4.97 13.11 3.83 19.41 2 29.5L37 29.5Z", "restrict":{"height":17,"width":27,"x":6,"y":12}},
"leftView":{"path":"M2 29.89L37 30L30.75 10L3.77 14.33L2 29.89Z", "restrict":{"height":14,"width":26,"x":6,"y":15}},
"rightView":{"path":"M36.94 29.89L1.94 30L8.18 10L35.16 14.33L36.94 29.89Z", "restrict":{"height":14,"width":26,"x":8,"y":15}},
"backView":{"path":"M37 27.5C35.31 19.68 34.25 14.8 33.83 12.84C33.71 12.27 33.27 11.81 32.71 11.65C32.58 11.62 32.88 11.7 32.69 11.65C32.29 11.54 31.86 11.52 31.45 11.59C30.6 11.74 30.09 11.83 29.6 11.91C29.6 11.91 24.82 12.45 24.82 12.45C23.76 12.51 23.67 12.51 22.79 12.56C22.79 12.56 16.22 12.53 16.22 12.53C15.08 12.46 13.72 12.38 12.27 12.29C12.27 12.29 8.82 11.85 8.82 11.85C8.44 11.77 7.81 11.65 7.13 11.52C6.98 11.49 6.82 11.49 6.67 11.51C6.45 11.53 6.46 11.53 6.35 11.54C5.9 11.6 5.52 11.93 5.42 12.38C4.96 14.4 3.82 19.44 2 27.5L37 27.5Z", "restrict":{"height":14,"width":27,"x":6,"y":13}}
}
},
sides:{},
active:false,
mainStyle:{
pointerEvents:'none',
opacity:0.3
}
},
computed: {
transformText () {
return {transform: 'translate(' + this.surfaces[this.currentView].text.x + 'px,' + this.surfaces[this.currentView].text.y + 'px)',fontSize:'26px'}
},
transformImg () {
return {transform: 'translate(' + this.surfaces[this.currentView].img.x + 'px,' + this.surfaces[this.currentView].img.y + 'px)'}
}
},
mounted: function () {
var self = this;
fetch('https://us-central1-hotsguide-188315.cloudfunctions.net/function-1?board=keyboard-104&sides=true', {
headers: {
"Content-Type": "application/json; charset=utf-8",
}
})
.then(response => response.json())
.then(data => {
self.keys = data.keys;
self.sides = data.sides
})
.catch(error => console.error(error));
// $.ajax({
// url: 'https://us-central1-hotsguide-188315.cloudfunctions.net/function-1', //read comments in search.php for more information usage
// type: 'GET',
// data: {board: 'keyboard-61', sides:true},
// dataType: 'json',
// success: function(json) {
// self.keys = json.keys;
// self.sides = json.sides;
// }
// });
},
methods: {
addToCart: function() {
var self = this
this.generatePreviews().then(function (){
var key = {name:self.selectedKey.name, color:self.selectedColor.color,surfaces:self.surfaces, price:1000}
var cartItems = JSON.parse(sessionStorage.getItem('customKeycaps')) || []
console.log(cartItems)
cartItems.push(key)
sessionStorage.setItem('customKeycaps', JSON.stringify(cartItems))
var cartLink = document.getElementById('cartLink')
var itemsSpan = cartLink.getElementsByTagName('span')[0]
if (itemsSpan) | else {
itemsSpan = document.createElement('span')
itemsSpan.innerHTML = ' ('+cartItems.length+')'
itemsSpan.setAttribute('style','color:blue;')
cartLink.appendChild(itemsSpan)
}
})
},
setColor: function(color) {
document.querySelector('.keyPicker[data-color="'+this.selectedColor.name+'"]').style.removeProperty('border')
document.querySelector('.keyPicker[data-color="'+color.name+'"]').style.border = '2px solid red'
this.selectedColor = color
},
changeView: function(ev) {
console.log(ev)
document.activeElement.blur();
this.currentView = ev.target.selectedOptions[0].value
var fileInput = document.querySelector('input[type=file]')
//var textInput = document.querySelector('input[type=text]')
if (this.surfaces[this.currentView].img.filename == ''){
fileInput.value = ''
if(!/safari/i.test(navigator.userAgent)){
fileInput.type = ''
fileInput.type = 'file'
}
}
//textInput.value = this.surfaces[this.currentView].text.value
this.updateSnapPoints()
},
updateSnapPoints: function () {
Vue.nextTick(function () {
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
var imgWidth = document.querySelector('.moveableImg').width
var textWidth = document.querySelector('.moveableText').style.width
var textHeight = document.querySelector('.moveableText').style.height
interact('.moveableText').options.drag.snap.targets = calcSnapTargets(surface,textWidth,textHeight)
interact('.moveableText').options.drag.restrict.restriction = {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
}
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface,imgWidth,imgWidth)
interact('.moveableImg').options.drag.restrict.restriction = {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
}
})
},
changeKey: function(name) {
var prevKey = this.selectedKey.name
var color = '#ffffff'
var keyType = JSON.parse(JSON.stringify(this.keys[name].type))
if (!this.active) {
this.active = true
this.mainStyle.pointerEvents = 'auto'
this.mainStyle.opacity = 1
Vue.nextTick(function () {
addInteractive()
})
}
if (prevKey){
this.keys[prevKey].body.color = '#0f0f0f'
this.keys[prevKey].face.stroke = '#272727'
this.keys[prevKey].face.color = '#1a1a1a'
}
this.selectedKey.sides = this.sides[keyType]
this.selectedKey.type = keyType
this.selectedKey.name = name
this.keys[name].body.color = this.shadeBlend(-0.25, color)
this.keys[name].face.stroke = this.shadeBlend(0.065, color)
this.keys[name].face.color = color
//this.selectedKey.topView.viewbox = "0 0 " + this.selectedKey.topView.body.width + " " + this.selectedKey.topView.body.height
this.updateSnapPoints()
},
updateText: function(text) {
this.surfaces[this.currentView].text.value = text
surface = document.querySelector('.restrictRect').getBoundingClientRect()
var newWidth = document.querySelector('.moveableText').style.width
var newHeight = document.querySelector('.moveableText').style.height
interact('.moveableText').options.drag.snap.targets = calcSnapTargets(surface, newWidth, newHeight)
},
shadeBlend: function(p,c0,c1) {
var n=p<0?p*-1:p,u=Math.round,w=parseInt;
if(c0.length>7){
var f=c0.split(","),t=(c1?c1:p<0?"rgb(0,0,0)":"rgb(255,255,255)").split(","),R=w(f[0].slice(4)),G=w(f[1]),B=w(f[2]);
return "rgb("+(u((w(t[0].slice(4))-R)*n)+R)+","+(u((w(t[1])-G)*n)+G)+","+(u((w(t[2])-B)*n)+B)+")"
}else{
var f=w(c0.slice(1),16),t=w((c1?c1:p<0?"#000000":"#FFFFFF").slice(1),16),R1=f>>16,G1=f>>8&0x00FF,B1=f&0x0000FF;
return "#"+(0x1000000+(u(((t>>16)-R1)*n)+R1)*0x10000+(u(((t>>8&0x00FF)-G1)*n)+G1)*0x100+(u(((t&0x0000FF)-B1)*n)+B1)).toString(16).slice(1)
}
},
// ugly, must be a better way
generatePreviews: function() {
var svgns = "http://www.w3.org/2000/svg"
var allSides = []
for (var surface in this.surfaces) {
var self = this
var promise = new Promise(function(resolve, reject) {
var newSvg = document.createElementNS(svgns, "svg");
newSvg.setAttribute('width', 260)
newSvg.setAttribute('height', 210)
newSvg.setAttribute('preserveAspectRatio', 'xMidYMid meet')
newSvg.setAttribute('viewBox', self.selectedKey.sides.topView.viewbox)
if (surface == 'topView'){
var body = document.createElementNS(svgns, 'rect');
body.setAttribute('class','keySurface')
body.setAttribute('x', self.selectedKey.sides.topView.body.x)
body.setAttribute('y',self.selectedKey.sides.topView.body.y)
body.setAttribute('width',self.selectedKey.sides.topView.body.width)
body.setAttribute('height',self.selectedKey.sides.topView.body.height)
body.setAttribute('rx',self.selectedKey.sides.topView.body.rx)
body.setAttribute('ry',self.selectedKey.sides.topView.body.ry)
body.setAttribute('fill',self.shadeBlend(-0.2,self.selectedColor.color))
var face = document.createElementNS(svgns, 'rect');
face.setAttribute('class','restrictRect')
face.setAttribute('x', self.selectedKey.sides.topView.face.x)
face.setAttribute('y',self.selectedKey.sides.topView.face.y)
face.setAttribute('width',self.selectedKey.sides.topView.face.width)
face.setAttribute('height',self.selectedKey.sides.topView.face.height)
face.setAttribute('rx',self.selectedKey.sides.topView.face.rx)
face.setAttribute('ry',self.selectedKey.sides.topView.face.ry)
face.setAttribute('fill',self.selectedColor.color)
face.setAttribute('stroke',self.shadeBlend(0.065,self.selectedColor.color))
face.setAttribute('stroke-width','1px')
newSvg.appendChild(body)
newSvg.appendChild(face)
} else {
var path = document.createElementNS(svgns, 'path')
path.setAttribute('stroke-width','0.5px')
path.setAttribute('stroke',self.shadeBlend(-0.2,self.selectedColor.color))
path.setAttribute('fill',self.selectedColor.color)
path.setAttribute('class','keySurface')
path.setAttribute('d',self.selectedKey.sides[surface].path)
newSvg.appendChild(path)
}
if (self.surfaces[surface].img.value != '' || self.surfaces[surface].text.value != ''){
var img = new Image()
img.width = self.surfaces[surface].img.width
img.height = self.surfaces[surface].img.height
img.src = self.surfaces[surface].img.value
var thisSurface = surface
img.onload = function () {
var svg_xml = (new XMLSerializer()).serializeToString(newSvg);
canvg('canvas', svg_xml, {useCORS: true});
var context = canvas.getContext('2d');
console.log(img.width)
console.log(img.height)
context.drawImage(img, self.surfaces[thisSurface].img.x, self.surfaces[thisSurface].img.y, self.surfaces[thisSurface].img.width,self.surfaces[thisSurface].img.height);
self.surfaces[thisSurface].preview = canvas.toDataURL('image/png')
resolve()
}
//console.log(this.surfaces[surface].img)
//var img = document.getElementsByClassName('moveableImg')[0]
} else {
var svg_xml = (new XMLSerializer()).serializeToString(newSvg);
canvg('canvas', svg_xml, {useCORS: true});
self.surfaces[surface].preview = canvas.toDataURL('image/png')
resolve()
}
})
allSides.push(promise)
}
return Promise.all(allSides)
},
previewImg: function() {
var file = document.querySelector('input[type=file]').files[0]; //sames as here
var label = document.querySelector( '.inputfile' ).nextElementSibling
var target = this.surfaces[this.currentView].img
var reader = new FileReader();
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
var width = document.querySelector('.moveableImg').width
reader.onloadend = function () {
target.value = reader.result
Vue.nextTick(function () {
var image = document.querySelector('#keyImage')
image.onload = function() {
target.width = image.width
target.height = image.height
}
})
}
if (file) {
target.filename = file.name
reader.readAsDataURL(file); //reads the data as a URL
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface,width,width)
} else {
target.value = "";
}
}
}
}).$mount('#app')
function calcSnapTargets(surface, width, height){
var left = surface.left+(width / 2)
var center = surface.left + (surface.width / 2)
var right = surface.left + surface.width - (width / 2)
var top = surface.top + (height / 2)
var middle = surface.top + (surface.height / 2)
var bottom = surface.top + surface.height - (height / 2)
var snapTargets = [
{x:left, y:top},
{x:center,y:top},
{x:right,y:top},
{x:left, y:middle},
{x:center,y:middle},
{x:right,y:middle},
{x:left, y:bottom},
{x:center,y:bottom},
{x:right,y:bottom},
]
return snapTargets
}
function dragMoveListener (event) {
var target = event.target
if (target.className == "moveableImg"){
var node = customKey.surfaces[customKey.currentView].img
} else if (target.className == "moveableText") {
var node = customKey.surfaces[customKey.currentView].text
}
node.x = (parseFloat(node.x) || 0) + event.dx,
node.y = (parseFloat(node.y) || 0) + event.dy;
}
// this is used later in the resizing and gesture demos
window.dragMoveListener = dragMoveListener;
function addInteractive() {
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
interact('.moveableText')
.draggable({
onmove: window.dragMoveListener,
snap: {
targets: [{}],
range:20,
relativePoints: [
//{ x: 0 , y: 0 } // snap relative to the element's top-left,
{ x: 0.5, y: 0.5 } // to the center
// { x: 1 , y: 1 } // and to the bottom-right
]
},
restrict: {
restriction: {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
},
elementRect: { top: 0, left: 0, bottom: 1, right: 1 }
},
})
interact('.moveableImg')
.draggable({
onmove: window.dragMoveListener,
snap: {
targets: [{}],
range:10,
relativePoints: [
//{ x: 0 , y: 0 } // snap relative to the element's top-left,
{ x: 0.5, y: 0.5 } // to the center
// { x: 1 , y: 1 } // and to the bottom-right
]
},
restrict: {
restriction: {
x:surface.left,
y:surface.top,
width:surface.width,
height:surface.height
},
elementRect: { top: 0, left: 0, bottom: 1, right: 1 }
},
})
.resizable({
// resize from all edges and corners
edges: {right: '.resize-corner', bottom: '.resize-corner', left: false, top:false},
// keep the edges inside the parent
preserveAspectRatio:true,
restrictEdges: {
outer:{
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
},
},
// minimum size
restrictSize: {
min: { width: 50, height: 50 }
},
inertia: false,
})
.on('resizemove', function (event) {
var target = event.target
//target.style.height = event.rect.height + 'px'
if (event.rect.height < surface.height) {
target.style.width = event.rect.width + 'px'
} else {
target.style.width = ((target.clientWidth / target.clientHeight) * surface.height) + 'px'
}
customKey.surfaces[customKey.currentView].img.width = target.clientWidth
//ugly hack, can't figure out how to shrink div to image size properly. Tried css tricks eg inline-block, float, table,
customKey.surfaces[customKey.currentView].img.height = target.clientHeight - 5
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface, event.rect.width, event.rect.height)
//placeImage()
});
}
var selectKey = function (e) {
var target = e.target;
if (target.className){
if (target.classList.contains('key-face')) {
customKey.changeKey(target.getAttribute('name'))
}
}
e.stopPropagation()
}
document.body.addEventListener('click', selectKey, false);
document.body.addEventListener('touch', selectKey, false); | {
itemsSpan.innerHTML = ' ('+cartItems.length+')'
} | conditional_block |
replicator.go | // Copyright (c) 2016-2018 iQIYI.com. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package pack
import (
"flag"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"strconv"
"strings"
"sync"
"time"
"go.uber.org/zap"
"golang.org/x/net/context"
"google.golang.org/grpc"
"github.com/iqiyi/auklet/common"
"github.com/iqiyi/auklet/common/conf"
"github.com/iqiyi/auklet/common/fs"
"github.com/iqiyi/auklet/common/pickle"
"github.com/iqiyi/auklet/common/ring"
"github.com/iqiyi/auklet/common/srv"
)
type ReplicationStat struct {
rehashed int64
replicated int64
}
func (s *ReplicationStat) reset() {
s.rehashed = 0
s.replicated = 0
}
type Replicator struct {
logger *zap.Logger
stat *ReplicationStat
driveRoot string
concurrency int
interval int
rpcPort int
srvPort int
rings map[int]ring.Ring
hashPrefix string
hashSuffix string
devices map[int][]*ring.Device
whitelist map[string]bool
rpc PackRpcServiceClient
http *http.Client
}
type NodeChain struct {
replicas int
primary []*ring.Device
begin int
handoffs ring.MoreNodes
}
func (c *NodeChain) Next() *ring.Device {
if c.begin < len(c.primary) {
next := c.primary[c.begin]
c.begin++
return next
}
if c.handoffs != nil {
return c.handoffs.Next()
}
return nil
}
func (r *Replicator) parseConf(cnf conf.Config) {
r.srvPort = int(cnf.GetInt("app:object-server", "bind_port", 6000))
r.driveRoot = cnf.GetDefault("app:object-server", "devices", "/srv/node")
r.rpcPort = int(cnf.GetInt("object-replicator", "rpc_port", 60000))
r.concurrency = int(cnf.GetInt("object-replicator", "concurrency", 1))
r.interval = int(cnf.GetInt("object-replicator", "interval", 60*60*24))
}
func (r *Replicator) collectDevices(policyFilter, deviceFilter string) {
pf := map[int]bool{}
for _, p := range strings.Split(policyFilter, ",") {
if p == "" {
continue
}
pi, err := strconv.Atoi(p)
if err != nil {
r.logger.Error("unable to parse policy filter, ignore",
zap.String("policies", policyFilter), zap.Error(err))
continue
}
pf[pi] = true
}
df := map[string]bool{}
for _, d := range strings.Split(deviceFilter, ",") {
if d != "" {
df[d] = true
}
}
r.rings = map[int]ring.Ring{}
for _, p := range conf.LoadPolicies() {
if p.Type != NAME || (len(pf) > 0 && !pf[p.Index]) {
continue
}
var err error
r.rings[p.Index], err = ring.GetRing(
"object", r.hashPrefix, r.hashSuffix, p.Index)
if err != nil {
r.logger.Error("unable to get ring",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
r.devices = map[int][]*ring.Device{}
devs, err := r.rings[p.Index].LocalDevices(r.srvPort)
if err != nil {
r.logger.Error("unable to list local device",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
for _, d := range devs {
if len(df) == 0 || df[d.Device] {
r.devices[p.Index] = append(r.devices[p.Index], d)
}
}
devices := r.devices[p.Index]
rand.Shuffle(len(devices), func(i, j int) {
devices[i], devices[j] = devices[j], devices[i]
})
}
}
func (r *Replicator) listPartitions(policy int, device string) []string {
objPath, _ := PackDevicePaths(device, r.driveRoot, policy)
suffixes, err := fs.ReadDirNames(objPath)
if err != nil {
r.logger.Error("unable to get partition list", zap.Error(err))
return nil
}
var partitions []string
for _, suff := range suffixes {
if (len(r.whitelist) > 0 && !r.whitelist[suff]) || !common.IsDecimal(suff) {
continue
}
partitions = append(partitions, suff)
}
rand.Shuffle(len(partitions), func(i, j int) {
partitions[i], partitions[j] = partitions[j], partitions[i]
})
return partitions
}
func (r *Replicator) getLocalHash(
policy int, device, partition string, rehash []string) (int64, map[string]string) {
// TODO: shall we need to add a timeout?
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
msg := &SuffixHashesMsg{
Device: device,
Policy: uint32(policy),
Partition: partition,
ReclaimAge: ONE_WEEK,
ListDir: rand.Intn(10) == 0,
Recalculate: rehash,
}
reply, err := r.rpc.GetHashes(ctx, msg)
if err != nil {
r.logger.Error("unable to get local hashes",
zap.Int("policy", policy),
zap.String("device", device),
zap.String("partition", partition),
zap.Error(err))
return 0, nil
}
return reply.Hashed, reply.Hashes
}
func (r *Replicator) getRemoteHash(policy int, node *ring.Device,
partition string, suffixes []string) (map[string]string, error) |
func (r *Replicator) replicateLocal(
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
attempts := int(r.rings[policy].ReplicaCount()) - 1
for node := nodes.Next(); node != nil && attempts > 0; node = nodes.Next() {
attempts--
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
if err == ErrRemoteDiskUnmounted {
attempts++
}
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition, suffixes)
r.stat.rehashed += rehashed
suffixes = nil
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
msg := &SyncMsg{
LocalDevice: device.Device,
Host: node.Ip,
Port: int32(node.Port),
Device: node.Device,
Policy: uint32(policy),
Partition: partition,
Suffixes: suffixes,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
reply, err := r.rpc.Sync(ctx, msg)
if err != nil {
r.logger.Error("unable to finish sync job",
zap.Any("args", msg), zap.Error(err))
continue
}
r.getRemoteHash(policy, node, partition, suffixes)
if reply.Success {
r.stat.replicated += int64(len(reply.Candidates))
}
}
}
func (r *Replicator) replicateHandoff(
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
success := true
for node := nodes.Next(); node != nil; node = nodes.Next() {
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
r.logger.Error("unable to get remote hash",
zap.Int("policy", policy),
zap.Any("node", node),
zap.Error(err))
success = false
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition, suffixes)
r.stat.rehashed += rehashed
suffixes = nil
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
msg := &SyncMsg{
LocalDevice: device.Device,
Host: node.Ip,
Port: int32(node.Port),
Device: node.Device,
Policy: uint32(policy),
Partition: partition,
Suffixes: suffixes,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
reply, err := r.rpc.Sync(ctx, msg)
if err != nil {
r.logger.Error("unable to finish sync job",
zap.Any("args", msg), zap.Error(err))
success = false
continue
}
if reply.Success {
r.getRemoteHash(policy, node, partition, suffixes)
r.stat.replicated += int64(len(reply.Candidates))
} else {
success = false
}
}
if success {
arg := &Partition{
Policy: uint32(policy),
Device: device.Device,
Partition: partition,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
r.logger.Info("removing handoff partition",
zap.Int("policy", policy),
zap.String("device", device.Device),
zap.String("partition", partition))
reply, err := r.rpc.DeleteHandoff(ctx, arg)
if err != nil || !reply.Success {
r.logger.Info("unable to remove handoff partition",
zap.Int("policy", policy),
zap.String("device", device.Device),
zap.String("partition", partition),
zap.Error(err))
return
}
r.logger.Info("handoff partition removed",
zap.Int("policy", policy),
zap.String("device", device.Device),
zap.String("partition", partition))
}
}
func (r *Replicator) replicateDevice(
policy int, device *ring.Device, pool chan bool, wg *sync.WaitGroup) {
defer func() {
<-pool
wg.Done()
}()
r.logger.Info("begin to replicate device",
zap.String("device", device.Device), zap.Int("policy", policy))
for _, p := range r.listPartitions(policy, device.Device) {
pi, err := strconv.ParseUint(p, 10, 64)
if err != nil {
r.logger.Error("unable to parse partition as integer",
zap.String("partition", p), zap.Error(err))
continue
}
// GetJobNodes will exclude the host itself
nodes, handoff := r.rings[policy].GetJobNodes(pi, device.Id)
chain := &NodeChain{
replicas: int(r.rings[policy].ReplicaCount()),
primary: nodes,
begin: 0,
}
if handoff {
r.replicateHandoff(policy, device, p, chain)
} else {
chain.handoffs = r.rings[policy].GetMoreNodes(pi)
r.replicateLocal(policy, device, p, chain)
}
}
}
func (r *Replicator) replicate() {
pool := make(chan bool, r.concurrency)
wg := &sync.WaitGroup{}
for p, devs := range r.devices {
for _, d := range devs {
pool <- true
wg.Add(1)
go r.replicateDevice(p, d, pool, wg)
}
}
wg.Wait()
}
func (r *Replicator) Run() {
r.logger.Info("running pack replicator for once")
r.replicate()
r.logger.Info("replicated one pass",
zap.Int64("rehashed", r.stat.rehashed),
zap.Int64("replicated", r.stat.replicated))
}
func (r *Replicator) RunForever() {
r.logger.Info("running pack replicator forever")
for {
r.logger.Info("begin new replication pass")
r.replicate()
r.logger.Info("replication pass done",
zap.Int64("rehashed", r.stat.rehashed),
zap.Int64("replicated", r.stat.replicated))
r.stat.reset()
time.Sleep(time.Second * time.Duration(r.interval))
}
}
func InitReplicator(cnf conf.Config, flags *flag.FlagSet) (srv.Daemon, error) {
logger, err := common.GetLogger(
flags.Lookup("l").Value.(flag.Getter).Get().(string), "pack-replicator")
if err != nil {
return nil, err
}
r := &Replicator{
logger: logger,
stat: &ReplicationStat{},
}
r.parseConf(cnf)
prefix, suffix, err := conf.GetHashPrefixAndSuffix()
if err != nil {
return nil, ErrHashConfNotFound
}
r.hashPrefix = prefix
r.hashSuffix = suffix
policyFilter := flags.Lookup("policies").Value.(flag.Getter).Get().(string)
deviceFilter := flags.Lookup("devices").Value.(flag.Getter).Get().(string)
r.collectDevices(policyFilter, deviceFilter)
pf := flags.Lookup("partitions").Value.(flag.Getter).Get().(string)
r.whitelist = map[string]bool{}
for _, p := range strings.Split(pf, ",") {
if p != "" {
r.whitelist[p] = true
}
}
conn, err := grpc.Dial(
fmt.Sprintf("localhost:%d", r.rpcPort), grpc.WithInsecure())
if err != nil {
logger.Error("unable to dial to rpc server",
zap.Int("port", r.rpcPort), zap.Error(err))
return nil, err
}
r.rpc = NewPackRpcServiceClient(conn)
r.http = &http.Client{Timeout: 5 * time.Minute}
return r, nil
}
| {
url := fmt.Sprintf("http://%s:%d/%s/%s",
node.Ip, node.Port, node.Device, partition)
if len(suffixes) > 0 {
url = fmt.Sprintf("%s/%s", url, strings.Join(suffixes, "-"))
}
req, err := http.NewRequest(common.REPLICATE, url, nil)
if err != nil {
r.logger.Error("unable to create diff request",
zap.String("url", url),
zap.Error(err))
return nil, err
}
req.Header.Set(common.XBackendPolicyIndex, strconv.Itoa(policy))
resp, err := r.http.Do(req)
if err != nil {
r.logger.Error("unable to get remote hash",
zap.String("url", url), zap.Error(err))
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusInsufficientStorage {
return nil, ErrRemoteDiskUnmounted
}
if resp.StatusCode != http.StatusOK {
return nil, ErrRemoteHash
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
r.logger.Error("unable to read replicate response body",
zap.String("url", url), zap.Error(err))
return nil, err
}
v, err := pickle.PickleLoads(body)
if err != nil {
r.logger.Error("unable to deserialize pickle data",
zap.String("url", url), zap.Error(err))
return nil, err
}
pickledHashes, ok := v.(map[interface{}]interface{})
if !ok {
return nil, ErrMalformedData
}
hashes := make(map[string]string)
for suff, hash := range pickledHashes {
if hashes[suff.(string)], ok = hash.(string); !ok {
hashes[suff.(string)] = ""
}
}
return hashes, nil
} | identifier_body |
replicator.go | // Copyright (c) 2016-2018 iQIYI.com. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package pack
import (
"flag"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"strconv"
"strings"
"sync"
"time"
"go.uber.org/zap"
"golang.org/x/net/context"
"google.golang.org/grpc"
"github.com/iqiyi/auklet/common"
"github.com/iqiyi/auklet/common/conf"
"github.com/iqiyi/auklet/common/fs"
"github.com/iqiyi/auklet/common/pickle"
"github.com/iqiyi/auklet/common/ring"
"github.com/iqiyi/auklet/common/srv"
)
type ReplicationStat struct {
rehashed int64
replicated int64
}
func (s *ReplicationStat) reset() {
s.rehashed = 0
s.replicated = 0
}
type Replicator struct {
logger *zap.Logger
stat *ReplicationStat
driveRoot string
concurrency int
interval int
rpcPort int
srvPort int
rings map[int]ring.Ring
hashPrefix string
hashSuffix string
devices map[int][]*ring.Device
whitelist map[string]bool
rpc PackRpcServiceClient
http *http.Client
}
type NodeChain struct {
replicas int
primary []*ring.Device
begin int
handoffs ring.MoreNodes
}
func (c *NodeChain) Next() *ring.Device {
if c.begin < len(c.primary) {
next := c.primary[c.begin]
c.begin++
return next
}
if c.handoffs != nil {
return c.handoffs.Next()
}
return nil
}
func (r *Replicator) parseConf(cnf conf.Config) {
r.srvPort = int(cnf.GetInt("app:object-server", "bind_port", 6000))
r.driveRoot = cnf.GetDefault("app:object-server", "devices", "/srv/node")
r.rpcPort = int(cnf.GetInt("object-replicator", "rpc_port", 60000))
r.concurrency = int(cnf.GetInt("object-replicator", "concurrency", 1))
r.interval = int(cnf.GetInt("object-replicator", "interval", 60*60*24))
}
func (r *Replicator) collectDevices(policyFilter, deviceFilter string) {
pf := map[int]bool{}
for _, p := range strings.Split(policyFilter, ",") {
if p == "" {
continue
}
pi, err := strconv.Atoi(p)
if err != nil {
r.logger.Error("unable to parse policy filter, ignore",
zap.String("policies", policyFilter), zap.Error(err))
continue
}
pf[pi] = true
}
df := map[string]bool{}
for _, d := range strings.Split(deviceFilter, ",") {
if d != "" {
df[d] = true
}
}
r.rings = map[int]ring.Ring{}
for _, p := range conf.LoadPolicies() {
if p.Type != NAME || (len(pf) > 0 && !pf[p.Index]) {
continue
}
var err error
r.rings[p.Index], err = ring.GetRing(
"object", r.hashPrefix, r.hashSuffix, p.Index)
if err != nil {
r.logger.Error("unable to get ring",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
r.devices = map[int][]*ring.Device{}
devs, err := r.rings[p.Index].LocalDevices(r.srvPort)
if err != nil {
r.logger.Error("unable to list local device",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
for _, d := range devs {
if len(df) == 0 || df[d.Device] {
r.devices[p.Index] = append(r.devices[p.Index], d)
}
}
devices := r.devices[p.Index]
rand.Shuffle(len(devices), func(i, j int) {
devices[i], devices[j] = devices[j], devices[i]
})
}
}
func (r *Replicator) listPartitions(policy int, device string) []string {
objPath, _ := PackDevicePaths(device, r.driveRoot, policy)
suffixes, err := fs.ReadDirNames(objPath)
if err != nil {
r.logger.Error("unable to get partition list", zap.Error(err))
return nil
}
var partitions []string
for _, suff := range suffixes {
if (len(r.whitelist) > 0 && !r.whitelist[suff]) || !common.IsDecimal(suff) {
continue
}
partitions = append(partitions, suff)
}
rand.Shuffle(len(partitions), func(i, j int) {
partitions[i], partitions[j] = partitions[j], partitions[i]
})
return partitions
}
func (r *Replicator) getLocalHash(
policy int, device, partition string, rehash []string) (int64, map[string]string) {
// TODO: shall we need to add a timeout?
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
msg := &SuffixHashesMsg{
Device: device,
Policy: uint32(policy),
Partition: partition,
ReclaimAge: ONE_WEEK,
ListDir: rand.Intn(10) == 0,
Recalculate: rehash,
}
reply, err := r.rpc.GetHashes(ctx, msg)
if err != nil {
r.logger.Error("unable to get local hashes",
zap.Int("policy", policy),
zap.String("device", device),
zap.String("partition", partition),
zap.Error(err))
return 0, nil
}
return reply.Hashed, reply.Hashes
}
func (r *Replicator) getRemoteHash(policy int, node *ring.Device,
partition string, suffixes []string) (map[string]string, error) {
url := fmt.Sprintf("http://%s:%d/%s/%s",
node.Ip, node.Port, node.Device, partition)
if len(suffixes) > 0 {
url = fmt.Sprintf("%s/%s", url, strings.Join(suffixes, "-"))
}
req, err := http.NewRequest(common.REPLICATE, url, nil)
if err != nil {
r.logger.Error("unable to create diff request",
zap.String("url", url),
zap.Error(err))
return nil, err
}
req.Header.Set(common.XBackendPolicyIndex, strconv.Itoa(policy))
resp, err := r.http.Do(req)
if err != nil {
r.logger.Error("unable to get remote hash",
zap.String("url", url), zap.Error(err))
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusInsufficientStorage {
return nil, ErrRemoteDiskUnmounted
}
if resp.StatusCode != http.StatusOK {
return nil, ErrRemoteHash
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
r.logger.Error("unable to read replicate response body",
zap.String("url", url), zap.Error(err))
return nil, err
}
v, err := pickle.PickleLoads(body)
if err != nil {
r.logger.Error("unable to deserialize pickle data",
zap.String("url", url), zap.Error(err))
return nil, err
}
pickledHashes, ok := v.(map[interface{}]interface{})
if !ok {
return nil, ErrMalformedData
}
hashes := make(map[string]string)
for suff, hash := range pickledHashes {
if hashes[suff.(string)], ok = hash.(string); !ok {
hashes[suff.(string)] = ""
}
}
return hashes, nil
}
func (r *Replicator) | (
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
attempts := int(r.rings[policy].ReplicaCount()) - 1
for node := nodes.Next(); node != nil && attempts > 0; node = nodes.Next() {
attempts--
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
if err == ErrRemoteDiskUnmounted {
attempts++
}
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition, suffixes)
r.stat.rehashed += rehashed
suffixes = nil
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
msg := &SyncMsg{
LocalDevice: device.Device,
Host: node.Ip,
Port: int32(node.Port),
Device: node.Device,
Policy: uint32(policy),
Partition: partition,
Suffixes: suffixes,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
reply, err := r.rpc.Sync(ctx, msg)
if err != nil {
r.logger.Error("unable to finish sync job",
zap.Any("args", msg), zap.Error(err))
continue
}
r.getRemoteHash(policy, node, partition, suffixes)
if reply.Success {
r.stat.replicated += int64(len(reply.Candidates))
}
}
}
func (r *Replicator) replicateHandoff(
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
success := true
for node := nodes.Next(); node != nil; node = nodes.Next() {
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
r.logger.Error("unable to get remote hash",
zap.Int("policy", policy),
zap.Any("node", node),
zap.Error(err))
success = false
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition, suffixes)
r.stat.rehashed += rehashed
suffixes = nil
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
msg := &SyncMsg{
LocalDevice: device.Device,
Host: node.Ip,
Port: int32(node.Port),
Device: node.Device,
Policy: uint32(policy),
Partition: partition,
Suffixes: suffixes,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
reply, err := r.rpc.Sync(ctx, msg)
if err != nil {
r.logger.Error("unable to finish sync job",
zap.Any("args", msg), zap.Error(err))
success = false
continue
}
if reply.Success {
r.getRemoteHash(policy, node, partition, suffixes)
r.stat.replicated += int64(len(reply.Candidates))
} else {
success = false
}
}
if success {
arg := &Partition{
Policy: uint32(policy),
Device: device.Device,
Partition: partition,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
r.logger.Info("removing handoff partition",
zap.Int("policy", policy),
zap.String("device", device.Device),
zap.String("partition", partition))
reply, err := r.rpc.DeleteHandoff(ctx, arg)
if err != nil || !reply.Success {
r.logger.Info("unable to remove handoff partition",
zap.Int("policy", policy),
zap.String("device", device.Device),
zap.String("partition", partition),
zap.Error(err))
return
}
r.logger.Info("handoff partition removed",
zap.Int("policy", policy),
zap.String("device", device.Device),
zap.String("partition", partition))
}
}
func (r *Replicator) replicateDevice(
policy int, device *ring.Device, pool chan bool, wg *sync.WaitGroup) {
defer func() {
<-pool
wg.Done()
}()
r.logger.Info("begin to replicate device",
zap.String("device", device.Device), zap.Int("policy", policy))
for _, p := range r.listPartitions(policy, device.Device) {
pi, err := strconv.ParseUint(p, 10, 64)
if err != nil {
r.logger.Error("unable to parse partition as integer",
zap.String("partition", p), zap.Error(err))
continue
}
// GetJobNodes will exclude the host itself
nodes, handoff := r.rings[policy].GetJobNodes(pi, device.Id)
chain := &NodeChain{
replicas: int(r.rings[policy].ReplicaCount()),
primary: nodes,
begin: 0,
}
if handoff {
r.replicateHandoff(policy, device, p, chain)
} else {
chain.handoffs = r.rings[policy].GetMoreNodes(pi)
r.replicateLocal(policy, device, p, chain)
}
}
}
func (r *Replicator) replicate() {
pool := make(chan bool, r.concurrency)
wg := &sync.WaitGroup{}
for p, devs := range r.devices {
for _, d := range devs {
pool <- true
wg.Add(1)
go r.replicateDevice(p, d, pool, wg)
}
}
wg.Wait()
}
func (r *Replicator) Run() {
r.logger.Info("running pack replicator for once")
r.replicate()
r.logger.Info("replicated one pass",
zap.Int64("rehashed", r.stat.rehashed),
zap.Int64("replicated", r.stat.replicated))
}
func (r *Replicator) RunForever() {
r.logger.Info("running pack replicator forever")
for {
r.logger.Info("begin new replication pass")
r.replicate()
r.logger.Info("replication pass done",
zap.Int64("rehashed", r.stat.rehashed),
zap.Int64("replicated", r.stat.replicated))
r.stat.reset()
time.Sleep(time.Second * time.Duration(r.interval))
}
}
func InitReplicator(cnf conf.Config, flags *flag.FlagSet) (srv.Daemon, error) {
logger, err := common.GetLogger(
flags.Lookup("l").Value.(flag.Getter).Get().(string), "pack-replicator")
if err != nil {
return nil, err
}
r := &Replicator{
logger: logger,
stat: &ReplicationStat{},
}
r.parseConf(cnf)
prefix, suffix, err := conf.GetHashPrefixAndSuffix()
if err != nil {
return nil, ErrHashConfNotFound
}
r.hashPrefix = prefix
r.hashSuffix = suffix
policyFilter := flags.Lookup("policies").Value.(flag.Getter).Get().(string)
deviceFilter := flags.Lookup("devices").Value.(flag.Getter).Get().(string)
r.collectDevices(policyFilter, deviceFilter)
pf := flags.Lookup("partitions").Value.(flag.Getter).Get().(string)
r.whitelist = map[string]bool{}
for _, p := range strings.Split(pf, ",") {
if p != "" {
r.whitelist[p] = true
}
}
conn, err := grpc.Dial(
fmt.Sprintf("localhost:%d", r.rpcPort), grpc.WithInsecure())
if err != nil {
logger.Error("unable to dial to rpc server",
zap.Int("port", r.rpcPort), zap.Error(err))
return nil, err
}
r.rpc = NewPackRpcServiceClient(conn)
r.http = &http.Client{Timeout: 5 * time.Minute}
return r, nil
}
| replicateLocal | identifier_name |
replicator.go | // Copyright (c) 2016-2018 iQIYI.com. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package pack
import (
"flag"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"strconv"
"strings"
"sync"
"time"
"go.uber.org/zap"
"golang.org/x/net/context"
"google.golang.org/grpc"
"github.com/iqiyi/auklet/common"
"github.com/iqiyi/auklet/common/conf"
"github.com/iqiyi/auklet/common/fs"
"github.com/iqiyi/auklet/common/pickle"
"github.com/iqiyi/auklet/common/ring"
"github.com/iqiyi/auklet/common/srv"
)
type ReplicationStat struct {
rehashed int64
replicated int64
}
func (s *ReplicationStat) reset() {
s.rehashed = 0
s.replicated = 0
}
type Replicator struct {
logger *zap.Logger
stat *ReplicationStat
driveRoot string
concurrency int
interval int
rpcPort int
srvPort int
rings map[int]ring.Ring
hashPrefix string
hashSuffix string
devices map[int][]*ring.Device
whitelist map[string]bool
rpc PackRpcServiceClient
http *http.Client
}
type NodeChain struct {
replicas int
primary []*ring.Device
begin int
handoffs ring.MoreNodes
}
func (c *NodeChain) Next() *ring.Device {
if c.begin < len(c.primary) {
next := c.primary[c.begin]
c.begin++
return next
}
if c.handoffs != nil {
return c.handoffs.Next()
}
return nil
}
func (r *Replicator) parseConf(cnf conf.Config) {
r.srvPort = int(cnf.GetInt("app:object-server", "bind_port", 6000))
r.driveRoot = cnf.GetDefault("app:object-server", "devices", "/srv/node")
r.rpcPort = int(cnf.GetInt("object-replicator", "rpc_port", 60000))
r.concurrency = int(cnf.GetInt("object-replicator", "concurrency", 1))
r.interval = int(cnf.GetInt("object-replicator", "interval", 60*60*24))
}
func (r *Replicator) collectDevices(policyFilter, deviceFilter string) {
pf := map[int]bool{}
for _, p := range strings.Split(policyFilter, ",") {
if p == "" {
continue
}
pi, err := strconv.Atoi(p)
if err != nil {
r.logger.Error("unable to parse policy filter, ignore",
zap.String("policies", policyFilter), zap.Error(err))
continue
}
pf[pi] = true
}
df := map[string]bool{}
for _, d := range strings.Split(deviceFilter, ",") {
if d != "" {
df[d] = true
}
}
r.rings = map[int]ring.Ring{}
for _, p := range conf.LoadPolicies() {
if p.Type != NAME || (len(pf) > 0 && !pf[p.Index]) {
continue
}
var err error
r.rings[p.Index], err = ring.GetRing(
"object", r.hashPrefix, r.hashSuffix, p.Index)
if err != nil {
r.logger.Error("unable to get ring",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
r.devices = map[int][]*ring.Device{}
devs, err := r.rings[p.Index].LocalDevices(r.srvPort)
if err != nil {
r.logger.Error("unable to list local device",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
for _, d := range devs {
if len(df) == 0 || df[d.Device] {
r.devices[p.Index] = append(r.devices[p.Index], d)
}
}
devices := r.devices[p.Index]
rand.Shuffle(len(devices), func(i, j int) {
devices[i], devices[j] = devices[j], devices[i]
})
}
}
func (r *Replicator) listPartitions(policy int, device string) []string {
objPath, _ := PackDevicePaths(device, r.driveRoot, policy)
suffixes, err := fs.ReadDirNames(objPath)
if err != nil {
r.logger.Error("unable to get partition list", zap.Error(err))
return nil
}
var partitions []string
for _, suff := range suffixes {
if (len(r.whitelist) > 0 && !r.whitelist[suff]) || !common.IsDecimal(suff) {
continue
}
partitions = append(partitions, suff)
}
rand.Shuffle(len(partitions), func(i, j int) {
partitions[i], partitions[j] = partitions[j], partitions[i]
})
return partitions
}
func (r *Replicator) getLocalHash(
policy int, device, partition string, rehash []string) (int64, map[string]string) {
// TODO: shall we need to add a timeout?
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
msg := &SuffixHashesMsg{
Device: device,
Policy: uint32(policy),
Partition: partition,
ReclaimAge: ONE_WEEK,
ListDir: rand.Intn(10) == 0,
Recalculate: rehash,
}
reply, err := r.rpc.GetHashes(ctx, msg)
if err != nil {
r.logger.Error("unable to get local hashes",
zap.Int("policy", policy),
zap.String("device", device),
zap.String("partition", partition),
zap.Error(err))
return 0, nil
}
return reply.Hashed, reply.Hashes
}
func (r *Replicator) getRemoteHash(policy int, node *ring.Device,
partition string, suffixes []string) (map[string]string, error) {
url := fmt.Sprintf("http://%s:%d/%s/%s",
node.Ip, node.Port, node.Device, partition)
if len(suffixes) > 0 {
url = fmt.Sprintf("%s/%s", url, strings.Join(suffixes, "-"))
}
req, err := http.NewRequest(common.REPLICATE, url, nil)
if err != nil {
r.logger.Error("unable to create diff request",
zap.String("url", url),
zap.Error(err))
return nil, err
}
req.Header.Set(common.XBackendPolicyIndex, strconv.Itoa(policy))
resp, err := r.http.Do(req)
if err != nil {
r.logger.Error("unable to get remote hash",
zap.String("url", url), zap.Error(err))
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusInsufficientStorage {
return nil, ErrRemoteDiskUnmounted
}
if resp.StatusCode != http.StatusOK {
return nil, ErrRemoteHash
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
r.logger.Error("unable to read replicate response body",
zap.String("url", url), zap.Error(err))
return nil, err
}
v, err := pickle.PickleLoads(body)
if err != nil {
r.logger.Error("unable to deserialize pickle data",
zap.String("url", url), zap.Error(err))
return nil, err
}
pickledHashes, ok := v.(map[interface{}]interface{})
if !ok {
return nil, ErrMalformedData
}
hashes := make(map[string]string)
for suff, hash := range pickledHashes {
if hashes[suff.(string)], ok = hash.(string); !ok {
hashes[suff.(string)] = ""
}
}
return hashes, nil
}
func (r *Replicator) replicateLocal(
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
attempts := int(r.rings[policy].ReplicaCount()) - 1
for node := nodes.Next(); node != nil && attempts > 0; node = nodes.Next() {
attempts--
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
if err == ErrRemoteDiskUnmounted {
attempts++
}
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition, suffixes)
r.stat.rehashed += rehashed
suffixes = nil
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
msg := &SyncMsg{
LocalDevice: device.Device,
Host: node.Ip,
Port: int32(node.Port),
Device: node.Device,
Policy: uint32(policy),
Partition: partition,
Suffixes: suffixes,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
reply, err := r.rpc.Sync(ctx, msg)
if err != nil {
r.logger.Error("unable to finish sync job",
zap.Any("args", msg), zap.Error(err))
continue
}
r.getRemoteHash(policy, node, partition, suffixes)
if reply.Success {
r.stat.replicated += int64(len(reply.Candidates))
}
}
}
func (r *Replicator) replicateHandoff(
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
success := true
for node := nodes.Next(); node != nil; node = nodes.Next() {
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
r.logger.Error("unable to get remote hash",
zap.Int("policy", policy),
zap.Any("node", node),
zap.Error(err))
success = false
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition, suffixes)
r.stat.rehashed += rehashed
suffixes = nil
for s, h := range localHash { | msg := &SyncMsg{
LocalDevice: device.Device,
Host: node.Ip,
Port: int32(node.Port),
Device: node.Device,
Policy: uint32(policy),
Partition: partition,
Suffixes: suffixes,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
reply, err := r.rpc.Sync(ctx, msg)
if err != nil {
r.logger.Error("unable to finish sync job",
zap.Any("args", msg), zap.Error(err))
success = false
continue
}
if reply.Success {
r.getRemoteHash(policy, node, partition, suffixes)
r.stat.replicated += int64(len(reply.Candidates))
} else {
success = false
}
}
if success {
arg := &Partition{
Policy: uint32(policy),
Device: device.Device,
Partition: partition,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
r.logger.Info("removing handoff partition",
zap.Int("policy", policy),
zap.String("device", device.Device),
zap.String("partition", partition))
reply, err := r.rpc.DeleteHandoff(ctx, arg)
if err != nil || !reply.Success {
r.logger.Info("unable to remove handoff partition",
zap.Int("policy", policy),
zap.String("device", device.Device),
zap.String("partition", partition),
zap.Error(err))
return
}
r.logger.Info("handoff partition removed",
zap.Int("policy", policy),
zap.String("device", device.Device),
zap.String("partition", partition))
}
}
func (r *Replicator) replicateDevice(
policy int, device *ring.Device, pool chan bool, wg *sync.WaitGroup) {
defer func() {
<-pool
wg.Done()
}()
r.logger.Info("begin to replicate device",
zap.String("device", device.Device), zap.Int("policy", policy))
for _, p := range r.listPartitions(policy, device.Device) {
pi, err := strconv.ParseUint(p, 10, 64)
if err != nil {
r.logger.Error("unable to parse partition as integer",
zap.String("partition", p), zap.Error(err))
continue
}
// GetJobNodes will exclude the host itself
nodes, handoff := r.rings[policy].GetJobNodes(pi, device.Id)
chain := &NodeChain{
replicas: int(r.rings[policy].ReplicaCount()),
primary: nodes,
begin: 0,
}
if handoff {
r.replicateHandoff(policy, device, p, chain)
} else {
chain.handoffs = r.rings[policy].GetMoreNodes(pi)
r.replicateLocal(policy, device, p, chain)
}
}
}
func (r *Replicator) replicate() {
pool := make(chan bool, r.concurrency)
wg := &sync.WaitGroup{}
for p, devs := range r.devices {
for _, d := range devs {
pool <- true
wg.Add(1)
go r.replicateDevice(p, d, pool, wg)
}
}
wg.Wait()
}
func (r *Replicator) Run() {
r.logger.Info("running pack replicator for once")
r.replicate()
r.logger.Info("replicated one pass",
zap.Int64("rehashed", r.stat.rehashed),
zap.Int64("replicated", r.stat.replicated))
}
func (r *Replicator) RunForever() {
r.logger.Info("running pack replicator forever")
for {
r.logger.Info("begin new replication pass")
r.replicate()
r.logger.Info("replication pass done",
zap.Int64("rehashed", r.stat.rehashed),
zap.Int64("replicated", r.stat.replicated))
r.stat.reset()
time.Sleep(time.Second * time.Duration(r.interval))
}
}
func InitReplicator(cnf conf.Config, flags *flag.FlagSet) (srv.Daemon, error) {
logger, err := common.GetLogger(
flags.Lookup("l").Value.(flag.Getter).Get().(string), "pack-replicator")
if err != nil {
return nil, err
}
r := &Replicator{
logger: logger,
stat: &ReplicationStat{},
}
r.parseConf(cnf)
prefix, suffix, err := conf.GetHashPrefixAndSuffix()
if err != nil {
return nil, ErrHashConfNotFound
}
r.hashPrefix = prefix
r.hashSuffix = suffix
policyFilter := flags.Lookup("policies").Value.(flag.Getter).Get().(string)
deviceFilter := flags.Lookup("devices").Value.(flag.Getter).Get().(string)
r.collectDevices(policyFilter, deviceFilter)
pf := flags.Lookup("partitions").Value.(flag.Getter).Get().(string)
r.whitelist = map[string]bool{}
for _, p := range strings.Split(pf, ",") {
if p != "" {
r.whitelist[p] = true
}
}
conn, err := grpc.Dial(
fmt.Sprintf("localhost:%d", r.rpcPort), grpc.WithInsecure())
if err != nil {
logger.Error("unable to dial to rpc server",
zap.Int("port", r.rpcPort), zap.Error(err))
return nil, err
}
r.rpc = NewPackRpcServiceClient(conn)
r.http = &http.Client{Timeout: 5 * time.Minute}
return r, nil
} | if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
| random_line_split |
replicator.go | // Copyright (c) 2016-2018 iQIYI.com. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package pack
import (
"flag"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"strconv"
"strings"
"sync"
"time"
"go.uber.org/zap"
"golang.org/x/net/context"
"google.golang.org/grpc"
"github.com/iqiyi/auklet/common"
"github.com/iqiyi/auklet/common/conf"
"github.com/iqiyi/auklet/common/fs"
"github.com/iqiyi/auklet/common/pickle"
"github.com/iqiyi/auklet/common/ring"
"github.com/iqiyi/auklet/common/srv"
)
type ReplicationStat struct {
rehashed int64
replicated int64
}
func (s *ReplicationStat) reset() {
s.rehashed = 0
s.replicated = 0
}
type Replicator struct {
logger *zap.Logger
stat *ReplicationStat
driveRoot string
concurrency int
interval int
rpcPort int
srvPort int
rings map[int]ring.Ring
hashPrefix string
hashSuffix string
devices map[int][]*ring.Device
whitelist map[string]bool
rpc PackRpcServiceClient
http *http.Client
}
type NodeChain struct {
replicas int
primary []*ring.Device
begin int
handoffs ring.MoreNodes
}
func (c *NodeChain) Next() *ring.Device {
if c.begin < len(c.primary) {
next := c.primary[c.begin]
c.begin++
return next
}
if c.handoffs != nil {
return c.handoffs.Next()
}
return nil
}
func (r *Replicator) parseConf(cnf conf.Config) {
r.srvPort = int(cnf.GetInt("app:object-server", "bind_port", 6000))
r.driveRoot = cnf.GetDefault("app:object-server", "devices", "/srv/node")
r.rpcPort = int(cnf.GetInt("object-replicator", "rpc_port", 60000))
r.concurrency = int(cnf.GetInt("object-replicator", "concurrency", 1))
r.interval = int(cnf.GetInt("object-replicator", "interval", 60*60*24))
}
func (r *Replicator) collectDevices(policyFilter, deviceFilter string) {
pf := map[int]bool{}
for _, p := range strings.Split(policyFilter, ",") {
if p == "" {
continue
}
pi, err := strconv.Atoi(p)
if err != nil {
r.logger.Error("unable to parse policy filter, ignore",
zap.String("policies", policyFilter), zap.Error(err))
continue
}
pf[pi] = true
}
df := map[string]bool{}
for _, d := range strings.Split(deviceFilter, ",") {
if d != "" {
df[d] = true
}
}
r.rings = map[int]ring.Ring{}
for _, p := range conf.LoadPolicies() {
if p.Type != NAME || (len(pf) > 0 && !pf[p.Index]) {
continue
}
var err error
r.rings[p.Index], err = ring.GetRing(
"object", r.hashPrefix, r.hashSuffix, p.Index)
if err != nil {
r.logger.Error("unable to get ring",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
r.devices = map[int][]*ring.Device{}
devs, err := r.rings[p.Index].LocalDevices(r.srvPort)
if err != nil {
r.logger.Error("unable to list local device",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
for _, d := range devs {
if len(df) == 0 || df[d.Device] {
r.devices[p.Index] = append(r.devices[p.Index], d)
}
}
devices := r.devices[p.Index]
rand.Shuffle(len(devices), func(i, j int) {
devices[i], devices[j] = devices[j], devices[i]
})
}
}
func (r *Replicator) listPartitions(policy int, device string) []string {
objPath, _ := PackDevicePaths(device, r.driveRoot, policy)
suffixes, err := fs.ReadDirNames(objPath)
if err != nil {
r.logger.Error("unable to get partition list", zap.Error(err))
return nil
}
var partitions []string
for _, suff := range suffixes {
if (len(r.whitelist) > 0 && !r.whitelist[suff]) || !common.IsDecimal(suff) {
continue
}
partitions = append(partitions, suff)
}
rand.Shuffle(len(partitions), func(i, j int) {
partitions[i], partitions[j] = partitions[j], partitions[i]
})
return partitions
}
func (r *Replicator) getLocalHash(
policy int, device, partition string, rehash []string) (int64, map[string]string) {
// TODO: shall we need to add a timeout?
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
msg := &SuffixHashesMsg{
Device: device,
Policy: uint32(policy),
Partition: partition,
ReclaimAge: ONE_WEEK,
ListDir: rand.Intn(10) == 0,
Recalculate: rehash,
}
reply, err := r.rpc.GetHashes(ctx, msg)
if err != nil {
r.logger.Error("unable to get local hashes",
zap.Int("policy", policy),
zap.String("device", device),
zap.String("partition", partition),
zap.Error(err))
return 0, nil
}
return reply.Hashed, reply.Hashes
}
func (r *Replicator) getRemoteHash(policy int, node *ring.Device,
partition string, suffixes []string) (map[string]string, error) {
url := fmt.Sprintf("http://%s:%d/%s/%s",
node.Ip, node.Port, node.Device, partition)
if len(suffixes) > 0 {
url = fmt.Sprintf("%s/%s", url, strings.Join(suffixes, "-"))
}
req, err := http.NewRequest(common.REPLICATE, url, nil)
if err != nil {
r.logger.Error("unable to create diff request",
zap.String("url", url),
zap.Error(err))
return nil, err
}
req.Header.Set(common.XBackendPolicyIndex, strconv.Itoa(policy))
resp, err := r.http.Do(req)
if err != nil {
r.logger.Error("unable to get remote hash",
zap.String("url", url), zap.Error(err))
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusInsufficientStorage {
return nil, ErrRemoteDiskUnmounted
}
if resp.StatusCode != http.StatusOK {
return nil, ErrRemoteHash
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
r.logger.Error("unable to read replicate response body",
zap.String("url", url), zap.Error(err))
return nil, err
}
v, err := pickle.PickleLoads(body)
if err != nil {
r.logger.Error("unable to deserialize pickle data",
zap.String("url", url), zap.Error(err))
return nil, err
}
pickledHashes, ok := v.(map[interface{}]interface{})
if !ok {
return nil, ErrMalformedData
}
hashes := make(map[string]string)
for suff, hash := range pickledHashes {
if hashes[suff.(string)], ok = hash.(string); !ok |
}
return hashes, nil
}
func (r *Replicator) replicateLocal(
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
attempts := int(r.rings[policy].ReplicaCount()) - 1
for node := nodes.Next(); node != nil && attempts > 0; node = nodes.Next() {
attempts--
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
if err == ErrRemoteDiskUnmounted {
attempts++
}
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition, suffixes)
r.stat.rehashed += rehashed
suffixes = nil
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
msg := &SyncMsg{
LocalDevice: device.Device,
Host: node.Ip,
Port: int32(node.Port),
Device: node.Device,
Policy: uint32(policy),
Partition: partition,
Suffixes: suffixes,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
reply, err := r.rpc.Sync(ctx, msg)
if err != nil {
r.logger.Error("unable to finish sync job",
zap.Any("args", msg), zap.Error(err))
continue
}
r.getRemoteHash(policy, node, partition, suffixes)
if reply.Success {
r.stat.replicated += int64(len(reply.Candidates))
}
}
}
func (r *Replicator) replicateHandoff(
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
success := true
for node := nodes.Next(); node != nil; node = nodes.Next() {
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
r.logger.Error("unable to get remote hash",
zap.Int("policy", policy),
zap.Any("node", node),
zap.Error(err))
success = false
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition, suffixes)
r.stat.rehashed += rehashed
suffixes = nil
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
msg := &SyncMsg{
LocalDevice: device.Device,
Host: node.Ip,
Port: int32(node.Port),
Device: node.Device,
Policy: uint32(policy),
Partition: partition,
Suffixes: suffixes,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
reply, err := r.rpc.Sync(ctx, msg)
if err != nil {
r.logger.Error("unable to finish sync job",
zap.Any("args", msg), zap.Error(err))
success = false
continue
}
if reply.Success {
r.getRemoteHash(policy, node, partition, suffixes)
r.stat.replicated += int64(len(reply.Candidates))
} else {
success = false
}
}
if success {
arg := &Partition{
Policy: uint32(policy),
Device: device.Device,
Partition: partition,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
r.logger.Info("removing handoff partition",
zap.Int("policy", policy),
zap.String("device", device.Device),
zap.String("partition", partition))
reply, err := r.rpc.DeleteHandoff(ctx, arg)
if err != nil || !reply.Success {
r.logger.Info("unable to remove handoff partition",
zap.Int("policy", policy),
zap.String("device", device.Device),
zap.String("partition", partition),
zap.Error(err))
return
}
r.logger.Info("handoff partition removed",
zap.Int("policy", policy),
zap.String("device", device.Device),
zap.String("partition", partition))
}
}
func (r *Replicator) replicateDevice(
policy int, device *ring.Device, pool chan bool, wg *sync.WaitGroup) {
defer func() {
<-pool
wg.Done()
}()
r.logger.Info("begin to replicate device",
zap.String("device", device.Device), zap.Int("policy", policy))
for _, p := range r.listPartitions(policy, device.Device) {
pi, err := strconv.ParseUint(p, 10, 64)
if err != nil {
r.logger.Error("unable to parse partition as integer",
zap.String("partition", p), zap.Error(err))
continue
}
// GetJobNodes will exclude the host itself
nodes, handoff := r.rings[policy].GetJobNodes(pi, device.Id)
chain := &NodeChain{
replicas: int(r.rings[policy].ReplicaCount()),
primary: nodes,
begin: 0,
}
if handoff {
r.replicateHandoff(policy, device, p, chain)
} else {
chain.handoffs = r.rings[policy].GetMoreNodes(pi)
r.replicateLocal(policy, device, p, chain)
}
}
}
func (r *Replicator) replicate() {
pool := make(chan bool, r.concurrency)
wg := &sync.WaitGroup{}
for p, devs := range r.devices {
for _, d := range devs {
pool <- true
wg.Add(1)
go r.replicateDevice(p, d, pool, wg)
}
}
wg.Wait()
}
func (r *Replicator) Run() {
r.logger.Info("running pack replicator for once")
r.replicate()
r.logger.Info("replicated one pass",
zap.Int64("rehashed", r.stat.rehashed),
zap.Int64("replicated", r.stat.replicated))
}
func (r *Replicator) RunForever() {
r.logger.Info("running pack replicator forever")
for {
r.logger.Info("begin new replication pass")
r.replicate()
r.logger.Info("replication pass done",
zap.Int64("rehashed", r.stat.rehashed),
zap.Int64("replicated", r.stat.replicated))
r.stat.reset()
time.Sleep(time.Second * time.Duration(r.interval))
}
}
func InitReplicator(cnf conf.Config, flags *flag.FlagSet) (srv.Daemon, error) {
logger, err := common.GetLogger(
flags.Lookup("l").Value.(flag.Getter).Get().(string), "pack-replicator")
if err != nil {
return nil, err
}
r := &Replicator{
logger: logger,
stat: &ReplicationStat{},
}
r.parseConf(cnf)
prefix, suffix, err := conf.GetHashPrefixAndSuffix()
if err != nil {
return nil, ErrHashConfNotFound
}
r.hashPrefix = prefix
r.hashSuffix = suffix
policyFilter := flags.Lookup("policies").Value.(flag.Getter).Get().(string)
deviceFilter := flags.Lookup("devices").Value.(flag.Getter).Get().(string)
r.collectDevices(policyFilter, deviceFilter)
pf := flags.Lookup("partitions").Value.(flag.Getter).Get().(string)
r.whitelist = map[string]bool{}
for _, p := range strings.Split(pf, ",") {
if p != "" {
r.whitelist[p] = true
}
}
conn, err := grpc.Dial(
fmt.Sprintf("localhost:%d", r.rpcPort), grpc.WithInsecure())
if err != nil {
logger.Error("unable to dial to rpc server",
zap.Int("port", r.rpcPort), zap.Error(err))
return nil, err
}
r.rpc = NewPackRpcServiceClient(conn)
r.http = &http.Client{Timeout: 5 * time.Minute}
return r, nil
}
| {
hashes[suff.(string)] = ""
} | conditional_block |
fleet.go | package cbcluster
import (
"bytes"
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"path"
"path/filepath"
"strings"
"time"
"github.com/coreos/fleet/schema"
"github.com/coreos/go-systemd/unit"
"github.com/tleyden/go-etcd/etcd"
)
const (
UNIT_NAME_NODE = "couchbase_node"
UNIT_NAME_SIDEKICK = "couchbase_sidekick"
)
var (
FLEET_API_ENDPOINT = "http://localhost:49153/fleet/v1"
)
type CouchbaseFleet struct {
etcdClient *etcd.Client
UserPass string
NumNodes int
CbVersion string
ContainerTag string // Docker tag
EtcdServers []string
SkipCleanSlateCheck bool
}
func NewCouchbaseFleet(etcdServers []string) *CouchbaseFleet {
c := &CouchbaseFleet{}
if len(etcdServers) > 0 {
c.EtcdServers = etcdServers
log.Printf("Connect to explicit etcd servers: %v", c.EtcdServers)
} else {
c.EtcdServers = []string{}
log.Printf("Connect to etcd on localhost")
}
c.ConnectToEtcd()
return c
}
func (c *CouchbaseFleet) ConnectToEtcd() {
c.etcdClient = etcd.NewClient(c.EtcdServers)
c.etcdClient.SetConsistency(etcd.STRONG_CONSISTENCY)
}
// Is the Fleet API available? If not, return an error.
func (c CouchbaseFleet) VerifyFleetAPIAvailable() error {
endpointUrl := fmt.Sprintf("%v/machines", FLEET_API_ENDPOINT)
jsonMap := map[string]interface{}{}
return getJsonData(endpointUrl, &jsonMap)
}
func (c *CouchbaseFleet) LaunchCouchbaseServer() error {
if err := c.VerifyFleetAPIAvailable(); err != nil {
msg := "Unable to connect to Fleet API, see http://bit.ly/1AC1iRX " +
"for instructions on how to fix this"
return fmt.Errorf(msg)
}
if err := c.verifyEnoughMachinesAvailable(); err != nil {
return err
}
// create an etcd client
// this need to check:
// no etcd key for /couchbase.com
// what else?
if err := c.verifyCleanSlate(); err != nil {
return err
}
if err := c.setUserNamePassEtcd(); err != nil {
return err
}
nodeFleetUnitJson, err := c.generateNodeFleetUnitJson()
if err != nil {
return err
}
for i := 1; i < c.NumNodes+1; i++ {
if err := launchFleetUnitN(
i,
UNIT_NAME_NODE,
nodeFleetUnitJson,
); err != nil {
return err
}
sidekickFleetUnitJson, err := c.generateSidekickFleetUnitJson(fmt.Sprintf("%v", i))
if err != nil {
return err
}
if err := launchFleetUnitN(
i,
UNIT_NAME_SIDEKICK,
sidekickFleetUnitJson,
); err != nil {
return err
}
}
if err := c.WaitForFleetLaunch(); err != nil {
log.Printf("Error waiting for couchbase cluster launch: %v", err)
return err
}
return nil
}
// Call Fleet API and tell it to stop units. If allUnits is false,
// will only stop couchbase server node + couchbase server sidekick units.
// Otherwise, will stop all fleet units.
func (c CouchbaseFleet) StopUnits(allUnits bool) error {
// set the /couchbase.com/remove-rebalance-disabled flag in etcd since
// otherwise, it will try to remove and rebalance the node, which is not
// what we want when stopping all units.
// set the ttl to be 5 minutes, since there's nothing in place yet to
// block until all the units have stopped
// (TODO: this should get added .. it waits for all units to stop, and then
// it removes the /couchbase.com/remove-rebalance-disabled flag)
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
// call ManipulateUnits with a function that will stop them
unitStopper := func(unit *schema.Unit) error {
// stop the unit by updating desiredState to inactive
// and posting to fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
log.Printf("Stop unit %v via PUT %v", unit.Name, endpointUrl)
return PUT(endpointUrl, `{"desiredState": "inactive"}`)
}
return c.ManipulateUnits(unitStopper, allUnits)
}
// Call Fleet API and tell it to destroy units. If allUnits is false,
// will only stop couchbase server node + couchbase server sidekick units.
// Otherwise, will stop all fleet units.
func (c CouchbaseFleet) | (allUnits bool) error {
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
// call ManipulateUnits with a function that will stop them
unitDestroyer := func(unit *schema.Unit) error {
// stop the unit by updating desiredState to inactive
// and posting to fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
return DELETE(endpointUrl)
}
return c.ManipulateUnits(unitDestroyer, allUnits)
}
type UnitManipulator func(unit *schema.Unit) error
func (c CouchbaseFleet) ManipulateUnits(unitManipulator UnitManipulator, manipulateAllUnits bool) error {
// find all the units
allUnits, err := c.findAllFleetUnits()
if err != nil {
return err
}
var units []*schema.Unit
if manipulateAllUnits {
units = allUnits
} else {
// filter the ones out that have the name pattern we care about (couchbase_node)
unitNamePatterns := []string{UNIT_NAME_NODE, UNIT_NAME_SIDEKICK}
units = c.filterFleetUnits(allUnits, unitNamePatterns)
}
for _, unit := range units {
if err := unitManipulator(unit); err != nil {
return err
}
}
return nil
}
func (c CouchbaseFleet) findAllFleetUnits() (units []*schema.Unit, err error) {
endpointUrl := ""
maxAttempts := 10000
sleepSeconds := 0
nextPageToken := ""
log.Printf("findAllFleetUnits()")
worker := func() (finished bool, err error) {
// append a next page token to url if needed
if len(nextPageToken) > 0 {
endpointUrl = fmt.Sprintf("%v/units?nextPageToken=%v", FLEET_API_ENDPOINT, nextPageToken)
} else {
endpointUrl = fmt.Sprintf("%v/units", FLEET_API_ENDPOINT)
}
log.Printf("Getting units from %v", endpointUrl)
unitPage := schema.UnitPage{}
if err := getJsonData(endpointUrl, &unitPage); err != nil {
return true, err
}
// add all units to return value
for _, unit := range unitPage.Units {
units = append(units, unit)
}
// if no more pages, we are finished
areWeFinished := len(unitPage.NextPageToken) == 0
return areWeFinished, nil
}
sleeper := func(numAttempts int) (bool, int) {
if numAttempts > maxAttempts {
return false, -1
}
return true, sleepSeconds
}
if err := RetryLoop(worker, sleeper); err != nil {
return nil, err
}
return units, nil
}
func (c CouchbaseFleet) filterFleetUnits(units []*schema.Unit, filters []string) (filteredUnits []*schema.Unit) {
stringContainsAny := func(s string, filters []string) bool {
for _, filter := range filters {
if strings.Contains(s, filter) {
return true
}
}
return false
}
for _, unit := range units {
if stringContainsAny(unit.Name, filters) {
filteredUnits = append(filteredUnits, unit)
}
}
return filteredUnits
}
func (c CouchbaseFleet) GenerateUnits(outputDir string) error {
// generate node unit
nodeFleetUnit, err := c.generateNodeFleetUnitFile()
if err != nil {
return err
}
filename := fmt.Sprintf("%v@.service", UNIT_NAME_NODE)
path := filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(nodeFleetUnit), 0644); err != nil {
return err
}
// generate sidekick unit
sidekickFleetUnit, err := c.generateSidekickFleetUnitFile("%i")
if err != nil {
return err
}
filename = fmt.Sprintf("%v@.service", UNIT_NAME_SIDEKICK)
path = filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(sidekickFleetUnit), 0644); err != nil {
return err
}
return nil
}
func (c CouchbaseFleet) WaitForFleetLaunch() error {
// wait until X nodes are up in cluster
log.Printf("Waiting for cluster to be up ..")
WaitUntilNumNodesRunning(c.NumNodes, c.EtcdServers)
// wait until no rebalance running
cb := NewCouchbaseCluster(c.EtcdServers)
if err := cb.LoadAdminCredsFromEtcd(); err != nil {
return err
}
liveNodeIp, err := cb.FindLiveNode()
if err != nil {
return err
}
// dirty hack to solve problem: the cluster might have
// 2 nodes which just finished rebalancing, and a third node
// that joins and triggers another rebalance. thus, it will briefly
// go into "no rebalances happening" state, followed by a rebalance.
// if we see the "no rebalances happening state", we'll be tricked and
// think we're done when we're really not.
// workaround: check twice, and sleep in between the check
for i := 0; i < c.NumNodes; i++ {
if err := cb.WaitUntilNoRebalanceRunning(liveNodeIp, 30); err != nil {
return err
}
log.Printf("No rebalance running, sleeping 15s. (%v/%v)", i+1, c.NumNodes)
<-time.After(time.Second * 15)
}
log.Println("No rebalance running after several checks")
// let user know its up
log.Printf("Cluster is up!")
return nil
}
func (c *CouchbaseFleet) ExtractDocOptArgs(arguments map[string]interface{}) error {
userpass, err := ExtractUserPass(arguments)
if err != nil {
return err
}
numnodes, err := ExtractNumNodes(arguments)
if err != nil {
return err
}
cbVersion, err := ExtractCbVersion(arguments)
if err != nil {
return err
}
c.UserPass = userpass
c.NumNodes = numnodes
c.CbVersion = cbVersion
c.ContainerTag = ExtractDockerTagOrLatest(arguments)
c.SkipCleanSlateCheck = ExtractSkipCheckCleanState(arguments)
return nil
}
// call fleetctl list-machines and verify that the number of nodes
// the user asked to kick off is LTE number of machines on cluster
func (c CouchbaseFleet) verifyEnoughMachinesAvailable() error {
log.Printf("verifyEnoughMachinesAvailable()")
endpointUrl := fmt.Sprintf("%v/machines", FLEET_API_ENDPOINT)
// {"machines":[{"id":"a91c394439734375aa256d7da1410132","primaryIP":"172.17.8.101"}]}
jsonMap := map[string]interface{}{}
if err := getJsonData(endpointUrl, &jsonMap); err != nil {
log.Printf("getJsonData error: %v", err)
return err
}
machineListRaw := jsonMap["machines"]
machineList, ok := machineListRaw.([]interface{})
if !ok {
return fmt.Errorf("Unexpected value for machines: %v", jsonMap)
}
if len(machineList) < c.NumNodes {
return fmt.Errorf("User requested %v nodes, only %v available", c.NumNodes, len(machineList))
}
log.Printf("/verifyEnoughMachinesAvailable()")
return nil
}
// Make sure that /couchbase.com/couchbase-node-state is empty
func (c CouchbaseFleet) verifyCleanSlate() error {
if c.SkipCleanSlateCheck {
return nil
}
key := path.Join(KEY_NODE_STATE)
_, err := c.etcdClient.Get(key, false, false)
// if that key exists, there is residue and we should abort
if err == nil {
return fmt.Errorf("Found residue -- key: %v in etcd. You should destroy the cluster first, then try again.", KEY_NODE_STATE)
}
// if we get an error with "key not found", then we are starting
// with a clean slate
if strings.Contains(err.Error(), "Key not found") {
return nil
}
// if we got a different error rather than "Key not found", treat that as
// an error as well.
return fmt.Errorf("Unexpected error trying to get key: %v: %v", KEY_NODE_STATE, err)
}
func (c CouchbaseFleet) setUserNamePassEtcd() error {
_, err := c.etcdClient.Set(KEY_USER_PASS, c.UserPass, 0)
return err
}
func (c CouchbaseFleet) generateNodeFleetUnitJson() (string, error) {
unitFile, err := c.generateNodeFleetUnitFile()
if err != nil {
return "", err
}
log.Printf("Couchbase node fleet unit: %v", unitFile)
// convert from text -> json
jsonBytes, err := unitFileToJson(unitFile)
if err != nil {
return "", err
}
return string(jsonBytes), err
}
func (c CouchbaseFleet) generateSidekickFleetUnitJson(unitNumber string) (string, error) {
unitFile, err := c.generateSidekickFleetUnitFile(unitNumber)
if err != nil {
return "", err
}
log.Printf("Couchbase sidekick fleet unit: %v", unitFile)
jsonBytes, err := unitFileToJson(unitFile)
if err != nil {
return "", err
}
return string(jsonBytes), err
}
func unitFileToJson(unitFileContent string) ([]byte, error) {
// deserialize to units
opts, err := unit.Deserialize(strings.NewReader(unitFileContent))
if err != nil {
return nil, err
}
fleetUnit := struct {
Options []*unit.UnitOption `json:"options"`
DesiredState string `json:"desiredState"`
}{
Options: opts,
DesiredState: "launched",
}
bytes, err := json.Marshal(fleetUnit)
return bytes, err
}
func (c CouchbaseFleet) generateNodeFleetUnitFile() (string, error) {
assetName := "data/couchbase_node@.service.template"
content, err := Asset(assetName)
if err != nil {
return "", fmt.Errorf("could not find asset: %v. err: %v", assetName, err)
}
params := struct {
CB_VERSION string
CONTAINER_TAG string
}{
CB_VERSION: c.CbVersion,
CONTAINER_TAG: c.ContainerTag,
}
log.Printf("Generating node from %v with params: %+v", assetName, params)
return generateUnitFileFromTemplate(content, params)
}
func (c CouchbaseFleet) generateSidekickFleetUnitFile(unitNumber string) (string, error) {
assetName := "data/couchbase_sidekick@.service.template"
content, err := Asset(assetName)
if err != nil {
return "", fmt.Errorf("could not find asset: %v. err: %v", assetName, err)
}
params := struct {
CB_VERSION string
CONTAINER_TAG string
UNIT_NUMBER string
}{
CB_VERSION: c.CbVersion,
CONTAINER_TAG: c.ContainerTag,
UNIT_NUMBER: unitNumber,
}
log.Printf("Generating sidekick from %v with params: %+v", assetName, params)
return generateUnitFileFromTemplate(content, params)
}
func generateUnitFileFromTemplate(templateContent []byte, params interface{}) (string, error) {
// run through go template engine
tmpl, err := template.New("Template").Parse(string(templateContent))
if err != nil {
return "", err
}
out := &bytes.Buffer{}
// execute template and write to dest
err = tmpl.Execute(out, params)
if err != nil {
return "", err
}
return out.String(), nil
}
func launchFleetUnitN(unitNumber int, unitName, fleetUnitJson string) error {
log.Printf("Launch fleet unit %v (%v)", unitName, unitNumber)
endpointUrl := fmt.Sprintf("%v/units/%v@%v.service", FLEET_API_ENDPOINT, unitName, unitNumber)
return PUT(endpointUrl, fleetUnitJson)
}
// Launch a fleet unit file that is stored in the data dir (via go-bindata)
func launchFleetUnitFile(unitName, unitFilePath string) error {
log.Printf("Launch fleet unit file (%v)", unitName)
content, err := Asset(unitFilePath)
if err != nil {
return fmt.Errorf("could not find asset: %v. err: %v", unitFilePath, err)
}
// convert from text -> json
jsonBytes, err := unitFileToJson(string(content))
if err != nil {
return err
}
endpointUrl := fmt.Sprintf("%v/units/%v.service", FLEET_API_ENDPOINT, unitName)
return PUT(endpointUrl, string(jsonBytes))
}
func DELETE(endpointUrl string) error {
client := &http.Client{}
req, err := http.NewRequest("DELETE", endpointUrl, nil)
if err != nil {
return err
}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return fmt.Errorf("DELETE: Unexpected status code in response")
}
return nil
}
func PUT(endpointUrl, json string) error {
client := &http.Client{}
req, err := http.NewRequest("PUT", endpointUrl, bytes.NewReader([]byte(json)))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
bodyStr, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
log.Printf("response body: %v", string(bodyStr))
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return fmt.Errorf("PUT: Unexpected status code in response")
}
return nil
}
| DestroyUnits | identifier_name |
fleet.go | package cbcluster
import (
"bytes"
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"path"
"path/filepath"
"strings"
"time"
"github.com/coreos/fleet/schema"
"github.com/coreos/go-systemd/unit"
"github.com/tleyden/go-etcd/etcd"
)
const (
UNIT_NAME_NODE = "couchbase_node"
UNIT_NAME_SIDEKICK = "couchbase_sidekick"
)
var (
FLEET_API_ENDPOINT = "http://localhost:49153/fleet/v1"
)
type CouchbaseFleet struct {
etcdClient *etcd.Client
UserPass string
NumNodes int
CbVersion string
ContainerTag string // Docker tag
EtcdServers []string
SkipCleanSlateCheck bool
}
func NewCouchbaseFleet(etcdServers []string) *CouchbaseFleet {
c := &CouchbaseFleet{}
if len(etcdServers) > 0 {
c.EtcdServers = etcdServers
log.Printf("Connect to explicit etcd servers: %v", c.EtcdServers)
} else {
c.EtcdServers = []string{}
log.Printf("Connect to etcd on localhost")
}
c.ConnectToEtcd()
return c
}
func (c *CouchbaseFleet) ConnectToEtcd() {
c.etcdClient = etcd.NewClient(c.EtcdServers)
c.etcdClient.SetConsistency(etcd.STRONG_CONSISTENCY)
}
// Is the Fleet API available? If not, return an error.
func (c CouchbaseFleet) VerifyFleetAPIAvailable() error {
endpointUrl := fmt.Sprintf("%v/machines", FLEET_API_ENDPOINT)
jsonMap := map[string]interface{}{}
return getJsonData(endpointUrl, &jsonMap)
}
func (c *CouchbaseFleet) LaunchCouchbaseServer() error {
if err := c.VerifyFleetAPIAvailable(); err != nil {
msg := "Unable to connect to Fleet API, see http://bit.ly/1AC1iRX " +
"for instructions on how to fix this"
return fmt.Errorf(msg)
}
if err := c.verifyEnoughMachinesAvailable(); err != nil {
return err
}
// create an etcd client
// this need to check:
// no etcd key for /couchbase.com
// what else?
if err := c.verifyCleanSlate(); err != nil {
return err
}
if err := c.setUserNamePassEtcd(); err != nil {
return err
}
nodeFleetUnitJson, err := c.generateNodeFleetUnitJson()
if err != nil {
return err
}
for i := 1; i < c.NumNodes+1; i++ {
if err := launchFleetUnitN(
i,
UNIT_NAME_NODE,
nodeFleetUnitJson,
); err != nil {
return err
}
sidekickFleetUnitJson, err := c.generateSidekickFleetUnitJson(fmt.Sprintf("%v", i))
if err != nil {
return err
}
if err := launchFleetUnitN(
i,
UNIT_NAME_SIDEKICK,
sidekickFleetUnitJson,
); err != nil {
return err
}
}
if err := c.WaitForFleetLaunch(); err != nil {
log.Printf("Error waiting for couchbase cluster launch: %v", err)
return err
}
return nil
}
// Call Fleet API and tell it to stop units. If allUnits is false,
// will only stop couchbase server node + couchbase server sidekick units.
// Otherwise, will stop all fleet units.
func (c CouchbaseFleet) StopUnits(allUnits bool) error {
// set the /couchbase.com/remove-rebalance-disabled flag in etcd since
// otherwise, it will try to remove and rebalance the node, which is not
// what we want when stopping all units.
// set the ttl to be 5 minutes, since there's nothing in place yet to
// block until all the units have stopped
// (TODO: this should get added .. it waits for all units to stop, and then
// it removes the /couchbase.com/remove-rebalance-disabled flag)
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
// call ManipulateUnits with a function that will stop them
unitStopper := func(unit *schema.Unit) error {
// stop the unit by updating desiredState to inactive
// and posting to fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
log.Printf("Stop unit %v via PUT %v", unit.Name, endpointUrl)
return PUT(endpointUrl, `{"desiredState": "inactive"}`)
}
return c.ManipulateUnits(unitStopper, allUnits)
}
// Call Fleet API and tell it to destroy units. If allUnits is false,
// will only stop couchbase server node + couchbase server sidekick units.
// Otherwise, will stop all fleet units.
func (c CouchbaseFleet) DestroyUnits(allUnits bool) error {
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
// call ManipulateUnits with a function that will stop them
unitDestroyer := func(unit *schema.Unit) error {
// stop the unit by updating desiredState to inactive
// and posting to fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
return DELETE(endpointUrl)
}
return c.ManipulateUnits(unitDestroyer, allUnits)
}
type UnitManipulator func(unit *schema.Unit) error
func (c CouchbaseFleet) ManipulateUnits(unitManipulator UnitManipulator, manipulateAllUnits bool) error {
// find all the units
allUnits, err := c.findAllFleetUnits()
if err != nil {
return err
}
var units []*schema.Unit
if manipulateAllUnits {
units = allUnits
} else {
// filter the ones out that have the name pattern we care about (couchbase_node)
unitNamePatterns := []string{UNIT_NAME_NODE, UNIT_NAME_SIDEKICK}
units = c.filterFleetUnits(allUnits, unitNamePatterns)
}
for _, unit := range units {
if err := unitManipulator(unit); err != nil {
return err
}
}
return nil
}
func (c CouchbaseFleet) findAllFleetUnits() (units []*schema.Unit, err error) {
endpointUrl := ""
maxAttempts := 10000
sleepSeconds := 0
nextPageToken := ""
log.Printf("findAllFleetUnits()")
worker := func() (finished bool, err error) {
// append a next page token to url if needed
if len(nextPageToken) > 0 {
endpointUrl = fmt.Sprintf("%v/units?nextPageToken=%v", FLEET_API_ENDPOINT, nextPageToken)
} else {
endpointUrl = fmt.Sprintf("%v/units", FLEET_API_ENDPOINT)
}
log.Printf("Getting units from %v", endpointUrl)
unitPage := schema.UnitPage{}
if err := getJsonData(endpointUrl, &unitPage); err != nil {
return true, err
}
// add all units to return value
for _, unit := range unitPage.Units {
units = append(units, unit)
}
// if no more pages, we are finished
areWeFinished := len(unitPage.NextPageToken) == 0
return areWeFinished, nil
}
sleeper := func(numAttempts int) (bool, int) {
if numAttempts > maxAttempts {
return false, -1
}
return true, sleepSeconds
}
if err := RetryLoop(worker, sleeper); err != nil {
return nil, err
}
return units, nil
}
func (c CouchbaseFleet) filterFleetUnits(units []*schema.Unit, filters []string) (filteredUnits []*schema.Unit) |
func (c CouchbaseFleet) GenerateUnits(outputDir string) error {
// generate node unit
nodeFleetUnit, err := c.generateNodeFleetUnitFile()
if err != nil {
return err
}
filename := fmt.Sprintf("%v@.service", UNIT_NAME_NODE)
path := filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(nodeFleetUnit), 0644); err != nil {
return err
}
// generate sidekick unit
sidekickFleetUnit, err := c.generateSidekickFleetUnitFile("%i")
if err != nil {
return err
}
filename = fmt.Sprintf("%v@.service", UNIT_NAME_SIDEKICK)
path = filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(sidekickFleetUnit), 0644); err != nil {
return err
}
return nil
}
func (c CouchbaseFleet) WaitForFleetLaunch() error {
// wait until X nodes are up in cluster
log.Printf("Waiting for cluster to be up ..")
WaitUntilNumNodesRunning(c.NumNodes, c.EtcdServers)
// wait until no rebalance running
cb := NewCouchbaseCluster(c.EtcdServers)
if err := cb.LoadAdminCredsFromEtcd(); err != nil {
return err
}
liveNodeIp, err := cb.FindLiveNode()
if err != nil {
return err
}
// dirty hack to solve problem: the cluster might have
// 2 nodes which just finished rebalancing, and a third node
// that joins and triggers another rebalance. thus, it will briefly
// go into "no rebalances happening" state, followed by a rebalance.
// if we see the "no rebalances happening state", we'll be tricked and
// think we're done when we're really not.
// workaround: check twice, and sleep in between the check
for i := 0; i < c.NumNodes; i++ {
if err := cb.WaitUntilNoRebalanceRunning(liveNodeIp, 30); err != nil {
return err
}
log.Printf("No rebalance running, sleeping 15s. (%v/%v)", i+1, c.NumNodes)
<-time.After(time.Second * 15)
}
log.Println("No rebalance running after several checks")
// let user know its up
log.Printf("Cluster is up!")
return nil
}
func (c *CouchbaseFleet) ExtractDocOptArgs(arguments map[string]interface{}) error {
userpass, err := ExtractUserPass(arguments)
if err != nil {
return err
}
numnodes, err := ExtractNumNodes(arguments)
if err != nil {
return err
}
cbVersion, err := ExtractCbVersion(arguments)
if err != nil {
return err
}
c.UserPass = userpass
c.NumNodes = numnodes
c.CbVersion = cbVersion
c.ContainerTag = ExtractDockerTagOrLatest(arguments)
c.SkipCleanSlateCheck = ExtractSkipCheckCleanState(arguments)
return nil
}
// call fleetctl list-machines and verify that the number of nodes
// the user asked to kick off is LTE number of machines on cluster
func (c CouchbaseFleet) verifyEnoughMachinesAvailable() error {
log.Printf("verifyEnoughMachinesAvailable()")
endpointUrl := fmt.Sprintf("%v/machines", FLEET_API_ENDPOINT)
// {"machines":[{"id":"a91c394439734375aa256d7da1410132","primaryIP":"172.17.8.101"}]}
jsonMap := map[string]interface{}{}
if err := getJsonData(endpointUrl, &jsonMap); err != nil {
log.Printf("getJsonData error: %v", err)
return err
}
machineListRaw := jsonMap["machines"]
machineList, ok := machineListRaw.([]interface{})
if !ok {
return fmt.Errorf("Unexpected value for machines: %v", jsonMap)
}
if len(machineList) < c.NumNodes {
return fmt.Errorf("User requested %v nodes, only %v available", c.NumNodes, len(machineList))
}
log.Printf("/verifyEnoughMachinesAvailable()")
return nil
}
// Make sure that /couchbase.com/couchbase-node-state is empty
func (c CouchbaseFleet) verifyCleanSlate() error {
if c.SkipCleanSlateCheck {
return nil
}
key := path.Join(KEY_NODE_STATE)
_, err := c.etcdClient.Get(key, false, false)
// if that key exists, there is residue and we should abort
if err == nil {
return fmt.Errorf("Found residue -- key: %v in etcd. You should destroy the cluster first, then try again.", KEY_NODE_STATE)
}
// if we get an error with "key not found", then we are starting
// with a clean slate
if strings.Contains(err.Error(), "Key not found") {
return nil
}
// if we got a different error rather than "Key not found", treat that as
// an error as well.
return fmt.Errorf("Unexpected error trying to get key: %v: %v", KEY_NODE_STATE, err)
}
func (c CouchbaseFleet) setUserNamePassEtcd() error {
_, err := c.etcdClient.Set(KEY_USER_PASS, c.UserPass, 0)
return err
}
func (c CouchbaseFleet) generateNodeFleetUnitJson() (string, error) {
unitFile, err := c.generateNodeFleetUnitFile()
if err != nil {
return "", err
}
log.Printf("Couchbase node fleet unit: %v", unitFile)
// convert from text -> json
jsonBytes, err := unitFileToJson(unitFile)
if err != nil {
return "", err
}
return string(jsonBytes), err
}
func (c CouchbaseFleet) generateSidekickFleetUnitJson(unitNumber string) (string, error) {
unitFile, err := c.generateSidekickFleetUnitFile(unitNumber)
if err != nil {
return "", err
}
log.Printf("Couchbase sidekick fleet unit: %v", unitFile)
jsonBytes, err := unitFileToJson(unitFile)
if err != nil {
return "", err
}
return string(jsonBytes), err
}
func unitFileToJson(unitFileContent string) ([]byte, error) {
// deserialize to units
opts, err := unit.Deserialize(strings.NewReader(unitFileContent))
if err != nil {
return nil, err
}
fleetUnit := struct {
Options []*unit.UnitOption `json:"options"`
DesiredState string `json:"desiredState"`
}{
Options: opts,
DesiredState: "launched",
}
bytes, err := json.Marshal(fleetUnit)
return bytes, err
}
func (c CouchbaseFleet) generateNodeFleetUnitFile() (string, error) {
assetName := "data/couchbase_node@.service.template"
content, err := Asset(assetName)
if err != nil {
return "", fmt.Errorf("could not find asset: %v. err: %v", assetName, err)
}
params := struct {
CB_VERSION string
CONTAINER_TAG string
}{
CB_VERSION: c.CbVersion,
CONTAINER_TAG: c.ContainerTag,
}
log.Printf("Generating node from %v with params: %+v", assetName, params)
return generateUnitFileFromTemplate(content, params)
}
func (c CouchbaseFleet) generateSidekickFleetUnitFile(unitNumber string) (string, error) {
assetName := "data/couchbase_sidekick@.service.template"
content, err := Asset(assetName)
if err != nil {
return "", fmt.Errorf("could not find asset: %v. err: %v", assetName, err)
}
params := struct {
CB_VERSION string
CONTAINER_TAG string
UNIT_NUMBER string
}{
CB_VERSION: c.CbVersion,
CONTAINER_TAG: c.ContainerTag,
UNIT_NUMBER: unitNumber,
}
log.Printf("Generating sidekick from %v with params: %+v", assetName, params)
return generateUnitFileFromTemplate(content, params)
}
func generateUnitFileFromTemplate(templateContent []byte, params interface{}) (string, error) {
// run through go template engine
tmpl, err := template.New("Template").Parse(string(templateContent))
if err != nil {
return "", err
}
out := &bytes.Buffer{}
// execute template and write to dest
err = tmpl.Execute(out, params)
if err != nil {
return "", err
}
return out.String(), nil
}
func launchFleetUnitN(unitNumber int, unitName, fleetUnitJson string) error {
log.Printf("Launch fleet unit %v (%v)", unitName, unitNumber)
endpointUrl := fmt.Sprintf("%v/units/%v@%v.service", FLEET_API_ENDPOINT, unitName, unitNumber)
return PUT(endpointUrl, fleetUnitJson)
}
// Launch a fleet unit file that is stored in the data dir (via go-bindata)
func launchFleetUnitFile(unitName, unitFilePath string) error {
log.Printf("Launch fleet unit file (%v)", unitName)
content, err := Asset(unitFilePath)
if err != nil {
return fmt.Errorf("could not find asset: %v. err: %v", unitFilePath, err)
}
// convert from text -> json
jsonBytes, err := unitFileToJson(string(content))
if err != nil {
return err
}
endpointUrl := fmt.Sprintf("%v/units/%v.service", FLEET_API_ENDPOINT, unitName)
return PUT(endpointUrl, string(jsonBytes))
}
func DELETE(endpointUrl string) error {
client := &http.Client{}
req, err := http.NewRequest("DELETE", endpointUrl, nil)
if err != nil {
return err
}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return fmt.Errorf("DELETE: Unexpected status code in response")
}
return nil
}
func PUT(endpointUrl, json string) error {
client := &http.Client{}
req, err := http.NewRequest("PUT", endpointUrl, bytes.NewReader([]byte(json)))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
bodyStr, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
log.Printf("response body: %v", string(bodyStr))
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return fmt.Errorf("PUT: Unexpected status code in response")
}
return nil
}
| {
stringContainsAny := func(s string, filters []string) bool {
for _, filter := range filters {
if strings.Contains(s, filter) {
return true
}
}
return false
}
for _, unit := range units {
if stringContainsAny(unit.Name, filters) {
filteredUnits = append(filteredUnits, unit)
}
}
return filteredUnits
} | identifier_body |
fleet.go | package cbcluster
import (
"bytes"
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"path"
"path/filepath"
"strings"
"time"
"github.com/coreos/fleet/schema"
"github.com/coreos/go-systemd/unit"
"github.com/tleyden/go-etcd/etcd"
)
const (
UNIT_NAME_NODE = "couchbase_node"
UNIT_NAME_SIDEKICK = "couchbase_sidekick"
)
var (
FLEET_API_ENDPOINT = "http://localhost:49153/fleet/v1"
)
type CouchbaseFleet struct {
etcdClient *etcd.Client
UserPass string
NumNodes int
CbVersion string
ContainerTag string // Docker tag
EtcdServers []string
SkipCleanSlateCheck bool
}
func NewCouchbaseFleet(etcdServers []string) *CouchbaseFleet {
c := &CouchbaseFleet{}
if len(etcdServers) > 0 {
c.EtcdServers = etcdServers
log.Printf("Connect to explicit etcd servers: %v", c.EtcdServers)
} else {
c.EtcdServers = []string{}
log.Printf("Connect to etcd on localhost")
}
c.ConnectToEtcd()
return c
}
func (c *CouchbaseFleet) ConnectToEtcd() {
c.etcdClient = etcd.NewClient(c.EtcdServers)
c.etcdClient.SetConsistency(etcd.STRONG_CONSISTENCY)
}
// Is the Fleet API available? If not, return an error.
func (c CouchbaseFleet) VerifyFleetAPIAvailable() error {
endpointUrl := fmt.Sprintf("%v/machines", FLEET_API_ENDPOINT)
jsonMap := map[string]interface{}{}
return getJsonData(endpointUrl, &jsonMap)
}
func (c *CouchbaseFleet) LaunchCouchbaseServer() error {
if err := c.VerifyFleetAPIAvailable(); err != nil {
msg := "Unable to connect to Fleet API, see http://bit.ly/1AC1iRX " +
"for instructions on how to fix this"
return fmt.Errorf(msg)
}
if err := c.verifyEnoughMachinesAvailable(); err != nil {
return err
}
// create an etcd client
// this need to check:
// no etcd key for /couchbase.com
// what else?
if err := c.verifyCleanSlate(); err != nil {
return err
}
if err := c.setUserNamePassEtcd(); err != nil {
return err
}
nodeFleetUnitJson, err := c.generateNodeFleetUnitJson()
if err != nil {
return err
}
for i := 1; i < c.NumNodes+1; i++ {
if err := launchFleetUnitN(
i,
UNIT_NAME_NODE,
nodeFleetUnitJson,
); err != nil {
return err
}
sidekickFleetUnitJson, err := c.generateSidekickFleetUnitJson(fmt.Sprintf("%v", i))
if err != nil {
return err
}
if err := launchFleetUnitN(
i,
UNIT_NAME_SIDEKICK,
sidekickFleetUnitJson,
); err != nil {
return err
}
}
if err := c.WaitForFleetLaunch(); err != nil {
log.Printf("Error waiting for couchbase cluster launch: %v", err)
return err
}
return nil
}
// Call Fleet API and tell it to stop units. If allUnits is false,
// will only stop couchbase server node + couchbase server sidekick units.
// Otherwise, will stop all fleet units.
func (c CouchbaseFleet) StopUnits(allUnits bool) error {
// set the /couchbase.com/remove-rebalance-disabled flag in etcd since
// otherwise, it will try to remove and rebalance the node, which is not
// what we want when stopping all units.
// set the ttl to be 5 minutes, since there's nothing in place yet to
// block until all the units have stopped
// (TODO: this should get added .. it waits for all units to stop, and then
// it removes the /couchbase.com/remove-rebalance-disabled flag)
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
// call ManipulateUnits with a function that will stop them
unitStopper := func(unit *schema.Unit) error {
// stop the unit by updating desiredState to inactive
// and posting to fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
log.Printf("Stop unit %v via PUT %v", unit.Name, endpointUrl)
return PUT(endpointUrl, `{"desiredState": "inactive"}`)
}
return c.ManipulateUnits(unitStopper, allUnits)
}
// Call Fleet API and tell it to destroy units. If allUnits is false,
// will only stop couchbase server node + couchbase server sidekick units.
// Otherwise, will stop all fleet units.
func (c CouchbaseFleet) DestroyUnits(allUnits bool) error {
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
// call ManipulateUnits with a function that will stop them
unitDestroyer := func(unit *schema.Unit) error {
// stop the unit by updating desiredState to inactive
// and posting to fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
return DELETE(endpointUrl)
}
return c.ManipulateUnits(unitDestroyer, allUnits)
}
type UnitManipulator func(unit *schema.Unit) error
func (c CouchbaseFleet) ManipulateUnits(unitManipulator UnitManipulator, manipulateAllUnits bool) error {
// find all the units
allUnits, err := c.findAllFleetUnits()
if err != nil {
return err
}
var units []*schema.Unit
if manipulateAllUnits {
units = allUnits
} else {
// filter the ones out that have the name pattern we care about (couchbase_node)
unitNamePatterns := []string{UNIT_NAME_NODE, UNIT_NAME_SIDEKICK}
units = c.filterFleetUnits(allUnits, unitNamePatterns)
}
for _, unit := range units {
if err := unitManipulator(unit); err != nil {
return err
}
}
return nil
}
func (c CouchbaseFleet) findAllFleetUnits() (units []*schema.Unit, err error) {
endpointUrl := ""
maxAttempts := 10000
sleepSeconds := 0
nextPageToken := ""
log.Printf("findAllFleetUnits()")
worker := func() (finished bool, err error) {
// append a next page token to url if needed
if len(nextPageToken) > 0 {
endpointUrl = fmt.Sprintf("%v/units?nextPageToken=%v", FLEET_API_ENDPOINT, nextPageToken)
} else {
endpointUrl = fmt.Sprintf("%v/units", FLEET_API_ENDPOINT)
}
log.Printf("Getting units from %v", endpointUrl)
unitPage := schema.UnitPage{}
if err := getJsonData(endpointUrl, &unitPage); err != nil {
return true, err
}
// add all units to return value
for _, unit := range unitPage.Units {
units = append(units, unit)
}
// if no more pages, we are finished
areWeFinished := len(unitPage.NextPageToken) == 0
return areWeFinished, nil
}
sleeper := func(numAttempts int) (bool, int) {
if numAttempts > maxAttempts {
return false, -1
}
return true, sleepSeconds
}
if err := RetryLoop(worker, sleeper); err != nil {
return nil, err
}
return units, nil
}
func (c CouchbaseFleet) filterFleetUnits(units []*schema.Unit, filters []string) (filteredUnits []*schema.Unit) {
stringContainsAny := func(s string, filters []string) bool {
for _, filter := range filters {
if strings.Contains(s, filter) {
return true
}
}
return false
}
for _, unit := range units {
if stringContainsAny(unit.Name, filters) {
filteredUnits = append(filteredUnits, unit)
}
}
return filteredUnits
}
func (c CouchbaseFleet) GenerateUnits(outputDir string) error {
// generate node unit
nodeFleetUnit, err := c.generateNodeFleetUnitFile()
if err != nil {
return err
}
filename := fmt.Sprintf("%v@.service", UNIT_NAME_NODE)
path := filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(nodeFleetUnit), 0644); err != nil {
return err
}
// generate sidekick unit
sidekickFleetUnit, err := c.generateSidekickFleetUnitFile("%i")
if err != nil {
return err
}
filename = fmt.Sprintf("%v@.service", UNIT_NAME_SIDEKICK)
path = filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(sidekickFleetUnit), 0644); err != nil {
return err
}
return nil
}
func (c CouchbaseFleet) WaitForFleetLaunch() error {
// wait until X nodes are up in cluster
log.Printf("Waiting for cluster to be up ..")
WaitUntilNumNodesRunning(c.NumNodes, c.EtcdServers)
// wait until no rebalance running
cb := NewCouchbaseCluster(c.EtcdServers)
if err := cb.LoadAdminCredsFromEtcd(); err != nil {
return err
}
liveNodeIp, err := cb.FindLiveNode()
if err != nil |
// dirty hack to solve problem: the cluster might have
// 2 nodes which just finished rebalancing, and a third node
// that joins and triggers another rebalance. thus, it will briefly
// go into "no rebalances happening" state, followed by a rebalance.
// if we see the "no rebalances happening state", we'll be tricked and
// think we're done when we're really not.
// workaround: check twice, and sleep in between the check
for i := 0; i < c.NumNodes; i++ {
if err := cb.WaitUntilNoRebalanceRunning(liveNodeIp, 30); err != nil {
return err
}
log.Printf("No rebalance running, sleeping 15s. (%v/%v)", i+1, c.NumNodes)
<-time.After(time.Second * 15)
}
log.Println("No rebalance running after several checks")
// let user know its up
log.Printf("Cluster is up!")
return nil
}
func (c *CouchbaseFleet) ExtractDocOptArgs(arguments map[string]interface{}) error {
userpass, err := ExtractUserPass(arguments)
if err != nil {
return err
}
numnodes, err := ExtractNumNodes(arguments)
if err != nil {
return err
}
cbVersion, err := ExtractCbVersion(arguments)
if err != nil {
return err
}
c.UserPass = userpass
c.NumNodes = numnodes
c.CbVersion = cbVersion
c.ContainerTag = ExtractDockerTagOrLatest(arguments)
c.SkipCleanSlateCheck = ExtractSkipCheckCleanState(arguments)
return nil
}
// call fleetctl list-machines and verify that the number of nodes
// the user asked to kick off is LTE number of machines on cluster
func (c CouchbaseFleet) verifyEnoughMachinesAvailable() error {
log.Printf("verifyEnoughMachinesAvailable()")
endpointUrl := fmt.Sprintf("%v/machines", FLEET_API_ENDPOINT)
// {"machines":[{"id":"a91c394439734375aa256d7da1410132","primaryIP":"172.17.8.101"}]}
jsonMap := map[string]interface{}{}
if err := getJsonData(endpointUrl, &jsonMap); err != nil {
log.Printf("getJsonData error: %v", err)
return err
}
machineListRaw := jsonMap["machines"]
machineList, ok := machineListRaw.([]interface{})
if !ok {
return fmt.Errorf("Unexpected value for machines: %v", jsonMap)
}
if len(machineList) < c.NumNodes {
return fmt.Errorf("User requested %v nodes, only %v available", c.NumNodes, len(machineList))
}
log.Printf("/verifyEnoughMachinesAvailable()")
return nil
}
// Make sure that /couchbase.com/couchbase-node-state is empty
func (c CouchbaseFleet) verifyCleanSlate() error {
if c.SkipCleanSlateCheck {
return nil
}
key := path.Join(KEY_NODE_STATE)
_, err := c.etcdClient.Get(key, false, false)
// if that key exists, there is residue and we should abort
if err == nil {
return fmt.Errorf("Found residue -- key: %v in etcd. You should destroy the cluster first, then try again.", KEY_NODE_STATE)
}
// if we get an error with "key not found", then we are starting
// with a clean slate
if strings.Contains(err.Error(), "Key not found") {
return nil
}
// if we got a different error rather than "Key not found", treat that as
// an error as well.
return fmt.Errorf("Unexpected error trying to get key: %v: %v", KEY_NODE_STATE, err)
}
func (c CouchbaseFleet) setUserNamePassEtcd() error {
_, err := c.etcdClient.Set(KEY_USER_PASS, c.UserPass, 0)
return err
}
func (c CouchbaseFleet) generateNodeFleetUnitJson() (string, error) {
unitFile, err := c.generateNodeFleetUnitFile()
if err != nil {
return "", err
}
log.Printf("Couchbase node fleet unit: %v", unitFile)
// convert from text -> json
jsonBytes, err := unitFileToJson(unitFile)
if err != nil {
return "", err
}
return string(jsonBytes), err
}
func (c CouchbaseFleet) generateSidekickFleetUnitJson(unitNumber string) (string, error) {
unitFile, err := c.generateSidekickFleetUnitFile(unitNumber)
if err != nil {
return "", err
}
log.Printf("Couchbase sidekick fleet unit: %v", unitFile)
jsonBytes, err := unitFileToJson(unitFile)
if err != nil {
return "", err
}
return string(jsonBytes), err
}
func unitFileToJson(unitFileContent string) ([]byte, error) {
// deserialize to units
opts, err := unit.Deserialize(strings.NewReader(unitFileContent))
if err != nil {
return nil, err
}
fleetUnit := struct {
Options []*unit.UnitOption `json:"options"`
DesiredState string `json:"desiredState"`
}{
Options: opts,
DesiredState: "launched",
}
bytes, err := json.Marshal(fleetUnit)
return bytes, err
}
func (c CouchbaseFleet) generateNodeFleetUnitFile() (string, error) {
assetName := "data/couchbase_node@.service.template"
content, err := Asset(assetName)
if err != nil {
return "", fmt.Errorf("could not find asset: %v. err: %v", assetName, err)
}
params := struct {
CB_VERSION string
CONTAINER_TAG string
}{
CB_VERSION: c.CbVersion,
CONTAINER_TAG: c.ContainerTag,
}
log.Printf("Generating node from %v with params: %+v", assetName, params)
return generateUnitFileFromTemplate(content, params)
}
func (c CouchbaseFleet) generateSidekickFleetUnitFile(unitNumber string) (string, error) {
assetName := "data/couchbase_sidekick@.service.template"
content, err := Asset(assetName)
if err != nil {
return "", fmt.Errorf("could not find asset: %v. err: %v", assetName, err)
}
params := struct {
CB_VERSION string
CONTAINER_TAG string
UNIT_NUMBER string
}{
CB_VERSION: c.CbVersion,
CONTAINER_TAG: c.ContainerTag,
UNIT_NUMBER: unitNumber,
}
log.Printf("Generating sidekick from %v with params: %+v", assetName, params)
return generateUnitFileFromTemplate(content, params)
}
func generateUnitFileFromTemplate(templateContent []byte, params interface{}) (string, error) {
// run through go template engine
tmpl, err := template.New("Template").Parse(string(templateContent))
if err != nil {
return "", err
}
out := &bytes.Buffer{}
// execute template and write to dest
err = tmpl.Execute(out, params)
if err != nil {
return "", err
}
return out.String(), nil
}
func launchFleetUnitN(unitNumber int, unitName, fleetUnitJson string) error {
log.Printf("Launch fleet unit %v (%v)", unitName, unitNumber)
endpointUrl := fmt.Sprintf("%v/units/%v@%v.service", FLEET_API_ENDPOINT, unitName, unitNumber)
return PUT(endpointUrl, fleetUnitJson)
}
// Launch a fleet unit file that is stored in the data dir (via go-bindata)
func launchFleetUnitFile(unitName, unitFilePath string) error {
log.Printf("Launch fleet unit file (%v)", unitName)
content, err := Asset(unitFilePath)
if err != nil {
return fmt.Errorf("could not find asset: %v. err: %v", unitFilePath, err)
}
// convert from text -> json
jsonBytes, err := unitFileToJson(string(content))
if err != nil {
return err
}
endpointUrl := fmt.Sprintf("%v/units/%v.service", FLEET_API_ENDPOINT, unitName)
return PUT(endpointUrl, string(jsonBytes))
}
func DELETE(endpointUrl string) error {
client := &http.Client{}
req, err := http.NewRequest("DELETE", endpointUrl, nil)
if err != nil {
return err
}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return fmt.Errorf("DELETE: Unexpected status code in response")
}
return nil
}
func PUT(endpointUrl, json string) error {
client := &http.Client{}
req, err := http.NewRequest("PUT", endpointUrl, bytes.NewReader([]byte(json)))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
bodyStr, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
log.Printf("response body: %v", string(bodyStr))
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return fmt.Errorf("PUT: Unexpected status code in response")
}
return nil
}
| {
return err
} | conditional_block |
fleet.go | package cbcluster
import (
"bytes"
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"path"
"path/filepath"
"strings"
"time"
"github.com/coreos/fleet/schema"
"github.com/coreos/go-systemd/unit"
"github.com/tleyden/go-etcd/etcd"
)
const (
UNIT_NAME_NODE = "couchbase_node"
UNIT_NAME_SIDEKICK = "couchbase_sidekick"
)
var (
FLEET_API_ENDPOINT = "http://localhost:49153/fleet/v1"
)
type CouchbaseFleet struct {
etcdClient *etcd.Client
UserPass string
NumNodes int
CbVersion string
ContainerTag string // Docker tag
EtcdServers []string
SkipCleanSlateCheck bool
}
func NewCouchbaseFleet(etcdServers []string) *CouchbaseFleet {
c := &CouchbaseFleet{}
if len(etcdServers) > 0 {
c.EtcdServers = etcdServers
log.Printf("Connect to explicit etcd servers: %v", c.EtcdServers)
} else {
c.EtcdServers = []string{}
log.Printf("Connect to etcd on localhost")
}
c.ConnectToEtcd()
return c
}
func (c *CouchbaseFleet) ConnectToEtcd() {
c.etcdClient = etcd.NewClient(c.EtcdServers)
c.etcdClient.SetConsistency(etcd.STRONG_CONSISTENCY)
}
// Is the Fleet API available? If not, return an error.
func (c CouchbaseFleet) VerifyFleetAPIAvailable() error {
endpointUrl := fmt.Sprintf("%v/machines", FLEET_API_ENDPOINT)
jsonMap := map[string]interface{}{}
return getJsonData(endpointUrl, &jsonMap)
}
func (c *CouchbaseFleet) LaunchCouchbaseServer() error {
if err := c.VerifyFleetAPIAvailable(); err != nil {
msg := "Unable to connect to Fleet API, see http://bit.ly/1AC1iRX " +
"for instructions on how to fix this"
return fmt.Errorf(msg)
}
if err := c.verifyEnoughMachinesAvailable(); err != nil {
return err
}
// create an etcd client
// this need to check:
// no etcd key for /couchbase.com
// what else?
if err := c.verifyCleanSlate(); err != nil {
return err
}
if err := c.setUserNamePassEtcd(); err != nil {
return err
}
nodeFleetUnitJson, err := c.generateNodeFleetUnitJson()
if err != nil {
return err
}
for i := 1; i < c.NumNodes+1; i++ {
if err := launchFleetUnitN(
i,
UNIT_NAME_NODE,
nodeFleetUnitJson,
); err != nil {
return err
}
sidekickFleetUnitJson, err := c.generateSidekickFleetUnitJson(fmt.Sprintf("%v", i))
if err != nil {
return err
}
if err := launchFleetUnitN(
i,
UNIT_NAME_SIDEKICK,
sidekickFleetUnitJson,
); err != nil {
return err
}
}
if err := c.WaitForFleetLaunch(); err != nil {
log.Printf("Error waiting for couchbase cluster launch: %v", err)
return err
}
return nil
}
// Call Fleet API and tell it to stop units. If allUnits is false,
// will only stop couchbase server node + couchbase server sidekick units.
// Otherwise, will stop all fleet units.
func (c CouchbaseFleet) StopUnits(allUnits bool) error {
// set the /couchbase.com/remove-rebalance-disabled flag in etcd since
// otherwise, it will try to remove and rebalance the node, which is not
// what we want when stopping all units.
// set the ttl to be 5 minutes, since there's nothing in place yet to
// block until all the units have stopped
// (TODO: this should get added .. it waits for all units to stop, and then
// it removes the /couchbase.com/remove-rebalance-disabled flag)
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
// call ManipulateUnits with a function that will stop them
unitStopper := func(unit *schema.Unit) error {
// stop the unit by updating desiredState to inactive
// and posting to fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
log.Printf("Stop unit %v via PUT %v", unit.Name, endpointUrl)
return PUT(endpointUrl, `{"desiredState": "inactive"}`)
}
return c.ManipulateUnits(unitStopper, allUnits)
}
// Call Fleet API and tell it to destroy units. If allUnits is false,
// will only stop couchbase server node + couchbase server sidekick units.
// Otherwise, will stop all fleet units.
func (c CouchbaseFleet) DestroyUnits(allUnits bool) error {
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
// call ManipulateUnits with a function that will stop them
unitDestroyer := func(unit *schema.Unit) error {
// stop the unit by updating desiredState to inactive
// and posting to fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
return DELETE(endpointUrl)
}
return c.ManipulateUnits(unitDestroyer, allUnits)
}
type UnitManipulator func(unit *schema.Unit) error
func (c CouchbaseFleet) ManipulateUnits(unitManipulator UnitManipulator, manipulateAllUnits bool) error {
// find all the units
allUnits, err := c.findAllFleetUnits()
if err != nil {
return err
}
var units []*schema.Unit
if manipulateAllUnits {
units = allUnits
} else {
// filter the ones out that have the name pattern we care about (couchbase_node)
unitNamePatterns := []string{UNIT_NAME_NODE, UNIT_NAME_SIDEKICK}
units = c.filterFleetUnits(allUnits, unitNamePatterns)
}
for _, unit := range units {
if err := unitManipulator(unit); err != nil {
return err
}
}
return nil
}
func (c CouchbaseFleet) findAllFleetUnits() (units []*schema.Unit, err error) {
endpointUrl := ""
maxAttempts := 10000
sleepSeconds := 0
nextPageToken := ""
log.Printf("findAllFleetUnits()")
worker := func() (finished bool, err error) {
// append a next page token to url if needed
if len(nextPageToken) > 0 {
endpointUrl = fmt.Sprintf("%v/units?nextPageToken=%v", FLEET_API_ENDPOINT, nextPageToken)
} else {
endpointUrl = fmt.Sprintf("%v/units", FLEET_API_ENDPOINT)
}
log.Printf("Getting units from %v", endpointUrl)
unitPage := schema.UnitPage{}
if err := getJsonData(endpointUrl, &unitPage); err != nil {
return true, err
}
// add all units to return value
for _, unit := range unitPage.Units {
units = append(units, unit)
}
// if no more pages, we are finished
areWeFinished := len(unitPage.NextPageToken) == 0
return areWeFinished, nil
}
sleeper := func(numAttempts int) (bool, int) {
if numAttempts > maxAttempts {
return false, -1
}
return true, sleepSeconds
}
if err := RetryLoop(worker, sleeper); err != nil {
return nil, err
}
return units, nil
}
func (c CouchbaseFleet) filterFleetUnits(units []*schema.Unit, filters []string) (filteredUnits []*schema.Unit) {
stringContainsAny := func(s string, filters []string) bool {
for _, filter := range filters {
if strings.Contains(s, filter) {
return true
}
}
return false
}
for _, unit := range units {
if stringContainsAny(unit.Name, filters) {
filteredUnits = append(filteredUnits, unit)
}
}
return filteredUnits
}
func (c CouchbaseFleet) GenerateUnits(outputDir string) error {
// generate node unit
nodeFleetUnit, err := c.generateNodeFleetUnitFile()
if err != nil {
return err
}
filename := fmt.Sprintf("%v@.service", UNIT_NAME_NODE)
path := filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(nodeFleetUnit), 0644); err != nil {
return err
}
| // generate sidekick unit
sidekickFleetUnit, err := c.generateSidekickFleetUnitFile("%i")
if err != nil {
return err
}
filename = fmt.Sprintf("%v@.service", UNIT_NAME_SIDEKICK)
path = filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(sidekickFleetUnit), 0644); err != nil {
return err
}
return nil
}
func (c CouchbaseFleet) WaitForFleetLaunch() error {
// wait until X nodes are up in cluster
log.Printf("Waiting for cluster to be up ..")
WaitUntilNumNodesRunning(c.NumNodes, c.EtcdServers)
// wait until no rebalance running
cb := NewCouchbaseCluster(c.EtcdServers)
if err := cb.LoadAdminCredsFromEtcd(); err != nil {
return err
}
liveNodeIp, err := cb.FindLiveNode()
if err != nil {
return err
}
// dirty hack to solve problem: the cluster might have
// 2 nodes which just finished rebalancing, and a third node
// that joins and triggers another rebalance. thus, it will briefly
// go into "no rebalances happening" state, followed by a rebalance.
// if we see the "no rebalances happening state", we'll be tricked and
// think we're done when we're really not.
// workaround: check twice, and sleep in between the check
for i := 0; i < c.NumNodes; i++ {
if err := cb.WaitUntilNoRebalanceRunning(liveNodeIp, 30); err != nil {
return err
}
log.Printf("No rebalance running, sleeping 15s. (%v/%v)", i+1, c.NumNodes)
<-time.After(time.Second * 15)
}
log.Println("No rebalance running after several checks")
// let user know its up
log.Printf("Cluster is up!")
return nil
}
func (c *CouchbaseFleet) ExtractDocOptArgs(arguments map[string]interface{}) error {
userpass, err := ExtractUserPass(arguments)
if err != nil {
return err
}
numnodes, err := ExtractNumNodes(arguments)
if err != nil {
return err
}
cbVersion, err := ExtractCbVersion(arguments)
if err != nil {
return err
}
c.UserPass = userpass
c.NumNodes = numnodes
c.CbVersion = cbVersion
c.ContainerTag = ExtractDockerTagOrLatest(arguments)
c.SkipCleanSlateCheck = ExtractSkipCheckCleanState(arguments)
return nil
}
// call fleetctl list-machines and verify that the number of nodes
// the user asked to kick off is LTE number of machines on cluster
func (c CouchbaseFleet) verifyEnoughMachinesAvailable() error {
log.Printf("verifyEnoughMachinesAvailable()")
endpointUrl := fmt.Sprintf("%v/machines", FLEET_API_ENDPOINT)
// {"machines":[{"id":"a91c394439734375aa256d7da1410132","primaryIP":"172.17.8.101"}]}
jsonMap := map[string]interface{}{}
if err := getJsonData(endpointUrl, &jsonMap); err != nil {
log.Printf("getJsonData error: %v", err)
return err
}
machineListRaw := jsonMap["machines"]
machineList, ok := machineListRaw.([]interface{})
if !ok {
return fmt.Errorf("Unexpected value for machines: %v", jsonMap)
}
if len(machineList) < c.NumNodes {
return fmt.Errorf("User requested %v nodes, only %v available", c.NumNodes, len(machineList))
}
log.Printf("/verifyEnoughMachinesAvailable()")
return nil
}
// Make sure that /couchbase.com/couchbase-node-state is empty
func (c CouchbaseFleet) verifyCleanSlate() error {
if c.SkipCleanSlateCheck {
return nil
}
key := path.Join(KEY_NODE_STATE)
_, err := c.etcdClient.Get(key, false, false)
// if that key exists, there is residue and we should abort
if err == nil {
return fmt.Errorf("Found residue -- key: %v in etcd. You should destroy the cluster first, then try again.", KEY_NODE_STATE)
}
// if we get an error with "key not found", then we are starting
// with a clean slate
if strings.Contains(err.Error(), "Key not found") {
return nil
}
// if we got a different error rather than "Key not found", treat that as
// an error as well.
return fmt.Errorf("Unexpected error trying to get key: %v: %v", KEY_NODE_STATE, err)
}
func (c CouchbaseFleet) setUserNamePassEtcd() error {
_, err := c.etcdClient.Set(KEY_USER_PASS, c.UserPass, 0)
return err
}
func (c CouchbaseFleet) generateNodeFleetUnitJson() (string, error) {
unitFile, err := c.generateNodeFleetUnitFile()
if err != nil {
return "", err
}
log.Printf("Couchbase node fleet unit: %v", unitFile)
// convert from text -> json
jsonBytes, err := unitFileToJson(unitFile)
if err != nil {
return "", err
}
return string(jsonBytes), err
}
func (c CouchbaseFleet) generateSidekickFleetUnitJson(unitNumber string) (string, error) {
unitFile, err := c.generateSidekickFleetUnitFile(unitNumber)
if err != nil {
return "", err
}
log.Printf("Couchbase sidekick fleet unit: %v", unitFile)
jsonBytes, err := unitFileToJson(unitFile)
if err != nil {
return "", err
}
return string(jsonBytes), err
}
func unitFileToJson(unitFileContent string) ([]byte, error) {
// deserialize to units
opts, err := unit.Deserialize(strings.NewReader(unitFileContent))
if err != nil {
return nil, err
}
fleetUnit := struct {
Options []*unit.UnitOption `json:"options"`
DesiredState string `json:"desiredState"`
}{
Options: opts,
DesiredState: "launched",
}
bytes, err := json.Marshal(fleetUnit)
return bytes, err
}
func (c CouchbaseFleet) generateNodeFleetUnitFile() (string, error) {
assetName := "data/couchbase_node@.service.template"
content, err := Asset(assetName)
if err != nil {
return "", fmt.Errorf("could not find asset: %v. err: %v", assetName, err)
}
params := struct {
CB_VERSION string
CONTAINER_TAG string
}{
CB_VERSION: c.CbVersion,
CONTAINER_TAG: c.ContainerTag,
}
log.Printf("Generating node from %v with params: %+v", assetName, params)
return generateUnitFileFromTemplate(content, params)
}
func (c CouchbaseFleet) generateSidekickFleetUnitFile(unitNumber string) (string, error) {
assetName := "data/couchbase_sidekick@.service.template"
content, err := Asset(assetName)
if err != nil {
return "", fmt.Errorf("could not find asset: %v. err: %v", assetName, err)
}
params := struct {
CB_VERSION string
CONTAINER_TAG string
UNIT_NUMBER string
}{
CB_VERSION: c.CbVersion,
CONTAINER_TAG: c.ContainerTag,
UNIT_NUMBER: unitNumber,
}
log.Printf("Generating sidekick from %v with params: %+v", assetName, params)
return generateUnitFileFromTemplate(content, params)
}
func generateUnitFileFromTemplate(templateContent []byte, params interface{}) (string, error) {
// run through go template engine
tmpl, err := template.New("Template").Parse(string(templateContent))
if err != nil {
return "", err
}
out := &bytes.Buffer{}
// execute template and write to dest
err = tmpl.Execute(out, params)
if err != nil {
return "", err
}
return out.String(), nil
}
func launchFleetUnitN(unitNumber int, unitName, fleetUnitJson string) error {
log.Printf("Launch fleet unit %v (%v)", unitName, unitNumber)
endpointUrl := fmt.Sprintf("%v/units/%v@%v.service", FLEET_API_ENDPOINT, unitName, unitNumber)
return PUT(endpointUrl, fleetUnitJson)
}
// Launch a fleet unit file that is stored in the data dir (via go-bindata)
func launchFleetUnitFile(unitName, unitFilePath string) error {
log.Printf("Launch fleet unit file (%v)", unitName)
content, err := Asset(unitFilePath)
if err != nil {
return fmt.Errorf("could not find asset: %v. err: %v", unitFilePath, err)
}
// convert from text -> json
jsonBytes, err := unitFileToJson(string(content))
if err != nil {
return err
}
endpointUrl := fmt.Sprintf("%v/units/%v.service", FLEET_API_ENDPOINT, unitName)
return PUT(endpointUrl, string(jsonBytes))
}
func DELETE(endpointUrl string) error {
client := &http.Client{}
req, err := http.NewRequest("DELETE", endpointUrl, nil)
if err != nil {
return err
}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return fmt.Errorf("DELETE: Unexpected status code in response")
}
return nil
}
func PUT(endpointUrl, json string) error {
client := &http.Client{}
req, err := http.NewRequest("PUT", endpointUrl, bytes.NewReader([]byte(json)))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
bodyStr, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
log.Printf("response body: %v", string(bodyStr))
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return fmt.Errorf("PUT: Unexpected status code in response")
}
return nil
} | random_line_split | |
aplicacao.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#####################################################
# Camada Física da Computação
# Henry Rocha
# 11/08/2019
# Exemplo de uso do ArgParse e do TkInter.
#####################################################
import sys
import time
import argparse
from enlace import *
from tkinter import Tk
from tkinter.filedialog import askopenfilename
# Serial Com Port
# para saber a sua porta, execute no terminal :
# python -m serial.tools.list_ports
serialName = "/dev/ttyACM0" # Ubuntu (variacao de)
#serialName = "/dev/tty.usbmodem1411" # Mac (variacao de)
#serialName = "COM11" # Windows(variacao de)
class Client():
def __init__(self, serialName, debug=False):
self.com = enlace(serialName)
self.com.enable()
self.debug = debug
self.fileName = None
self.results = []
if debug:
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(self.com.fisica.name))
self.run()
def run(self):
self.shouldStop = False
while not self.shouldStop:
self.configure()
if self.fileName != None:
self.emit()
self.getResults()
self.fileName = None
meanImageSize = 0
meanDeltaTime = 0
meanTransferRate = 0
for result in self.results:
meanImageSize += result[0]
meanDeltaTime += result[1]
meanTransferRate += result[2]
meanImageSize = meanImageSize/len(self.results)
meanDeltaTime = meanDeltaTime/len(self.results)
meanTransferRate = meanTransferRate/len(self.results)
print("[LOG] Tamanho Médio de Imagem.........{:.3f} b".format(meanImageSize))
print("[LOG] Tempo Médio de Transferência....{:.3f} s".format(meanDeltaTime))
print("[LOG] Taxa Média de transferência.....{:.3f} b/s".format(meanTransferRate))
def configure(self):
if self.debug:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
self.fileName = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(self.fileName) is tuple or self.fileName == "":
self.shouldStop = True
self.fileName = None
return None
with open(self.fileName, 'rb') as image:
if self.debug:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
self.imageByteArray = bytearray(imageFile)
self.imageSize = bytes(str(len(self.imageByteArray)), 'UTF-8')
if self.debug:
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(self.imageSize)))
self.textBuffer = self.imageSize + bytearray(b"start") + self.imageByteArray
def emit(self):
if self.debug:
print("[LOG] Tentado transmitir........{} bytes.".format(len(self.textBuffer)))
self.startTime = time.time()
self.com.sendData(self.textBuffer)
# Esperando o fim da transmissão do arquivo.
while(self.com.tx.getIsBussy()):
pass
txSize = self.com.tx.getStatus()
if self.debug:
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
rxBuffer = self.com.getData(len(self.imageSize))[0]
self.endTime = time.time()
if self.debug:
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(self.imageSize):
if self.debug:
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
self.com.disable()
if self.debug:
print("[LOG] Comunicação encerrada.")
self.shouldStop = True
if self.debug:
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
def getResults(self):
deltaTime = self.endTime - self.startTime
transferRate = int(self.imageSize) / deltaTime
if self.debug:
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
self.results.append([int(self.imageSize), deltaTime, transferRate])
def client(args):
# Inicializa enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace(serialName) # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão
com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name))
shouldClose = False
while not shouldClose:
# Verifica se o arquivo a ser transferido foi passado como
# argumento ou se deve ser escolhido pelo GUI.
if args.file is None:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
filePath = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(filePath) is tuple or filePath == "":
shouldClose = True
sys.exit("[ERRO] Arquivo não escolhido. Abortando... Usar CTRL+C")
else:
print("\n[LOG] Arquivo fornecido como argumento.")
filePath = args.file
with open(filePath, "rb") as image:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
imageByteArray = bytearray(imageFile)
imageSize = bytes(str(len(imageByteArray)), 'UTF-8')
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(imageSize)))
# Criando o buffer a ser transmitido.
txBuffer = imageSize + bytearray(b"start") + imageByteArray
# Envia dado.
print("[LOG] Tentado transmitir........{} bytes.".format(len(txBuffer)))
startTime = time.time()
com.sendData(txBuffer)
# Esperando o fim da transmissão do arquivo.
while(com.tx.getIsBussy()):
pass
# Atualiza dados da transmissão.
txSize = com.tx.getStatus()
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
rxBuffer, nRx = com.getData(len(imageSize))
endTime = time.time()
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(imageSize):
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
com.disable()
print("[LOG] Comunicação encerrada.")
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
# Calculando o tempo e a taxa de transferência.
deltaTime = endTime - startTime
transferRate = int(imageSize) / deltaTime
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
# Encerra a comunicação.
com.disable()
print("\n[LOG] Comunicação encerrada.")
def server(args):
# Inicializa enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace("/dev/ttyACM1") # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão |
while True:
# Faz a recepção dos dados
print("\n[LOG] Recebendo dados...")
keywordRecognized = False
receiveBuffer = bytearray()
# Espera até receber uma keyword.
while not keywordRecognized:
rxBuffer, nRx = com.getData(1)
receiveBuffer += rxBuffer
if b"start" in receiveBuffer:
keywordRecognized = True
# Cortando a keyword do buffer recebido.
imageSize = receiveBuffer[:-5]
print("[LOG] Começou a receber a arquivo. Tamanho do arquivo a ser recebido: {} bytes.".format(int(imageSize)))
# Agora recebemos a arquivo em si.
rxBuffer, nRx = com.getData(int(imageSize))
# Salvando a arquivo recebida.
with open("receivedImage.png", "wb") as receivedImage:
receivedImage.write(rxBuffer)
# LOG
print("[LOG] Lido....{} bytes ".format(nRx))
# Retornando o tamanho da arquivo para mostrar que ela foi recebida.
print("[LOG] Retornando o tamanho do arquivo para mostrar que ele foi recebido.")
print("[LOG] Tentado transmitir.......{} bytes.".format(len(imageSize)))
com.sendData(imageSize)
# Esperando o fim da transmissão do arquivo.
while(com.tx.getIsBussy()):
pass
# Atualiza dados da transmissão.
txSize = com.tx.getStatus()
print("[LOG] Transmitido..............{} bytes.".format(int(txSize)))
# Encerra a cmunicação.
com.disable()
print("\n[LOG] Comunicação encerrada.")
if __name__ == "__main__":
argParser = argparse.ArgumentParser(description="Programa que manda e recebe um arquivo usando o Arduino.")
argParser.add_argument("type", help="Tipo de conexão [client, server].", type=str)
argParser.add_argument("-d", "--debug", help="Deve debugar o processo ou não.", action="store_true")
args = argParser.parse_args()
if args.type == "client":
client = Client(serialName, args.debug)
elif args.type == "server":
server(args)
else:
print("[ERRO] Tipo de conexão inválido.") | com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name)) | random_line_split |
aplicacao.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#####################################################
# Camada Física da Computação
# Henry Rocha
# 11/08/2019
# Exemplo de uso do ArgParse e do TkInter.
#####################################################
import sys
import time
import argparse
from enlace import *
from tkinter import Tk
from tkinter.filedialog import askopenfilename
# Serial Com Port
# para saber a sua porta, execute no terminal :
# python -m serial.tools.list_ports
serialName = "/dev/ttyACM0" # Ubuntu (variacao de)
#serialName = "/dev/tty.usbmodem1411" # Mac (variacao de)
#serialName = "COM11" # Windows(variacao de)
class Cli |
def __init__(self, serialName, debug=False):
self.com = enlace(serialName)
self.com.enable()
self.debug = debug
self.fileName = None
self.results = []
if debug:
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(self.com.fisica.name))
self.run()
def run(self):
self.shouldStop = False
while not self.shouldStop:
self.configure()
if self.fileName != None:
self.emit()
self.getResults()
self.fileName = None
meanImageSize = 0
meanDeltaTime = 0
meanTransferRate = 0
for result in self.results:
meanImageSize += result[0]
meanDeltaTime += result[1]
meanTransferRate += result[2]
meanImageSize = meanImageSize/len(self.results)
meanDeltaTime = meanDeltaTime/len(self.results)
meanTransferRate = meanTransferRate/len(self.results)
print("[LOG] Tamanho Médio de Imagem.........{:.3f} b".format(meanImageSize))
print("[LOG] Tempo Médio de Transferência....{:.3f} s".format(meanDeltaTime))
print("[LOG] Taxa Média de transferência.....{:.3f} b/s".format(meanTransferRate))
def configure(self):
if self.debug:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
self.fileName = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(self.fileName) is tuple or self.fileName == "":
self.shouldStop = True
self.fileName = None
return None
with open(self.fileName, 'rb') as image:
if self.debug:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
self.imageByteArray = bytearray(imageFile)
self.imageSize = bytes(str(len(self.imageByteArray)), 'UTF-8')
if self.debug:
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(self.imageSize)))
self.textBuffer = self.imageSize + bytearray(b"start") + self.imageByteArray
def emit(self):
if self.debug:
print("[LOG] Tentado transmitir........{} bytes.".format(len(self.textBuffer)))
self.startTime = time.time()
self.com.sendData(self.textBuffer)
# Esperando o fim da transmissão do arquivo.
while(self.com.tx.getIsBussy()):
pass
txSize = self.com.tx.getStatus()
if self.debug:
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
rxBuffer = self.com.getData(len(self.imageSize))[0]
self.endTime = time.time()
if self.debug:
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(self.imageSize):
if self.debug:
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
self.com.disable()
if self.debug:
print("[LOG] Comunicação encerrada.")
self.shouldStop = True
if self.debug:
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
def getResults(self):
deltaTime = self.endTime - self.startTime
transferRate = int(self.imageSize) / deltaTime
if self.debug:
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
self.results.append([int(self.imageSize), deltaTime, transferRate])
def client(args):
# Inicializa enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace(serialName) # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão
com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name))
shouldClose = False
while not shouldClose:
# Verifica se o arquivo a ser transferido foi passado como
# argumento ou se deve ser escolhido pelo GUI.
if args.file is None:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
filePath = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(filePath) is tuple or filePath == "":
shouldClose = True
sys.exit("[ERRO] Arquivo não escolhido. Abortando... Usar CTRL+C")
else:
print("\n[LOG] Arquivo fornecido como argumento.")
filePath = args.file
with open(filePath, "rb") as image:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
imageByteArray = bytearray(imageFile)
imageSize = bytes(str(len(imageByteArray)), 'UTF-8')
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(imageSize)))
# Criando o buffer a ser transmitido.
txBuffer = imageSize + bytearray(b"start") + imageByteArray
# Envia dado.
print("[LOG] Tentado transmitir........{} bytes.".format(len(txBuffer)))
startTime = time.time()
com.sendData(txBuffer)
# Esperando o fim da transmissão do arquivo.
while(com.tx.getIsBussy()):
pass
# Atualiza dados da transmissão.
txSize = com.tx.getStatus()
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
rxBuffer, nRx = com.getData(len(imageSize))
endTime = time.time()
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(imageSize):
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
com.disable()
print("[LOG] Comunicação encerrada.")
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
# Calculando o tempo e a taxa de transferência.
deltaTime = endTime - startTime
transferRate = int(imageSize) / deltaTime
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
# Encerra a comunicação.
com.disable()
print("\n[LOG] Comunicação encerrada.")
def server(args):
# Inicializa enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace("/dev/ttyACM1") # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão
com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name))
while True:
# Faz a recepção dos dados
print("\n[LOG] Recebendo dados...")
keywordRecognized = False
receiveBuffer = bytearray()
# Espera até receber uma keyword.
while not keywordRecognized:
rxBuffer, nRx = com.getData(1)
receiveBuffer += rxBuffer
if b"start" in receiveBuffer:
keywordRecognized = True
# Cortando a keyword do buffer recebido.
imageSize = receiveBuffer[:-5]
print("[LOG] Começou a receber a arquivo. Tamanho do arquivo a ser recebido: {} bytes.".format(int(imageSize)))
# Agora recebemos a arquivo em si.
rxBuffer, nRx = com.getData(int(imageSize))
# Salvando a arquivo recebida.
with open("receivedImage.png", "wb") as receivedImage:
receivedImage.write(rxBuffer)
# LOG
print("[LOG] Lido....{} bytes ".format(nRx))
# Retornando o tamanho da arquivo para mostrar que ela foi recebida.
print("[LOG] Retornando o tamanho do arquivo para mostrar que ele foi recebido.")
print("[LOG] Tentado transmitir.......{} bytes.".format(len(imageSize)))
com.sendData(imageSize)
# Esperando o fim da transmissão do arquivo.
while(com.tx.getIsBussy()):
pass
# Atualiza dados da transmissão.
txSize = com.tx.getStatus()
print("[LOG] Transmitido..............{} bytes.".format(int(txSize)))
# Encerra a cmunicação.
com.disable()
print("\n[LOG] Comunicação encerrada.")
if __name__ == "__main__":
argParser = argparse.ArgumentParser(description="Programa que manda e recebe um arquivo usando o Arduino.")
argParser.add_argument("type", help="Tipo de conexão [client, server].", type=str)
argParser.add_argument("-d", "--debug", help="Deve debugar o processo ou não.", action="store_true")
args = argParser.parse_args()
if args.type == "client":
client = Client(serialName, args.debug)
elif args.type == "server":
server(args)
else:
print("[ERRO] Tipo de conexão inválido.")
| ent(): | identifier_name |
aplicacao.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#####################################################
# Camada Física da Computação
# Henry Rocha
# 11/08/2019
# Exemplo de uso do ArgParse e do TkInter.
#####################################################
import sys
import time
import argparse
from enlace import *
from tkinter import Tk
from tkinter.filedialog import askopenfilename
# Serial Com Port
# para saber a sua porta, execute no terminal :
# python -m serial.tools.list_ports
serialName = "/dev/ttyACM0" # Ubuntu (variacao de)
#serialName = "/dev/tty.usbmodem1411" # Mac (variacao de)
#serialName = "COM11" # Windows(variacao de)
class Client():
def __init__(self, serialName, debug=False):
self.com = enlace(serialName)
self.com.enable()
self.debug = debug
self.fileName = None
self.results = []
if debug:
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(self.com.fisica.name))
self.run()
def run(self):
self.shouldStop = False
while not self.shouldStop:
self.configure()
if self.fileName != None:
self.emit()
self.getResults()
self.fileName = None
meanImageSize = 0
meanDeltaTime = 0
meanTransferRate = 0
for result in self.results:
meanImageSize += result[0]
meanDeltaTime += result[1]
meanTransferRate += result[2]
meanImageSize = meanImageSize/len(self.results)
meanDeltaTime = meanDeltaTime/len(self.results)
meanTransferRate = meanTransferRate/len(self.results)
print("[LOG] Tamanho Médio de Imagem.........{:.3f} b".format(meanImageSize))
print("[LOG] Tempo Médio de Transferência....{:.3f} s".format(meanDeltaTime))
print("[LOG] Taxa Média de transferência.....{:.3f} b/s".format(meanTransferRate))
def configure(self):
if self.debug:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
self.fileName = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(self.fileName) is tuple or self.fileName == "":
self.shouldStop = True
self.fileName = None
return None
with open(self.fileName, 'rb') as image:
if self.debug:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
self.imageByteArray = bytearray(imageFile)
self.imageSize = bytes(str(len(self.imageByteArray)), 'UTF-8')
if self.debug:
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(self.imageSize)))
self.textBuffer = self.imageSize + bytearray(b"start") + self.imageByteArray
def emit(self):
if self.debug:
print("[LOG] Tentado transmitir........{} bytes.".format(len(self.textBuffer)))
self.startTime = time.time()
self.com.sendData(self.textBuffer)
# Esperando o fim da transmissão do arquivo.
while(self.com.tx.getIsBussy()):
pass
txSize = self.com.tx.getStatus()
if self.debug:
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
rxBuffer = self.com.getData(len(self.imageSize))[0]
self.endTime = time.time()
if self.debug:
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(self.imageSize):
if self.debug:
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
self.com.disable()
if self.debug:
print("[LOG] Comunicação encerrada.")
self.shouldStop = True
if self.debug:
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
def getResults(self):
deltaTime = self.endTime - self.startTime
transferRate = int(self.imageSize) / deltaTime
if self.debug:
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
self.results.append([int(self.imageSize), deltaTime, transferRate])
def client(args):
# Inicializa enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace(serialN | enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace("/dev/ttyACM1") # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão
com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name))
while True:
# Faz a recepção dos dados
print("\n[LOG] Recebendo dados...")
keywordRecognized = False
receiveBuffer = bytearray()
# Espera até receber uma keyword.
while not keywordRecognized:
rxBuffer, nRx = com.getData(1)
receiveBuffer += rxBuffer
if b"start" in receiveBuffer:
keywordRecognized = True
# Cortando a keyword do buffer recebido.
imageSize = receiveBuffer[:-5]
print("[LOG] Começou a receber a arquivo. Tamanho do arquivo a ser recebido: {} bytes.".format(int(imageSize)))
# Agora recebemos a arquivo em si.
rxBuffer, nRx = com.getData(int(imageSize))
# Salvando a arquivo recebida.
with open("receivedImage.png", "wb") as receivedImage:
receivedImage.write(rxBuffer)
# LOG
print("[LOG] Lido....{} bytes ".format(nRx))
# Retornando o tamanho da arquivo para mostrar que ela foi recebida.
print("[LOG] Retornando o tamanho do arquivo para mostrar que ele foi recebido.")
print("[LOG] Tentado transmitir.......{} bytes.".format(len(imageSize)))
com.sendData(imageSize)
# Esperando o fim da transmissão do arquivo.
while(com.tx.getIsBussy()):
pass
# Atualiza dados da transmissão.
txSize = com.tx.getStatus()
print("[LOG] Transmitido..............{} bytes.".format(int(txSize)))
# Encerra a cmunicação.
com.disable()
print("\n[LOG] Comunicação encerrada.")
if __name__ == "__main__":
argParser = argparse.ArgumentParser(description="Programa que manda e recebe um arquivo usando o Arduino.")
argParser.add_argument("type", help="Tipo de conexão [client, server].", type=str)
argParser.add_argument("-d", "--debug", help="Deve debugar o processo ou não.", action="store_true")
args = argParser.parse_args()
if args.type == "client":
client = Client(serialName, args.debug)
elif args.type == "server":
server(args)
else:
print("[ERRO] Tipo de conexão inválido.")
| ame) # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão
com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name))
shouldClose = False
while not shouldClose:
# Verifica se o arquivo a ser transferido foi passado como
# argumento ou se deve ser escolhido pelo GUI.
if args.file is None:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
filePath = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(filePath) is tuple or filePath == "":
shouldClose = True
sys.exit("[ERRO] Arquivo não escolhido. Abortando... Usar CTRL+C")
else:
print("\n[LOG] Arquivo fornecido como argumento.")
filePath = args.file
with open(filePath, "rb") as image:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
imageByteArray = bytearray(imageFile)
imageSize = bytes(str(len(imageByteArray)), 'UTF-8')
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(imageSize)))
# Criando o buffer a ser transmitido.
txBuffer = imageSize + bytearray(b"start") + imageByteArray
# Envia dado.
print("[LOG] Tentado transmitir........{} bytes.".format(len(txBuffer)))
startTime = time.time()
com.sendData(txBuffer)
# Esperando o fim da transmissão do arquivo.
while(com.tx.getIsBussy()):
pass
# Atualiza dados da transmissão.
txSize = com.tx.getStatus()
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
rxBuffer, nRx = com.getData(len(imageSize))
endTime = time.time()
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(imageSize):
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
com.disable()
print("[LOG] Comunicação encerrada.")
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
# Calculando o tempo e a taxa de transferência.
deltaTime = endTime - startTime
transferRate = int(imageSize) / deltaTime
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
# Encerra a comunicação.
com.disable()
print("\n[LOG] Comunicação encerrada.")
def server(args):
# Inicializa | identifier_body |
aplicacao.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#####################################################
# Camada Física da Computação
# Henry Rocha
# 11/08/2019
# Exemplo de uso do ArgParse e do TkInter.
#####################################################
import sys
import time
import argparse
from enlace import *
from tkinter import Tk
from tkinter.filedialog import askopenfilename
# Serial Com Port
# para saber a sua porta, execute no terminal :
# python -m serial.tools.list_ports
serialName = "/dev/ttyACM0" # Ubuntu (variacao de)
#serialName = "/dev/tty.usbmodem1411" # Mac (variacao de)
#serialName = "COM11" # Windows(variacao de)
class Client():
def __init__(self, serialName, debug=False):
self.com = enlace(serialName)
self.com.enable()
self.debug = debug
self.fileName = None
self.results = []
if debug:
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(self.com.fisica.name))
self.run()
def run(self):
self.shouldStop = False
while not self.shouldStop:
self.configure()
if self.fileName != None:
self.emit()
self.getResults()
self.fileName = None
meanImageSize = 0
meanDeltaTime = 0
meanTransferRate = 0
for result in self.results:
meanImageSize += result[0]
meanDeltaTime += result[1]
meanTransferRate += result[2]
meanImageSize = meanImageSize/len(self.results)
meanDeltaTime = meanDeltaTime/len(self.results)
meanTransferRate = meanTransferRate/len(self.results)
print("[LOG] Tamanho Médio de Imagem.........{:.3f} b".format(meanImageSize))
print("[LOG] Tempo Médio de Transferência....{:.3f} s".format(meanDeltaTime))
print("[LOG] Taxa Média de transferência.....{:.3f} b/s".format(meanTransferRate))
def configure(self):
if self.debug:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
self.fileName = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(self.fileName) is tuple or self.fileName == "":
self.shouldStop = True
self.fileName = None
return None
with open(self.fileName, 'rb') as image:
if self.debug:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
self.imageByteArray = bytearray(imageFile)
self.imageSize = bytes(str(len(self.imageByteArray)), 'UTF-8')
if self.debug:
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(self.imageSize)))
self.textBuffer = self.imageSize + bytearray(b"start") + self.imageByteArray
def emit(self):
if self.debug:
print("[LOG] Tentado transmitir........{} bytes.".format(len(self.textBuffer)))
self.startTime = time.time()
self.com.sendData(self.textBuffer)
# Esperando o fim da transmissão do arquivo.
while(self.com.tx.getIsBussy()):
pass
txSize = self.com.tx.getStatus()
if self.debug:
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
rxBuffer = self.com.getData(len(self.imageSize))[0]
self.endTime = time.time()
if self.debug:
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(self.imageSize):
if self.debug:
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
self.com.disable()
if self.debug:
print("[LOG] Comunicação encerrada.")
self.shouldStop = True
if self.debug:
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
def getResults(self):
deltaTime = self.endTime - self.startTime
transferRate = int(self.imageSize) / deltaTime
if self.debug:
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
self.results.append([int(self.imageSize), deltaTime, transferRate])
def client(args):
# Inicializa enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace(serialName) # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão
com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name))
shouldClose = False
while not shouldClose:
# Verifica se o arquivo a ser transferido foi passado como
# argumento ou se deve ser escolhido pelo GUI.
if args.file is None:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
filePath = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(filePath) is tuple or filePath == "":
shouldClose = True
sys.exit("[ERRO] Arquivo não escolhido. Abortando... Usar CTRL+C")
else:
print("\n[LOG] Arquivo fornecido como argumento.")
filePath = args.file
with open(filePath, "rb") as image:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
imageByteArray = bytearray(imageFile)
imageSize = bytes(str(len(imageByteArray)), 'UTF-8')
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(imageSize)))
# Criando o buffer a ser transmitido.
txBuffer = imageSize + bytearray(b"start") + imageByteArray
# Envia dado.
print("[LOG] Tentado transmitir........{} bytes.".format(len(txBuffer)))
startTime = time.time()
com.sendData(txBuffer)
# Esperando o fim da transmissão do arquivo.
while(com.tx.getIsBussy()):
pass
| ualiza dados da transmissão.
txSize = com.tx.getStatus()
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
rxBuffer, nRx = com.getData(len(imageSize))
endTime = time.time()
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(imageSize):
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
com.disable()
print("[LOG] Comunicação encerrada.")
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
# Calculando o tempo e a taxa de transferência.
deltaTime = endTime - startTime
transferRate = int(imageSize) / deltaTime
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
# Encerra a comunicação.
com.disable()
print("\n[LOG] Comunicação encerrada.")
def server(args):
# Inicializa enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace("/dev/ttyACM1") # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão
com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name))
while True:
# Faz a recepção dos dados
print("\n[LOG] Recebendo dados...")
keywordRecognized = False
receiveBuffer = bytearray()
# Espera até receber uma keyword.
while not keywordRecognized:
rxBuffer, nRx = com.getData(1)
receiveBuffer += rxBuffer
if b"start" in receiveBuffer:
keywordRecognized = True
# Cortando a keyword do buffer recebido.
imageSize = receiveBuffer[:-5]
print("[LOG] Começou a receber a arquivo. Tamanho do arquivo a ser recebido: {} bytes.".format(int(imageSize)))
# Agora recebemos a arquivo em si.
rxBuffer, nRx = com.getData(int(imageSize))
# Salvando a arquivo recebida.
with open("receivedImage.png", "wb") as receivedImage:
receivedImage.write(rxBuffer)
# LOG
print("[LOG] Lido....{} bytes ".format(nRx))
# Retornando o tamanho da arquivo para mostrar que ela foi recebida.
print("[LOG] Retornando o tamanho do arquivo para mostrar que ele foi recebido.")
print("[LOG] Tentado transmitir.......{} bytes.".format(len(imageSize)))
com.sendData(imageSize)
# Esperando o fim da transmissão do arquivo.
while(com.tx.getIsBussy()):
pass
# Atualiza dados da transmissão.
txSize = com.tx.getStatus()
print("[LOG] Transmitido..............{} bytes.".format(int(txSize)))
# Encerra a cmunicação.
com.disable()
print("\n[LOG] Comunicação encerrada.")
if __name__ == "__main__":
argParser = argparse.ArgumentParser(description="Programa que manda e recebe um arquivo usando o Arduino.")
argParser.add_argument("type", help="Tipo de conexão [client, server].", type=str)
argParser.add_argument("-d", "--debug", help="Deve debugar o processo ou não.", action="store_true")
args = argParser.parse_args()
if args.type == "client":
client = Client(serialName, args.debug)
elif args.type == "server":
server(args)
else:
print("[ERRO] Tipo de conexão inválido.")
| # At | conditional_block |
lib.rs | //! A slab allocator implementation for small objects
//! (< architecture page size).
//!
//! The organization is as follows (top-down):
//!
//! * A `ZoneAllocator` manages many `SCAllocator` and can
//! satisfy requests for different allocation sizes.
//! * A `SCAllocator` allocates objects of exactly one size.
//! It holds its data in a ObjectPageList.
//! * A `ObjectPage` contains allocated objects and associated meta-data.
//! * A `PageProvider` is provided by the client and used by the
//! SCAllocator to allocate ObjectPage.
//!
#![allow(unused_features, dead_code, unused_variables)]
#![cfg_attr(feature = "unstable", feature(const_fn))]
#![cfg_attr(test, feature(prelude_import, test, raw, libc))]
#![no_std]
#![crate_name = "slabmalloc"]
#![crate_type = "lib"]
extern crate spin;
#[macro_use]
extern crate log;
#[cfg(test)]
extern crate env_logger;
#[cfg(test)]
#[macro_use]
extern crate std;
#[cfg(test)]
extern crate libc;
#[cfg(test)]
extern crate rand;
#[cfg(test)]
extern crate test;
#[cfg(test)]
mod tests;
use core::alloc::{GlobalAlloc, Layout};
use core::fmt;
use core::mem;
use core::ptr;
use spin::Mutex;
#[cfg(target_arch = "x86_64")]
const CACHE_LINE_SIZE: usize = 64;
#[cfg(target_arch = "x86_64")]
const BASE_PAGE_SIZE: usize = 4096;
#[cfg(target_arch = "x86_64")]
type VAddr = usize;
const MAX_SIZE_CLASSES: usize = 10;
pub struct SafeZoneAllocator(Mutex<ZoneAllocator<'static>>);
impl SafeZoneAllocator {
#[cfg(feature = "unstable")]
pub const fn new(provider: &'static Mutex<PageProvider>) -> SafeZoneAllocator {
SafeZoneAllocator(Mutex::new(ZoneAllocator::new(provider)))
}
#[cfg(not(feature = "unstable"))]
pub fn new(provider: &'static Mutex<PageProvider>) -> SafeZoneAllocator {
SafeZoneAllocator(Mutex::new(ZoneAllocator::new(provider)))
}
}
unsafe impl GlobalAlloc for SafeZoneAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
assert!(layout.align().is_power_of_two());
self.0.lock().allocate(layout)
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
//let ptr = NonNull::new_unchecked(ptr);
self.0.lock().deallocate(ptr, layout);
}
}
/// The memory backing as used by the SCAllocator.
///
/// A client that wants to use the zone or size class allocators
/// has to provide this interface and stick an implementation of it
/// into every SCAllocator.
pub trait PageProvider<'a>: Send {
fn allocate_page(&mut self) -> Option<&'a mut ObjectPage<'a>>;
fn release_page(&mut self, &'a mut ObjectPage<'a>);
}
/// A zone allocator.
///
/// Has a bunch of size class allocators and can serve
/// allocation requests for many different (MAX_SIZE_CLASSES) object sizes
/// (by selecting the right slab allocator).
pub struct ZoneAllocator<'a> {
pager: &'a Mutex<PageProvider<'a>>,
slabs: [SCAllocator<'a>; MAX_SIZE_CLASSES],
}
impl<'a> ZoneAllocator<'a> {
pub const MAX_ALLOC_SIZE: usize = 4032;
#[cfg(feature = "unstable")]
pub const fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> {
ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager),
SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
#[cfg(not(feature = "unstable"))]
pub fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> {
ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager),
SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
/// Return maximum size an object of size `current_size` can use.
///
/// Used to optimize `realloc`.
fn get_max_size(current_size: usize) -> Option<usize> {
match current_size {
0...8 => Some(8),
9...16 => Some(16),
17...32 => Some(32),
33...64 => Some(64),
65...128 => Some(128),
129...256 => Some(256),
257...512 => Some(512),
513...1024 => Some(1024),
1025...2048 => Some(2048),
2049...4032 => Some(4032),
_ => None,
}
}
/// Figure out index into zone array to get the correct slab allocator for that size.
fn get_slab_idx(requested_size: usize) -> Option<usize> {
match requested_size {
0...8 => Some(0),
9...16 => Some(1),
17...32 => Some(2),
33...64 => Some(3),
65...128 => Some(4),
129...256 => Some(5),
257...512 => Some(6),
513...1024 => Some(7),
1025...2048 => Some(8),
2049...4032 => Some(9),
_ => None,
}
}
/// Tries to locate a slab allocator.
///
/// Returns either a index into the slab array or None in case
/// the requested allocation size can not be satisfied by
/// any of the available slabs.
fn try_acquire_slab(&mut self, size: usize) -> Option<usize> {
ZoneAllocator::get_slab_idx(size).map(|idx| {
if self.slabs[idx].size == 0 {
self.slabs[idx].size = size;
}
idx
})
}
/// Refills the SCAllocator in slabs at `idx` with a ObjectPage.
///
/// # TODO
/// * Panics in case we're OOM (should probably return error).
fn refill_slab_allocator<'b>(&'b mut self, idx: usize) {
match self.pager.lock().allocate_page() {
Some(new_head) => self.slabs[idx].insert_slab(new_head),
None => panic!("OOM"),
};
}
/// Allocate a pointer to a block of memory of size `size` with alignment `align`.
///
/// Can return None in case the zone allocator can not satisfy the allocation
/// of the requested size or if we do not have enough memory.
/// In case we are out of memory we try to refill the slab using our local pager
/// and re-try the allocation request once more before we give up.
pub unsafe fn allocate(&mut self, layout: Layout) -> *mut u8 {
match self.try_acquire_slab(layout.size()) {
Some(idx) => {
let mut p = self.slabs[idx].allocate(layout);
if p.is_null() {
self.refill_slab_allocator(idx);
p = self.slabs[idx].allocate(layout);
}
p
}
None => ptr::null_mut(),
}
}
/// Deallocates a pointer to a block of memory previously allocated by `allocate`.
///
/// # Arguments
/// * `ptr` - Address of the memory location to free.
/// * `old_size` - Size of the block.
/// * `align` - Alignment of the block.
///
pub unsafe fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
match self.try_acquire_slab(layout.size()) {
Some(idx) => self.slabs[idx].deallocate(ptr, layout),
None => panic!(
"Unable to find slab allocator for size ({}) with ptr {:?}.",
layout.size(),
ptr
),
}
}
unsafe fn copy(dest: *mut u8, src: *const u8, n: usize) {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
/*pub unsafe fn reallocate<'b>(&'b mut self, ptr: *mut u8, old_size: usize, size: usize, align: usize) -> Option<*mut u8> {
// Return immediately in case we can still fit the new request in the current buffer
match ZoneAllocator::get_max_size(old_size) {
Some(max_size) => {
if max_size >= size {
return Some(ptr);
}
()
},
None => ()
};
// Otherwise allocate, copy, free:
self.allocate(size, align).map(|new| {
ZoneAllocator::copy(new, ptr, old_size);
self.deallocate(NonNull::new_unchecked(ptr as *mut u8), old_size, align);
new
})
}*/
}
/// A list of ObjectPage.
struct ObjectPageList<'a> {
/// Points to the head of the list.
head: Option<&'a mut ObjectPage<'a>>,
/// Number of elements in the list.
pub elements: usize,
}
impl<'a> ObjectPageList<'a> {
#[cfg(feature = "unstable")]
const fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
#[cfg(not(feature = "unstable"))]
fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
fn iter_mut<'b>(&'b mut self) -> ObjectPageIterMut<'a> {
let m = match self.head {
None => Rawlink::none(),
Some(ref mut m) => Rawlink::some(*m),
};
ObjectPageIterMut { head: m }
}
/// Inserts `new_head` at the front of the list.
fn insert_front<'b>(&'b mut self, mut new_head: &'a mut ObjectPage<'a>) {
match self.head {
None => {
new_head.prev = Rawlink::none();
self.head = Some(new_head);
}
Some(ref mut head) => {
new_head.prev = Rawlink::none();
head.prev = Rawlink::some(new_head);
mem::swap(head, &mut new_head);
head.next = Rawlink::some(new_head);
}
}
self.elements += 1;
}
/// Removes `slab_page` from the list.
fn remove_from_list<'b, 'c>(&'b mut self, slab_page: &'c mut ObjectPage<'a>) {
unsafe {
match slab_page.prev.resolve_mut() {
None => {
self.head = slab_page.next.resolve_mut();
}
Some(prev) => {
prev.next = match slab_page.next.resolve_mut() {
None => Rawlink::none(),
Some(next) => Rawlink::some(next),
};
}
}
match slab_page.next.resolve_mut() {
None => (),
Some(next) => {
next.prev = match slab_page.prev.resolve_mut() {
None => Rawlink::none(),
Some(prev) => Rawlink::some(prev),
};
}
}
}
self.elements -= 1;
}
/// Does the list contain `s`?
fn has_objectpage<'b>(&'b mut self, s: &'a ObjectPage<'a>) -> bool {
for slab_page in self.iter_mut() {
if slab_page as *const ObjectPage == s as *const ObjectPage {
return true;
}
}
false
}
}
/// Iterate over all the pages inside a slab allocator
struct ObjectPageIterMut<'a> {
head: Rawlink<ObjectPage<'a>>,
}
impl<'a> Iterator for ObjectPageIterMut<'a> {
type Item = &'a mut ObjectPage<'a>;
#[inline]
fn next(&mut self) -> Option<&'a mut ObjectPage<'a>> {
unsafe {
self.head.resolve_mut().map(|next| {
self.head = match next.next.resolve_mut() {
None => Rawlink::none(),
Some(ref mut sp) => Rawlink::some(*sp),
};
next
})
}
}
}
/// A slab allocator allocates elements of a fixed size.
///
/// It has a list of ObjectPage stored inside `slabs` from which
/// it allocates memory.
pub struct SCAllocator<'a> {
/// Allocation size.
size: usize,
/// Memory backing store, to request new ObjectPage.
pager: &'a Mutex<PageProvider<'a>>,
/// List of ObjectPage.
slabs: ObjectPageList<'a>,
}
#[test]
pub fn iter_empty_list() {
let mut new_head1: ObjectPage = Default::default();
let mut l = ObjectPageList::new();
l.insert_front(&mut new_head1);
for p in l.iter_mut() {}
}
impl<'a> SCAllocator<'a> {
/// Create a new SCAllocator.
#[cfg(feature = "unstable")]
pub const fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Create a new SCAllocator.
#[cfg(not(feature = "unstable"))]
pub fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Return object size of this allocator.
pub fn size(&self) -> usize {
self.size
}
/// Try to allocate a new ObjectPage and insert it.
///
/// # TODO
/// * Amount is currently ignored.
/// * Panics on OOM (should return error!)
fn refill_slab<'b>(&'b mut self, amount: usize) {
let mut pager = self.pager.lock();
for i in 0..amount {
match pager.allocate_page() {
Some(new_head) => {
self.insert_slab(new_head);
}
None => panic!("OOM"),
}
}
}
/// Add a new ObjectPage.
pub fn insert_slab<'b>(&'b mut self, new_head: &'a mut ObjectPage<'a>) {
self.slabs.insert_front(new_head);
}
/// Tries to allocate a block of memory with respect to the `alignment`.
///
/// Only searches within already allocated slab pages.
fn try_allocate_from_pagelist<'b>(&'b mut self, layout: Layout) -> *mut u8 {
let size = self.size;
for (idx, slab_page) in self.slabs.iter_mut().enumerate() {
let ptr = slab_page.allocate(layout);
if !ptr.is_null() {
return ptr;
} else {
continue;
}
}
ptr::null_mut()
}
/// Allocates a block of memory with respect to `alignment`.
///
/// In case of failure will try to grow the slab allocator by requesting
/// additional pages and re-try the allocation once more before we give up.
pub fn allocate<'b>(&'b mut self, layout: Layout) -> *mut u8 {
debug!(
"SCAllocator({}) is trying to allocate {:?}",
self.size, layout
);
assert!(layout.size() <= self.size);
assert!(self.size <= (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
assert!(new_layout.size() >= layout.size());
let ptr = self.try_allocate_from_pagelist(new_layout);
if ptr.is_null() {
self.refill_slab(1);
return self.try_allocate_from_pagelist(layout);
}
debug!(
"SCAllocator({}) allocated ptr=0x{:x}",
self.size, ptr as usize
);
return ptr;
}
/// Deallocates a previously allocated block.
///
/// # Bug
/// This never releases memory in case the ObjectPage are provided by the zone.
pub fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
debug!(
"SCAllocator({}) is trying to deallocate ptr = 0x{:x} layout={:?}",
self.size, ptr as usize, layout
);
assert!(layout.size() <= self.size);
let page = (ptr as usize) & !(BASE_PAGE_SIZE - 1) as usize;
let slab_page = unsafe { mem::transmute::<VAddr, &'a mut ObjectPage>(page) };
assert!(self.size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
slab_page.deallocate(ptr, new_layout);
// Drop page in case it is empty and not the last
if slab_page.is_empty() && self.slabs.elements > 1 {
self.slabs.remove_from_list(slab_page);
let mut pager = self.pager.lock();
pager.release_page(slab_page);
}
}
}
/// Holds allocated data.
///
/// Objects life within data and meta tracks the objects status.
/// Currently, `bitfield`, `next` and `prev` pointer should fit inside
/// a single cache-line.
#[repr(packed)]
pub struct ObjectPage<'a> {
/// Holds memory objects.
data: [u8; 4096 - 64],
/// Next element in list (used by `ObjectPageList`).
next: Rawlink<ObjectPage<'a>>,
prev: Rawlink<ObjectPage<'a>>,
/// A bit-field to track free/allocated memory within `data`.
///
/// # Notes
/// * With only 48 bits we do waste some space at the end of every page for 8 bytes allocations.
/// but 12 bytes on-wards is okay.
bitfield: [u64; 6],
}
impl<'a> Default for ObjectPage<'a> {
fn default() -> ObjectPage<'a> {
unsafe { mem::zeroed() }
}
}
unsafe impl<'a> Send for ObjectPage<'a> {}
unsafe impl<'a> Sync for ObjectPage<'a> {}
impl<'a> fmt::Debug for ObjectPage<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ObjectPage")
}
}
impl<'a> ObjectPage<'a> {
/// Tries to find a free block of memory that satisfies `alignment` requirement.
///
/// # Notes
/// * We pass size here to be able to calculate the resulting address within `data`.
fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> |
/// Check if the current `idx` is allocated.
///
/// # Notes
/// In case `idx` is 3 and allocation size of slab is
/// 8. The corresponding object would start at &data + 3 * 8.
fn is_allocated(&mut self, idx: usize) -> bool {
let base_idx = idx / 64;
let bit_idx = idx % 64;
(self.bitfield[base_idx] & (1 << bit_idx)) > 0
}
/// Sets the bit number `idx` in the bit-field.
fn set_bit(&mut self, idx: usize) {
let base_idx = idx / 64;
let bit_idx = idx % 64;
self.bitfield[base_idx] |= 1 << bit_idx;
}
/// Clears bit number `idx` in the bit-field.
fn clear_bit(&mut self, idx: usize) {
let base_idx = idx / 64;
let bit_idx = idx % 64;
self.bitfield[base_idx] &= !(1 << bit_idx);
}
/// Deallocates a memory object within this page.
fn deallocate(&mut self, ptr: *mut u8, layout: Layout) {
debug!(
"ObjectPage deallocating ptr = 0x{:x} with {:?}",
ptr as usize, layout
);
let page_offset = (ptr as usize) & 0xfff;
assert!(page_offset % layout.size() == 0);
let idx = page_offset / layout.size();
assert!(
self.is_allocated(idx),
"ptr = 0x{:x} was not allocated",
ptr as usize
);
self.clear_bit(idx);
}
/// Tries to allocate an object within this page.
///
/// In case the Slab is full, returns None.
fn allocate(&mut self, layout: Layout) -> *mut u8 {
match self.first_fit(layout) {
Some((idx, addr)) => {
self.set_bit(idx);
unsafe { mem::transmute::<usize, *mut u8>(addr) }
}
None => ptr::null_mut(),
}
}
/// Checks if we can still allocate more objects within the page.
fn is_full(&self) -> bool {
unsafe {
self.bitfield
.iter()
.filter(|&x| *x != u64::max_value())
.count()
== 0
}
}
/// Checks if the page has currently no allocation.
fn is_empty(&self) -> bool {
unsafe { self.bitfield.iter().filter(|&x| *x > 0x0).count() == 0 }
}
}
#[test]
pub fn check_first_fit() {
let op: ObjectPage = Default::default();
let layout = Layout::from_size_align(8, 8).unwrap();
println!("{:?}", op.first_fit(layout));
}
/// Rawlink is a type like Option<T> but for holding a raw pointer
struct Rawlink<T> {
p: *mut T,
}
impl<T> Default for Rawlink<T> {
fn default() -> Self {
Rawlink { p: ptr::null_mut() }
}
}
impl<T> Rawlink<T> {
/// Like Option::None for Rawlink
fn none() -> Rawlink<T> {
Rawlink { p: ptr::null_mut() }
}
/// Like Option::Some for Rawlink
fn some(n: &mut T) -> Rawlink<T> {
Rawlink { p: n }
}
/// Convert the `Rawlink` into an Option value
///
/// **unsafe** because:
///
/// - Dereference of raw pointer.
/// - Returns reference of arbitrary lifetime.
unsafe fn resolve<'a>(&self) -> Option<&'a T> {
self.p.as_ref()
}
/// Convert the `Rawlink` into an Option value
///
/// **unsafe** because:
///
/// - Dereference of raw pointer.
/// - Returns reference of arbitrary lifetime.
unsafe fn resolve_mut<'a>(&mut self) -> Option<&'a mut T> {
self.p.as_mut()
}
/// Return the `Rawlink` and replace with `Rawlink::none()`
fn take(&mut self) -> Rawlink<T> {
mem::replace(self, Rawlink::none())
}
}
| {
unsafe {
for (base_idx, b) in self.bitfield.iter().enumerate() {
let bitval = *b;
if bitval == u64::max_value() {
continue;
} else {
let negated = !bitval;
let first_free = negated.trailing_zeros() as usize;
let idx: usize = base_idx * 64 + first_free;
let offset = idx * layout.size();
let offset_inside_data_area =
offset <= (BASE_PAGE_SIZE - CACHE_LINE_SIZE - layout.size());
if !offset_inside_data_area {
return None;
}
let addr: usize = ((self as *const ObjectPage) as usize) + offset;
let alignment_ok = addr % layout.align() == 0;
let block_is_free = bitval & (1 << first_free) == 0;
if alignment_ok && block_is_free {
return Some((idx, addr));
}
}
}
}
None
} | identifier_body |
lib.rs | //! A slab allocator implementation for small objects
//! (< architecture page size).
//!
//! The organization is as follows (top-down):
//!
//! * A `ZoneAllocator` manages many `SCAllocator` and can
//! satisfy requests for different allocation sizes.
//! * A `SCAllocator` allocates objects of exactly one size.
//! It holds its data in a ObjectPageList.
//! * A `ObjectPage` contains allocated objects and associated meta-data.
//! * A `PageProvider` is provided by the client and used by the
//! SCAllocator to allocate ObjectPage.
//!
#![allow(unused_features, dead_code, unused_variables)]
#![cfg_attr(feature = "unstable", feature(const_fn))]
#![cfg_attr(test, feature(prelude_import, test, raw, libc))]
#![no_std]
#![crate_name = "slabmalloc"]
#![crate_type = "lib"]
extern crate spin;
#[macro_use]
extern crate log;
#[cfg(test)]
extern crate env_logger;
#[cfg(test)]
#[macro_use]
extern crate std;
#[cfg(test)]
extern crate libc;
#[cfg(test)]
extern crate rand;
#[cfg(test)]
extern crate test;
#[cfg(test)]
mod tests;
use core::alloc::{GlobalAlloc, Layout};
use core::fmt;
use core::mem;
use core::ptr;
use spin::Mutex;
#[cfg(target_arch = "x86_64")]
const CACHE_LINE_SIZE: usize = 64;
#[cfg(target_arch = "x86_64")]
const BASE_PAGE_SIZE: usize = 4096;
#[cfg(target_arch = "x86_64")]
type VAddr = usize;
const MAX_SIZE_CLASSES: usize = 10;
pub struct SafeZoneAllocator(Mutex<ZoneAllocator<'static>>);
impl SafeZoneAllocator {
#[cfg(feature = "unstable")]
pub const fn new(provider: &'static Mutex<PageProvider>) -> SafeZoneAllocator {
SafeZoneAllocator(Mutex::new(ZoneAllocator::new(provider)))
}
#[cfg(not(feature = "unstable"))]
pub fn new(provider: &'static Mutex<PageProvider>) -> SafeZoneAllocator {
SafeZoneAllocator(Mutex::new(ZoneAllocator::new(provider)))
}
}
unsafe impl GlobalAlloc for SafeZoneAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
assert!(layout.align().is_power_of_two());
self.0.lock().allocate(layout)
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
//let ptr = NonNull::new_unchecked(ptr);
self.0.lock().deallocate(ptr, layout);
}
}
/// The memory backing as used by the SCAllocator.
///
/// A client that wants to use the zone or size class allocators
/// has to provide this interface and stick an implementation of it
/// into every SCAllocator.
pub trait PageProvider<'a>: Send {
fn allocate_page(&mut self) -> Option<&'a mut ObjectPage<'a>>;
fn release_page(&mut self, &'a mut ObjectPage<'a>);
}
/// A zone allocator.
///
/// Has a bunch of size class allocators and can serve
/// allocation requests for many different (MAX_SIZE_CLASSES) object sizes
/// (by selecting the right slab allocator).
pub struct ZoneAllocator<'a> {
pager: &'a Mutex<PageProvider<'a>>,
slabs: [SCAllocator<'a>; MAX_SIZE_CLASSES],
}
impl<'a> ZoneAllocator<'a> {
pub const MAX_ALLOC_SIZE: usize = 4032;
#[cfg(feature = "unstable")]
pub const fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> {
ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager),
SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
#[cfg(not(feature = "unstable"))]
pub fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> {
ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager),
SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
/// Return maximum size an object of size `current_size` can use.
///
/// Used to optimize `realloc`.
fn | (current_size: usize) -> Option<usize> {
match current_size {
0...8 => Some(8),
9...16 => Some(16),
17...32 => Some(32),
33...64 => Some(64),
65...128 => Some(128),
129...256 => Some(256),
257...512 => Some(512),
513...1024 => Some(1024),
1025...2048 => Some(2048),
2049...4032 => Some(4032),
_ => None,
}
}
/// Figure out index into zone array to get the correct slab allocator for that size.
fn get_slab_idx(requested_size: usize) -> Option<usize> {
match requested_size {
0...8 => Some(0),
9...16 => Some(1),
17...32 => Some(2),
33...64 => Some(3),
65...128 => Some(4),
129...256 => Some(5),
257...512 => Some(6),
513...1024 => Some(7),
1025...2048 => Some(8),
2049...4032 => Some(9),
_ => None,
}
}
/// Tries to locate a slab allocator.
///
/// Returns either a index into the slab array or None in case
/// the requested allocation size can not be satisfied by
/// any of the available slabs.
fn try_acquire_slab(&mut self, size: usize) -> Option<usize> {
ZoneAllocator::get_slab_idx(size).map(|idx| {
if self.slabs[idx].size == 0 {
self.slabs[idx].size = size;
}
idx
})
}
/// Refills the SCAllocator in slabs at `idx` with a ObjectPage.
///
/// # TODO
/// * Panics in case we're OOM (should probably return error).
fn refill_slab_allocator<'b>(&'b mut self, idx: usize) {
match self.pager.lock().allocate_page() {
Some(new_head) => self.slabs[idx].insert_slab(new_head),
None => panic!("OOM"),
};
}
/// Allocate a pointer to a block of memory of size `size` with alignment `align`.
///
/// Can return None in case the zone allocator can not satisfy the allocation
/// of the requested size or if we do not have enough memory.
/// In case we are out of memory we try to refill the slab using our local pager
/// and re-try the allocation request once more before we give up.
pub unsafe fn allocate(&mut self, layout: Layout) -> *mut u8 {
match self.try_acquire_slab(layout.size()) {
Some(idx) => {
let mut p = self.slabs[idx].allocate(layout);
if p.is_null() {
self.refill_slab_allocator(idx);
p = self.slabs[idx].allocate(layout);
}
p
}
None => ptr::null_mut(),
}
}
/// Deallocates a pointer to a block of memory previously allocated by `allocate`.
///
/// # Arguments
/// * `ptr` - Address of the memory location to free.
/// * `old_size` - Size of the block.
/// * `align` - Alignment of the block.
///
pub unsafe fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
match self.try_acquire_slab(layout.size()) {
Some(idx) => self.slabs[idx].deallocate(ptr, layout),
None => panic!(
"Unable to find slab allocator for size ({}) with ptr {:?}.",
layout.size(),
ptr
),
}
}
unsafe fn copy(dest: *mut u8, src: *const u8, n: usize) {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
/*pub unsafe fn reallocate<'b>(&'b mut self, ptr: *mut u8, old_size: usize, size: usize, align: usize) -> Option<*mut u8> {
// Return immediately in case we can still fit the new request in the current buffer
match ZoneAllocator::get_max_size(old_size) {
Some(max_size) => {
if max_size >= size {
return Some(ptr);
}
()
},
None => ()
};
// Otherwise allocate, copy, free:
self.allocate(size, align).map(|new| {
ZoneAllocator::copy(new, ptr, old_size);
self.deallocate(NonNull::new_unchecked(ptr as *mut u8), old_size, align);
new
})
}*/
}
/// A list of ObjectPage.
///
/// Intrusively linked: the `next`/`prev` links live inside each
/// `ObjectPage`; this struct only tracks the head and an element count.
struct ObjectPageList<'a> {
    /// Points to the head of the list.
    head: Option<&'a mut ObjectPage<'a>>,
    /// Number of elements in the list.
    pub elements: usize,
}
impl<'a> ObjectPageList<'a> {
    /// Creates an empty list (const version, nightly only).
    #[cfg(feature = "unstable")]
    const fn new() -> ObjectPageList<'a> {
        ObjectPageList {
            head: None,
            elements: 0,
        }
    }
    /// Creates an empty list.
    #[cfg(not(feature = "unstable"))]
    fn new() -> ObjectPageList<'a> {
        ObjectPageList {
            head: None,
            elements: 0,
        }
    }
    /// Returns a mutable iterator over the pages, starting at the head.
    fn iter_mut<'b>(&'b mut self) -> ObjectPageIterMut<'a> {
        // The iterator walks raw links so it does not keep `self`
        // borrowed while the caller consumes it.
        let m = match self.head {
            None => Rawlink::none(),
            Some(ref mut m) => Rawlink::some(*m),
        };
        ObjectPageIterMut { head: m }
    }
    /// Inserts `new_head` at the front of the list.
    fn insert_front<'b>(&'b mut self, mut new_head: &'a mut ObjectPage<'a>) {
        match self.head {
            None => {
                new_head.prev = Rawlink::none();
                self.head = Some(new_head);
            }
            Some(ref mut head) => {
                new_head.prev = Rawlink::none();
                head.prev = Rawlink::some(new_head);
                // Swap the references so `self.head` now holds the new
                // page and `new_head` points at the old head, then link
                // new -> old. This avoids moving out of `self.head`.
                mem::swap(head, &mut new_head);
                head.next = Rawlink::some(new_head);
            }
        }
        self.elements += 1;
    }
    /// Removes `slab_page` from the list.
    fn remove_from_list<'b, 'c>(&'b mut self, slab_page: &'c mut ObjectPage<'a>) {
        unsafe {
            // Unlink from the predecessor, or advance the list head if
            // `slab_page` was the head.
            match slab_page.prev.resolve_mut() {
                None => {
                    self.head = slab_page.next.resolve_mut();
                }
                Some(prev) => {
                    prev.next = match slab_page.next.resolve_mut() {
                        None => Rawlink::none(),
                        Some(next) => Rawlink::some(next),
                    };
                }
            }
            // Unlink from the successor, if any.
            match slab_page.next.resolve_mut() {
                None => (),
                Some(next) => {
                    next.prev = match slab_page.prev.resolve_mut() {
                        None => Rawlink::none(),
                        Some(prev) => Rawlink::some(prev),
                    };
                }
            }
        }
        // NOTE(review): `slab_page.next`/`prev` are left pointing at
        // their old neighbors here; callers must not rely on them after
        // removal — confirm.
        self.elements -= 1;
    }
    /// Does the list contain `s`?
    ///
    /// Compares by address (identity), not by page contents.
    fn has_objectpage<'b>(&'b mut self, s: &'a ObjectPage<'a>) -> bool {
        for slab_page in self.iter_mut() {
            if slab_page as *const ObjectPage == s as *const ObjectPage {
                return true;
            }
        }
        false
    }
}
/// Iterate over all the pages inside a slab allocator
struct ObjectPageIterMut<'a> {
    head: Rawlink<ObjectPage<'a>>,
}
impl<'a> Iterator for ObjectPageIterMut<'a> {
    type Item = &'a mut ObjectPage<'a>;
    #[inline]
    fn next(&mut self) -> Option<&'a mut ObjectPage<'a>> {
        // Yield the page the cursor points at (if any), advancing the
        // cursor to its successor first.
        unsafe {
            let current = self.head.resolve_mut()?;
            self.head = match current.next.resolve_mut() {
                Some(succ) => Rawlink::some(succ),
                None => Rawlink::none(),
            };
            Some(current)
        }
    }
}
/// A slab allocator allocates elements of a fixed size.
///
/// It has a list of ObjectPage stored inside `slabs` from which
/// it allocates memory.
pub struct SCAllocator<'a> {
    /// Allocation size (object stride) of this size class.
    size: usize,
    /// Memory backing store, to request new ObjectPage.
    pager: &'a Mutex<PageProvider<'a>>,
    /// List of ObjectPage.
    slabs: ObjectPageList<'a>,
}
/// Inserting a single page must make it reachable via iteration.
#[test]
pub fn iter_empty_list() {
    let mut page: ObjectPage = Default::default();
    let mut list = ObjectPageList::new();
    list.insert_front(&mut page);
    // The original test only drove the iterator without checking
    // anything; assert the expected state so a broken list fails.
    assert_eq!(list.elements, 1);
    assert_eq!(list.iter_mut().count(), 1);
}
impl<'a> SCAllocator<'a> {
/// Create a new SCAllocator (const version, nightly only).
///
/// Invariant (not statically checked): size < BASE_PAGE_SIZE - CACHE_LINE_SIZE.
#[cfg(feature = "unstable")]
pub const fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
    SCAllocator {
        size,
        pager,
        slabs: ObjectPageList::new(),
    }
}
/// Create a new SCAllocator.
///
/// Invariant (not statically checked): size < BASE_PAGE_SIZE - CACHE_LINE_SIZE.
#[cfg(not(feature = "unstable"))]
pub fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
    SCAllocator {
        size,
        pager,
        slabs: ObjectPageList::new(),
    }
}
/// Return object size (in bytes) of this allocator's size class.
pub fn size(&self) -> usize {
    self.size
}
/// Try to allocate `amount` new ObjectPage and insert them.
///
/// # TODO
/// * Panics on OOM (should return error!)
fn refill_slab<'b>(&'b mut self, amount: usize) {
    // Hold the pager lock across the whole refill; each page is
    // pushed to the front of our page list.
    let mut pager = self.pager.lock();
    for _ in 0..amount {
        match pager.allocate_page() {
            Some(new_head) => self.insert_slab(new_head),
            None => panic!("OOM"),
        }
    }
}
/// Add a new ObjectPage at the front of this allocator's page list.
pub fn insert_slab<'b>(&'b mut self, new_head: &'a mut ObjectPage<'a>) {
    self.slabs.insert_front(new_head);
}
/// Tries to allocate a block of memory with respect to the `alignment`.
///
/// Only searches within already allocated slab pages; returns null when
/// none of them has a free slot for `layout`.
fn try_allocate_from_pagelist<'b>(&'b mut self, layout: Layout) -> *mut u8 {
    // First-fit over the page list. (The old version bound unused
    // `size` and `idx` locals and had a redundant `else { continue }`.)
    for slab_page in self.slabs.iter_mut() {
        let ptr = slab_page.allocate(layout);
        if !ptr.is_null() {
            return ptr;
        }
    }
    ptr::null_mut()
}
/// Allocates a block of memory with respect to `alignment`.
///
/// In case of failure will try to grow the slab allocator by requesting
/// additional pages and re-try the allocation once more before we give up.
pub fn allocate<'b>(&'b mut self, layout: Layout) -> *mut u8 {
    debug!(
        "SCAllocator({}) is trying to allocate {:?}",
        self.size, layout
    );
    assert!(layout.size() <= self.size);
    assert!(self.size <= (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
    // Pad the request up to this allocator's object size so every object
    // in a page uses the same stride (first_fit derives offsets from the
    // layout's size).
    let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
    assert!(new_layout.size() >= layout.size());
    let mut ptr = self.try_allocate_from_pagelist(new_layout);
    if ptr.is_null() {
        self.refill_slab(1);
        // BUG FIX: retry with the padded `new_layout`, not the caller's
        // raw `layout`. The raw layout would make the fresh page compute
        // object offsets with the wrong stride, corrupting the size class.
        ptr = self.try_allocate_from_pagelist(new_layout);
    }
    debug!(
        "SCAllocator({}) allocated ptr=0x{:x}",
        self.size, ptr as usize
    );
    ptr
}
/// Deallocates a previously allocated block.
///
/// # Bug
/// This never releases memory in case the ObjectPage are provided by the zone.
pub fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
    debug!(
        "SCAllocator({}) is trying to deallocate ptr = 0x{:x} layout={:?}",
        self.size, ptr as usize, layout
    );
    assert!(layout.size() <= self.size);
    // Round the pointer down to its 4 KiB page boundary: the ObjectPage
    // owning this object starts at the beginning of that page.
    let page = (ptr as usize) & !(BASE_PAGE_SIZE - 1) as usize;
    // Reinterpret the page-aligned address as the page's metadata struct.
    // NOTE(review): assumes every object was handed out from a
    // page-aligned ObjectPage — confirm against the PageProvider impl.
    let slab_page = unsafe { mem::transmute::<VAddr, &'a mut ObjectPage>(page) };
    assert!(self.size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
    // Rebuild the padded layout that `allocate` used so the object index
    // inside the page is computed with the same stride.
    let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
    slab_page.deallocate(ptr, new_layout);
    // Drop page in case it is empty and not the last
    if slab_page.is_empty() && self.slabs.elements > 1 {
        self.slabs.remove_from_list(slab_page);
        let mut pager = self.pager.lock();
        pager.release_page(slab_page);
    }
}
}
/// Holds allocated data.
///
/// Objects live within `data` and the trailing metadata tracks their status.
/// Currently, `bitfield`, `next` and `prev` pointer should fit inside
/// a single cache-line (64 bytes — hence `data` is 4096 - 64).
#[repr(packed)]
pub struct ObjectPage<'a> {
    /// Holds memory objects.
    data: [u8; 4096 - 64],
    /// Next element in list (used by `ObjectPageList`).
    next: Rawlink<ObjectPage<'a>>,
    /// Previous element in list (used by `ObjectPageList`).
    prev: Rawlink<ObjectPage<'a>>,
    /// A bit-field to track free/allocated memory within `data`.
    ///
    /// # Notes
    /// * With only 48 bits we do waste some space at the end of every page for 8 bytes allocations.
    /// but 12 bytes on-wards is okay.
    bitfield: [u64; 6],
}
impl<'a> Default for ObjectPage<'a> {
    fn default() -> ObjectPage<'a> {
        // An all-zero page is a valid initial state: zeroed Rawlinks are
        // null (= none) and a zeroed bitfield marks every slot free.
        unsafe { mem::zeroed() }
    }
}
// SAFETY: NOTE(review) — the raw `next`/`prev` pointers are only mutated
// while the owning allocator is itself borrowed mutably; confirm before
// relying on these impls from new call sites.
unsafe impl<'a> Send for ObjectPage<'a> {}
unsafe impl<'a> Sync for ObjectPage<'a> {}
impl<'a> fmt::Debug for ObjectPage<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "ObjectPage")
    }
}
impl<'a> ObjectPage<'a> {
/// Tries to find a free block of memory that satisfies `alignment` requirement.
///
/// Returns `(object_index, absolute_address)` of the first free slot, or
/// None when the page cannot hold another object of this layout.
///
/// # Notes
/// * We pass size here to be able to calculate the resulting address within `data`.
fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> {
    unsafe {
        for (base_idx, b) in self.bitfield.iter().enumerate() {
            let bitval = *b;
            if bitval == u64::max_value() {
                // All 64 slots tracked by this word are taken.
                continue;
            } else {
                // Lowest clear bit in this word = first free slot.
                let negated = !bitval;
                let first_free = negated.trailing_zeros() as usize;
                let idx: usize = base_idx * 64 + first_free;
                let offset = idx * layout.size();
                // The object must end before the metadata cache-line at
                // the tail of the page.
                let offset_inside_data_area =
                    offset <= (BASE_PAGE_SIZE - CACHE_LINE_SIZE - layout.size());
                if !offset_inside_data_area {
                    return None;
                }
                let addr: usize = ((self as *const ObjectPage) as usize) + offset;
                let alignment_ok = addr % layout.align() == 0;
                // Always true by construction of `first_free`; kept as a
                // defensive re-check.
                let block_is_free = bitval & (1 << first_free) == 0;
                if alignment_ok && block_is_free {
                    return Some((idx, addr));
                }
                // NOTE(review): when alignment fails for this slot, the
                // scan moves on to the next 64-bit word and skips any
                // later free bits within this one — confirm intentional.
            }
        }
    }
    None
}
/// Check if the current `idx` is allocated.
///
/// # Notes
/// In case `idx` is 3 and allocation size of slab is
/// 8. The corresponding object would start at &data + 3 * 8.
fn is_allocated(&mut self, idx: usize) -> bool {
    let (word, bit) = (idx / 64, idx % 64);
    self.bitfield[word] & (1 << bit) != 0
}
/// Sets the bit number `idx` in the bit-field (marks the slot allocated).
fn set_bit(&mut self, idx: usize) {
    let (word, bit) = (idx / 64, idx % 64);
    self.bitfield[word] |= 1 << bit;
}
/// Clears bit number `idx` in the bit-field (marks the slot free).
fn clear_bit(&mut self, idx: usize) {
    let (word, bit) = (idx / 64, idx % 64);
    self.bitfield[word] &= !(1 << bit);
}
/// Deallocates a memory object within this page.
///
/// Derives the object index from the pointer's offset inside the page
/// and clears the corresponding bit. The assert catches double-frees and
/// pointers that were never handed out.
fn deallocate(&mut self, ptr: *mut u8, layout: Layout) {
    debug!(
        "ObjectPage deallocating ptr = 0x{:x} with {:?}",
        ptr as usize, layout
    );
    // Offset of the object within its 4 KiB page.
    let page_offset = (ptr as usize) & 0xfff;
    assert!(page_offset % layout.size() == 0);
    let idx = page_offset / layout.size();
    assert!(
        self.is_allocated(idx),
        "ptr = 0x{:x} was not allocated",
        ptr as usize
    );
    self.clear_bit(idx);
}
/// Tries to allocate an object within this page.
///
/// In case the Slab is full, returns a null pointer.
fn allocate(&mut self, layout: Layout) -> *mut u8 {
    match self.first_fit(layout) {
        Some((idx, addr)) => {
            self.set_bit(idx);
            // A plain cast is the idiomatic way to turn an address into
            // a pointer; `mem::transmute` was overkill here.
            addr as *mut u8
        }
        None => ptr::null_mut(),
    }
}
/// Checks if we can still allocate more objects within the page.
fn is_full(&self) -> bool {
    // Full when every tracking word has all 64 bits set.
    unsafe { self.bitfield.iter().all(|&w| w == u64::max_value()) }
}
/// Checks if the page has currently no allocation.
fn is_empty(&self) -> bool {
    // Empty when no tracking word has any bit set.
    unsafe { self.bitfield.iter().all(|&w| w == 0) }
}
}
/// Smoke test: first_fit on a fresh (all-free) page must not crash.
#[test]
pub fn check_first_fit() {
    let page: ObjectPage = Default::default();
    let layout = Layout::from_size_align(8, 8).unwrap();
    println!("{:?}", page.first_fit(layout));
}
/// Rawlink is a type like Option<T> but for holding a raw pointer
/// (null encodes "none"). Used for the intrusive ObjectPage list links.
struct Rawlink<T> {
    p: *mut T,
}
impl<T> Default for Rawlink<T> {
    fn default() -> Self {
        // Same encoding as `Rawlink::none()`: a null link.
        Rawlink { p: ptr::null_mut() }
    }
}
impl<T> Rawlink<T> {
    /// Like Option::None for Rawlink
    fn none() -> Rawlink<T> {
        Rawlink { p: ptr::null_mut() }
    }
    /// Like Option::Some for Rawlink
    fn some(n: &mut T) -> Rawlink<T> {
        Rawlink { p: n }
    }
    /// Convert the `Rawlink` into an Option value
    ///
    /// **unsafe** because:
    ///
    /// - Dereference of raw pointer.
    /// - Returns reference of arbitrary lifetime.
    unsafe fn resolve<'a>(&self) -> Option<&'a T> {
        self.p.as_ref()
    }
    /// Convert the `Rawlink` into an Option value
    ///
    /// **unsafe** because:
    ///
    /// - Dereference of raw pointer.
    /// - Returns reference of arbitrary lifetime.
    unsafe fn resolve_mut<'a>(&mut self) -> Option<&'a mut T> {
        self.p.as_mut()
    }
    /// Return the `Rawlink` and replace with `Rawlink::none()`
    fn take(&mut self) -> Rawlink<T> {
        mem::replace(self, Rawlink::none())
    }
}
| get_max_size | identifier_name |
lib.rs | //! A slab allocator implementation for small objects
//! (< architecture page size).
//!
//! The organization is as follows (top-down):
//!
//! * A `ZoneAllocator` manages many `SCAllocator` and can
//! satisfy requests for different allocation sizes.
//! * A `SCAllocator` allocates objects of exactly one size.
//! It holds its data in a ObjectPageList.
//! * A `ObjectPage` contains allocated objects and associated meta-data.
//! * A `PageProvider` is provided by the client and used by the
//! SCAllocator to allocate ObjectPage.
//!
#![allow(unused_features, dead_code, unused_variables)]
#![cfg_attr(feature = "unstable", feature(const_fn))]
#![cfg_attr(test, feature(prelude_import, test, raw, libc))]
#![no_std]
#![crate_name = "slabmalloc"]
#![crate_type = "lib"]
extern crate spin;
#[macro_use]
extern crate log;
#[cfg(test)]
extern crate env_logger;
#[cfg(test)]
#[macro_use]
extern crate std;
#[cfg(test)]
extern crate libc;
#[cfg(test)]
extern crate rand;
#[cfg(test)]
extern crate test;
#[cfg(test)]
mod tests;
use core::alloc::{GlobalAlloc, Layout};
use core::fmt;
use core::mem;
use core::ptr;
use spin::Mutex;
#[cfg(target_arch = "x86_64")]
const CACHE_LINE_SIZE: usize = 64;
#[cfg(target_arch = "x86_64")]
const BASE_PAGE_SIZE: usize = 4096;
#[cfg(target_arch = "x86_64")]
type VAddr = usize;
const MAX_SIZE_CLASSES: usize = 10;
/// Thread-safe wrapper around a `ZoneAllocator` (see the `GlobalAlloc`
/// impl for this type).
pub struct SafeZoneAllocator(Mutex<ZoneAllocator<'static>>);
impl SafeZoneAllocator {
    /// Wraps a zone allocator backed by `provider` (const, nightly only).
    #[cfg(feature = "unstable")]
    pub const fn new(provider: &'static Mutex<PageProvider>) -> SafeZoneAllocator {
        SafeZoneAllocator(Mutex::new(ZoneAllocator::new(provider)))
    }
    /// Wraps a zone allocator backed by `provider`.
    #[cfg(not(feature = "unstable"))]
    pub fn new(provider: &'static Mutex<PageProvider>) -> SafeZoneAllocator {
        SafeZoneAllocator(Mutex::new(ZoneAllocator::new(provider)))
    }
}
unsafe impl GlobalAlloc for SafeZoneAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // GlobalAlloc signals failure by returning null, which matches
        // ZoneAllocator::allocate's contract.
        assert!(layout.align().is_power_of_two());
        self.0.lock().allocate(layout)
    }
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        //let ptr = NonNull::new_unchecked(ptr);
        self.0.lock().deallocate(ptr, layout);
    }
}
/// The memory backing as used by the SCAllocator.
///
/// A client that wants to use the zone or size class allocators
/// has to provide this interface and stick an implementation of it
/// into every SCAllocator.
pub trait PageProvider<'a>: Send {
    /// Hands out a fresh page, or None when the backing store is exhausted.
    fn allocate_page(&mut self) -> Option<&'a mut ObjectPage<'a>>;
    /// Returns `page` to the backing store once the allocator no longer
    /// needs it. (Named parameter: anonymous trait-method parameters are
    /// deprecated since Rust 2018.)
    fn release_page(&mut self, page: &'a mut ObjectPage<'a>);
}
/// A zone allocator.
///
/// Has a bunch of size class allocators and can serve
/// allocation requests for many different (MAX_SIZE_CLASSES) object sizes
/// (by selecting the right slab allocator).
pub struct ZoneAllocator<'a> {
    /// Backing store shared by all size-class allocators.
    pager: &'a Mutex<PageProvider<'a>>,
    /// One `SCAllocator` per size class (8 B up to 4032 B).
    slabs: [SCAllocator<'a>; MAX_SIZE_CLASSES],
}
impl<'a> ZoneAllocator<'a> {
/// Largest request a zone allocator can serve (the biggest size class).
pub const MAX_ALLOC_SIZE: usize = 4032;
/// Creates a zone allocator backed by `pager` (const, nightly only).
///
/// The size classes listed here must stay in sync with `get_slab_idx`.
#[cfg(feature = "unstable")]
pub const fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> {
    ZoneAllocator {
        pager: pager,
        slabs: [
            SCAllocator::new(8, pager),
            SCAllocator::new(16, pager),
            SCAllocator::new(32, pager),
            SCAllocator::new(64, pager),
            SCAllocator::new(128, pager),
            SCAllocator::new(256, pager),
            SCAllocator::new(512, pager),
            SCAllocator::new(1024, pager),
            SCAllocator::new(2048, pager),
            SCAllocator::new(4032, pager),
        ],
    }
}
/// Creates a zone allocator backed by `pager`.
///
/// The size classes listed here must stay in sync with `get_slab_idx`.
#[cfg(not(feature = "unstable"))]
pub fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> {
    ZoneAllocator {
        pager: pager,
        slabs: [
            SCAllocator::new(8, pager),
            SCAllocator::new(16, pager),
            SCAllocator::new(32, pager),
            SCAllocator::new(64, pager),
            SCAllocator::new(128, pager),
            SCAllocator::new(256, pager),
            SCAllocator::new(512, pager),
            SCAllocator::new(1024, pager),
            SCAllocator::new(2048, pager),
            SCAllocator::new(4032, pager),
        ],
    }
}
/// Return maximum size an object of size `current_size` can use.
///
/// Used to optimize `realloc`: a block may grow in place up to the
/// capacity of its size class. Returns None above the largest class.
fn get_max_size(current_size: usize) -> Option<usize> {
    // `..=` is the current spelling of inclusive range patterns; the
    // old `...` syntax is deprecated (and an error in edition 2021).
    match current_size {
        0..=8 => Some(8),
        9..=16 => Some(16),
        17..=32 => Some(32),
        33..=64 => Some(64),
        65..=128 => Some(128),
        129..=256 => Some(256),
        257..=512 => Some(512),
        513..=1024 => Some(1024),
        1025..=2048 => Some(2048),
        2049..=4032 => Some(4032),
        _ => None,
    }
}
/// Figure out index into zone array to get the correct slab allocator for that size.
///
/// Returns None when `requested_size` exceeds the largest size class.
fn get_slab_idx(requested_size: usize) -> Option<usize> {
    // `..=` replaces the deprecated `...` inclusive-range syntax.
    match requested_size {
        0..=8 => Some(0),
        9..=16 => Some(1),
        17..=32 => Some(2),
        33..=64 => Some(3),
        65..=128 => Some(4),
        129..=256 => Some(5),
        257..=512 => Some(6),
        513..=1024 => Some(7),
        1025..=2048 => Some(8),
        2049..=4032 => Some(9),
        _ => None,
    }
}
/// Tries to locate a slab allocator.
///
/// Returns either a index into the slab array or None in case
/// the requested allocation size can not be satisfied by
/// any of the available slabs.
fn try_acquire_slab(&mut self, size: usize) -> Option<usize> {
    ZoneAllocator::get_slab_idx(size).map(|idx| {
        // NOTE(review): `new` initializes every slab with a non-zero
        // size, so this lazy initialization looks unreachable — confirm
        // before removing.
        if self.slabs[idx].size == 0 {
            self.slabs[idx].size = size;
        }
        idx
    })
}
/// Refills the SCAllocator in slabs at `idx` with a ObjectPage.
///
/// # TODO
/// * Panics in case we're OOM (should probably return error).
fn refill_slab_allocator<'b>(&'b mut self, idx: usize) {
match self.pager.lock().allocate_page() {
Some(new_head) => self.slabs[idx].insert_slab(new_head),
None => panic!("OOM"),
};
}
/// Allocate a pointer to a block of memory of size `size` with alignment `align`.
///
/// Can return None in case the zone allocator can not satisfy the allocation
/// of the requested size or if we do not have enough memory.
/// In case we are out of memory we try to refill the slab using our local pager
/// and re-try the allocation request once more before we give up.
pub unsafe fn allocate(&mut self, layout: Layout) -> *mut u8 {
match self.try_acquire_slab(layout.size()) {
Some(idx) => {
let mut p = self.slabs[idx].allocate(layout);
if p.is_null() {
self.refill_slab_allocator(idx);
p = self.slabs[idx].allocate(layout);
}
p
}
None => ptr::null_mut(),
}
}
/// Deallocates a pointer to a block of memory previously allocated by `allocate`.
///
/// # Arguments
/// * `ptr` - Address of the memory location to free.
/// * `old_size` - Size of the block.
/// * `align` - Alignment of the block.
///
pub unsafe fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
match self.try_acquire_slab(layout.size()) {
Some(idx) => self.slabs[idx].deallocate(ptr, layout),
None => panic!(
"Unable to find slab allocator for size ({}) with ptr {:?}.",
layout.size(),
ptr
),
}
}
unsafe fn copy(dest: *mut u8, src: *const u8, n: usize) {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
/*pub unsafe fn reallocate<'b>(&'b mut self, ptr: *mut u8, old_size: usize, size: usize, align: usize) -> Option<*mut u8> {
// Return immediately in case we can still fit the new request in the current buffer
match ZoneAllocator::get_max_size(old_size) {
Some(max_size) => {
if max_size >= size {
return Some(ptr);
}
()
},
None => ()
};
// Otherwise allocate, copy, free:
self.allocate(size, align).map(|new| {
ZoneAllocator::copy(new, ptr, old_size);
self.deallocate(NonNull::new_unchecked(ptr as *mut u8), old_size, align);
new
})
}*/
}
/// A list of ObjectPage.
struct ObjectPageList<'a> {
/// Points to the head of the list.
head: Option<&'a mut ObjectPage<'a>>,
/// Number of elements in the list.
pub elements: usize,
}
impl<'a> ObjectPageList<'a> {
#[cfg(feature = "unstable")]
const fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
#[cfg(not(feature = "unstable"))]
fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
fn iter_mut<'b>(&'b mut self) -> ObjectPageIterMut<'a> {
let m = match self.head {
None => Rawlink::none(),
Some(ref mut m) => Rawlink::some(*m),
};
ObjectPageIterMut { head: m }
}
/// Inserts `new_head` at the front of the list.
fn insert_front<'b>(&'b mut self, mut new_head: &'a mut ObjectPage<'a>) {
match self.head {
None => {
new_head.prev = Rawlink::none();
self.head = Some(new_head);
}
Some(ref mut head) => {
new_head.prev = Rawlink::none();
head.prev = Rawlink::some(new_head);
mem::swap(head, &mut new_head);
head.next = Rawlink::some(new_head);
}
}
self.elements += 1;
}
/// Removes `slab_page` from the list.
fn remove_from_list<'b, 'c>(&'b mut self, slab_page: &'c mut ObjectPage<'a>) {
unsafe {
match slab_page.prev.resolve_mut() {
None => {
self.head = slab_page.next.resolve_mut();
}
Some(prev) => {
prev.next = match slab_page.next.resolve_mut() {
None => Rawlink::none(),
Some(next) => Rawlink::some(next),
};
}
}
match slab_page.next.resolve_mut() {
None => (),
Some(next) => {
next.prev = match slab_page.prev.resolve_mut() {
None => Rawlink::none(),
Some(prev) => Rawlink::some(prev),
};
}
}
}
self.elements -= 1;
}
/// Does the list contain `s`?
fn has_objectpage<'b>(&'b mut self, s: &'a ObjectPage<'a>) -> bool {
for slab_page in self.iter_mut() {
if slab_page as *const ObjectPage == s as *const ObjectPage {
return true;
}
}
false
}
}
/// Iterate over all the pages inside a slab allocator
struct ObjectPageIterMut<'a> {
head: Rawlink<ObjectPage<'a>>,
}
impl<'a> Iterator for ObjectPageIterMut<'a> {
type Item = &'a mut ObjectPage<'a>;
#[inline]
fn next(&mut self) -> Option<&'a mut ObjectPage<'a>> {
unsafe {
self.head.resolve_mut().map(|next| {
self.head = match next.next.resolve_mut() {
None => Rawlink::none(),
Some(ref mut sp) => Rawlink::some(*sp),
};
next
})
}
}
}
/// A slab allocator allocates elements of a fixed size.
///
/// It has a list of ObjectPage stored inside `slabs` from which
/// it allocates memory.
pub struct SCAllocator<'a> {
/// Allocation size.
size: usize,
/// Memory backing store, to request new ObjectPage.
pager: &'a Mutex<PageProvider<'a>>,
/// List of ObjectPage.
slabs: ObjectPageList<'a>,
}
#[test]
pub fn iter_empty_list() {
let mut new_head1: ObjectPage = Default::default();
let mut l = ObjectPageList::new();
l.insert_front(&mut new_head1);
for p in l.iter_mut() {}
}
impl<'a> SCAllocator<'a> {
/// Create a new SCAllocator.
#[cfg(feature = "unstable")]
pub const fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Create a new SCAllocator.
#[cfg(not(feature = "unstable"))]
pub fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Return object size of this allocator.
pub fn size(&self) -> usize {
self.size
}
/// Try to allocate a new ObjectPage and insert it.
///
/// # TODO
/// * Amount is currently ignored.
/// * Panics on OOM (should return error!)
fn refill_slab<'b>(&'b mut self, amount: usize) {
let mut pager = self.pager.lock();
for i in 0..amount {
match pager.allocate_page() {
Some(new_head) => {
self.insert_slab(new_head);
}
None => panic!("OOM"),
}
}
}
/// Add a new ObjectPage.
pub fn insert_slab<'b>(&'b mut self, new_head: &'a mut ObjectPage<'a>) {
self.slabs.insert_front(new_head);
}
/// Tries to allocate a block of memory with respect to the `alignment`.
///
/// Only searches within already allocated slab pages.
fn try_allocate_from_pagelist<'b>(&'b mut self, layout: Layout) -> *mut u8 {
let size = self.size;
for (idx, slab_page) in self.slabs.iter_mut().enumerate() {
let ptr = slab_page.allocate(layout);
if !ptr.is_null() {
return ptr;
} else |
}
ptr::null_mut()
}
/// Allocates a block of memory with respect to `alignment`.
///
/// In case of failure will try to grow the slab allocator by requesting
/// additional pages and re-try the allocation once more before we give up.
pub fn allocate<'b>(&'b mut self, layout: Layout) -> *mut u8 {
debug!(
"SCAllocator({}) is trying to allocate {:?}",
self.size, layout
);
assert!(layout.size() <= self.size);
assert!(self.size <= (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
assert!(new_layout.size() >= layout.size());
let ptr = self.try_allocate_from_pagelist(new_layout);
if ptr.is_null() {
self.refill_slab(1);
return self.try_allocate_from_pagelist(layout);
}
debug!(
"SCAllocator({}) allocated ptr=0x{:x}",
self.size, ptr as usize
);
return ptr;
}
/// Deallocates a previously allocated block.
///
/// # Bug
/// This never releases memory in case the ObjectPage are provided by the zone.
pub fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
debug!(
"SCAllocator({}) is trying to deallocate ptr = 0x{:x} layout={:?}",
self.size, ptr as usize, layout
);
assert!(layout.size() <= self.size);
let page = (ptr as usize) & !(BASE_PAGE_SIZE - 1) as usize;
let slab_page = unsafe { mem::transmute::<VAddr, &'a mut ObjectPage>(page) };
assert!(self.size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
slab_page.deallocate(ptr, new_layout);
// Drop page in case it is empty and not the last
if slab_page.is_empty() && self.slabs.elements > 1 {
self.slabs.remove_from_list(slab_page);
let mut pager = self.pager.lock();
pager.release_page(slab_page);
}
}
}
/// Holds allocated data.
///
/// Objects life within data and meta tracks the objects status.
/// Currently, `bitfield`, `next` and `prev` pointer should fit inside
/// a single cache-line.
#[repr(packed)]
pub struct ObjectPage<'a> {
/// Holds memory objects.
data: [u8; 4096 - 64],
/// Next element in list (used by `ObjectPageList`).
next: Rawlink<ObjectPage<'a>>,
prev: Rawlink<ObjectPage<'a>>,
/// A bit-field to track free/allocated memory within `data`.
///
/// # Notes
/// * With only 48 bits we do waste some space at the end of every page for 8 bytes allocations.
/// but 12 bytes on-wards is okay.
bitfield: [u64; 6],
}
impl<'a> Default for ObjectPage<'a> {
fn default() -> ObjectPage<'a> {
unsafe { mem::zeroed() }
}
}
unsafe impl<'a> Send for ObjectPage<'a> {}
unsafe impl<'a> Sync for ObjectPage<'a> {}
impl<'a> fmt::Debug for ObjectPage<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ObjectPage")
}
}
impl<'a> ObjectPage<'a> {
/// Tries to find a free block of memory that satisfies `alignment` requirement.
///
/// # Notes
/// * We pass size here to be able to calculate the resulting address within `data`.
fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> {
unsafe {
for (base_idx, b) in self.bitfield.iter().enumerate() {
let bitval = *b;
if bitval == u64::max_value() {
continue;
} else {
let negated = !bitval;
let first_free = negated.trailing_zeros() as usize;
let idx: usize = base_idx * 64 + first_free;
let offset = idx * layout.size();
let offset_inside_data_area =
offset <= (BASE_PAGE_SIZE - CACHE_LINE_SIZE - layout.size());
if !offset_inside_data_area {
return None;
}
let addr: usize = ((self as *const ObjectPage) as usize) + offset;
let alignment_ok = addr % layout.align() == 0;
let block_is_free = bitval & (1 << first_free) == 0;
if alignment_ok && block_is_free {
return Some((idx, addr));
}
}
}
}
None
}
/// Check if the current `idx` is allocated.
///
/// # Notes
/// In case `idx` is 3 and allocation size of slab is
/// 8. The corresponding object would start at &data + 3 * 8.
fn is_allocated(&mut self, idx: usize) -> bool {
let base_idx = idx / 64;
let bit_idx = idx % 64;
(self.bitfield[base_idx] & (1 << bit_idx)) > 0
}
/// Sets the bit number `idx` in the bit-field.
fn set_bit(&mut self, idx: usize) {
let base_idx = idx / 64;
let bit_idx = idx % 64;
self.bitfield[base_idx] |= 1 << bit_idx;
}
/// Clears bit number `idx` in the bit-field.
fn clear_bit(&mut self, idx: usize) {
let base_idx = idx / 64;
let bit_idx = idx % 64;
self.bitfield[base_idx] &= !(1 << bit_idx);
}
/// Deallocates a memory object within this page.
fn deallocate(&mut self, ptr: *mut u8, layout: Layout) {
debug!(
"ObjectPage deallocating ptr = 0x{:x} with {:?}",
ptr as usize, layout
);
let page_offset = (ptr as usize) & 0xfff;
assert!(page_offset % layout.size() == 0);
let idx = page_offset / layout.size();
assert!(
self.is_allocated(idx),
"ptr = 0x{:x} was not allocated",
ptr as usize
);
self.clear_bit(idx);
}
/// Tries to allocate an object within this page.
///
/// In case the Slab is full, returns None.
fn allocate(&mut self, layout: Layout) -> *mut u8 {
match self.first_fit(layout) {
Some((idx, addr)) => {
self.set_bit(idx);
unsafe { mem::transmute::<usize, *mut u8>(addr) }
}
None => ptr::null_mut(),
}
}
/// Checks if we can still allocate more objects within the page.
fn is_full(&self) -> bool {
unsafe {
self.bitfield
.iter()
.filter(|&x| *x != u64::max_value())
.count()
== 0
}
}
/// Checks if the page has currently no allocation.
fn is_empty(&self) -> bool {
unsafe { self.bitfield.iter().filter(|&x| *x > 0x0).count() == 0 }
}
}
#[test]
pub fn check_first_fit() {
let op: ObjectPage = Default::default();
let layout = Layout::from_size_align(8, 8).unwrap();
println!("{:?}", op.first_fit(layout));
}
/// Rawlink is a type like Option<T> but for holding a raw pointer
struct Rawlink<T> {
p: *mut T,
}
impl<T> Default for Rawlink<T> {
fn default() -> Self {
Rawlink { p: ptr::null_mut() }
}
}
impl<T> Rawlink<T> {
/// Like Option::None for Rawlink
fn none() -> Rawlink<T> {
Rawlink { p: ptr::null_mut() }
}
/// Like Option::Some for Rawlink
fn some(n: &mut T) -> Rawlink<T> {
Rawlink { p: n }
}
/// Convert the `Rawlink` into an Option value
///
/// **unsafe** because:
///
/// - Dereference of raw pointer.
/// - Returns reference of arbitrary lifetime.
unsafe fn resolve<'a>(&self) -> Option<&'a T> {
self.p.as_ref()
}
/// Convert the `Rawlink` into an Option value
///
/// **unsafe** because:
///
/// - Dereference of raw pointer.
/// - Returns reference of arbitrary lifetime.
unsafe fn resolve_mut<'a>(&mut self) -> Option<&'a mut T> {
self.p.as_mut()
}
/// Return the `Rawlink` and replace with `Rawlink::none()`
fn take(&mut self) -> Rawlink<T> {
mem::replace(self, Rawlink::none())
}
}
| {
continue;
} | conditional_block |
lib.rs | //! A slab allocator implementation for small objects
//! (< architecture page size).
//!
//! The organization is as follows (top-down):
//!
//! * A `ZoneAllocator` manages many `SCAllocator` and can
//! satisfy requests for different allocation sizes.
//! * A `SCAllocator` allocates objects of exactly one size.
//! It holds its data in a ObjectPageList.
//! * A `ObjectPage` contains allocated objects and associated meta-data.
//! * A `PageProvider` is provided by the client and used by the
//! SCAllocator to allocate ObjectPage.
//!
#![allow(unused_features, dead_code, unused_variables)]
#![cfg_attr(feature = "unstable", feature(const_fn))]
#![cfg_attr(test, feature(prelude_import, test, raw, libc))]
#![no_std]
#![crate_name = "slabmalloc"]
#![crate_type = "lib"]
extern crate spin;
#[macro_use]
extern crate log;
#[cfg(test)]
extern crate env_logger;
#[cfg(test)]
#[macro_use]
extern crate std;
#[cfg(test)]
extern crate libc;
#[cfg(test)]
extern crate rand;
#[cfg(test)]
extern crate test;
#[cfg(test)]
mod tests;
use core::alloc::{GlobalAlloc, Layout};
use core::fmt;
use core::mem;
use core::ptr;
use spin::Mutex;
#[cfg(target_arch = "x86_64")]
const CACHE_LINE_SIZE: usize = 64;
#[cfg(target_arch = "x86_64")]
const BASE_PAGE_SIZE: usize = 4096;
#[cfg(target_arch = "x86_64")]
type VAddr = usize;
const MAX_SIZE_CLASSES: usize = 10;
pub struct SafeZoneAllocator(Mutex<ZoneAllocator<'static>>);
impl SafeZoneAllocator {
#[cfg(feature = "unstable")]
pub const fn new(provider: &'static Mutex<PageProvider>) -> SafeZoneAllocator {
SafeZoneAllocator(Mutex::new(ZoneAllocator::new(provider)))
}
#[cfg(not(feature = "unstable"))]
pub fn new(provider: &'static Mutex<PageProvider>) -> SafeZoneAllocator {
SafeZoneAllocator(Mutex::new(ZoneAllocator::new(provider)))
}
}
unsafe impl GlobalAlloc for SafeZoneAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
assert!(layout.align().is_power_of_two());
self.0.lock().allocate(layout)
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
//let ptr = NonNull::new_unchecked(ptr);
self.0.lock().deallocate(ptr, layout);
}
}
/// The memory backing as used by the SCAllocator.
///
/// A client that wants to use the zone or size class allocators
/// has to provide this interface and stick an implementation of it
/// into every SCAllocator.
pub trait PageProvider<'a>: Send {
fn allocate_page(&mut self) -> Option<&'a mut ObjectPage<'a>>;
fn release_page(&mut self, &'a mut ObjectPage<'a>);
}
/// A zone allocator.
///
/// Has a bunch of size class allocators and can serve
/// allocation requests for many different (MAX_SIZE_CLASSES) object sizes
/// (by selecting the right slab allocator).
pub struct ZoneAllocator<'a> {
pager: &'a Mutex<PageProvider<'a>>,
slabs: [SCAllocator<'a>; MAX_SIZE_CLASSES],
}
impl<'a> ZoneAllocator<'a> {
pub const MAX_ALLOC_SIZE: usize = 4032;
#[cfg(feature = "unstable")]
pub const fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> { | SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
#[cfg(not(feature = "unstable"))]
pub fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> {
ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager),
SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
/// Return maximum size an object of size `current_size` can use.
///
/// Used to optimize `realloc`.
fn get_max_size(current_size: usize) -> Option<usize> {
match current_size {
0...8 => Some(8),
9...16 => Some(16),
17...32 => Some(32),
33...64 => Some(64),
65...128 => Some(128),
129...256 => Some(256),
257...512 => Some(512),
513...1024 => Some(1024),
1025...2048 => Some(2048),
2049...4032 => Some(4032),
_ => None,
}
}
/// Figure out index into zone array to get the correct slab allocator for that size.
fn get_slab_idx(requested_size: usize) -> Option<usize> {
match requested_size {
0...8 => Some(0),
9...16 => Some(1),
17...32 => Some(2),
33...64 => Some(3),
65...128 => Some(4),
129...256 => Some(5),
257...512 => Some(6),
513...1024 => Some(7),
1025...2048 => Some(8),
2049...4032 => Some(9),
_ => None,
}
}
/// Tries to locate a slab allocator.
///
/// Returns either a index into the slab array or None in case
/// the requested allocation size can not be satisfied by
/// any of the available slabs.
fn try_acquire_slab(&mut self, size: usize) -> Option<usize> {
ZoneAllocator::get_slab_idx(size).map(|idx| {
if self.slabs[idx].size == 0 {
self.slabs[idx].size = size;
}
idx
})
}
/// Refills the SCAllocator in slabs at `idx` with a ObjectPage.
///
/// # TODO
/// * Panics in case we're OOM (should probably return error).
fn refill_slab_allocator<'b>(&'b mut self, idx: usize) {
match self.pager.lock().allocate_page() {
Some(new_head) => self.slabs[idx].insert_slab(new_head),
None => panic!("OOM"),
};
}
/// Allocate a pointer to a block of memory of size `size` with alignment `align`.
///
/// Can return None in case the zone allocator can not satisfy the allocation
/// of the requested size or if we do not have enough memory.
/// In case we are out of memory we try to refill the slab using our local pager
/// and re-try the allocation request once more before we give up.
pub unsafe fn allocate(&mut self, layout: Layout) -> *mut u8 {
match self.try_acquire_slab(layout.size()) {
Some(idx) => {
let mut p = self.slabs[idx].allocate(layout);
if p.is_null() {
self.refill_slab_allocator(idx);
p = self.slabs[idx].allocate(layout);
}
p
}
None => ptr::null_mut(),
}
}
/// Deallocates a pointer to a block of memory previously allocated by `allocate`.
///
/// # Arguments
/// * `ptr` - Address of the memory location to free.
/// * `old_size` - Size of the block.
/// * `align` - Alignment of the block.
///
pub unsafe fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
match self.try_acquire_slab(layout.size()) {
Some(idx) => self.slabs[idx].deallocate(ptr, layout),
None => panic!(
"Unable to find slab allocator for size ({}) with ptr {:?}.",
layout.size(),
ptr
),
}
}
unsafe fn copy(dest: *mut u8, src: *const u8, n: usize) {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
/*pub unsafe fn reallocate<'b>(&'b mut self, ptr: *mut u8, old_size: usize, size: usize, align: usize) -> Option<*mut u8> {
// Return immediately in case we can still fit the new request in the current buffer
match ZoneAllocator::get_max_size(old_size) {
Some(max_size) => {
if max_size >= size {
return Some(ptr);
}
()
},
None => ()
};
// Otherwise allocate, copy, free:
self.allocate(size, align).map(|new| {
ZoneAllocator::copy(new, ptr, old_size);
self.deallocate(NonNull::new_unchecked(ptr as *mut u8), old_size, align);
new
})
}*/
}
/// A list of ObjectPage.
struct ObjectPageList<'a> {
/// Points to the head of the list.
head: Option<&'a mut ObjectPage<'a>>,
/// Number of elements in the list.
pub elements: usize,
}
impl<'a> ObjectPageList<'a> {
#[cfg(feature = "unstable")]
const fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
#[cfg(not(feature = "unstable"))]
fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
fn iter_mut<'b>(&'b mut self) -> ObjectPageIterMut<'a> {
let m = match self.head {
None => Rawlink::none(),
Some(ref mut m) => Rawlink::some(*m),
};
ObjectPageIterMut { head: m }
}
/// Inserts `new_head` at the front of the list.
fn insert_front<'b>(&'b mut self, mut new_head: &'a mut ObjectPage<'a>) {
match self.head {
None => {
new_head.prev = Rawlink::none();
self.head = Some(new_head);
}
Some(ref mut head) => {
new_head.prev = Rawlink::none();
head.prev = Rawlink::some(new_head);
mem::swap(head, &mut new_head);
head.next = Rawlink::some(new_head);
}
}
self.elements += 1;
}
/// Removes `slab_page` from the list.
fn remove_from_list<'b, 'c>(&'b mut self, slab_page: &'c mut ObjectPage<'a>) {
unsafe {
match slab_page.prev.resolve_mut() {
None => {
self.head = slab_page.next.resolve_mut();
}
Some(prev) => {
prev.next = match slab_page.next.resolve_mut() {
None => Rawlink::none(),
Some(next) => Rawlink::some(next),
};
}
}
match slab_page.next.resolve_mut() {
None => (),
Some(next) => {
next.prev = match slab_page.prev.resolve_mut() {
None => Rawlink::none(),
Some(prev) => Rawlink::some(prev),
};
}
}
}
self.elements -= 1;
}
/// Does the list contain `s`?
fn has_objectpage<'b>(&'b mut self, s: &'a ObjectPage<'a>) -> bool {
for slab_page in self.iter_mut() {
if slab_page as *const ObjectPage == s as *const ObjectPage {
return true;
}
}
false
}
}
/// Iterate over all the pages inside a slab allocator
struct ObjectPageIterMut<'a> {
head: Rawlink<ObjectPage<'a>>,
}
impl<'a> Iterator for ObjectPageIterMut<'a> {
type Item = &'a mut ObjectPage<'a>;
#[inline]
fn next(&mut self) -> Option<&'a mut ObjectPage<'a>> {
unsafe {
self.head.resolve_mut().map(|next| {
self.head = match next.next.resolve_mut() {
None => Rawlink::none(),
Some(ref mut sp) => Rawlink::some(*sp),
};
next
})
}
}
}
/// A slab allocator allocates elements of a fixed size.
///
/// It has a list of ObjectPage stored inside `slabs` from which
/// it allocates memory.
pub struct SCAllocator<'a> {
/// Allocation size.
size: usize,
/// Memory backing store, to request new ObjectPage.
pager: &'a Mutex<PageProvider<'a>>,
/// List of ObjectPage.
slabs: ObjectPageList<'a>,
}
#[test]
pub fn iter_empty_list() {
let mut new_head1: ObjectPage = Default::default();
let mut l = ObjectPageList::new();
l.insert_front(&mut new_head1);
for p in l.iter_mut() {}
}
impl<'a> SCAllocator<'a> {
/// Create a new SCAllocator.
#[cfg(feature = "unstable")]
pub const fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Create a new SCAllocator.
#[cfg(not(feature = "unstable"))]
pub fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Return object size of this allocator.
pub fn size(&self) -> usize {
self.size
}
/// Try to allocate a new ObjectPage and insert it.
///
/// # TODO
/// * Amount is currently ignored.
/// * Panics on OOM (should return error!)
fn refill_slab<'b>(&'b mut self, amount: usize) {
let mut pager = self.pager.lock();
for i in 0..amount {
match pager.allocate_page() {
Some(new_head) => {
self.insert_slab(new_head);
}
None => panic!("OOM"),
}
}
}
/// Add a new ObjectPage.
pub fn insert_slab<'b>(&'b mut self, new_head: &'a mut ObjectPage<'a>) {
self.slabs.insert_front(new_head);
}
/// Tries to allocate a block of memory with respect to the `alignment`.
///
/// Only searches within already allocated slab pages.
fn try_allocate_from_pagelist<'b>(&'b mut self, layout: Layout) -> *mut u8 {
let size = self.size;
for (idx, slab_page) in self.slabs.iter_mut().enumerate() {
let ptr = slab_page.allocate(layout);
if !ptr.is_null() {
return ptr;
} else {
continue;
}
}
ptr::null_mut()
}
/// Allocates a block of memory with respect to `alignment`.
///
/// In case of failure will try to grow the slab allocator by requesting
/// additional pages and re-try the allocation once more before we give up.
pub fn allocate<'b>(&'b mut self, layout: Layout) -> *mut u8 {
debug!(
"SCAllocator({}) is trying to allocate {:?}",
self.size, layout
);
assert!(layout.size() <= self.size);
assert!(self.size <= (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
assert!(new_layout.size() >= layout.size());
let ptr = self.try_allocate_from_pagelist(new_layout);
if ptr.is_null() {
self.refill_slab(1);
return self.try_allocate_from_pagelist(layout);
}
debug!(
"SCAllocator({}) allocated ptr=0x{:x}",
self.size, ptr as usize
);
return ptr;
}
/// Deallocates a previously allocated block.
///
/// # Bug
/// This never releases memory in case the ObjectPage are provided by the zone.
pub fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
debug!(
"SCAllocator({}) is trying to deallocate ptr = 0x{:x} layout={:?}",
self.size, ptr as usize, layout
);
assert!(layout.size() <= self.size);
let page = (ptr as usize) & !(BASE_PAGE_SIZE - 1) as usize;
let slab_page = unsafe { mem::transmute::<VAddr, &'a mut ObjectPage>(page) };
assert!(self.size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
slab_page.deallocate(ptr, new_layout);
// Drop page in case it is empty and not the last
if slab_page.is_empty() && self.slabs.elements > 1 {
self.slabs.remove_from_list(slab_page);
let mut pager = self.pager.lock();
pager.release_page(slab_page);
}
}
}
/// Holds allocated data.
///
/// Objects life within data and meta tracks the objects status.
/// Currently, `bitfield`, `next` and `prev` pointer should fit inside
/// a single cache-line.
#[repr(packed)]
pub struct ObjectPage<'a> {
/// Holds memory objects.
data: [u8; 4096 - 64],
/// Next element in list (used by `ObjectPageList`).
next: Rawlink<ObjectPage<'a>>,
prev: Rawlink<ObjectPage<'a>>,
/// A bit-field to track free/allocated memory within `data`.
///
/// # Notes
/// * With only 48 bits we do waste some space at the end of every page for 8 bytes allocations.
/// but 12 bytes on-wards is okay.
bitfield: [u64; 6],
}
impl<'a> Default for ObjectPage<'a> {
fn default() -> ObjectPage<'a> {
unsafe { mem::zeroed() }
}
}
unsafe impl<'a> Send for ObjectPage<'a> {}
unsafe impl<'a> Sync for ObjectPage<'a> {}
impl<'a> fmt::Debug for ObjectPage<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ObjectPage")
}
}
impl<'a> ObjectPage<'a> {
/// Tries to find a free block of memory that satisfies `alignment` requirement.
///
/// # Notes
/// * We pass size here to be able to calculate the resulting address within `data`.
fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> {
unsafe {
for (base_idx, b) in self.bitfield.iter().enumerate() {
let bitval = *b;
if bitval == u64::max_value() {
continue;
} else {
let negated = !bitval;
let first_free = negated.trailing_zeros() as usize;
let idx: usize = base_idx * 64 + first_free;
let offset = idx * layout.size();
let offset_inside_data_area =
offset <= (BASE_PAGE_SIZE - CACHE_LINE_SIZE - layout.size());
if !offset_inside_data_area {
return None;
}
let addr: usize = ((self as *const ObjectPage) as usize) + offset;
let alignment_ok = addr % layout.align() == 0;
let block_is_free = bitval & (1 << first_free) == 0;
if alignment_ok && block_is_free {
return Some((idx, addr));
}
}
}
}
None
}
/// Check if the current `idx` is allocated.
///
/// # Notes
/// In case `idx` is 3 and allocation size of slab is
/// 8. The corresponding object would start at &data + 3 * 8.
fn is_allocated(&mut self, idx: usize) -> bool {
let base_idx = idx / 64;
let bit_idx = idx % 64;
(self.bitfield[base_idx] & (1 << bit_idx)) > 0
}
/// Sets the bit number `idx` in the bit-field.
fn set_bit(&mut self, idx: usize) {
let base_idx = idx / 64;
let bit_idx = idx % 64;
self.bitfield[base_idx] |= 1 << bit_idx;
}
/// Clears bit number `idx` in the bit-field.
fn clear_bit(&mut self, idx: usize) {
let base_idx = idx / 64;
let bit_idx = idx % 64;
self.bitfield[base_idx] &= !(1 << bit_idx);
}
/// Deallocates a memory object within this page.
fn deallocate(&mut self, ptr: *mut u8, layout: Layout) {
debug!(
"ObjectPage deallocating ptr = 0x{:x} with {:?}",
ptr as usize, layout
);
let page_offset = (ptr as usize) & 0xfff;
assert!(page_offset % layout.size() == 0);
let idx = page_offset / layout.size();
assert!(
self.is_allocated(idx),
"ptr = 0x{:x} was not allocated",
ptr as usize
);
self.clear_bit(idx);
}
/// Tries to allocate an object within this page.
///
/// In case the Slab is full, returns None.
fn allocate(&mut self, layout: Layout) -> *mut u8 {
match self.first_fit(layout) {
Some((idx, addr)) => {
self.set_bit(idx);
unsafe { mem::transmute::<usize, *mut u8>(addr) }
}
None => ptr::null_mut(),
}
}
/// Checks if we can still allocate more objects within the page.
fn is_full(&self) -> bool {
unsafe {
self.bitfield
.iter()
.filter(|&x| *x != u64::max_value())
.count()
== 0
}
}
/// Checks if the page has currently no allocation.
fn is_empty(&self) -> bool {
unsafe { self.bitfield.iter().filter(|&x| *x > 0x0).count() == 0 }
}
}
#[test]
pub fn check_first_fit() {
let op: ObjectPage = Default::default();
let layout = Layout::from_size_align(8, 8).unwrap();
println!("{:?}", op.first_fit(layout));
}
/// Rawlink is a type like Option<T> but for holding a raw pointer
struct Rawlink<T> {
p: *mut T,
}
impl<T> Default for Rawlink<T> {
fn default() -> Self {
Rawlink { p: ptr::null_mut() }
}
}
impl<T> Rawlink<T> {
/// Like Option::None for Rawlink
fn none() -> Rawlink<T> {
Rawlink { p: ptr::null_mut() }
}
/// Like Option::Some for Rawlink
fn some(n: &mut T) -> Rawlink<T> {
Rawlink { p: n }
}
/// Convert the `Rawlink` into an Option value
///
/// **unsafe** because:
///
/// - Dereference of raw pointer.
/// - Returns reference of arbitrary lifetime.
unsafe fn resolve<'a>(&self) -> Option<&'a T> {
self.p.as_ref()
}
/// Convert the `Rawlink` into an Option value
///
/// **unsafe** because:
///
/// - Dereference of raw pointer.
/// - Returns reference of arbitrary lifetime.
unsafe fn resolve_mut<'a>(&mut self) -> Option<&'a mut T> {
self.p.as_mut()
}
/// Return the `Rawlink` and replace with `Rawlink::none()`
fn take(&mut self) -> Rawlink<T> {
mem::replace(self, Rawlink::none())
}
} | ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager), | random_line_split |
ip6.py | # $Id: ip6.py 87 2013-03-05 19:41:04Z andrewflnr@gmail.com $
# -*- coding: utf-8 -*-
"""Internet Protocol, version 6."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
from . import ip
from .compat import compat_ord
class IP6(dpkt.Packet):
"""Internet Protocol, version 6.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of IPv6.
TODO.
"""
__hdr__ = (
('_v_fc_flow', 'I', 0x60000000),
('plen', 'H', 0), # payload length (not including header)
('nxt', 'B', 0), # next header protocol
('hlim', 'B', 0), # hop limit
('src', '16s', ''),
('dst', '16s', '')
)
_protosw = ip.IP._protosw
@property
def v(self):
return self._v_fc_flow >> 28
@v.setter
def v(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xf0000000) | (v << 28)
@property
def fc(self):
return (self._v_fc_flow >> 20) & 0xff
@fc.setter
def fc(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xff00000) | (v << 20)
@property
def flow(self):
return self._v_fc_flow & 0xfffff
@flow.setter
def flow(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xfffff) | (v & 0xfffff)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.extension_hdrs = {}
# NOTE: self.extension_hdrs is not accurate, as it doesn't support duplicate header types.
# According to RFC-1883 "Each extension header should occur at most once, except for the
# Destination Options header which should occur at most twice".
# Secondly, the .headers_str() method attempts to pack the extension headers in order as
# defined in the RFC, however it doesn't adjust the next header (nxt) pointer accordingly.
# Here we introduce the new field .all_extension_headers; it allows duplicate types and
# keeps the original order.
self.all_extension_headers = []
if self.plen:
buf = self.data[:self.plen]
else: # due to jumbo payload or TSO
buf = self.data
next_ext_hdr = self.nxt
while next_ext_hdr in ext_hdrs:
ext = ext_hdrs_cls[next_ext_hdr](buf)
self.extension_hdrs[next_ext_hdr] = ext
self.all_extension_headers.append(ext)
buf = buf[ext.length:]
next_ext_hdr = getattr(ext, 'nxt', None)
# set the payload protocol id
if next_ext_hdr is not None:
self.p = next_ext_hdr
try:
self.data = self._protosw[next_ext_hdr](buf)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
self.data = buf
def headers_str(self):
# If all_extension_headers is available, return the headers as they originally appeared
if self.all_extension_headers:
return b''.join(bytes(ext) for ext in self.all_extension_headers)
# Output extension headers in order defined in RFC1883 (except dest opts)
header_str = b""
for hdr in ext_hdrs:
if hdr in self.extension_hdrs:
header_str += bytes(self.extension_hdrs[hdr])
return header_str
def __bytes__(self):
if (self.p == 6 or self.p == 17 or self.p == 58) and not self.data.sum:
# XXX - set TCP, UDP, and ICMPv6 checksums
p = bytes(self.data)
s = dpkt.struct.pack('>16s16sxBH', self.src, self.dst, self.nxt, len(p))
s = dpkt.in_cksum_add(0, s)
s = dpkt.in_cksum_add(s, p)
try:
self.data.sum = dpkt.in_cksum_done(s)
except AttributeError:
pass
return self.pack_hdr() + self.headers_str() + bytes(self.data)
@classmethod
def set_proto(cls, p, pktclass):
cls._protosw[p] = pktclass
@classmethod
def get_proto(cls, p):
return cls._protosw[p]
class IP6ExtensionHeader(dpkt.Packet):
"""
An extension header is very similar to a 'sub-packet'.
We just want to re-use all the hdr unpacking etc.
"""
pass
class IP6OptsHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol | self.length = (self.len + 1) * 8
options = []
index = 0
while index < self.length - 2:
opt_type = compat_ord(self.data[index])
# PAD1 option
if opt_type == 0:
index += 1
continue
opt_length = compat_ord(self.data[index + 1])
if opt_type == 1: # PADN option
# PADN uses opt_length bytes in total
index += opt_length + 2
continue
options.append(
{'type': opt_type, 'opt_length': opt_length, 'data': self.data[index + 2:index + 2 + opt_length]})
# add the two chars and the option_length, to move to the next option
index += opt_length + 2
self.options = options
self.data = buf[2:self.length] # keep raw data with all pad options, but not the following data
class IP6HopOptsHeader(IP6OptsHeader):
pass
class IP6DstOptsHeader(IP6OptsHeader):
pass
class IP6RoutingHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # extension data length in 8 octect units (ignoring first 8 octets) (<= 46 for type 0)
('type', 'B', 0), # routing type (currently, only 0 is used)
('segs_left', 'B', 0), # remaining segments in route, until destination (<= 23)
('rsvd_sl_bits', 'I', 0), # reserved (1 byte), strict/loose bitmap for addresses
)
@property
def sl_bits(self):
return self.rsvd_sl_bits & 0xffffff
@sl_bits.setter
def sl_bits(self, v):
self.rsvd_sl_bits = (self.rsvd_sl_bits & ~0xfffff) | (v & 0xfffff)
def unpack(self, buf):
hdr_size = 8
addr_size = 16
dpkt.Packet.unpack(self, buf)
addresses = []
num_addresses = self.len // 2
buf = buf[hdr_size:hdr_size + num_addresses * addr_size]
for i in range(num_addresses):
addresses.append(buf[i * addr_size: i * addr_size + addr_size])
self.data = buf
self.addresses = addresses
self.length = self.len * 8 + 8
class IP6FragmentHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('resv', 'B', 0), # reserved, set to 0
('frag_off_resv_m', 'H', 0), # frag offset (13 bits), reserved zero (2 bits), More frags flag
('id', 'I', 0) # fragments id
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__
self.data = b''
@property
def frag_off(self):
return self.frag_off_resv_m >> 3
@frag_off.setter
def frag_off(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfff8) | (v << 3)
@property
def m_flag(self):
return self.frag_off_resv_m & 1
@m_flag.setter
def m_flag(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfffe) | v
class IP6AHHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # length of header in 4 octet units (ignoring first 2 units)
('resv', 'H', 0), # reserved, 2 bytes of 0
('spi', 'I', 0), # SPI security parameter index
('seq', 'I', 0) # sequence no.
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 2) * 4
self.auth_data = self.data[:(self.len - 1) * 4]
class IP6ESPHeader(IP6ExtensionHeader):
__hdr__ = (
('spi', 'I', 0),
('seq', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__ + len(self.data)
ext_hdrs = [ip.IP_PROTO_HOPOPTS, ip.IP_PROTO_ROUTING, ip.IP_PROTO_FRAGMENT, ip.IP_PROTO_AH, ip.IP_PROTO_ESP,
ip.IP_PROTO_DSTOPTS]
ext_hdrs_cls = {ip.IP_PROTO_HOPOPTS: IP6HopOptsHeader,
ip.IP_PROTO_ROUTING: IP6RoutingHeader,
ip.IP_PROTO_FRAGMENT: IP6FragmentHeader,
ip.IP_PROTO_ESP: IP6ESPHeader,
ip.IP_PROTO_AH: IP6AHHeader,
ip.IP_PROTO_DSTOPTS: IP6DstOptsHeader}
# Unit tests
def test_ipg():
s = (b'\x60\x00\x00\x00\x00\x28\x06\x40\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11\x24\xff\xfe\x8c'
b'\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80\x72\xcd\xca\x00\x16'
b'\x04\x84\x46\xd5\x00\x00\x00\x00\xa0\x02\xff\xff\xf8\x09\x00\x00\x02\x04\x05\xa0\x01\x03'
b'\x03\x00\x01\x01\x08\x0a\x7d\x18\x35\x3f\x00\x00\x00\x00')
_ip = IP6(s)
# basic properties
assert _ip.v == 6
assert _ip.fc == 0
assert _ip.flow == 0
_ip.data.sum = 0
s2 = bytes(_ip)
assert s == s2
def test_ip6_routing_header():
s = (b'\x60\x00\x00\x00\x00\x3c\x2b\x40\x20\x48\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xde\xca\x20\x47\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02'
b'\x00\x00\x00\x00\x20\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x20\x22'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00\x50\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x50\x02\x20\x00\x91\x7f\x00\x00')
_ip = IP6(s)
s2 = bytes(_ip)
# 43 is Routing header id
assert len(_ip.extension_hdrs[43].addresses) == 2
assert _ip.tcp
assert s == s2
def test_ip6_fragment_header():
s = b'\x06\xee\xff\xfb\x00\x00\xff\xff'
fh = IP6FragmentHeader(s)
# s2 = str(fh) variable 's2' is not used
assert fh.nxt == 6
assert fh.id == 65535
assert fh.frag_off == 8191
assert fh.m_flag == 1
assert bytes(fh) == s
# IP6 with fragment header
s = (b'\x60\x00\x00\x00\x00\x10\x2c\x00\x02\x22\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x02\x03\x33\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x29\x00\x00\x01'
b'\x00\x00\x00\x00\x60\x00\x00\x00\x00\x10\x2c\x00')
_ip = IP6(s)
assert bytes(_ip) == s
def test_ip6_options_header():
s = (b'\x3b\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00')
options = IP6OptsHeader(s).options
assert len(options) == 3
assert bytes(IP6OptsHeader(s)) == s
def test_ip6_ah_header():
s = b'\x3b\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
ah = IP6AHHeader(s)
assert ah.length == 24
assert ah.auth_data == b'xxxxxxxx'
assert ah.spi == 0x2020202
assert ah.seq == 0x1010101
assert bytes(ah) == s
def test_ip6_esp_header():
s = (b'\x00\x00\x01\x00\x00\x00\x00\x44\xe2\x4f\x9e\x68\xf3\xcd\xb1\x5f\x61\x65\x42\x8b\x78\x0b'
b'\x4a\xfd\x13\xf0\x15\x98\xf5\x55\x16\xa8\x12\xb3\xb8\x4d\xbc\x16\xb2\x14\xbe\x3d\xf9\x96'
b'\xd4\xa0\x39\x1f\x85\x74\x25\x81\x83\xa6\x0d\x99\xb6\xba\xa3\xcc\xb6\xe0\x9a\x78\xee\xf2'
b'\xaf\x9a')
esp = IP6ESPHeader(s)
assert esp.length == 68
assert esp.spi == 256
assert bytes(esp) == s
def test_ip6_extension_headers():
p = (b'\x60\x00\x00\x00\x00\x3c\x2b\x40\x20\x48\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xde\xca\x20\x47\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02'
b'\x00\x00\x00\x00\x20\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x20\x22'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00\x50\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x50\x02\x20\x00\x91\x7f\x00\x00')
_ip = IP6(p)
o = (b'\x3b\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00')
_ip.extension_hdrs[0] = IP6HopOptsHeader(o)
fh = b'\x06\xee\xff\xfb\x00\x00\xff\xff'
_ip.extension_hdrs[44] = IP6FragmentHeader(fh)
ah = b'\x3b\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
_ip.extension_hdrs[51] = IP6AHHeader(ah)
do = b'\x3b\x02\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
_ip.extension_hdrs[60] = IP6DstOptsHeader(do)
assert len(_ip.extension_hdrs) == 5
def test_ip6_all_extension_headers(): # https://github.com/kbandla/dpkt/pull/403
s = (b'\x60\x00\x00\x00\x00\x47\x3c\x40\xfe\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x02\xfe\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x3c\x00\x01\x04'
b'\x00\x00\x00\x00\x3c\x00\x01\x04\x00\x00\x00\x00\x2c\x00\x01\x04\x00\x00\x00\x00\x2c\x00'
b'\x00\x00\x00\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x00\x00\x2c\x00\x01\x04\x00\x00\x00\x00'
b'\x3a\x00\x00\x00\x00\x00\x00\x00\x80\x00\xd8\xe5\x0c\x1a\x00\x00\x50\x61\x79\x4c\x6f\x61'
b'\x64')
_ip = IP6(s)
assert _ip.p == 58 # ICMPv6
hdrs = _ip.all_extension_headers
assert len(hdrs) == 7
assert isinstance(hdrs[0], IP6DstOptsHeader)
assert isinstance(hdrs[3], IP6FragmentHeader)
assert isinstance(hdrs[5], IP6DstOptsHeader)
assert bytes(_ip) == s
if __name__ == '__main__':
test_ipg()
test_ip6_routing_header()
test_ip6_fragment_header()
test_ip6_options_header()
test_ip6_ah_header()
test_ip6_esp_header()
test_ip6_extension_headers()
test_ip6_all_extension_headers()
print('Tests Successful...') | ('len', 'B', 0) # option data length in 8 octect units (ignoring first 8 octets) so, len 0 == 64bit header
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf) | random_line_split |
ip6.py | # $Id: ip6.py 87 2013-03-05 19:41:04Z andrewflnr@gmail.com $
# -*- coding: utf-8 -*-
"""Internet Protocol, version 6."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
from . import ip
from .compat import compat_ord
class IP6(dpkt.Packet):
"""Internet Protocol, version 6.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of IPv6.
TODO.
"""
__hdr__ = (
('_v_fc_flow', 'I', 0x60000000),
('plen', 'H', 0), # payload length (not including header)
('nxt', 'B', 0), # next header protocol
('hlim', 'B', 0), # hop limit
('src', '16s', ''),
('dst', '16s', '')
)
_protosw = ip.IP._protosw
@property
def v(self):
return self._v_fc_flow >> 28
@v.setter
def v(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xf0000000) | (v << 28)
@property
def fc(self):
return (self._v_fc_flow >> 20) & 0xff
@fc.setter
def fc(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xff00000) | (v << 20)
@property
def flow(self):
return self._v_fc_flow & 0xfffff
@flow.setter
def flow(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xfffff) | (v & 0xfffff)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.extension_hdrs = {}
# NOTE: self.extension_hdrs is not accurate, as it doesn't support duplicate header types.
# According to RFC-1883 "Each extension header should occur at most once, except for the
# Destination Options header which should occur at most twice".
# Secondly, the .headers_str() method attempts to pack the extension headers in order as
# defined in the RFC, however it doesn't adjust the next header (nxt) pointer accordingly.
# Here we introduce the new field .all_extension_headers; it allows duplicate types and
# keeps the original order.
self.all_extension_headers = []
if self.plen:
buf = self.data[:self.plen]
else: # due to jumbo payload or TSO
|
next_ext_hdr = self.nxt
while next_ext_hdr in ext_hdrs:
ext = ext_hdrs_cls[next_ext_hdr](buf)
self.extension_hdrs[next_ext_hdr] = ext
self.all_extension_headers.append(ext)
buf = buf[ext.length:]
next_ext_hdr = getattr(ext, 'nxt', None)
# set the payload protocol id
if next_ext_hdr is not None:
self.p = next_ext_hdr
try:
self.data = self._protosw[next_ext_hdr](buf)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
self.data = buf
def headers_str(self):
# If all_extension_headers is available, return the headers as they originally appeared
if self.all_extension_headers:
return b''.join(bytes(ext) for ext in self.all_extension_headers)
# Output extension headers in order defined in RFC1883 (except dest opts)
header_str = b""
for hdr in ext_hdrs:
if hdr in self.extension_hdrs:
header_str += bytes(self.extension_hdrs[hdr])
return header_str
def __bytes__(self):
if (self.p == 6 or self.p == 17 or self.p == 58) and not self.data.sum:
# XXX - set TCP, UDP, and ICMPv6 checksums
p = bytes(self.data)
s = dpkt.struct.pack('>16s16sxBH', self.src, self.dst, self.nxt, len(p))
s = dpkt.in_cksum_add(0, s)
s = dpkt.in_cksum_add(s, p)
try:
self.data.sum = dpkt.in_cksum_done(s)
except AttributeError:
pass
return self.pack_hdr() + self.headers_str() + bytes(self.data)
@classmethod
def set_proto(cls, p, pktclass):
cls._protosw[p] = pktclass
@classmethod
def get_proto(cls, p):
return cls._protosw[p]
class IP6ExtensionHeader(dpkt.Packet):
"""
An extension header is very similar to a 'sub-packet'.
We just want to re-use all the hdr unpacking etc.
"""
pass
class IP6OptsHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0) # option data length in 8 octect units (ignoring first 8 octets) so, len 0 == 64bit header
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 1) * 8
options = []
index = 0
while index < self.length - 2:
opt_type = compat_ord(self.data[index])
# PAD1 option
if opt_type == 0:
index += 1
continue
opt_length = compat_ord(self.data[index + 1])
if opt_type == 1: # PADN option
# PADN uses opt_length bytes in total
index += opt_length + 2
continue
options.append(
{'type': opt_type, 'opt_length': opt_length, 'data': self.data[index + 2:index + 2 + opt_length]})
# add the two chars and the option_length, to move to the next option
index += opt_length + 2
self.options = options
self.data = buf[2:self.length] # keep raw data with all pad options, but not the following data
class IP6HopOptsHeader(IP6OptsHeader):
pass
class IP6DstOptsHeader(IP6OptsHeader):
pass
class IP6RoutingHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # extension data length in 8 octect units (ignoring first 8 octets) (<= 46 for type 0)
('type', 'B', 0), # routing type (currently, only 0 is used)
('segs_left', 'B', 0), # remaining segments in route, until destination (<= 23)
('rsvd_sl_bits', 'I', 0), # reserved (1 byte), strict/loose bitmap for addresses
)
@property
def sl_bits(self):
return self.rsvd_sl_bits & 0xffffff
@sl_bits.setter
def sl_bits(self, v):
self.rsvd_sl_bits = (self.rsvd_sl_bits & ~0xfffff) | (v & 0xfffff)
def unpack(self, buf):
hdr_size = 8
addr_size = 16
dpkt.Packet.unpack(self, buf)
addresses = []
num_addresses = self.len // 2
buf = buf[hdr_size:hdr_size + num_addresses * addr_size]
for i in range(num_addresses):
addresses.append(buf[i * addr_size: i * addr_size + addr_size])
self.data = buf
self.addresses = addresses
self.length = self.len * 8 + 8
class IP6FragmentHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('resv', 'B', 0), # reserved, set to 0
('frag_off_resv_m', 'H', 0), # frag offset (13 bits), reserved zero (2 bits), More frags flag
('id', 'I', 0) # fragments id
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__
self.data = b''
@property
def frag_off(self):
return self.frag_off_resv_m >> 3
@frag_off.setter
def frag_off(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfff8) | (v << 3)
@property
def m_flag(self):
return self.frag_off_resv_m & 1
@m_flag.setter
def m_flag(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfffe) | v
class IP6AHHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # length of header in 4 octet units (ignoring first 2 units)
('resv', 'H', 0), # reserved, 2 bytes of 0
('spi', 'I', 0), # SPI security parameter index
('seq', 'I', 0) # sequence no.
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 2) * 4
self.auth_data = self.data[:(self.len - 1) * 4]
class IP6ESPHeader(IP6ExtensionHeader):
__hdr__ = (
('spi', 'I', 0),
('seq', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__ + len(self.data)
ext_hdrs = [ip.IP_PROTO_HOPOPTS, ip.IP_PROTO_ROUTING, ip.IP_PROTO_FRAGMENT, ip.IP_PROTO_AH, ip.IP_PROTO_ESP,
ip.IP_PROTO_DSTOPTS]
ext_hdrs_cls = {ip.IP_PROTO_HOPOPTS: IP6HopOptsHeader,
ip.IP_PROTO_ROUTING: IP6RoutingHeader,
ip.IP_PROTO_FRAGMENT: IP6FragmentHeader,
ip.IP_PROTO_ESP: IP6ESPHeader,
ip.IP_PROTO_AH: IP6AHHeader,
ip.IP_PROTO_DSTOPTS: IP6DstOptsHeader}
# Unit tests
def test_ipg():
s = (b'\x60\x00\x00\x00\x00\x28\x06\x40\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11\x24\xff\xfe\x8c'
b'\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80\x72\xcd\xca\x00\x16'
b'\x04\x84\x46\xd5\x00\x00\x00\x00\xa0\x02\xff\xff\xf8\x09\x00\x00\x02\x04\x05\xa0\x01\x03'
b'\x03\x00\x01\x01\x08\x0a\x7d\x18\x35\x3f\x00\x00\x00\x00')
_ip = IP6(s)
# basic properties
assert _ip.v == 6
assert _ip.fc == 0
assert _ip.flow == 0
_ip.data.sum = 0
s2 = bytes(_ip)
assert s == s2
def test_ip6_routing_header():
s = (b'\x60\x00\x00\x00\x00\x3c\x2b\x40\x20\x48\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xde\xca\x20\x47\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02'
b'\x00\x00\x00\x00\x20\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x20\x22'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00\x50\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x50\x02\x20\x00\x91\x7f\x00\x00')
_ip = IP6(s)
s2 = bytes(_ip)
# 43 is Routing header id
assert len(_ip.extension_hdrs[43].addresses) == 2
assert _ip.tcp
assert s == s2
def test_ip6_fragment_header():
s = b'\x06\xee\xff\xfb\x00\x00\xff\xff'
fh = IP6FragmentHeader(s)
# s2 = str(fh) variable 's2' is not used
assert fh.nxt == 6
assert fh.id == 65535
assert fh.frag_off == 8191
assert fh.m_flag == 1
assert bytes(fh) == s
# IP6 with fragment header
s = (b'\x60\x00\x00\x00\x00\x10\x2c\x00\x02\x22\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x02\x03\x33\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x29\x00\x00\x01'
b'\x00\x00\x00\x00\x60\x00\x00\x00\x00\x10\x2c\x00')
_ip = IP6(s)
assert bytes(_ip) == s
def test_ip6_options_header():
s = (b'\x3b\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00')
options = IP6OptsHeader(s).options
assert len(options) == 3
assert bytes(IP6OptsHeader(s)) == s
def test_ip6_ah_header():
s = b'\x3b\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
ah = IP6AHHeader(s)
assert ah.length == 24
assert ah.auth_data == b'xxxxxxxx'
assert ah.spi == 0x2020202
assert ah.seq == 0x1010101
assert bytes(ah) == s
def test_ip6_esp_header():
s = (b'\x00\x00\x01\x00\x00\x00\x00\x44\xe2\x4f\x9e\x68\xf3\xcd\xb1\x5f\x61\x65\x42\x8b\x78\x0b'
b'\x4a\xfd\x13\xf0\x15\x98\xf5\x55\x16\xa8\x12\xb3\xb8\x4d\xbc\x16\xb2\x14\xbe\x3d\xf9\x96'
b'\xd4\xa0\x39\x1f\x85\x74\x25\x81\x83\xa6\x0d\x99\xb6\xba\xa3\xcc\xb6\xe0\x9a\x78\xee\xf2'
b'\xaf\x9a')
esp = IP6ESPHeader(s)
assert esp.length == 68
assert esp.spi == 256
assert bytes(esp) == s
def test_ip6_extension_headers():
p = (b'\x60\x00\x00\x00\x00\x3c\x2b\x40\x20\x48\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xde\xca\x20\x47\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02'
b'\x00\x00\x00\x00\x20\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x20\x22'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00\x50\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x50\x02\x20\x00\x91\x7f\x00\x00')
_ip = IP6(p)
o = (b'\x3b\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00')
_ip.extension_hdrs[0] = IP6HopOptsHeader(o)
fh = b'\x06\xee\xff\xfb\x00\x00\xff\xff'
_ip.extension_hdrs[44] = IP6FragmentHeader(fh)
ah = b'\x3b\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
_ip.extension_hdrs[51] = IP6AHHeader(ah)
do = b'\x3b\x02\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
_ip.extension_hdrs[60] = IP6DstOptsHeader(do)
assert len(_ip.extension_hdrs) == 5
def test_ip6_all_extension_headers(): # https://github.com/kbandla/dpkt/pull/403
s = (b'\x60\x00\x00\x00\x00\x47\x3c\x40\xfe\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x02\xfe\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x3c\x00\x01\x04'
b'\x00\x00\x00\x00\x3c\x00\x01\x04\x00\x00\x00\x00\x2c\x00\x01\x04\x00\x00\x00\x00\x2c\x00'
b'\x00\x00\x00\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x00\x00\x2c\x00\x01\x04\x00\x00\x00\x00'
b'\x3a\x00\x00\x00\x00\x00\x00\x00\x80\x00\xd8\xe5\x0c\x1a\x00\x00\x50\x61\x79\x4c\x6f\x61'
b'\x64')
_ip = IP6(s)
assert _ip.p == 58 # ICMPv6
hdrs = _ip.all_extension_headers
assert len(hdrs) == 7
assert isinstance(hdrs[0], IP6DstOptsHeader)
assert isinstance(hdrs[3], IP6FragmentHeader)
assert isinstance(hdrs[5], IP6DstOptsHeader)
assert bytes(_ip) == s
if __name__ == '__main__':
test_ipg()
test_ip6_routing_header()
test_ip6_fragment_header()
test_ip6_options_header()
test_ip6_ah_header()
test_ip6_esp_header()
test_ip6_extension_headers()
test_ip6_all_extension_headers()
print('Tests Successful...')
| buf = self.data | conditional_block |
ip6.py | # $Id: ip6.py 87 2013-03-05 19:41:04Z andrewflnr@gmail.com $
# -*- coding: utf-8 -*-
"""Internet Protocol, version 6."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
from . import ip
from .compat import compat_ord
class IP6(dpkt.Packet):
"""Internet Protocol, version 6.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of IPv6.
TODO.
"""
__hdr__ = (
('_v_fc_flow', 'I', 0x60000000),
('plen', 'H', 0), # payload length (not including header)
('nxt', 'B', 0), # next header protocol
('hlim', 'B', 0), # hop limit
('src', '16s', ''),
('dst', '16s', '')
)
_protosw = ip.IP._protosw
@property
def v(self):
return self._v_fc_flow >> 28
@v.setter
def v(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xf0000000) | (v << 28)
@property
def fc(self):
return (self._v_fc_flow >> 20) & 0xff
@fc.setter
def fc(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xff00000) | (v << 20)
@property
def flow(self):
return self._v_fc_flow & 0xfffff
@flow.setter
def flow(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xfffff) | (v & 0xfffff)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.extension_hdrs = {}
# NOTE: self.extension_hdrs is not accurate, as it doesn't support duplicate header types.
# According to RFC-1883 "Each extension header should occur at most once, except for the
# Destination Options header which should occur at most twice".
# Secondly, the .headers_str() method attempts to pack the extension headers in order as
# defined in the RFC, however it doesn't adjust the next header (nxt) pointer accordingly.
# Here we introduce the new field .all_extension_headers; it allows duplicate types and
# keeps the original order.
self.all_extension_headers = []
if self.plen:
buf = self.data[:self.plen]
else: # due to jumbo payload or TSO
buf = self.data
next_ext_hdr = self.nxt
while next_ext_hdr in ext_hdrs:
ext = ext_hdrs_cls[next_ext_hdr](buf)
self.extension_hdrs[next_ext_hdr] = ext
self.all_extension_headers.append(ext)
buf = buf[ext.length:]
next_ext_hdr = getattr(ext, 'nxt', None)
# set the payload protocol id
if next_ext_hdr is not None:
self.p = next_ext_hdr
try:
self.data = self._protosw[next_ext_hdr](buf)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
self.data = buf
def headers_str(self):
# If all_extension_headers is available, return the headers as they originally appeared
if self.all_extension_headers:
return b''.join(bytes(ext) for ext in self.all_extension_headers)
# Output extension headers in order defined in RFC1883 (except dest opts)
header_str = b""
for hdr in ext_hdrs:
if hdr in self.extension_hdrs:
header_str += bytes(self.extension_hdrs[hdr])
return header_str
def __bytes__(self):
if (self.p == 6 or self.p == 17 or self.p == 58) and not self.data.sum:
# XXX - set TCP, UDP, and ICMPv6 checksums
p = bytes(self.data)
s = dpkt.struct.pack('>16s16sxBH', self.src, self.dst, self.nxt, len(p))
s = dpkt.in_cksum_add(0, s)
s = dpkt.in_cksum_add(s, p)
try:
self.data.sum = dpkt.in_cksum_done(s)
except AttributeError:
pass
return self.pack_hdr() + self.headers_str() + bytes(self.data)
@classmethod
def set_proto(cls, p, pktclass):
cls._protosw[p] = pktclass
@classmethod
def get_proto(cls, p):
return cls._protosw[p]
class IP6ExtensionHeader(dpkt.Packet):
"""
An extension header is very similar to a 'sub-packet'.
We just want to re-use all the hdr unpacking etc.
"""
pass
class IP6OptsHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0) # option data length in 8 octect units (ignoring first 8 octets) so, len 0 == 64bit header
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 1) * 8
options = []
index = 0
while index < self.length - 2:
opt_type = compat_ord(self.data[index])
# PAD1 option
if opt_type == 0:
index += 1
continue
opt_length = compat_ord(self.data[index + 1])
if opt_type == 1: # PADN option
# PADN uses opt_length bytes in total
index += opt_length + 2
continue
options.append(
{'type': opt_type, 'opt_length': opt_length, 'data': self.data[index + 2:index + 2 + opt_length]})
# add the two chars and the option_length, to move to the next option
index += opt_length + 2
self.options = options
self.data = buf[2:self.length] # keep raw data with all pad options, but not the following data
class IP6HopOptsHeader(IP6OptsHeader):
pass
class IP6DstOptsHeader(IP6OptsHeader):
pass
class IP6RoutingHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # extension data length in 8 octect units (ignoring first 8 octets) (<= 46 for type 0)
('type', 'B', 0), # routing type (currently, only 0 is used)
('segs_left', 'B', 0), # remaining segments in route, until destination (<= 23)
('rsvd_sl_bits', 'I', 0), # reserved (1 byte), strict/loose bitmap for addresses
)
@property
def sl_bits(self):
return self.rsvd_sl_bits & 0xffffff
@sl_bits.setter
def sl_bits(self, v):
self.rsvd_sl_bits = (self.rsvd_sl_bits & ~0xfffff) | (v & 0xfffff)
def unpack(self, buf):
hdr_size = 8
addr_size = 16
dpkt.Packet.unpack(self, buf)
addresses = []
num_addresses = self.len // 2
buf = buf[hdr_size:hdr_size + num_addresses * addr_size]
for i in range(num_addresses):
addresses.append(buf[i * addr_size: i * addr_size + addr_size])
self.data = buf
self.addresses = addresses
self.length = self.len * 8 + 8
class IP6FragmentHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('resv', 'B', 0), # reserved, set to 0
('frag_off_resv_m', 'H', 0), # frag offset (13 bits), reserved zero (2 bits), More frags flag
('id', 'I', 0) # fragments id
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__
self.data = b''
@property
def frag_off(self):
return self.frag_off_resv_m >> 3
@frag_off.setter
def frag_off(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfff8) | (v << 3)
@property
def m_flag(self):
return self.frag_off_resv_m & 1
@m_flag.setter
def m_flag(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfffe) | v
class IP6AHHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # length of header in 4 octet units (ignoring first 2 units)
('resv', 'H', 0), # reserved, 2 bytes of 0
('spi', 'I', 0), # SPI security parameter index
('seq', 'I', 0) # sequence no.
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 2) * 4
self.auth_data = self.data[:(self.len - 1) * 4]
class IP6ESPHeader(IP6ExtensionHeader):
__hdr__ = (
('spi', 'I', 0),
('seq', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__ + len(self.data)
ext_hdrs = [ip.IP_PROTO_HOPOPTS, ip.IP_PROTO_ROUTING, ip.IP_PROTO_FRAGMENT, ip.IP_PROTO_AH, ip.IP_PROTO_ESP,
ip.IP_PROTO_DSTOPTS]
ext_hdrs_cls = {ip.IP_PROTO_HOPOPTS: IP6HopOptsHeader,
ip.IP_PROTO_ROUTING: IP6RoutingHeader,
ip.IP_PROTO_FRAGMENT: IP6FragmentHeader,
ip.IP_PROTO_ESP: IP6ESPHeader,
ip.IP_PROTO_AH: IP6AHHeader,
ip.IP_PROTO_DSTOPTS: IP6DstOptsHeader}
# Unit tests
def test_ipg():
s = (b'\x60\x00\x00\x00\x00\x28\x06\x40\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11\x24\xff\xfe\x8c'
b'\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80\x72\xcd\xca\x00\x16'
b'\x04\x84\x46\xd5\x00\x00\x00\x00\xa0\x02\xff\xff\xf8\x09\x00\x00\x02\x04\x05\xa0\x01\x03'
b'\x03\x00\x01\x01\x08\x0a\x7d\x18\x35\x3f\x00\x00\x00\x00')
_ip = IP6(s)
# basic properties
assert _ip.v == 6
assert _ip.fc == 0
assert _ip.flow == 0
_ip.data.sum = 0
s2 = bytes(_ip)
assert s == s2
def test_ip6_routing_header():
s = (b'\x60\x00\x00\x00\x00\x3c\x2b\x40\x20\x48\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xde\xca\x20\x47\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02'
b'\x00\x00\x00\x00\x20\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x20\x22'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00\x50\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x50\x02\x20\x00\x91\x7f\x00\x00')
_ip = IP6(s)
s2 = bytes(_ip)
# 43 is Routing header id
assert len(_ip.extension_hdrs[43].addresses) == 2
assert _ip.tcp
assert s == s2
def test_ip6_fragment_header():
s = b'\x06\xee\xff\xfb\x00\x00\xff\xff'
fh = IP6FragmentHeader(s)
# s2 = str(fh) variable 's2' is not used
assert fh.nxt == 6
assert fh.id == 65535
assert fh.frag_off == 8191
assert fh.m_flag == 1
assert bytes(fh) == s
# IP6 with fragment header
s = (b'\x60\x00\x00\x00\x00\x10\x2c\x00\x02\x22\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x02\x03\x33\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x29\x00\x00\x01'
b'\x00\x00\x00\x00\x60\x00\x00\x00\x00\x10\x2c\x00')
_ip = IP6(s)
assert bytes(_ip) == s
def test_ip6_options_header():
s = (b'\x3b\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00')
options = IP6OptsHeader(s).options
assert len(options) == 3
assert bytes(IP6OptsHeader(s)) == s
def test_ip6_ah_header():
s = b'\x3b\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
ah = IP6AHHeader(s)
assert ah.length == 24
assert ah.auth_data == b'xxxxxxxx'
assert ah.spi == 0x2020202
assert ah.seq == 0x1010101
assert bytes(ah) == s
def test_ip6_esp_header():
|
def test_ip6_extension_headers():
p = (b'\x60\x00\x00\x00\x00\x3c\x2b\x40\x20\x48\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xde\xca\x20\x47\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02'
b'\x00\x00\x00\x00\x20\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x20\x22'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00\x50\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x50\x02\x20\x00\x91\x7f\x00\x00')
_ip = IP6(p)
o = (b'\x3b\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00')
_ip.extension_hdrs[0] = IP6HopOptsHeader(o)
fh = b'\x06\xee\xff\xfb\x00\x00\xff\xff'
_ip.extension_hdrs[44] = IP6FragmentHeader(fh)
ah = b'\x3b\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
_ip.extension_hdrs[51] = IP6AHHeader(ah)
do = b'\x3b\x02\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
_ip.extension_hdrs[60] = IP6DstOptsHeader(do)
assert len(_ip.extension_hdrs) == 5
def test_ip6_all_extension_headers(): # https://github.com/kbandla/dpkt/pull/403
s = (b'\x60\x00\x00\x00\x00\x47\x3c\x40\xfe\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x02\xfe\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x3c\x00\x01\x04'
b'\x00\x00\x00\x00\x3c\x00\x01\x04\x00\x00\x00\x00\x2c\x00\x01\x04\x00\x00\x00\x00\x2c\x00'
b'\x00\x00\x00\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x00\x00\x2c\x00\x01\x04\x00\x00\x00\x00'
b'\x3a\x00\x00\x00\x00\x00\x00\x00\x80\x00\xd8\xe5\x0c\x1a\x00\x00\x50\x61\x79\x4c\x6f\x61'
b'\x64')
_ip = IP6(s)
assert _ip.p == 58 # ICMPv6
hdrs = _ip.all_extension_headers
assert len(hdrs) == 7
assert isinstance(hdrs[0], IP6DstOptsHeader)
assert isinstance(hdrs[3], IP6FragmentHeader)
assert isinstance(hdrs[5], IP6DstOptsHeader)
assert bytes(_ip) == s
if __name__ == '__main__':
test_ipg()
test_ip6_routing_header()
test_ip6_fragment_header()
test_ip6_options_header()
test_ip6_ah_header()
test_ip6_esp_header()
test_ip6_extension_headers()
test_ip6_all_extension_headers()
print('Tests Successful...')
| s = (b'\x00\x00\x01\x00\x00\x00\x00\x44\xe2\x4f\x9e\x68\xf3\xcd\xb1\x5f\x61\x65\x42\x8b\x78\x0b'
b'\x4a\xfd\x13\xf0\x15\x98\xf5\x55\x16\xa8\x12\xb3\xb8\x4d\xbc\x16\xb2\x14\xbe\x3d\xf9\x96'
b'\xd4\xa0\x39\x1f\x85\x74\x25\x81\x83\xa6\x0d\x99\xb6\xba\xa3\xcc\xb6\xe0\x9a\x78\xee\xf2'
b'\xaf\x9a')
esp = IP6ESPHeader(s)
assert esp.length == 68
assert esp.spi == 256
assert bytes(esp) == s | identifier_body |
ip6.py | # $Id: ip6.py 87 2013-03-05 19:41:04Z andrewflnr@gmail.com $
# -*- coding: utf-8 -*-
"""Internet Protocol, version 6."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
from . import ip
from .compat import compat_ord
class IP6(dpkt.Packet):
"""Internet Protocol, version 6.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of IPv6.
TODO.
"""
__hdr__ = (
('_v_fc_flow', 'I', 0x60000000),
('plen', 'H', 0), # payload length (not including header)
('nxt', 'B', 0), # next header protocol
('hlim', 'B', 0), # hop limit
('src', '16s', ''),
('dst', '16s', '')
)
_protosw = ip.IP._protosw
@property
def v(self):
return self._v_fc_flow >> 28
@v.setter
def v(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xf0000000) | (v << 28)
@property
def | (self):
return (self._v_fc_flow >> 20) & 0xff
@fc.setter
def fc(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xff00000) | (v << 20)
@property
def flow(self):
return self._v_fc_flow & 0xfffff
@flow.setter
def flow(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xfffff) | (v & 0xfffff)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.extension_hdrs = {}
# NOTE: self.extension_hdrs is not accurate, as it doesn't support duplicate header types.
# According to RFC-1883 "Each extension header should occur at most once, except for the
# Destination Options header which should occur at most twice".
# Secondly, the .headers_str() method attempts to pack the extension headers in order as
# defined in the RFC, however it doesn't adjust the next header (nxt) pointer accordingly.
# Here we introduce the new field .all_extension_headers; it allows duplicate types and
# keeps the original order.
self.all_extension_headers = []
if self.plen:
buf = self.data[:self.plen]
else: # due to jumbo payload or TSO
buf = self.data
next_ext_hdr = self.nxt
while next_ext_hdr in ext_hdrs:
ext = ext_hdrs_cls[next_ext_hdr](buf)
self.extension_hdrs[next_ext_hdr] = ext
self.all_extension_headers.append(ext)
buf = buf[ext.length:]
next_ext_hdr = getattr(ext, 'nxt', None)
# set the payload protocol id
if next_ext_hdr is not None:
self.p = next_ext_hdr
try:
self.data = self._protosw[next_ext_hdr](buf)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
self.data = buf
def headers_str(self):
# If all_extension_headers is available, return the headers as they originally appeared
if self.all_extension_headers:
return b''.join(bytes(ext) for ext in self.all_extension_headers)
# Output extension headers in order defined in RFC1883 (except dest opts)
header_str = b""
for hdr in ext_hdrs:
if hdr in self.extension_hdrs:
header_str += bytes(self.extension_hdrs[hdr])
return header_str
def __bytes__(self):
if (self.p == 6 or self.p == 17 or self.p == 58) and not self.data.sum:
# XXX - set TCP, UDP, and ICMPv6 checksums
p = bytes(self.data)
s = dpkt.struct.pack('>16s16sxBH', self.src, self.dst, self.nxt, len(p))
s = dpkt.in_cksum_add(0, s)
s = dpkt.in_cksum_add(s, p)
try:
self.data.sum = dpkt.in_cksum_done(s)
except AttributeError:
pass
return self.pack_hdr() + self.headers_str() + bytes(self.data)
@classmethod
def set_proto(cls, p, pktclass):
cls._protosw[p] = pktclass
@classmethod
def get_proto(cls, p):
return cls._protosw[p]
class IP6ExtensionHeader(dpkt.Packet):
"""
An extension header is very similar to a 'sub-packet'.
We just want to re-use all the hdr unpacking etc.
"""
pass
class IP6OptsHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0) # option data length in 8 octect units (ignoring first 8 octets) so, len 0 == 64bit header
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 1) * 8
options = []
index = 0
while index < self.length - 2:
opt_type = compat_ord(self.data[index])
# PAD1 option
if opt_type == 0:
index += 1
continue
opt_length = compat_ord(self.data[index + 1])
if opt_type == 1: # PADN option
# PADN uses opt_length bytes in total
index += opt_length + 2
continue
options.append(
{'type': opt_type, 'opt_length': opt_length, 'data': self.data[index + 2:index + 2 + opt_length]})
# add the two chars and the option_length, to move to the next option
index += opt_length + 2
self.options = options
self.data = buf[2:self.length] # keep raw data with all pad options, but not the following data
class IP6HopOptsHeader(IP6OptsHeader):
pass
class IP6DstOptsHeader(IP6OptsHeader):
pass
class IP6RoutingHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # extension data length in 8 octect units (ignoring first 8 octets) (<= 46 for type 0)
('type', 'B', 0), # routing type (currently, only 0 is used)
('segs_left', 'B', 0), # remaining segments in route, until destination (<= 23)
('rsvd_sl_bits', 'I', 0), # reserved (1 byte), strict/loose bitmap for addresses
)
@property
def sl_bits(self):
return self.rsvd_sl_bits & 0xffffff
@sl_bits.setter
def sl_bits(self, v):
self.rsvd_sl_bits = (self.rsvd_sl_bits & ~0xfffff) | (v & 0xfffff)
def unpack(self, buf):
hdr_size = 8
addr_size = 16
dpkt.Packet.unpack(self, buf)
addresses = []
num_addresses = self.len // 2
buf = buf[hdr_size:hdr_size + num_addresses * addr_size]
for i in range(num_addresses):
addresses.append(buf[i * addr_size: i * addr_size + addr_size])
self.data = buf
self.addresses = addresses
self.length = self.len * 8 + 8
class IP6FragmentHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('resv', 'B', 0), # reserved, set to 0
('frag_off_resv_m', 'H', 0), # frag offset (13 bits), reserved zero (2 bits), More frags flag
('id', 'I', 0) # fragments id
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__
self.data = b''
@property
def frag_off(self):
return self.frag_off_resv_m >> 3
@frag_off.setter
def frag_off(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfff8) | (v << 3)
@property
def m_flag(self):
return self.frag_off_resv_m & 1
@m_flag.setter
def m_flag(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfffe) | v
class IP6AHHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # length of header in 4 octet units (ignoring first 2 units)
('resv', 'H', 0), # reserved, 2 bytes of 0
('spi', 'I', 0), # SPI security parameter index
('seq', 'I', 0) # sequence no.
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 2) * 4
self.auth_data = self.data[:(self.len - 1) * 4]
class IP6ESPHeader(IP6ExtensionHeader):
__hdr__ = (
('spi', 'I', 0),
('seq', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__ + len(self.data)
ext_hdrs = [ip.IP_PROTO_HOPOPTS, ip.IP_PROTO_ROUTING, ip.IP_PROTO_FRAGMENT, ip.IP_PROTO_AH, ip.IP_PROTO_ESP,
ip.IP_PROTO_DSTOPTS]
ext_hdrs_cls = {ip.IP_PROTO_HOPOPTS: IP6HopOptsHeader,
ip.IP_PROTO_ROUTING: IP6RoutingHeader,
ip.IP_PROTO_FRAGMENT: IP6FragmentHeader,
ip.IP_PROTO_ESP: IP6ESPHeader,
ip.IP_PROTO_AH: IP6AHHeader,
ip.IP_PROTO_DSTOPTS: IP6DstOptsHeader}
# Unit tests
def test_ipg():
s = (b'\x60\x00\x00\x00\x00\x28\x06\x40\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11\x24\xff\xfe\x8c'
b'\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80\x72\xcd\xca\x00\x16'
b'\x04\x84\x46\xd5\x00\x00\x00\x00\xa0\x02\xff\xff\xf8\x09\x00\x00\x02\x04\x05\xa0\x01\x03'
b'\x03\x00\x01\x01\x08\x0a\x7d\x18\x35\x3f\x00\x00\x00\x00')
_ip = IP6(s)
# basic properties
assert _ip.v == 6
assert _ip.fc == 0
assert _ip.flow == 0
_ip.data.sum = 0
s2 = bytes(_ip)
assert s == s2
def test_ip6_routing_header():
s = (b'\x60\x00\x00\x00\x00\x3c\x2b\x40\x20\x48\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xde\xca\x20\x47\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02'
b'\x00\x00\x00\x00\x20\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x20\x22'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00\x50\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x50\x02\x20\x00\x91\x7f\x00\x00')
_ip = IP6(s)
s2 = bytes(_ip)
# 43 is Routing header id
assert len(_ip.extension_hdrs[43].addresses) == 2
assert _ip.tcp
assert s == s2
def test_ip6_fragment_header():
s = b'\x06\xee\xff\xfb\x00\x00\xff\xff'
fh = IP6FragmentHeader(s)
# s2 = str(fh) variable 's2' is not used
assert fh.nxt == 6
assert fh.id == 65535
assert fh.frag_off == 8191
assert fh.m_flag == 1
assert bytes(fh) == s
# IP6 with fragment header
s = (b'\x60\x00\x00\x00\x00\x10\x2c\x00\x02\x22\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x02\x03\x33\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x29\x00\x00\x01'
b'\x00\x00\x00\x00\x60\x00\x00\x00\x00\x10\x2c\x00')
_ip = IP6(s)
assert bytes(_ip) == s
def test_ip6_options_header():
s = (b'\x3b\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00')
options = IP6OptsHeader(s).options
assert len(options) == 3
assert bytes(IP6OptsHeader(s)) == s
def test_ip6_ah_header():
s = b'\x3b\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
ah = IP6AHHeader(s)
assert ah.length == 24
assert ah.auth_data == b'xxxxxxxx'
assert ah.spi == 0x2020202
assert ah.seq == 0x1010101
assert bytes(ah) == s
def test_ip6_esp_header():
s = (b'\x00\x00\x01\x00\x00\x00\x00\x44\xe2\x4f\x9e\x68\xf3\xcd\xb1\x5f\x61\x65\x42\x8b\x78\x0b'
b'\x4a\xfd\x13\xf0\x15\x98\xf5\x55\x16\xa8\x12\xb3\xb8\x4d\xbc\x16\xb2\x14\xbe\x3d\xf9\x96'
b'\xd4\xa0\x39\x1f\x85\x74\x25\x81\x83\xa6\x0d\x99\xb6\xba\xa3\xcc\xb6\xe0\x9a\x78\xee\xf2'
b'\xaf\x9a')
esp = IP6ESPHeader(s)
assert esp.length == 68
assert esp.spi == 256
assert bytes(esp) == s
def test_ip6_extension_headers():
p = (b'\x60\x00\x00\x00\x00\x3c\x2b\x40\x20\x48\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xde\xca\x20\x47\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02'
b'\x00\x00\x00\x00\x20\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x20\x22'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00\x50\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x50\x02\x20\x00\x91\x7f\x00\x00')
_ip = IP6(p)
o = (b'\x3b\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00')
_ip.extension_hdrs[0] = IP6HopOptsHeader(o)
fh = b'\x06\xee\xff\xfb\x00\x00\xff\xff'
_ip.extension_hdrs[44] = IP6FragmentHeader(fh)
ah = b'\x3b\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
_ip.extension_hdrs[51] = IP6AHHeader(ah)
do = b'\x3b\x02\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
_ip.extension_hdrs[60] = IP6DstOptsHeader(do)
assert len(_ip.extension_hdrs) == 5
def test_ip6_all_extension_headers(): # https://github.com/kbandla/dpkt/pull/403
s = (b'\x60\x00\x00\x00\x00\x47\x3c\x40\xfe\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x02\xfe\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x3c\x00\x01\x04'
b'\x00\x00\x00\x00\x3c\x00\x01\x04\x00\x00\x00\x00\x2c\x00\x01\x04\x00\x00\x00\x00\x2c\x00'
b'\x00\x00\x00\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x00\x00\x2c\x00\x01\x04\x00\x00\x00\x00'
b'\x3a\x00\x00\x00\x00\x00\x00\x00\x80\x00\xd8\xe5\x0c\x1a\x00\x00\x50\x61\x79\x4c\x6f\x61'
b'\x64')
_ip = IP6(s)
assert _ip.p == 58 # ICMPv6
hdrs = _ip.all_extension_headers
assert len(hdrs) == 7
assert isinstance(hdrs[0], IP6DstOptsHeader)
assert isinstance(hdrs[3], IP6FragmentHeader)
assert isinstance(hdrs[5], IP6DstOptsHeader)
assert bytes(_ip) == s
if __name__ == '__main__':
test_ipg()
test_ip6_routing_header()
test_ip6_fragment_header()
test_ip6_options_header()
test_ip6_ah_header()
test_ip6_esp_header()
test_ip6_extension_headers()
test_ip6_all_extension_headers()
print('Tests Successful...')
| fc | identifier_name |
schema.go | // Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memstore
import (
"encoding/json"
"sync"
"unsafe"
memCom "github.com/uber/aresdb/memstore/common"
"github.com/uber/aresdb/metastore"
metaCom "github.com/uber/aresdb/metastore/common"
"github.com/uber/aresdb/utils"
)
// TableSchema stores metadata of the table such as columns and primary keys.
// It also stores the dictionaries for enum columns.
type TableSchema struct {
sync.RWMutex `json:"-"`
// Main schema of the table. Mutable.
Schema metaCom.Table `json:"schema"`
// Maps from column names to their IDs. Mutable.
ColumnIDs map[string]int `json:"columnIDs"`
// Maps from enum column names to their case dictionaries. Mutable.
EnumDicts map[string]EnumDict `json:"enumDicts"`
// DataType for each column ordered by column ID. Mutable.
ValueTypeByColumn []memCom.DataType `json:"valueTypeByColumn"`
// Number of bytes in the primary key. Immutable.
PrimaryKeyBytes int `json:"primaryKeyBytes"`
// Types of each primary key column. Immutable.
PrimaryKeyColumnTypes []memCom.DataType `json:"primaryKeyColumnTypes"`
// Default values of each column. Mutable. Nil means default value is not set.
DefaultValues []*memCom.DataValue `json:"-"`
}
// EnumDict contains mapping from and to enum strings to numbers.
type EnumDict struct {
// Either 0x100 for small_enum, or 0x10000 for big_enum.
Capacity int `json:"capacity"`
Dict map[string]int `json:"dict"`
ReverseDict []string `json:"reverseDict"`
}
// NewTableSchema creates a new table schema object from metaStore table object,
// this does not set enum cases.
func NewTableSchema(table *metaCom.Table) *TableSchema {
tableSchema := &TableSchema{
Schema: *table,
ColumnIDs: make(map[string]int),
EnumDicts: make(map[string]EnumDict),
ValueTypeByColumn: make([]memCom.DataType, len(table.Columns)),
PrimaryKeyColumnTypes: make([]memCom.DataType, len(table.PrimaryKeyColumns)),
DefaultValues: make([]*memCom.DataValue, len(table.Columns)),
}
for id, column := range table.Columns {
if !column.Deleted {
tableSchema.ColumnIDs[column.Name] = id
}
tableSchema.ValueTypeByColumn[id] = memCom.DataTypeForColumn(column)
}
for i, columnID := range table.PrimaryKeyColumns {
columnType := tableSchema.ValueTypeByColumn[columnID]
tableSchema.PrimaryKeyColumnTypes[i] = columnType
dataBits := memCom.DataTypeBits(columnType)
if dataBits < 8 {
dataBits = 8
}
tableSchema.PrimaryKeyBytes += dataBits / 8
}
return tableSchema
}
// MarshalJSON marshals TableSchema into json.
func (t *TableSchema) MarshalJSON() ([]byte, error) {
// Avoid loop json.Marshal calls.
type alias TableSchema
t.RLock()
defer t.RUnlock()
return json.Marshal((*alias)(t))
}
// SetTable sets a updated table and update TableSchema,
// should acquire lock before calling.
func (t *TableSchema) SetTable(table *metaCom.Table) {
t.Schema = *table
for id, column := range table.Columns {
if !column.Deleted {
t.ColumnIDs[column.Name] = id
} else {
delete(t.ColumnIDs, column.Name)
}
if id >= len(t.ValueTypeByColumn) {
t.ValueTypeByColumn = append(t.ValueTypeByColumn, memCom.DataTypeForColumn(column))
}
if id >= len(t.DefaultValues) {
t.DefaultValues = append(t.DefaultValues, nil)
}
}
}
// SetDefaultValue parses the default value string if present and sets to TableSchema.
// Schema lock should be acquired and release by caller and enum dict should already be
// created/update before this function.
func (t *TableSchema) SetDefaultValue(columnID int) {
// Default values are already set.
if t.DefaultValues[columnID] != nil {
return
}
column := t.Schema.Columns[columnID]
defStrVal := column.DefaultValue
if defStrVal == nil || column.Deleted {
t.DefaultValues[columnID] = &memCom.NullDataValue
return
}
dataType := t.ValueTypeByColumn[columnID]
dataTypeName := memCom.DataTypeName[dataType]
val := memCom.DataValue{
Valid: true,
DataType: dataType,
}
if dataType == memCom.SmallEnum || dataType == memCom.BigEnum {
enumDict, ok := t.EnumDicts[column.Name]
if !ok {
// Should no happen since the enum dict should already be created.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot find EnumDict for column")
}
enumVal, ok := enumDict.Dict[*defStrVal]
if !ok {
// Should no happen since the enum value should already be created.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot find enum value for column")
}
if dataType == memCom.SmallEnum {
enumValUint8 := uint8(enumVal)
val.OtherVal = unsafe.Pointer(&enumValUint8)
} else {
enumValUint16 := uint16(enumVal)
val.OtherVal = unsafe.Pointer(&enumValUint16)
}
} else {
dataValue, err := memCom.ValueFromString(*defStrVal, dataType)
if err != nil {
// Should not happen since the string value is already validated by schema handler.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot parse default value")
}
if dataType == memCom.Bool {
val.IsBool = true
val.BoolVal = dataValue.BoolVal
} else {
val.OtherVal = dataValue.OtherVal
}
}
val.CmpFunc = memCom.GetCompareFunc(dataType)
t.DefaultValues[columnID] = &val
return
}
// createEnumDict creates the enum dictionary for the specified column with the
// specified initial cases, and attaches it to TableSchema object.
// Caller should acquire the schema lock before calling this function.
func (t *TableSchema) createEnumDict(columnName string, enumCases []string) {
columnID := t.ColumnIDs[columnName]
dataType := t.ValueTypeByColumn[columnID]
enumCapacity := 1 << uint(memCom.DataTypeBits(dataType))
enumDict := map[string]int{}
for id, enumCase := range enumCases {
enumDict[enumCase] = id
}
t.EnumDicts[columnName] = EnumDict{
Capacity: enumCapacity,
Dict: enumDict,
ReverseDict: enumCases,
}
}
// GetValueTypeByColumn makes a copy of the ValueTypeByColumn so callers don't have to hold a read
// lock to access it.
func (t *TableSchema) GetValueTypeByColumn() []memCom.DataType {
t.RLock()
defer t.RUnlock()
return t.ValueTypeByColumn
}
// GetPrimaryKeyColumns makes a copy of the Schema.PrimaryKeyColumns so callers don't have to hold
// a read lock to access it.
func (t *TableSchema) GetPrimaryKeyColumns() []int {
t.RLock()
defer t.RUnlock()
return t.Schema.PrimaryKeyColumns
}
// GetColumnDeletions returns a boolean slice that indicates whether a column has been deleted. Callers
// need to hold a read lock.
func (t *TableSchema) GetColumnDeletions() []bool {
deletedByColumn := make([]bool, len(t.Schema.Columns))
for columnID, column := range t.Schema.Columns {
deletedByColumn[columnID] = column.Deleted
}
return deletedByColumn
}
// GetColumnIfNonNilDefault returns a boolean slice that indicates whether a column has non nil default value. Callers
// need to hold a read lock.
func (t *TableSchema) GetColumnIfNonNilDefault() []bool {
nonNilDefaultByColumn := make([]bool, len(t.Schema.Columns))
for columnID, column := range t.Schema.Columns {
nonNilDefaultByColumn[columnID] = column.DefaultValue != nil
}
return nonNilDefaultByColumn
}
// GetArchivingSortColumns makes a copy of the Schema.ArchivingSortColumns so
// callers don't have to hold a read lock to access it.
func (t *TableSchema) GetArchivingSortColumns() []int {
t.RLock()
defer t.RUnlock()
return t.Schema.ArchivingSortColumns
}
// FetchSchema fetches schema from metaStore and updates in-memory copy of table schema,
// and set up watch channels for metaStore schema changes, used for bootstrapping mem store.
func (m *memStoreImpl) FetchSchema() error {
tables, err := m.metaStore.ListTables()
if err != nil {
return utils.StackError(err, "Failed to list tables from meta")
}
for _, tableName := range tables {
err := m.fetchTable(tableName)
if err != nil {
return err
}
}
// watch table addition/modification
tableSchemaChangeEvents, done, err := m.metaStore.WatchTableSchemaEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableSchemaChange(tableSchemaChangeEvents, done)
// watch table deletion
tableListChangeEvents, done, err := m.metaStore.WatchTableListEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableListChange(tableListChangeEvents, done)
// watch enum cases appending
m.RLock()
for _, tableSchema := range m.TableSchemas {
for columnName, enumCases := range tableSchema.EnumDicts {
err := m.watchEnumCases(tableSchema.Schema.Name, columnName, len(enumCases.ReverseDict))
if err != nil {
return err
}
}
}
m.RUnlock()
return nil
}
func (m *memStoreImpl) fetchTable(tableName string) error {
table, err := m.metaStore.GetTable(tableName)
if err != nil {
if err != metastore.ErrTableDoesNotExist {
return utils.StackError(err, "Failed to get table schema for table %s from meta", tableName)
}
} else {
tableSchema := NewTableSchema(table)
for columnID, column := range table.Columns {
if !column.Deleted {
if column.IsEnumColumn() {
enumCases, err := m.metaStore.GetEnumDict(tableName, column.Name)
if err != nil {
if err != metastore.ErrTableDoesNotExist && err != metastore.ErrColumnDoesNotExist {
return utils.StackError(err, "Failed to fetch enum cases for table: %s, column: %s", tableName, column.Name)
}
} else {
tableSchema.createEnumDict(column.Name, enumCases)
}
}
}
tableSchema.SetDefaultValue(columnID)
}
m.Lock()
m.TableSchemas[tableName] = tableSchema
m.Unlock()
}
return nil
}
// watch enumCases will setup watch channels for each enum column.
func (m *memStoreImpl) watchEnumCases(tableName, columnName string, startCase int) error {
enumDictChangeEvents, done, err := m.metaStore.WatchEnumDictEvents(tableName, columnName, startCase)
if err != nil {
if err != metastore.ErrTableDoesNotExist && err != metastore.ErrColumnDoesNotExist {
return utils.StackError(err, "Failed to watch enum case events")
}
} else {
go m.handleEnumDictChange(tableName, columnName, enumDictChangeEvents, done)
}
return nil
}
// handleTableListChange handles table deletion events from metaStore.
func (m *memStoreImpl) handleTableListChange(tableListChangeEvents <-chan []string, done chan<- struct{}) {
for newTableList := range tableListChangeEvents {
m.applyTableList(newTableList)
done <- struct{}{}
}
close(done)
}
func (m *memStoreImpl) applyTableList(newTableList []string) {
m.Lock()
for tableName, tableSchema := range m.TableSchemas {
if utils.IndexOfStr(newTableList, tableName) < 0 {
// detach shards and schema from map
// to prevent new usage
tableShards := m.TableShards[tableName]
delete(m.TableSchemas, tableName)
delete(m.TableShards, tableName)
// only one table deletion at a time
m.Unlock()
for shardID, shard := range tableShards {
shard.Destruct()
m.diskStore.DeleteTableShard(tableName, shardID)
}
m.scheduler.DeleteTable(tableName, tableSchema.Schema.IsFactTable)
return
}
}
m.Unlock()
}
// handleTableSchemaChange handles table schema change event from metaStore including new table schema.
func (m *memStoreImpl) handleTableSchemaChange(tableSchemaChangeEvents <-chan *metaCom.Table, done chan<- struct{}) {
for table := range tableSchemaChangeEvents {
m.applyTableSchema(table)
done <- struct{}{}
}
close(done)
}
func (m *memStoreImpl) applyTableSchema(newTable *metaCom.Table) {
tableName := newTable.Name
newEnumColumns := []string{}
// default start watching from first enumCase
startEnumID := 0
defer func() {
for _, column := range newEnumColumns {
err := m.watchEnumCases(tableName, column, startEnumID)
if err != nil {
utils.GetLogger().With(
"error", err.Error(),
"table", tableName,
"column", column).
Panic("Failed to watch enum dict events")
}
}
}()
m.Lock()
tableSchema, tableExist := m.TableSchemas[tableName]
// new table
if !tableExist {
tableSchema = NewTableSchema(newTable)
for columnID, column := range newTable.Columns {
if !column.Deleted {
if column.IsEnumColumn() {
var enumCases []string
if column.DefaultValue != nil {
enumCases = append(enumCases, *column.DefaultValue)
// default value is already appended, start watching from 1
startEnumID = 1
}
tableSchema.createEnumDict(column.Name, enumCases)
newEnumColumns = append(newEnumColumns, column.Name)
}
}
tableSchema.SetDefaultValue(columnID)
}
m.TableSchemas[newTable.Name] = tableSchema
m.Unlock()
return
}
m.Unlock()
var columnsToDelete []int
tableSchema.Lock()
oldColumns := tableSchema.Schema.Columns
tableSchema.SetTable(newTable)
for columnID, column := range newTable.Columns {
tableSchema.SetDefaultValue(columnID)
if column.Deleted {
if columnID < len(oldColumns) && !oldColumns[columnID].Deleted { // new deletions only
delete(tableSchema.EnumDicts, column.Name)
columnsToDelete = append(columnsToDelete, columnID)
}
} else {
if column.IsEnumColumn() {
_, exist := tableSchema.EnumDicts[column.Name]
if !exist {
var enumCases []string
if column.DefaultValue != nil {
enumCases = append(enumCases, *column.DefaultValue)
// default value is already appended, start watching from 1
startEnumID = 1
}
tableSchema.createEnumDict(column.Name, enumCases)
newEnumColumns = append(newEnumColumns, column.Name)
}
}
var oldPreloadingDays int
newPreloadingDays := column.Config.PreloadingDays
// preloading will be triggered if
// 1. this is a new column and PreloadingDays > 0
// 2. this is a old column and PreloadingDays > oldPreloadingDays
if columnID < len(oldColumns) {
oldPreloadingDays = oldColumns[columnID].Config.PreloadingDays
}
m.HostMemManager.TriggerPreload(tableName, columnID, oldPreloadingDays, newPreloadingDays)
}
}
tableSchema.Unlock()
for _, columnID := range columnsToDelete {
var shards []*TableShard
m.RLock()
for _, shard := range m.TableShards[tableName] {
shard.Users.Add(1)
shards = append(shards, shard)
}
m.RUnlock()
for _, shard := range shards {
// May block for extended amount of time during archiving
shard.DeleteColumn(columnID)
shard.Users.Done()
}
}
}
// handleEnumDictChange handles enum dict change event from metaStore for specific table and column.
func (m *memStoreImpl) handleEnumDictChange(tableName, columnName string, enumDictChangeEvents <-chan string, done chan<- struct{}) {
for newEnumCase := range enumDictChangeEvents {
m.applyEnumCase(tableName, columnName, newEnumCase)
}
close(done)
}
func (m *memStoreImpl) applyEnumCase(tableName, columnName string, newEnumCase string) {
m.RLock()
tableSchema, tableExist := m.TableSchemas[tableName]
if !tableExist {
m.RUnlock() | tableSchema.Lock()
m.RUnlock()
enumDict, columnExist := tableSchema.EnumDicts[columnName]
if !columnExist {
tableSchema.Unlock()
return
}
enumDict.Dict[newEnumCase] = len(enumDict.ReverseDict)
enumDict.ReverseDict = append(enumDict.ReverseDict, newEnumCase)
tableSchema.EnumDicts[columnName] = enumDict
tableSchema.Unlock()
} | return
}
| random_line_split |
schema.go | // Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memstore
import (
"encoding/json"
"sync"
"unsafe"
memCom "github.com/uber/aresdb/memstore/common"
"github.com/uber/aresdb/metastore"
metaCom "github.com/uber/aresdb/metastore/common"
"github.com/uber/aresdb/utils"
)
// TableSchema stores metadata of the table such as columns and primary keys.
// It also stores the dictionaries for enum columns.
type TableSchema struct {
sync.RWMutex `json:"-"`
// Main schema of the table. Mutable.
Schema metaCom.Table `json:"schema"`
// Maps from column names to their IDs. Mutable.
ColumnIDs map[string]int `json:"columnIDs"`
// Maps from enum column names to their case dictionaries. Mutable.
EnumDicts map[string]EnumDict `json:"enumDicts"`
// DataType for each column ordered by column ID. Mutable.
ValueTypeByColumn []memCom.DataType `json:"valueTypeByColumn"`
// Number of bytes in the primary key. Immutable.
PrimaryKeyBytes int `json:"primaryKeyBytes"`
// Types of each primary key column. Immutable.
PrimaryKeyColumnTypes []memCom.DataType `json:"primaryKeyColumnTypes"`
// Default values of each column. Mutable. Nil means default value is not set.
DefaultValues []*memCom.DataValue `json:"-"`
}
// EnumDict contains mapping from and to enum strings to numbers.
type EnumDict struct {
// Either 0x100 for small_enum, or 0x10000 for big_enum.
Capacity int `json:"capacity"`
Dict map[string]int `json:"dict"`
ReverseDict []string `json:"reverseDict"`
}
// NewTableSchema creates a new table schema object from metaStore table object,
// this does not set enum cases.
func NewTableSchema(table *metaCom.Table) *TableSchema {
tableSchema := &TableSchema{
Schema: *table,
ColumnIDs: make(map[string]int),
EnumDicts: make(map[string]EnumDict),
ValueTypeByColumn: make([]memCom.DataType, len(table.Columns)),
PrimaryKeyColumnTypes: make([]memCom.DataType, len(table.PrimaryKeyColumns)),
DefaultValues: make([]*memCom.DataValue, len(table.Columns)),
}
for id, column := range table.Columns {
if !column.Deleted {
tableSchema.ColumnIDs[column.Name] = id
}
tableSchema.ValueTypeByColumn[id] = memCom.DataTypeForColumn(column)
}
for i, columnID := range table.PrimaryKeyColumns {
columnType := tableSchema.ValueTypeByColumn[columnID]
tableSchema.PrimaryKeyColumnTypes[i] = columnType
dataBits := memCom.DataTypeBits(columnType)
if dataBits < 8 {
dataBits = 8
}
tableSchema.PrimaryKeyBytes += dataBits / 8
}
return tableSchema
}
// MarshalJSON marshals TableSchema into json.
func (t *TableSchema) MarshalJSON() ([]byte, error) {
// Avoid loop json.Marshal calls.
type alias TableSchema
t.RLock()
defer t.RUnlock()
return json.Marshal((*alias)(t))
}
// SetTable sets a updated table and update TableSchema,
// should acquire lock before calling.
func (t *TableSchema) SetTable(table *metaCom.Table) {
t.Schema = *table
for id, column := range table.Columns {
if !column.Deleted {
t.ColumnIDs[column.Name] = id
} else {
delete(t.ColumnIDs, column.Name)
}
if id >= len(t.ValueTypeByColumn) {
t.ValueTypeByColumn = append(t.ValueTypeByColumn, memCom.DataTypeForColumn(column))
}
if id >= len(t.DefaultValues) {
t.DefaultValues = append(t.DefaultValues, nil)
}
}
}
// SetDefaultValue parses the default value string if present and sets to TableSchema.
// Schema lock should be acquired and release by caller and enum dict should already be
// created/update before this function.
func (t *TableSchema) SetDefaultValue(columnID int) {
// Default values are already set.
if t.DefaultValues[columnID] != nil {
return
}
column := t.Schema.Columns[columnID]
defStrVal := column.DefaultValue
if defStrVal == nil || column.Deleted {
t.DefaultValues[columnID] = &memCom.NullDataValue
return
}
dataType := t.ValueTypeByColumn[columnID]
dataTypeName := memCom.DataTypeName[dataType]
val := memCom.DataValue{
Valid: true,
DataType: dataType,
}
if dataType == memCom.SmallEnum || dataType == memCom.BigEnum {
enumDict, ok := t.EnumDicts[column.Name]
if !ok {
// Should no happen since the enum dict should already be created.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot find EnumDict for column")
}
enumVal, ok := enumDict.Dict[*defStrVal]
if !ok {
// Should no happen since the enum value should already be created.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot find enum value for column")
}
if dataType == memCom.SmallEnum {
enumValUint8 := uint8(enumVal)
val.OtherVal = unsafe.Pointer(&enumValUint8)
} else {
enumValUint16 := uint16(enumVal)
val.OtherVal = unsafe.Pointer(&enumValUint16)
}
} else {
dataValue, err := memCom.ValueFromString(*defStrVal, dataType)
if err != nil {
// Should not happen since the string value is already validated by schema handler.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot parse default value")
}
if dataType == memCom.Bool {
val.IsBool = true
val.BoolVal = dataValue.BoolVal
} else {
val.OtherVal = dataValue.OtherVal
}
}
val.CmpFunc = memCom.GetCompareFunc(dataType)
t.DefaultValues[columnID] = &val
return
}
// createEnumDict creates the enum dictionary for the specified column with the
// specified initial cases, and attaches it to TableSchema object.
// Caller should acquire the schema lock before calling this function.
func (t *TableSchema) createEnumDict(columnName string, enumCases []string) {
columnID := t.ColumnIDs[columnName]
dataType := t.ValueTypeByColumn[columnID]
enumCapacity := 1 << uint(memCom.DataTypeBits(dataType))
enumDict := map[string]int{}
for id, enumCase := range enumCases {
enumDict[enumCase] = id
}
t.EnumDicts[columnName] = EnumDict{
Capacity: enumCapacity,
Dict: enumDict,
ReverseDict: enumCases,
}
}
// GetValueTypeByColumn makes a copy of the ValueTypeByColumn so callers don't have to hold a read
// lock to access it.
func (t *TableSchema) GetValueTypeByColumn() []memCom.DataType {
t.RLock()
defer t.RUnlock()
return t.ValueTypeByColumn
}
// GetPrimaryKeyColumns makes a copy of the Schema.PrimaryKeyColumns so callers don't have to hold
// a read lock to access it.
func (t *TableSchema) GetPrimaryKeyColumns() []int {
t.RLock()
defer t.RUnlock()
return t.Schema.PrimaryKeyColumns
}
// GetColumnDeletions returns a boolean slice that indicates whether a column has been deleted. Callers
// need to hold a read lock.
func (t *TableSchema) GetColumnDeletions() []bool {
deletedByColumn := make([]bool, len(t.Schema.Columns))
for columnID, column := range t.Schema.Columns {
deletedByColumn[columnID] = column.Deleted
}
return deletedByColumn
}
// GetColumnIfNonNilDefault returns a boolean slice that indicates whether a column has non nil default value. Callers
// need to hold a read lock.
func (t *TableSchema) GetColumnIfNonNilDefault() []bool {
nonNilDefaultByColumn := make([]bool, len(t.Schema.Columns))
for columnID, column := range t.Schema.Columns {
nonNilDefaultByColumn[columnID] = column.DefaultValue != nil
}
return nonNilDefaultByColumn
}
// GetArchivingSortColumns makes a copy of the Schema.ArchivingSortColumns so
// callers don't have to hold a read lock to access it.
func (t *TableSchema) GetArchivingSortColumns() []int {
t.RLock()
defer t.RUnlock()
return t.Schema.ArchivingSortColumns
}
// FetchSchema fetches schema from metaStore and updates in-memory copy of table schema,
// and set up watch channels for metaStore schema changes, used for bootstrapping mem store.
func (m *memStoreImpl) FetchSchema() error {
tables, err := m.metaStore.ListTables()
if err != nil {
return utils.StackError(err, "Failed to list tables from meta")
}
for _, tableName := range tables {
err := m.fetchTable(tableName)
if err != nil {
return err
}
}
// watch table addition/modification
tableSchemaChangeEvents, done, err := m.metaStore.WatchTableSchemaEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableSchemaChange(tableSchemaChangeEvents, done)
// watch table deletion
tableListChangeEvents, done, err := m.metaStore.WatchTableListEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableListChange(tableListChangeEvents, done)
// watch enum cases appending
m.RLock()
for _, tableSchema := range m.TableSchemas {
for columnName, enumCases := range tableSchema.EnumDicts {
err := m.watchEnumCases(tableSchema.Schema.Name, columnName, len(enumCases.ReverseDict))
if err != nil {
return err
}
}
}
m.RUnlock()
return nil
}
func (m *memStoreImpl) fetchTable(tableName string) error {
table, err := m.metaStore.GetTable(tableName)
if err != nil {
if err != metastore.ErrTableDoesNotExist {
return utils.StackError(err, "Failed to get table schema for table %s from meta", tableName)
}
} else {
tableSchema := NewTableSchema(table)
for columnID, column := range table.Columns {
if !column.Deleted {
if column.IsEnumColumn() {
enumCases, err := m.metaStore.GetEnumDict(tableName, column.Name)
if err != nil {
if err != metastore.ErrTableDoesNotExist && err != metastore.ErrColumnDoesNotExist {
return utils.StackError(err, "Failed to fetch enum cases for table: %s, column: %s", tableName, column.Name)
}
} else {
tableSchema.createEnumDict(column.Name, enumCases)
}
}
}
tableSchema.SetDefaultValue(columnID)
}
m.Lock()
m.TableSchemas[tableName] = tableSchema
m.Unlock()
}
return nil
}
// watch enumCases will setup watch channels for each enum column.
func (m *memStoreImpl) watchEnumCases(tableName, columnName string, startCase int) error {
enumDictChangeEvents, done, err := m.metaStore.WatchEnumDictEvents(tableName, columnName, startCase)
if err != nil {
if err != metastore.ErrTableDoesNotExist && err != metastore.ErrColumnDoesNotExist {
return utils.StackError(err, "Failed to watch enum case events")
}
} else {
go m.handleEnumDictChange(tableName, columnName, enumDictChangeEvents, done)
}
return nil
}
// handleTableListChange handles table deletion events from metaStore.
func (m *memStoreImpl) handleTableListChange(tableListChangeEvents <-chan []string, done chan<- struct{}) {
for newTableList := range tableListChangeEvents {
m.applyTableList(newTableList)
done <- struct{}{}
}
close(done)
}
func (m *memStoreImpl) applyTableList(newTableList []string) {
m.Lock()
for tableName, tableSchema := range m.TableSchemas {
if utils.IndexOfStr(newTableList, tableName) < 0 {
// detach shards and schema from map
// to prevent new usage
tableShards := m.TableShards[tableName]
delete(m.TableSchemas, tableName)
delete(m.TableShards, tableName)
// only one table deletion at a time
m.Unlock()
for shardID, shard := range tableShards {
shard.Destruct()
m.diskStore.DeleteTableShard(tableName, shardID)
}
m.scheduler.DeleteTable(tableName, tableSchema.Schema.IsFactTable)
return
}
}
m.Unlock()
}
// handleTableSchemaChange handles table schema change event from metaStore including new table schema.
func (m *memStoreImpl) handleTableSchemaChange(tableSchemaChangeEvents <-chan *metaCom.Table, done chan<- struct{}) {
for table := range tableSchemaChangeEvents {
m.applyTableSchema(table)
done <- struct{}{}
}
close(done)
}
func (m *memStoreImpl) applyTableSchema(newTable *metaCom.Table) {
tableName := newTable.Name
newEnumColumns := []string{}
// default start watching from first enumCase
startEnumID := 0
defer func() {
for _, column := range newEnumColumns {
err := m.watchEnumCases(tableName, column, startEnumID)
if err != nil {
utils.GetLogger().With(
"error", err.Error(),
"table", tableName,
"column", column).
Panic("Failed to watch enum dict events")
}
}
}()
m.Lock()
tableSchema, tableExist := m.TableSchemas[tableName]
// new table
if !tableExist {
tableSchema = NewTableSchema(newTable)
for columnID, column := range newTable.Columns {
if !column.Deleted {
if column.IsEnumColumn() {
var enumCases []string
if column.DefaultValue != nil {
enumCases = append(enumCases, *column.DefaultValue)
// default value is already appended, start watching from 1
startEnumID = 1
}
tableSchema.createEnumDict(column.Name, enumCases)
newEnumColumns = append(newEnumColumns, column.Name)
}
}
tableSchema.SetDefaultValue(columnID)
}
m.TableSchemas[newTable.Name] = tableSchema
m.Unlock()
return
}
m.Unlock()
var columnsToDelete []int
tableSchema.Lock()
oldColumns := tableSchema.Schema.Columns
tableSchema.SetTable(newTable)
for columnID, column := range newTable.Columns {
tableSchema.SetDefaultValue(columnID)
if column.Deleted {
if columnID < len(oldColumns) && !oldColumns[columnID].Deleted { // new deletions only
delete(tableSchema.EnumDicts, column.Name)
columnsToDelete = append(columnsToDelete, columnID)
}
} else {
if column.IsEnumColumn() {
_, exist := tableSchema.EnumDicts[column.Name]
if !exist {
var enumCases []string
if column.DefaultValue != nil |
tableSchema.createEnumDict(column.Name, enumCases)
newEnumColumns = append(newEnumColumns, column.Name)
}
}
var oldPreloadingDays int
newPreloadingDays := column.Config.PreloadingDays
// preloading will be triggered if
// 1. this is a new column and PreloadingDays > 0
// 2. this is a old column and PreloadingDays > oldPreloadingDays
if columnID < len(oldColumns) {
oldPreloadingDays = oldColumns[columnID].Config.PreloadingDays
}
m.HostMemManager.TriggerPreload(tableName, columnID, oldPreloadingDays, newPreloadingDays)
}
}
tableSchema.Unlock()
for _, columnID := range columnsToDelete {
var shards []*TableShard
m.RLock()
for _, shard := range m.TableShards[tableName] {
shard.Users.Add(1)
shards = append(shards, shard)
}
m.RUnlock()
for _, shard := range shards {
// May block for extended amount of time during archiving
shard.DeleteColumn(columnID)
shard.Users.Done()
}
}
}
// handleEnumDictChange handles enum dict change event from metaStore for specific table and column.
func (m *memStoreImpl) handleEnumDictChange(tableName, columnName string, enumDictChangeEvents <-chan string, done chan<- struct{}) {
for newEnumCase := range enumDictChangeEvents {
m.applyEnumCase(tableName, columnName, newEnumCase)
}
close(done)
}
func (m *memStoreImpl) applyEnumCase(tableName, columnName string, newEnumCase string) {
m.RLock()
tableSchema, tableExist := m.TableSchemas[tableName]
if !tableExist {
m.RUnlock()
return
}
tableSchema.Lock()
m.RUnlock()
enumDict, columnExist := tableSchema.EnumDicts[columnName]
if !columnExist {
tableSchema.Unlock()
return
}
enumDict.Dict[newEnumCase] = len(enumDict.ReverseDict)
enumDict.ReverseDict = append(enumDict.ReverseDict, newEnumCase)
tableSchema.EnumDicts[columnName] = enumDict
tableSchema.Unlock()
}
| {
enumCases = append(enumCases, *column.DefaultValue)
// default value is already appended, start watching from 1
startEnumID = 1
} | conditional_block |
schema.go | // Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memstore
import (
"encoding/json"
"sync"
"unsafe"
memCom "github.com/uber/aresdb/memstore/common"
"github.com/uber/aresdb/metastore"
metaCom "github.com/uber/aresdb/metastore/common"
"github.com/uber/aresdb/utils"
)
// TableSchema stores metadata of the table such as columns and primary keys.
// It also stores the dictionaries for enum columns.
type TableSchema struct {
sync.RWMutex `json:"-"`
// Main schema of the table. Mutable.
Schema metaCom.Table `json:"schema"`
// Maps from column names to their IDs. Mutable.
ColumnIDs map[string]int `json:"columnIDs"`
// Maps from enum column names to their case dictionaries. Mutable.
EnumDicts map[string]EnumDict `json:"enumDicts"`
// DataType for each column ordered by column ID. Mutable.
ValueTypeByColumn []memCom.DataType `json:"valueTypeByColumn"`
// Number of bytes in the primary key. Immutable.
PrimaryKeyBytes int `json:"primaryKeyBytes"`
// Types of each primary key column. Immutable.
PrimaryKeyColumnTypes []memCom.DataType `json:"primaryKeyColumnTypes"`
// Default values of each column. Mutable. Nil means default value is not set.
DefaultValues []*memCom.DataValue `json:"-"`
}
// EnumDict contains mapping from and to enum strings to numbers.
type EnumDict struct {
// Either 0x100 for small_enum, or 0x10000 for big_enum.
Capacity int `json:"capacity"`
Dict map[string]int `json:"dict"`
ReverseDict []string `json:"reverseDict"`
}
// NewTableSchema creates a new table schema object from metaStore table object,
// this does not set enum cases.
func | (table *metaCom.Table) *TableSchema {
tableSchema := &TableSchema{
Schema: *table,
ColumnIDs: make(map[string]int),
EnumDicts: make(map[string]EnumDict),
ValueTypeByColumn: make([]memCom.DataType, len(table.Columns)),
PrimaryKeyColumnTypes: make([]memCom.DataType, len(table.PrimaryKeyColumns)),
DefaultValues: make([]*memCom.DataValue, len(table.Columns)),
}
for id, column := range table.Columns {
if !column.Deleted {
tableSchema.ColumnIDs[column.Name] = id
}
tableSchema.ValueTypeByColumn[id] = memCom.DataTypeForColumn(column)
}
for i, columnID := range table.PrimaryKeyColumns {
columnType := tableSchema.ValueTypeByColumn[columnID]
tableSchema.PrimaryKeyColumnTypes[i] = columnType
dataBits := memCom.DataTypeBits(columnType)
if dataBits < 8 {
dataBits = 8
}
tableSchema.PrimaryKeyBytes += dataBits / 8
}
return tableSchema
}
// MarshalJSON marshals TableSchema into json.
func (t *TableSchema) MarshalJSON() ([]byte, error) {
// Avoid loop json.Marshal calls.
type alias TableSchema
t.RLock()
defer t.RUnlock()
return json.Marshal((*alias)(t))
}
// SetTable sets a updated table and update TableSchema,
// should acquire lock before calling.
func (t *TableSchema) SetTable(table *metaCom.Table) {
t.Schema = *table
for id, column := range table.Columns {
if !column.Deleted {
t.ColumnIDs[column.Name] = id
} else {
delete(t.ColumnIDs, column.Name)
}
if id >= len(t.ValueTypeByColumn) {
t.ValueTypeByColumn = append(t.ValueTypeByColumn, memCom.DataTypeForColumn(column))
}
if id >= len(t.DefaultValues) {
t.DefaultValues = append(t.DefaultValues, nil)
}
}
}
// SetDefaultValue parses the default value string if present and sets to TableSchema.
// Schema lock should be acquired and release by caller and enum dict should already be
// created/update before this function.
func (t *TableSchema) SetDefaultValue(columnID int) {
// Default values are already set.
if t.DefaultValues[columnID] != nil {
return
}
column := t.Schema.Columns[columnID]
defStrVal := column.DefaultValue
if defStrVal == nil || column.Deleted {
t.DefaultValues[columnID] = &memCom.NullDataValue
return
}
dataType := t.ValueTypeByColumn[columnID]
dataTypeName := memCom.DataTypeName[dataType]
val := memCom.DataValue{
Valid: true,
DataType: dataType,
}
if dataType == memCom.SmallEnum || dataType == memCom.BigEnum {
enumDict, ok := t.EnumDicts[column.Name]
if !ok {
// Should no happen since the enum dict should already be created.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot find EnumDict for column")
}
enumVal, ok := enumDict.Dict[*defStrVal]
if !ok {
// Should no happen since the enum value should already be created.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot find enum value for column")
}
if dataType == memCom.SmallEnum {
enumValUint8 := uint8(enumVal)
val.OtherVal = unsafe.Pointer(&enumValUint8)
} else {
enumValUint16 := uint16(enumVal)
val.OtherVal = unsafe.Pointer(&enumValUint16)
}
} else {
dataValue, err := memCom.ValueFromString(*defStrVal, dataType)
if err != nil {
// Should not happen since the string value is already validated by schema handler.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot parse default value")
}
if dataType == memCom.Bool {
val.IsBool = true
val.BoolVal = dataValue.BoolVal
} else {
val.OtherVal = dataValue.OtherVal
}
}
val.CmpFunc = memCom.GetCompareFunc(dataType)
t.DefaultValues[columnID] = &val
return
}
// createEnumDict creates the enum dictionary for the specified column with the
// specified initial cases, and attaches it to TableSchema object.
// Caller should acquire the schema lock before calling this function.
func (t *TableSchema) createEnumDict(columnName string, enumCases []string) {
columnID := t.ColumnIDs[columnName]
dataType := t.ValueTypeByColumn[columnID]
enumCapacity := 1 << uint(memCom.DataTypeBits(dataType))
enumDict := map[string]int{}
for id, enumCase := range enumCases {
enumDict[enumCase] = id
}
t.EnumDicts[columnName] = EnumDict{
Capacity: enumCapacity,
Dict: enumDict,
ReverseDict: enumCases,
}
}
// GetValueTypeByColumn makes a copy of the ValueTypeByColumn so callers don't have to hold a read
// lock to access it.
func (t *TableSchema) GetValueTypeByColumn() []memCom.DataType {
t.RLock()
defer t.RUnlock()
return t.ValueTypeByColumn
}
// GetPrimaryKeyColumns makes a copy of the Schema.PrimaryKeyColumns so callers don't have to hold
// a read lock to access it.
func (t *TableSchema) GetPrimaryKeyColumns() []int {
t.RLock()
defer t.RUnlock()
return t.Schema.PrimaryKeyColumns
}
// GetColumnDeletions returns a boolean slice that indicates whether a column has been deleted. Callers
// need to hold a read lock.
func (t *TableSchema) GetColumnDeletions() []bool {
deletedByColumn := make([]bool, len(t.Schema.Columns))
for columnID, column := range t.Schema.Columns {
deletedByColumn[columnID] = column.Deleted
}
return deletedByColumn
}
// GetColumnIfNonNilDefault returns a boolean slice that indicates whether a column has non nil default value. Callers
// need to hold a read lock.
func (t *TableSchema) GetColumnIfNonNilDefault() []bool {
nonNilDefaultByColumn := make([]bool, len(t.Schema.Columns))
for columnID, column := range t.Schema.Columns {
nonNilDefaultByColumn[columnID] = column.DefaultValue != nil
}
return nonNilDefaultByColumn
}
// GetArchivingSortColumns makes a copy of the Schema.ArchivingSortColumns so
// callers don't have to hold a read lock to access it.
func (t *TableSchema) GetArchivingSortColumns() []int {
t.RLock()
defer t.RUnlock()
return t.Schema.ArchivingSortColumns
}
// FetchSchema fetches schema from metaStore and updates in-memory copy of table schema,
// and set up watch channels for metaStore schema changes, used for bootstrapping mem store.
func (m *memStoreImpl) FetchSchema() error {
tables, err := m.metaStore.ListTables()
if err != nil {
return utils.StackError(err, "Failed to list tables from meta")
}
for _, tableName := range tables {
err := m.fetchTable(tableName)
if err != nil {
return err
}
}
// watch table addition/modification
tableSchemaChangeEvents, done, err := m.metaStore.WatchTableSchemaEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableSchemaChange(tableSchemaChangeEvents, done)
// watch table deletion
tableListChangeEvents, done, err := m.metaStore.WatchTableListEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableListChange(tableListChangeEvents, done)
// watch enum cases appending
m.RLock()
for _, tableSchema := range m.TableSchemas {
for columnName, enumCases := range tableSchema.EnumDicts {
err := m.watchEnumCases(tableSchema.Schema.Name, columnName, len(enumCases.ReverseDict))
if err != nil {
return err
}
}
}
m.RUnlock()
return nil
}
func (m *memStoreImpl) fetchTable(tableName string) error {
table, err := m.metaStore.GetTable(tableName)
if err != nil {
if err != metastore.ErrTableDoesNotExist {
return utils.StackError(err, "Failed to get table schema for table %s from meta", tableName)
}
} else {
tableSchema := NewTableSchema(table)
for columnID, column := range table.Columns {
if !column.Deleted {
if column.IsEnumColumn() {
enumCases, err := m.metaStore.GetEnumDict(tableName, column.Name)
if err != nil {
if err != metastore.ErrTableDoesNotExist && err != metastore.ErrColumnDoesNotExist {
return utils.StackError(err, "Failed to fetch enum cases for table: %s, column: %s", tableName, column.Name)
}
} else {
tableSchema.createEnumDict(column.Name, enumCases)
}
}
}
tableSchema.SetDefaultValue(columnID)
}
m.Lock()
m.TableSchemas[tableName] = tableSchema
m.Unlock()
}
return nil
}
// watch enumCases will setup watch channels for each enum column.
func (m *memStoreImpl) watchEnumCases(tableName, columnName string, startCase int) error {
enumDictChangeEvents, done, err := m.metaStore.WatchEnumDictEvents(tableName, columnName, startCase)
if err != nil {
if err != metastore.ErrTableDoesNotExist && err != metastore.ErrColumnDoesNotExist {
return utils.StackError(err, "Failed to watch enum case events")
}
} else {
go m.handleEnumDictChange(tableName, columnName, enumDictChangeEvents, done)
}
return nil
}
// handleTableListChange handles table deletion events from metaStore.
func (m *memStoreImpl) handleTableListChange(tableListChangeEvents <-chan []string, done chan<- struct{}) {
for newTableList := range tableListChangeEvents {
m.applyTableList(newTableList)
done <- struct{}{}
}
close(done)
}
func (m *memStoreImpl) applyTableList(newTableList []string) {
m.Lock()
for tableName, tableSchema := range m.TableSchemas {
if utils.IndexOfStr(newTableList, tableName) < 0 {
// detach shards and schema from map
// to prevent new usage
tableShards := m.TableShards[tableName]
delete(m.TableSchemas, tableName)
delete(m.TableShards, tableName)
// only one table deletion at a time
m.Unlock()
for shardID, shard := range tableShards {
shard.Destruct()
m.diskStore.DeleteTableShard(tableName, shardID)
}
m.scheduler.DeleteTable(tableName, tableSchema.Schema.IsFactTable)
return
}
}
m.Unlock()
}
// handleTableSchemaChange handles table schema change event from metaStore including new table schema.
func (m *memStoreImpl) handleTableSchemaChange(tableSchemaChangeEvents <-chan *metaCom.Table, done chan<- struct{}) {
for table := range tableSchemaChangeEvents {
m.applyTableSchema(table)
done <- struct{}{}
}
close(done)
}
func (m *memStoreImpl) applyTableSchema(newTable *metaCom.Table) {
tableName := newTable.Name
newEnumColumns := []string{}
// default start watching from first enumCase
startEnumID := 0
defer func() {
for _, column := range newEnumColumns {
err := m.watchEnumCases(tableName, column, startEnumID)
if err != nil {
utils.GetLogger().With(
"error", err.Error(),
"table", tableName,
"column", column).
Panic("Failed to watch enum dict events")
}
}
}()
m.Lock()
tableSchema, tableExist := m.TableSchemas[tableName]
// new table
if !tableExist {
tableSchema = NewTableSchema(newTable)
for columnID, column := range newTable.Columns {
if !column.Deleted {
if column.IsEnumColumn() {
var enumCases []string
if column.DefaultValue != nil {
enumCases = append(enumCases, *column.DefaultValue)
// default value is already appended, start watching from 1
startEnumID = 1
}
tableSchema.createEnumDict(column.Name, enumCases)
newEnumColumns = append(newEnumColumns, column.Name)
}
}
tableSchema.SetDefaultValue(columnID)
}
m.TableSchemas[newTable.Name] = tableSchema
m.Unlock()
return
}
m.Unlock()
var columnsToDelete []int
tableSchema.Lock()
oldColumns := tableSchema.Schema.Columns
tableSchema.SetTable(newTable)
for columnID, column := range newTable.Columns {
tableSchema.SetDefaultValue(columnID)
if column.Deleted {
if columnID < len(oldColumns) && !oldColumns[columnID].Deleted { // new deletions only
delete(tableSchema.EnumDicts, column.Name)
columnsToDelete = append(columnsToDelete, columnID)
}
} else {
if column.IsEnumColumn() {
_, exist := tableSchema.EnumDicts[column.Name]
if !exist {
var enumCases []string
if column.DefaultValue != nil {
enumCases = append(enumCases, *column.DefaultValue)
// default value is already appended, start watching from 1
startEnumID = 1
}
tableSchema.createEnumDict(column.Name, enumCases)
newEnumColumns = append(newEnumColumns, column.Name)
}
}
var oldPreloadingDays int
newPreloadingDays := column.Config.PreloadingDays
// preloading will be triggered if
// 1. this is a new column and PreloadingDays > 0
// 2. this is a old column and PreloadingDays > oldPreloadingDays
if columnID < len(oldColumns) {
oldPreloadingDays = oldColumns[columnID].Config.PreloadingDays
}
m.HostMemManager.TriggerPreload(tableName, columnID, oldPreloadingDays, newPreloadingDays)
}
}
tableSchema.Unlock()
for _, columnID := range columnsToDelete {
var shards []*TableShard
m.RLock()
for _, shard := range m.TableShards[tableName] {
shard.Users.Add(1)
shards = append(shards, shard)
}
m.RUnlock()
for _, shard := range shards {
// May block for extended amount of time during archiving
shard.DeleteColumn(columnID)
shard.Users.Done()
}
}
}
// handleEnumDictChange handles enum dict change event from metaStore for specific table and column.
func (m *memStoreImpl) handleEnumDictChange(tableName, columnName string, enumDictChangeEvents <-chan string, done chan<- struct{}) {
for newEnumCase := range enumDictChangeEvents {
m.applyEnumCase(tableName, columnName, newEnumCase)
}
close(done)
}
func (m *memStoreImpl) applyEnumCase(tableName, columnName string, newEnumCase string) {
m.RLock()
tableSchema, tableExist := m.TableSchemas[tableName]
if !tableExist {
m.RUnlock()
return
}
tableSchema.Lock()
m.RUnlock()
enumDict, columnExist := tableSchema.EnumDicts[columnName]
if !columnExist {
tableSchema.Unlock()
return
}
enumDict.Dict[newEnumCase] = len(enumDict.ReverseDict)
enumDict.ReverseDict = append(enumDict.ReverseDict, newEnumCase)
tableSchema.EnumDicts[columnName] = enumDict
tableSchema.Unlock()
}
| NewTableSchema | identifier_name |
schema.go | // Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memstore
import (
"encoding/json"
"sync"
"unsafe"
memCom "github.com/uber/aresdb/memstore/common"
"github.com/uber/aresdb/metastore"
metaCom "github.com/uber/aresdb/metastore/common"
"github.com/uber/aresdb/utils"
)
// TableSchema stores metadata of the table such as columns and primary keys.
// It also stores the dictionaries for enum columns.
type TableSchema struct {
sync.RWMutex `json:"-"`
// Main schema of the table. Mutable.
Schema metaCom.Table `json:"schema"`
// Maps from column names to their IDs. Mutable.
ColumnIDs map[string]int `json:"columnIDs"`
// Maps from enum column names to their case dictionaries. Mutable.
EnumDicts map[string]EnumDict `json:"enumDicts"`
// DataType for each column ordered by column ID. Mutable.
ValueTypeByColumn []memCom.DataType `json:"valueTypeByColumn"`
// Number of bytes in the primary key. Immutable.
PrimaryKeyBytes int `json:"primaryKeyBytes"`
// Types of each primary key column. Immutable.
PrimaryKeyColumnTypes []memCom.DataType `json:"primaryKeyColumnTypes"`
// Default values of each column. Mutable. Nil means default value is not set.
DefaultValues []*memCom.DataValue `json:"-"`
}
// EnumDict contains mapping from and to enum strings to numbers.
type EnumDict struct {
// Either 0x100 for small_enum, or 0x10000 for big_enum.
Capacity int `json:"capacity"`
Dict map[string]int `json:"dict"`
ReverseDict []string `json:"reverseDict"`
}
// NewTableSchema creates a new table schema object from metaStore table object,
// this does not set enum cases.
func NewTableSchema(table *metaCom.Table) *TableSchema {
tableSchema := &TableSchema{
Schema: *table,
ColumnIDs: make(map[string]int),
EnumDicts: make(map[string]EnumDict),
ValueTypeByColumn: make([]memCom.DataType, len(table.Columns)),
PrimaryKeyColumnTypes: make([]memCom.DataType, len(table.PrimaryKeyColumns)),
DefaultValues: make([]*memCom.DataValue, len(table.Columns)),
}
for id, column := range table.Columns {
if !column.Deleted {
tableSchema.ColumnIDs[column.Name] = id
}
tableSchema.ValueTypeByColumn[id] = memCom.DataTypeForColumn(column)
}
for i, columnID := range table.PrimaryKeyColumns {
columnType := tableSchema.ValueTypeByColumn[columnID]
tableSchema.PrimaryKeyColumnTypes[i] = columnType
dataBits := memCom.DataTypeBits(columnType)
if dataBits < 8 {
dataBits = 8
}
tableSchema.PrimaryKeyBytes += dataBits / 8
}
return tableSchema
}
// MarshalJSON marshals TableSchema into json.
func (t *TableSchema) MarshalJSON() ([]byte, error) {
// Avoid loop json.Marshal calls.
type alias TableSchema
t.RLock()
defer t.RUnlock()
return json.Marshal((*alias)(t))
}
// SetTable sets a updated table and update TableSchema,
// should acquire lock before calling.
func (t *TableSchema) SetTable(table *metaCom.Table) {
t.Schema = *table
for id, column := range table.Columns {
if !column.Deleted {
t.ColumnIDs[column.Name] = id
} else {
delete(t.ColumnIDs, column.Name)
}
if id >= len(t.ValueTypeByColumn) {
t.ValueTypeByColumn = append(t.ValueTypeByColumn, memCom.DataTypeForColumn(column))
}
if id >= len(t.DefaultValues) {
t.DefaultValues = append(t.DefaultValues, nil)
}
}
}
// SetDefaultValue parses the default value string if present and sets to TableSchema.
// Schema lock should be acquired and release by caller and enum dict should already be
// created/update before this function.
func (t *TableSchema) SetDefaultValue(columnID int) {
// Default values are already set.
if t.DefaultValues[columnID] != nil {
return
}
column := t.Schema.Columns[columnID]
defStrVal := column.DefaultValue
if defStrVal == nil || column.Deleted {
t.DefaultValues[columnID] = &memCom.NullDataValue
return
}
dataType := t.ValueTypeByColumn[columnID]
dataTypeName := memCom.DataTypeName[dataType]
val := memCom.DataValue{
Valid: true,
DataType: dataType,
}
if dataType == memCom.SmallEnum || dataType == memCom.BigEnum {
enumDict, ok := t.EnumDicts[column.Name]
if !ok {
// Should no happen since the enum dict should already be created.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot find EnumDict for column")
}
enumVal, ok := enumDict.Dict[*defStrVal]
if !ok {
// Should no happen since the enum value should already be created.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot find enum value for column")
}
if dataType == memCom.SmallEnum {
enumValUint8 := uint8(enumVal)
val.OtherVal = unsafe.Pointer(&enumValUint8)
} else {
enumValUint16 := uint16(enumVal)
val.OtherVal = unsafe.Pointer(&enumValUint16)
}
} else {
dataValue, err := memCom.ValueFromString(*defStrVal, dataType)
if err != nil {
// Should not happen since the string value is already validated by schema handler.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot parse default value")
}
if dataType == memCom.Bool {
val.IsBool = true
val.BoolVal = dataValue.BoolVal
} else {
val.OtherVal = dataValue.OtherVal
}
}
val.CmpFunc = memCom.GetCompareFunc(dataType)
t.DefaultValues[columnID] = &val
return
}
// createEnumDict creates the enum dictionary for the specified column with the
// specified initial cases, and attaches it to TableSchema object.
// Caller should acquire the schema lock before calling this function.
func (t *TableSchema) createEnumDict(columnName string, enumCases []string) {
columnID := t.ColumnIDs[columnName]
dataType := t.ValueTypeByColumn[columnID]
enumCapacity := 1 << uint(memCom.DataTypeBits(dataType))
enumDict := map[string]int{}
for id, enumCase := range enumCases {
enumDict[enumCase] = id
}
t.EnumDicts[columnName] = EnumDict{
Capacity: enumCapacity,
Dict: enumDict,
ReverseDict: enumCases,
}
}
// GetValueTypeByColumn makes a copy of the ValueTypeByColumn so callers don't have to hold a read
// lock to access it.
func (t *TableSchema) GetValueTypeByColumn() []memCom.DataType {
t.RLock()
defer t.RUnlock()
return t.ValueTypeByColumn
}
// GetPrimaryKeyColumns makes a copy of the Schema.PrimaryKeyColumns so callers don't have to hold
// a read lock to access it.
func (t *TableSchema) GetPrimaryKeyColumns() []int {
t.RLock()
defer t.RUnlock()
return t.Schema.PrimaryKeyColumns
}
// GetColumnDeletions returns a boolean slice that indicates whether a column has been deleted. Callers
// need to hold a read lock.
func (t *TableSchema) GetColumnDeletions() []bool {
deletedByColumn := make([]bool, len(t.Schema.Columns))
for columnID, column := range t.Schema.Columns {
deletedByColumn[columnID] = column.Deleted
}
return deletedByColumn
}
// GetColumnIfNonNilDefault returns a boolean slice that indicates whether a column has non nil default value. Callers
// need to hold a read lock.
func (t *TableSchema) GetColumnIfNonNilDefault() []bool |
// GetArchivingSortColumns makes a copy of the Schema.ArchivingSortColumns so
// callers don't have to hold a read lock to access it.
func (t *TableSchema) GetArchivingSortColumns() []int {
t.RLock()
defer t.RUnlock()
return t.Schema.ArchivingSortColumns
}
// FetchSchema fetches schema from metaStore and updates in-memory copy of table schema,
// and set up watch channels for metaStore schema changes, used for bootstrapping mem store.
func (m *memStoreImpl) FetchSchema() error {
tables, err := m.metaStore.ListTables()
if err != nil {
return utils.StackError(err, "Failed to list tables from meta")
}
for _, tableName := range tables {
err := m.fetchTable(tableName)
if err != nil {
return err
}
}
// watch table addition/modification
tableSchemaChangeEvents, done, err := m.metaStore.WatchTableSchemaEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableSchemaChange(tableSchemaChangeEvents, done)
// watch table deletion
tableListChangeEvents, done, err := m.metaStore.WatchTableListEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableListChange(tableListChangeEvents, done)
// watch enum cases appending
m.RLock()
for _, tableSchema := range m.TableSchemas {
for columnName, enumCases := range tableSchema.EnumDicts {
err := m.watchEnumCases(tableSchema.Schema.Name, columnName, len(enumCases.ReverseDict))
if err != nil {
return err
}
}
}
m.RUnlock()
return nil
}
func (m *memStoreImpl) fetchTable(tableName string) error {
table, err := m.metaStore.GetTable(tableName)
if err != nil {
if err != metastore.ErrTableDoesNotExist {
return utils.StackError(err, "Failed to get table schema for table %s from meta", tableName)
}
} else {
tableSchema := NewTableSchema(table)
for columnID, column := range table.Columns {
if !column.Deleted {
if column.IsEnumColumn() {
enumCases, err := m.metaStore.GetEnumDict(tableName, column.Name)
if err != nil {
if err != metastore.ErrTableDoesNotExist && err != metastore.ErrColumnDoesNotExist {
return utils.StackError(err, "Failed to fetch enum cases for table: %s, column: %s", tableName, column.Name)
}
} else {
tableSchema.createEnumDict(column.Name, enumCases)
}
}
}
tableSchema.SetDefaultValue(columnID)
}
m.Lock()
m.TableSchemas[tableName] = tableSchema
m.Unlock()
}
return nil
}
// watch enumCases will setup watch channels for each enum column.
func (m *memStoreImpl) watchEnumCases(tableName, columnName string, startCase int) error {
enumDictChangeEvents, done, err := m.metaStore.WatchEnumDictEvents(tableName, columnName, startCase)
if err != nil {
if err != metastore.ErrTableDoesNotExist && err != metastore.ErrColumnDoesNotExist {
return utils.StackError(err, "Failed to watch enum case events")
}
} else {
go m.handleEnumDictChange(tableName, columnName, enumDictChangeEvents, done)
}
return nil
}
// handleTableListChange handles table deletion events from metaStore.
func (m *memStoreImpl) handleTableListChange(tableListChangeEvents <-chan []string, done chan<- struct{}) {
for newTableList := range tableListChangeEvents {
m.applyTableList(newTableList)
done <- struct{}{}
}
close(done)
}
func (m *memStoreImpl) applyTableList(newTableList []string) {
m.Lock()
for tableName, tableSchema := range m.TableSchemas {
if utils.IndexOfStr(newTableList, tableName) < 0 {
// detach shards and schema from map
// to prevent new usage
tableShards := m.TableShards[tableName]
delete(m.TableSchemas, tableName)
delete(m.TableShards, tableName)
// only one table deletion at a time
m.Unlock()
for shardID, shard := range tableShards {
shard.Destruct()
m.diskStore.DeleteTableShard(tableName, shardID)
}
m.scheduler.DeleteTable(tableName, tableSchema.Schema.IsFactTable)
return
}
}
m.Unlock()
}
// handleTableSchemaChange handles table schema change event from metaStore including new table schema.
func (m *memStoreImpl) handleTableSchemaChange(tableSchemaChangeEvents <-chan *metaCom.Table, done chan<- struct{}) {
for table := range tableSchemaChangeEvents {
m.applyTableSchema(table)
done <- struct{}{}
}
close(done)
}
func (m *memStoreImpl) applyTableSchema(newTable *metaCom.Table) {
tableName := newTable.Name
newEnumColumns := []string{}
// default start watching from first enumCase
startEnumID := 0
defer func() {
for _, column := range newEnumColumns {
err := m.watchEnumCases(tableName, column, startEnumID)
if err != nil {
utils.GetLogger().With(
"error", err.Error(),
"table", tableName,
"column", column).
Panic("Failed to watch enum dict events")
}
}
}()
m.Lock()
tableSchema, tableExist := m.TableSchemas[tableName]
// new table
if !tableExist {
tableSchema = NewTableSchema(newTable)
for columnID, column := range newTable.Columns {
if !column.Deleted {
if column.IsEnumColumn() {
var enumCases []string
if column.DefaultValue != nil {
enumCases = append(enumCases, *column.DefaultValue)
// default value is already appended, start watching from 1
startEnumID = 1
}
tableSchema.createEnumDict(column.Name, enumCases)
newEnumColumns = append(newEnumColumns, column.Name)
}
}
tableSchema.SetDefaultValue(columnID)
}
m.TableSchemas[newTable.Name] = tableSchema
m.Unlock()
return
}
m.Unlock()
var columnsToDelete []int
tableSchema.Lock()
oldColumns := tableSchema.Schema.Columns
tableSchema.SetTable(newTable)
for columnID, column := range newTable.Columns {
tableSchema.SetDefaultValue(columnID)
if column.Deleted {
if columnID < len(oldColumns) && !oldColumns[columnID].Deleted { // new deletions only
delete(tableSchema.EnumDicts, column.Name)
columnsToDelete = append(columnsToDelete, columnID)
}
} else {
if column.IsEnumColumn() {
_, exist := tableSchema.EnumDicts[column.Name]
if !exist {
var enumCases []string
if column.DefaultValue != nil {
enumCases = append(enumCases, *column.DefaultValue)
// default value is already appended, start watching from 1
startEnumID = 1
}
tableSchema.createEnumDict(column.Name, enumCases)
newEnumColumns = append(newEnumColumns, column.Name)
}
}
var oldPreloadingDays int
newPreloadingDays := column.Config.PreloadingDays
// preloading will be triggered if
// 1. this is a new column and PreloadingDays > 0
// 2. this is a old column and PreloadingDays > oldPreloadingDays
if columnID < len(oldColumns) {
oldPreloadingDays = oldColumns[columnID].Config.PreloadingDays
}
m.HostMemManager.TriggerPreload(tableName, columnID, oldPreloadingDays, newPreloadingDays)
}
}
tableSchema.Unlock()
for _, columnID := range columnsToDelete {
var shards []*TableShard
m.RLock()
for _, shard := range m.TableShards[tableName] {
shard.Users.Add(1)
shards = append(shards, shard)
}
m.RUnlock()
for _, shard := range shards {
// May block for extended amount of time during archiving
shard.DeleteColumn(columnID)
shard.Users.Done()
}
}
}
// handleEnumDictChange handles enum dict change event from metaStore for specific table and column.
func (m *memStoreImpl) handleEnumDictChange(tableName, columnName string, enumDictChangeEvents <-chan string, done chan<- struct{}) {
for newEnumCase := range enumDictChangeEvents {
m.applyEnumCase(tableName, columnName, newEnumCase)
}
close(done)
}
func (m *memStoreImpl) applyEnumCase(tableName, columnName string, newEnumCase string) {
m.RLock()
tableSchema, tableExist := m.TableSchemas[tableName]
if !tableExist {
m.RUnlock()
return
}
tableSchema.Lock()
m.RUnlock()
enumDict, columnExist := tableSchema.EnumDicts[columnName]
if !columnExist {
tableSchema.Unlock()
return
}
enumDict.Dict[newEnumCase] = len(enumDict.ReverseDict)
enumDict.ReverseDict = append(enumDict.ReverseDict, newEnumCase)
tableSchema.EnumDicts[columnName] = enumDict
tableSchema.Unlock()
}
| {
nonNilDefaultByColumn := make([]bool, len(t.Schema.Columns))
for columnID, column := range t.Schema.Columns {
nonNilDefaultByColumn[columnID] = column.DefaultValue != nil
}
return nonNilDefaultByColumn
} | identifier_body |
util.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains common utilities and functions. Based on struct2depth"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from absl import logging
import os
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import cv2
gfile = tf.gfile
CMAP_DEFAULT = 'plasma'
# Defines the cropping that is applied to the Cityscapes dataset with respect to
# the original raw input resolution.
CITYSCAPES_CROP = [256, 768, 192, 1856]
def crop_cityscapes(im, resize=None):
ymin, ymax, xmin, xmax = CITYSCAPES_CROP
im = im[ymin:ymax, xmin:xmax]
if resize is not None:
im = cv2.resize(im, resize)
return im
def gray2rgb(im, cmap=CMAP_DEFAULT):
cmap = plt.get_cmap(cmap)
result_img = cmap(im.astype(np.float32))
if result_img.shape[2] > 3:
result_img = np.delete(result_img, 3, 2)
return result_img
def load_image(img_file, resize=None, interpolation='linear', seg_image=False, transpose=False):
"""Load image from disk. Output value range: [0,1]."""
# im_data = np.fromstring(gfile.Open(img_file).read(), np.uint8) # original
# Ref: https://stackoverflow.com/questions/42339876/error-unicodedecodeerror-utf-8
# -codec-cant-decode-byte-0xff-in-position-0-in/48556203
im_data = np.fromstring(gfile.Open(img_file, 'rb').read(), np.uint8)
im = cv2.imdecode(im_data, cv2.IMREAD_COLOR)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
if transpose:
im = np.transpose(im, (1, 0, 2))
if resize and resize != im.shape[:2]:
ip = cv2.INTER_LINEAR if interpolation == 'linear' else cv2.INTER_NEAREST
im = cv2.resize(im, resize, interpolation=ip)
if seg_image:
# For segmented image, load as uint8
return np.array(im, dtype=np.uint8)
else:
return np.array(im, dtype=np.float32) / 255.0
def save_image(img_file, im, file_extension):
"""Save image from disk. Expected input value range: [0,1]."""
im = (im * 255.0).astype(np.uint8)
with gfile.Open(img_file, 'w') as f:
im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
_, im_data = cv2.imencode('.%s' % file_extension, im)
f.write(im_data.tostring())
def normalize_depth_for_display(depth, pc=95, crop_percent=0, normalizer=None,
cmap=CMAP_DEFAULT):
"""Converts a depth map to an RGB image."""
# Convert to disparity.
disp = 1.0 / (depth + 1e-6)
if normalizer is not None:
disp /= normalizer
else:
disp /= (np.percentile(disp, pc) + 1e-6)
disp = np.clip(disp, 0, 1)
disp = gray2rgb(disp, cmap=cmap)
keep_h = int(disp.shape[0] * (1 - crop_percent))
disp = disp[:keep_h]
return disp
def get_seq_start_end(target_index, seq_length, sample_every=1):
"""Returns absolute seq start and end indices for a given target frame."""
half_offset = int((seq_length - 1) / 2) * sample_every
end_index = target_index + half_offset
start_index = end_index - (seq_length - 1) * sample_every
return start_index, end_index
def get_seq_middle(seq_length):
"""Returns relative index for the middle frame in sequence."""
half_offset = int((seq_length - 1) / 2)
return seq_length - 1 - half_offset
def info(obj):
"""Return info on shape and dtype of a numpy array or TensorFlow tensor."""
if obj is None:
return 'None.'
elif isinstance(obj, list):
if obj:
return 'List of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty list.'
elif isinstance(obj, tuple):
if obj:
return 'Tuple of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty tuple.'
else:
if is_a_numpy_array(obj):
return 'Array with shape: %s, dtype: %s' % (obj.shape, obj.dtype)
else:
return str(obj)
def is_a_numpy_array(obj):
"""Returns true if obj is a numpy array."""
return type(obj).__module__ == np.__name__
def count_parameters(also_print=True):
"""Count the number of parameters in the model.
Args:
also_print: Boolean. If True also print the numbers.
Returns:
The total number of parameters.
"""
total = 0
if also_print:
logging.info('Model Parameters:')
for (_, v) in get_vars_to_save_and_restore().items():
shape = v.get_shape()
if also_print:
logging.info('%s %s: %s', v.op.name, shape,
format_number(shape.num_elements()))
total += shape.num_elements()
if also_print:
logging.info('Total: %s', format_number(total))
return total
def get_vars_to_save_and_restore(ckpt=None):
"""Returns list of variables that should be saved/restored.
Args:
ckpt: Path to existing checkpoint. If present, returns only the subset of
variables that exist in given checkpoint.
Returns:
List of all variables that need to be saved/restored.
"""
model_vars = tf.trainable_variables()
# Add batchnorm variables.
bn_vars = [v for v in tf.global_variables()
if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name or
'mu' in v.op.name or 'sigma' in v.op.name or
'global_scale_var' in v.op.name]
model_vars.extend(bn_vars)
model_vars = sorted(model_vars, key=lambda x: x.op.name)
mapping = {}
if ckpt is not None:
ckpt_var = tf.contrib.framework.list_variables(ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var]
ckpt_var_shapes = [shape for (unused_name, shape) in ckpt_var]
not_loaded = list(ckpt_var_names)
for v in model_vars:
if v.op.name not in ckpt_var_names:
# For backward compatibility, try additional matching.
v_additional_name = v.op.name.replace('egomotion_prediction/', '')
if v_additional_name in ckpt_var_names:
# Check if shapes match.
ind = ckpt_var_names.index(v_additional_name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v_additional_name] = v
not_loaded.remove(v_additional_name)
continue
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
logging.warning('Did not find var %s in checkpoint: %s', v.op.name,
os.path.basename(ckpt))
else:
# Check if shapes match.
ind = ckpt_var_names.index(v.op.name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v.op.name] = v
not_loaded.remove(v.op.name)
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
if not_loaded:
logging.warning('The following variables in the checkpoint were not loaded:')
for varname_not_loaded in not_loaded:
logging.info('%s', varname_not_loaded)
else: # just get model vars.
for v in model_vars:
mapping[v.op.name] = v
return mapping
def get_imagenet_vars_to_restore(imagenet_ckpt):
"""Returns dict of variables to restore from ImageNet-checkpoint."""
vars_to_restore_imagenet = {}
ckpt_var_names = tf.contrib.framework.list_variables(imagenet_ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var_names]
model_vars = tf.global_variables()
for v in model_vars:
if 'global_step' in v.op.name:
continue
mvname_noprefix = v.op.name.replace('depth_prediction/', '')
mvname_noprefix = mvname_noprefix.replace('moving_mean', 'mu')
mvname_noprefix = mvname_noprefix.replace('moving_variance', 'sigma')
if mvname_noprefix in ckpt_var_names:
vars_to_restore_imagenet[mvname_noprefix] = v
else:
logging.info('The following variable will not be restored from '
'pretrained ImageNet-checkpoint: %s', mvname_noprefix)
return vars_to_restore_imagenet
def format_number(n):
"""Formats number with thousands commas."""
# locale.setlocale(locale.LC_ALL, 'en_US') # commented by me
# return locale.format('%d', n, grouping=True)
return n
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
def read_text_lines(filepath):
with tf.gfile.Open(filepath, 'r') as f:
lines = f.readlines()
lines = [l.rstrip() for l in lines]
return lines
def save_flags(FLAGS, save_path):
import json
check_path(save_path)
save_path = os.path.join(save_path, 'flags.json')
with open(save_path, 'w') as f:
json.dump(FLAGS.flag_values_dict(), f, indent=4, sort_keys=False)
def check_path(path):
if not os.path.exists(path):
os.makedirs(path)
def save_command(save_path):
check_path(save_path)
import sys
command = sys.argv
save_file = os.path.join(save_path, 'command.txt')
with open(save_file, 'w') as f:
f.write(' '.join(command))
def make_intrinsics_matrix(fx, fy, cx, cy):
r1 = np.stack([fx, 0, cx])
r2 = np.stack([0, fy, cy])
r3 = np.array([0., 0., 1.])
intrinsics = np.stack([r1, r2, r3])
return intrinsics
def get_multi_scale_intrinsics(intrinsics, num_scales):
"""Returns multiple intrinsic matrices for different scales."""
intrinsics_multi_scale = []
# Scale the intrinsics accordingly for each scale
for s in range(num_scales):
fx = intrinsics[0, 0] / (2 ** s)
fy = intrinsics[1, 1] / (2 ** s)
cx = intrinsics[0, 2] / (2 ** s)
cy = intrinsics[1, 2] / (2 ** s)
intrinsics_multi_scale.append(make_intrinsics_matrix(fx, fy, cx, cy))
intrinsics_multi_scale = np.stack(intrinsics_multi_scale) # [num_scales, 3, 3]
return intrinsics_multi_scale
def pack_pred_depths(pred_dir, test_file):
|
# Depth evaluation utils
# Mostly based on the code written by Clement Godard:
# https://github.com/mrharicot/monodepth/blob/master/utils/evaluation_utils.py
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
# EIGEN split
def read_file_data(files, data_root):
gt_files = []
gt_calib = []
im_sizes = []
im_files = []
cams = []
num_probs = 0
for filename in files:
filename = filename.split()[0]
splits = filename.split('/')
# camera_id = filename[-1] # 2 is left, 3 is right
date = splits[0]
im_id = splits[4][:10]
file_root = '{}/{}'
im = filename
vel = '{}/{}/velodyne_points/data/{}.bin'.format(splits[0], splits[1], im_id)
if os.path.isfile(data_root + im):
gt_files.append(data_root + vel)
gt_calib.append(data_root + date + '/')
im_sizes.append(cv2.imread(data_root + im).shape[:2])
im_files.append(data_root + im)
cams.append(2)
else:
num_probs += 1
print('{} missing'.format(data_root + im))
# print(num_probs, 'files missing')
return gt_files, gt_calib, im_sizes, im_files, cams
def load_velodyne_points(file_name):
# adapted from https://github.com/hunse/kitti
points = np.fromfile(file_name, dtype=np.float32).reshape(-1, 4)
points[:, 3] = 1.0 # homogeneous
return points
def read_calib_file(path):
# taken from https://github.com/hunse/kitti
float_chars = set("0123456789.e+- ")
data = {}
with open(path, 'r') as f:
for line in f.readlines():
key, value = line.split(':', 1)
value = value.strip()
data[key] = value
if float_chars.issuperset(value):
# try to cast to float array
try:
data[key] = np.array(list(map(float, value.split(' '))))
except ValueError:
# casting error: data[key] already eq. value, so pass
pass
return data
def get_focal_length_baseline(calib_dir, cam=2):
cam2cam = read_calib_file(calib_dir + 'calib_cam_to_cam.txt')
P2_rect = cam2cam['P_rect_02'].reshape(3, 4)
P3_rect = cam2cam['P_rect_03'].reshape(3, 4)
# cam 2 is left of camera 0 -6cm
# cam 3 is to the right +54cm
b2 = P2_rect[0, 3] / -P2_rect[0, 0]
b3 = P3_rect[0, 3] / -P3_rect[0, 0]
baseline = b3 - b2
if cam == 2:
focal_length = P2_rect[0, 0]
elif cam == 3:
focal_length = P3_rect[0, 0]
return focal_length, baseline
def sub2ind(matrixSize, rowSub, colSub):
m, n = matrixSize
return rowSub * (n - 1) + colSub - 1
def generate_depth_map(calib_dir, velo_file_name, im_shape, cam=2, interp=False, vel_depth=False):
# load calibration files
cam2cam = read_calib_file(calib_dir + 'calib_cam_to_cam.txt')
velo2cam = read_calib_file(calib_dir + 'calib_velo_to_cam.txt')
velo2cam = np.hstack((velo2cam['R'].reshape(3, 3), velo2cam['T'][..., np.newaxis]))
velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))
# compute projection matrix velodyne->image plane
R_cam2rect = np.eye(4)
R_cam2rect[:3, :3] = cam2cam['R_rect_00'].reshape(3, 3)
P_rect = cam2cam['P_rect_0' + str(cam)].reshape(3, 4)
P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)
# load velodyne points and remove all behind image plane (approximation)
# each row of the velodyne data is forward, left, up, reflectance
velo = load_velodyne_points(velo_file_name)
velo = velo[velo[:, 0] >= 0, :]
# project the points to the camera
velo_pts_im = np.dot(P_velo2im, velo.T).T
velo_pts_im[:, :2] = velo_pts_im[:, :2] / velo_pts_im[:, 2][..., np.newaxis]
if vel_depth:
velo_pts_im[:, 2] = velo[:, 0]
# check if in bounds
# use minus 1 to get the exact same value as KITTI matlab code
velo_pts_im[:, 0] = np.round(velo_pts_im[:, 0]) - 1
velo_pts_im[:, 1] = np.round(velo_pts_im[:, 1]) - 1
val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0)
val_inds = val_inds & (velo_pts_im[:, 0] < im_shape[1]) & (velo_pts_im[:, 1] < im_shape[0])
velo_pts_im = velo_pts_im[val_inds, :]
# project to image
depth = np.zeros(im_shape)
depth[velo_pts_im[:, 1].astype(np.int), velo_pts_im[:, 0].astype(np.int)] = velo_pts_im[:, 2]
# find the duplicate points and choose the closest depth
inds = sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0])
dupe_inds = [item for item, count in Counter(inds).items() if count > 1]
for dd in dupe_inds:
pts = np.where(inds == dd)[0]
x_loc = int(velo_pts_im[pts[0], 0])
y_loc = int(velo_pts_im[pts[0], 1])
depth[y_loc, x_loc] = velo_pts_im[pts, 2].min()
depth[depth < 0] = 0
return depth
| """Pack depth predictions as a single .npy file"""
test_images = read_text_lines(test_file)
save_name = 'pred_depth.npy'
output_file = os.path.join(pred_dir, save_name)
img_height = 128
img_width = 416
all_pred = np.zeros((len(test_images), img_height, img_width))
for i, img_path in enumerate(test_images):
npy_path = os.path.join(pred_dir, img_path.replace('png', 'npy'))
depth = np.load(npy_path)
all_pred[i] = np.squeeze(depth)
np.save(output_file, all_pred) | identifier_body |
util.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains common utilities and functions. Based on struct2depth"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from absl import logging
import os
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import cv2
gfile = tf.gfile
CMAP_DEFAULT = 'plasma'
# Defines the cropping that is applied to the Cityscapes dataset with respect to
# the original raw input resolution.
CITYSCAPES_CROP = [256, 768, 192, 1856]
def crop_cityscapes(im, resize=None):
ymin, ymax, xmin, xmax = CITYSCAPES_CROP
im = im[ymin:ymax, xmin:xmax]
if resize is not None:
im = cv2.resize(im, resize)
return im
def gray2rgb(im, cmap=CMAP_DEFAULT):
cmap = plt.get_cmap(cmap)
result_img = cmap(im.astype(np.float32))
if result_img.shape[2] > 3:
result_img = np.delete(result_img, 3, 2)
return result_img
def load_image(img_file, resize=None, interpolation='linear', seg_image=False, transpose=False):
"""Load image from disk. Output value range: [0,1]."""
# im_data = np.fromstring(gfile.Open(img_file).read(), np.uint8) # original
# Ref: https://stackoverflow.com/questions/42339876/error-unicodedecodeerror-utf-8
# -codec-cant-decode-byte-0xff-in-position-0-in/48556203
im_data = np.fromstring(gfile.Open(img_file, 'rb').read(), np.uint8)
im = cv2.imdecode(im_data, cv2.IMREAD_COLOR)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
if transpose:
im = np.transpose(im, (1, 0, 2))
if resize and resize != im.shape[:2]:
ip = cv2.INTER_LINEAR if interpolation == 'linear' else cv2.INTER_NEAREST
im = cv2.resize(im, resize, interpolation=ip)
if seg_image:
# For segmented image, load as uint8
return np.array(im, dtype=np.uint8)
else:
return np.array(im, dtype=np.float32) / 255.0
def save_image(img_file, im, file_extension):
"""Save image from disk. Expected input value range: [0,1]."""
im = (im * 255.0).astype(np.uint8)
with gfile.Open(img_file, 'w') as f:
im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
_, im_data = cv2.imencode('.%s' % file_extension, im)
f.write(im_data.tostring())
def normalize_depth_for_display(depth, pc=95, crop_percent=0, normalizer=None,
cmap=CMAP_DEFAULT):
"""Converts a depth map to an RGB image."""
# Convert to disparity.
disp = 1.0 / (depth + 1e-6)
if normalizer is not None:
disp /= normalizer
else:
disp /= (np.percentile(disp, pc) + 1e-6)
disp = np.clip(disp, 0, 1)
disp = gray2rgb(disp, cmap=cmap)
keep_h = int(disp.shape[0] * (1 - crop_percent))
disp = disp[:keep_h]
return disp
def get_seq_start_end(target_index, seq_length, sample_every=1):
"""Returns absolute seq start and end indices for a given target frame."""
half_offset = int((seq_length - 1) / 2) * sample_every
end_index = target_index + half_offset
start_index = end_index - (seq_length - 1) * sample_every
return start_index, end_index
def get_seq_middle(seq_length):
"""Returns relative index for the middle frame in sequence."""
half_offset = int((seq_length - 1) / 2)
return seq_length - 1 - half_offset
def info(obj):
"""Return info on shape and dtype of a numpy array or TensorFlow tensor."""
if obj is None:
return 'None.'
elif isinstance(obj, list):
if obj:
return 'List of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty list.'
elif isinstance(obj, tuple):
if obj:
return 'Tuple of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty tuple.'
else:
if is_a_numpy_array(obj):
return 'Array with shape: %s, dtype: %s' % (obj.shape, obj.dtype)
else:
return str(obj)
def is_a_numpy_array(obj):
"""Returns true if obj is a numpy array."""
return type(obj).__module__ == np.__name__
def count_parameters(also_print=True):
"""Count the number of parameters in the model.
Args:
also_print: Boolean. If True also print the numbers.
Returns:
The total number of parameters.
"""
total = 0
if also_print:
logging.info('Model Parameters:')
for (_, v) in get_vars_to_save_and_restore().items():
shape = v.get_shape()
if also_print:
logging.info('%s %s: %s', v.op.name, shape,
format_number(shape.num_elements()))
total += shape.num_elements()
if also_print:
logging.info('Total: %s', format_number(total))
return total
def get_vars_to_save_and_restore(ckpt=None):
"""Returns list of variables that should be saved/restored.
Args:
ckpt: Path to existing checkpoint. If present, returns only the subset of
variables that exist in given checkpoint.
Returns:
List of all variables that need to be saved/restored.
"""
model_vars = tf.trainable_variables()
# Add batchnorm variables.
bn_vars = [v for v in tf.global_variables()
if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name or
'mu' in v.op.name or 'sigma' in v.op.name or
'global_scale_var' in v.op.name]
model_vars.extend(bn_vars)
model_vars = sorted(model_vars, key=lambda x: x.op.name)
mapping = {}
if ckpt is not None:
ckpt_var = tf.contrib.framework.list_variables(ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var]
ckpt_var_shapes = [shape for (unused_name, shape) in ckpt_var]
not_loaded = list(ckpt_var_names)
for v in model_vars:
if v.op.name not in ckpt_var_names:
# For backward compatibility, try additional matching.
v_additional_name = v.op.name.replace('egomotion_prediction/', '')
if v_additional_name in ckpt_var_names:
# Check if shapes match.
ind = ckpt_var_names.index(v_additional_name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v_additional_name] = v
not_loaded.remove(v_additional_name)
continue
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
logging.warning('Did not find var %s in checkpoint: %s', v.op.name,
os.path.basename(ckpt))
else:
# Check if shapes match.
ind = ckpt_var_names.index(v.op.name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v.op.name] = v
not_loaded.remove(v.op.name)
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
if not_loaded:
logging.warning('The following variables in the checkpoint were not loaded:')
for varname_not_loaded in not_loaded:
logging.info('%s', varname_not_loaded)
else: # just get model vars.
for v in model_vars:
mapping[v.op.name] = v
return mapping
def get_imagenet_vars_to_restore(imagenet_ckpt):
"""Returns dict of variables to restore from ImageNet-checkpoint."""
vars_to_restore_imagenet = {}
ckpt_var_names = tf.contrib.framework.list_variables(imagenet_ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var_names]
model_vars = tf.global_variables()
for v in model_vars:
if 'global_step' in v.op.name:
continue
mvname_noprefix = v.op.name.replace('depth_prediction/', '')
mvname_noprefix = mvname_noprefix.replace('moving_mean', 'mu')
mvname_noprefix = mvname_noprefix.replace('moving_variance', 'sigma')
if mvname_noprefix in ckpt_var_names:
vars_to_restore_imagenet[mvname_noprefix] = v
else:
logging.info('The following variable will not be restored from '
'pretrained ImageNet-checkpoint: %s', mvname_noprefix)
return vars_to_restore_imagenet
def format_number(n):
"""Formats number with thousands commas."""
# locale.setlocale(locale.LC_ALL, 'en_US') # commented by me
# return locale.format('%d', n, grouping=True)
return n
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
def read_text_lines(filepath):
with tf.gfile.Open(filepath, 'r') as f:
lines = f.readlines()
lines = [l.rstrip() for l in lines]
return lines
def save_flags(FLAGS, save_path):
import json
check_path(save_path)
save_path = os.path.join(save_path, 'flags.json')
with open(save_path, 'w') as f:
json.dump(FLAGS.flag_values_dict(), f, indent=4, sort_keys=False)
def check_path(path):
if not os.path.exists(path):
os.makedirs(path)
def save_command(save_path):
check_path(save_path)
import sys
command = sys.argv
save_file = os.path.join(save_path, 'command.txt')
with open(save_file, 'w') as f:
f.write(' '.join(command))
def make_intrinsics_matrix(fx, fy, cx, cy):
r1 = np.stack([fx, 0, cx])
r2 = np.stack([0, fy, cy])
r3 = np.array([0., 0., 1.])
intrinsics = np.stack([r1, r2, r3])
return intrinsics
def get_multi_scale_intrinsics(intrinsics, num_scales):
"""Returns multiple intrinsic matrices for different scales."""
intrinsics_multi_scale = []
# Scale the intrinsics accordingly for each scale
for s in range(num_scales):
fx = intrinsics[0, 0] / (2 ** s)
fy = intrinsics[1, 1] / (2 ** s)
cx = intrinsics[0, 2] / (2 ** s)
cy = intrinsics[1, 2] / (2 ** s)
intrinsics_multi_scale.append(make_intrinsics_matrix(fx, fy, cx, cy))
intrinsics_multi_scale = np.stack(intrinsics_multi_scale) # [num_scales, 3, 3]
return intrinsics_multi_scale
def pack_pred_depths(pred_dir, test_file):
"""Pack depth predictions as a single .npy file"""
test_images = read_text_lines(test_file)
save_name = 'pred_depth.npy'
output_file = os.path.join(pred_dir, save_name)
img_height = 128
img_width = 416
all_pred = np.zeros((len(test_images), img_height, img_width))
for i, img_path in enumerate(test_images):
npy_path = os.path.join(pred_dir, img_path.replace('png', 'npy'))
depth = np.load(npy_path)
all_pred[i] = np.squeeze(depth)
np.save(output_file, all_pred)
# Depth evaluation utils
# Mostly based on the code written by Clement Godard:
# https://github.com/mrharicot/monodepth/blob/master/utils/evaluation_utils.py
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
# EIGEN split
def read_file_data(files, data_root):
gt_files = []
gt_calib = []
im_sizes = []
im_files = []
cams = []
num_probs = 0
for filename in files:
filename = filename.split()[0]
splits = filename.split('/')
# camera_id = filename[-1] # 2 is left, 3 is right
date = splits[0]
im_id = splits[4][:10]
file_root = '{}/{}'
im = filename
vel = '{}/{}/velodyne_points/data/{}.bin'.format(splits[0], splits[1], im_id)
if os.path.isfile(data_root + im):
gt_files.append(data_root + vel)
gt_calib.append(data_root + date + '/')
im_sizes.append(cv2.imread(data_root + im).shape[:2])
im_files.append(data_root + im)
cams.append(2)
else:
num_probs += 1
print('{} missing'.format(data_root + im))
# print(num_probs, 'files missing')
return gt_files, gt_calib, im_sizes, im_files, cams
def load_velodyne_points(file_name):
# adapted from https://github.com/hunse/kitti
points = np.fromfile(file_name, dtype=np.float32).reshape(-1, 4)
points[:, 3] = 1.0 # homogeneous
return points
def | (path):
# taken from https://github.com/hunse/kitti
float_chars = set("0123456789.e+- ")
data = {}
with open(path, 'r') as f:
for line in f.readlines():
key, value = line.split(':', 1)
value = value.strip()
data[key] = value
if float_chars.issuperset(value):
# try to cast to float array
try:
data[key] = np.array(list(map(float, value.split(' '))))
except ValueError:
# casting error: data[key] already eq. value, so pass
pass
return data
def get_focal_length_baseline(calib_dir, cam=2):
cam2cam = read_calib_file(calib_dir + 'calib_cam_to_cam.txt')
P2_rect = cam2cam['P_rect_02'].reshape(3, 4)
P3_rect = cam2cam['P_rect_03'].reshape(3, 4)
# cam 2 is left of camera 0 -6cm
# cam 3 is to the right +54cm
b2 = P2_rect[0, 3] / -P2_rect[0, 0]
b3 = P3_rect[0, 3] / -P3_rect[0, 0]
baseline = b3 - b2
if cam == 2:
focal_length = P2_rect[0, 0]
elif cam == 3:
focal_length = P3_rect[0, 0]
return focal_length, baseline
def sub2ind(matrixSize, rowSub, colSub):
m, n = matrixSize
return rowSub * (n - 1) + colSub - 1
def generate_depth_map(calib_dir, velo_file_name, im_shape, cam=2, interp=False, vel_depth=False):
# load calibration files
cam2cam = read_calib_file(calib_dir + 'calib_cam_to_cam.txt')
velo2cam = read_calib_file(calib_dir + 'calib_velo_to_cam.txt')
velo2cam = np.hstack((velo2cam['R'].reshape(3, 3), velo2cam['T'][..., np.newaxis]))
velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))
# compute projection matrix velodyne->image plane
R_cam2rect = np.eye(4)
R_cam2rect[:3, :3] = cam2cam['R_rect_00'].reshape(3, 3)
P_rect = cam2cam['P_rect_0' + str(cam)].reshape(3, 4)
P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)
# load velodyne points and remove all behind image plane (approximation)
# each row of the velodyne data is forward, left, up, reflectance
velo = load_velodyne_points(velo_file_name)
velo = velo[velo[:, 0] >= 0, :]
# project the points to the camera
velo_pts_im = np.dot(P_velo2im, velo.T).T
velo_pts_im[:, :2] = velo_pts_im[:, :2] / velo_pts_im[:, 2][..., np.newaxis]
if vel_depth:
velo_pts_im[:, 2] = velo[:, 0]
# check if in bounds
# use minus 1 to get the exact same value as KITTI matlab code
velo_pts_im[:, 0] = np.round(velo_pts_im[:, 0]) - 1
velo_pts_im[:, 1] = np.round(velo_pts_im[:, 1]) - 1
val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0)
val_inds = val_inds & (velo_pts_im[:, 0] < im_shape[1]) & (velo_pts_im[:, 1] < im_shape[0])
velo_pts_im = velo_pts_im[val_inds, :]
# project to image
depth = np.zeros(im_shape)
depth[velo_pts_im[:, 1].astype(np.int), velo_pts_im[:, 0].astype(np.int)] = velo_pts_im[:, 2]
# find the duplicate points and choose the closest depth
inds = sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0])
dupe_inds = [item for item, count in Counter(inds).items() if count > 1]
for dd in dupe_inds:
pts = np.where(inds == dd)[0]
x_loc = int(velo_pts_im[pts[0], 0])
y_loc = int(velo_pts_im[pts[0], 1])
depth[y_loc, x_loc] = velo_pts_im[pts, 2].min()
depth[depth < 0] = 0
return depth
| read_calib_file | identifier_name |
util.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains common utilities and functions. Based on struct2depth"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from absl import logging
import os
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import cv2
gfile = tf.gfile
CMAP_DEFAULT = 'plasma'
# Defines the cropping that is applied to the Cityscapes dataset with respect to
# the original raw input resolution.
CITYSCAPES_CROP = [256, 768, 192, 1856]
def crop_cityscapes(im, resize=None):
ymin, ymax, xmin, xmax = CITYSCAPES_CROP
im = im[ymin:ymax, xmin:xmax]
if resize is not None:
im = cv2.resize(im, resize)
return im
def gray2rgb(im, cmap=CMAP_DEFAULT):
cmap = plt.get_cmap(cmap)
result_img = cmap(im.astype(np.float32))
if result_img.shape[2] > 3:
result_img = np.delete(result_img, 3, 2)
return result_img
def load_image(img_file, resize=None, interpolation='linear', seg_image=False, transpose=False):
"""Load image from disk. Output value range: [0,1]."""
# im_data = np.fromstring(gfile.Open(img_file).read(), np.uint8) # original
# Ref: https://stackoverflow.com/questions/42339876/error-unicodedecodeerror-utf-8
# -codec-cant-decode-byte-0xff-in-position-0-in/48556203
im_data = np.fromstring(gfile.Open(img_file, 'rb').read(), np.uint8)
im = cv2.imdecode(im_data, cv2.IMREAD_COLOR)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
if transpose:
im = np.transpose(im, (1, 0, 2))
if resize and resize != im.shape[:2]:
ip = cv2.INTER_LINEAR if interpolation == 'linear' else cv2.INTER_NEAREST
im = cv2.resize(im, resize, interpolation=ip)
if seg_image:
# For segmented image, load as uint8
return np.array(im, dtype=np.uint8)
else:
return np.array(im, dtype=np.float32) / 255.0
def save_image(img_file, im, file_extension):
"""Save image from disk. Expected input value range: [0,1]."""
im = (im * 255.0).astype(np.uint8)
with gfile.Open(img_file, 'w') as f:
im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
_, im_data = cv2.imencode('.%s' % file_extension, im)
f.write(im_data.tostring())
def normalize_depth_for_display(depth, pc=95, crop_percent=0, normalizer=None,
cmap=CMAP_DEFAULT):
"""Converts a depth map to an RGB image."""
# Convert to disparity.
disp = 1.0 / (depth + 1e-6)
if normalizer is not None:
disp /= normalizer
else:
disp /= (np.percentile(disp, pc) + 1e-6)
disp = np.clip(disp, 0, 1)
disp = gray2rgb(disp, cmap=cmap)
keep_h = int(disp.shape[0] * (1 - crop_percent))
disp = disp[:keep_h]
return disp
def get_seq_start_end(target_index, seq_length, sample_every=1):
"""Returns absolute seq start and end indices for a given target frame."""
half_offset = int((seq_length - 1) / 2) * sample_every
end_index = target_index + half_offset
start_index = end_index - (seq_length - 1) * sample_every
return start_index, end_index
def get_seq_middle(seq_length):
"""Returns relative index for the middle frame in sequence."""
half_offset = int((seq_length - 1) / 2)
return seq_length - 1 - half_offset
def info(obj):
"""Return info on shape and dtype of a numpy array or TensorFlow tensor."""
if obj is None:
return 'None.'
elif isinstance(obj, list):
if obj:
|
else:
return 'Empty list.'
elif isinstance(obj, tuple):
if obj:
return 'Tuple of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty tuple.'
else:
if is_a_numpy_array(obj):
return 'Array with shape: %s, dtype: %s' % (obj.shape, obj.dtype)
else:
return str(obj)
def is_a_numpy_array(obj):
"""Returns true if obj is a numpy array."""
return type(obj).__module__ == np.__name__
def count_parameters(also_print=True):
"""Count the number of parameters in the model.
Args:
also_print: Boolean. If True also print the numbers.
Returns:
The total number of parameters.
"""
total = 0
if also_print:
logging.info('Model Parameters:')
for (_, v) in get_vars_to_save_and_restore().items():
shape = v.get_shape()
if also_print:
logging.info('%s %s: %s', v.op.name, shape,
format_number(shape.num_elements()))
total += shape.num_elements()
if also_print:
logging.info('Total: %s', format_number(total))
return total
def get_vars_to_save_and_restore(ckpt=None):
"""Returns list of variables that should be saved/restored.
Args:
ckpt: Path to existing checkpoint. If present, returns only the subset of
variables that exist in given checkpoint.
Returns:
List of all variables that need to be saved/restored.
"""
model_vars = tf.trainable_variables()
# Add batchnorm variables.
bn_vars = [v for v in tf.global_variables()
if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name or
'mu' in v.op.name or 'sigma' in v.op.name or
'global_scale_var' in v.op.name]
model_vars.extend(bn_vars)
model_vars = sorted(model_vars, key=lambda x: x.op.name)
mapping = {}
if ckpt is not None:
ckpt_var = tf.contrib.framework.list_variables(ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var]
ckpt_var_shapes = [shape for (unused_name, shape) in ckpt_var]
not_loaded = list(ckpt_var_names)
for v in model_vars:
if v.op.name not in ckpt_var_names:
# For backward compatibility, try additional matching.
v_additional_name = v.op.name.replace('egomotion_prediction/', '')
if v_additional_name in ckpt_var_names:
# Check if shapes match.
ind = ckpt_var_names.index(v_additional_name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v_additional_name] = v
not_loaded.remove(v_additional_name)
continue
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
logging.warning('Did not find var %s in checkpoint: %s', v.op.name,
os.path.basename(ckpt))
else:
# Check if shapes match.
ind = ckpt_var_names.index(v.op.name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v.op.name] = v
not_loaded.remove(v.op.name)
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
if not_loaded:
logging.warning('The following variables in the checkpoint were not loaded:')
for varname_not_loaded in not_loaded:
logging.info('%s', varname_not_loaded)
else: # just get model vars.
for v in model_vars:
mapping[v.op.name] = v
return mapping
def get_imagenet_vars_to_restore(imagenet_ckpt):
"""Returns dict of variables to restore from ImageNet-checkpoint."""
vars_to_restore_imagenet = {}
ckpt_var_names = tf.contrib.framework.list_variables(imagenet_ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var_names]
model_vars = tf.global_variables()
for v in model_vars:
if 'global_step' in v.op.name:
continue
mvname_noprefix = v.op.name.replace('depth_prediction/', '')
mvname_noprefix = mvname_noprefix.replace('moving_mean', 'mu')
mvname_noprefix = mvname_noprefix.replace('moving_variance', 'sigma')
if mvname_noprefix in ckpt_var_names:
vars_to_restore_imagenet[mvname_noprefix] = v
else:
logging.info('The following variable will not be restored from '
'pretrained ImageNet-checkpoint: %s', mvname_noprefix)
return vars_to_restore_imagenet
def format_number(n):
"""Formats number with thousands commas."""
# locale.setlocale(locale.LC_ALL, 'en_US') # commented by me
# return locale.format('%d', n, grouping=True)
return n
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
def read_text_lines(filepath):
with tf.gfile.Open(filepath, 'r') as f:
lines = f.readlines()
lines = [l.rstrip() for l in lines]
return lines
def save_flags(FLAGS, save_path):
import json
check_path(save_path)
save_path = os.path.join(save_path, 'flags.json')
with open(save_path, 'w') as f:
json.dump(FLAGS.flag_values_dict(), f, indent=4, sort_keys=False)
def check_path(path):
if not os.path.exists(path):
os.makedirs(path)
def save_command(save_path):
check_path(save_path)
import sys
command = sys.argv
save_file = os.path.join(save_path, 'command.txt')
with open(save_file, 'w') as f:
f.write(' '.join(command))
def make_intrinsics_matrix(fx, fy, cx, cy):
r1 = np.stack([fx, 0, cx])
r2 = np.stack([0, fy, cy])
r3 = np.array([0., 0., 1.])
intrinsics = np.stack([r1, r2, r3])
return intrinsics
def get_multi_scale_intrinsics(intrinsics, num_scales):
"""Returns multiple intrinsic matrices for different scales."""
intrinsics_multi_scale = []
# Scale the intrinsics accordingly for each scale
for s in range(num_scales):
fx = intrinsics[0, 0] / (2 ** s)
fy = intrinsics[1, 1] / (2 ** s)
cx = intrinsics[0, 2] / (2 ** s)
cy = intrinsics[1, 2] / (2 ** s)
intrinsics_multi_scale.append(make_intrinsics_matrix(fx, fy, cx, cy))
intrinsics_multi_scale = np.stack(intrinsics_multi_scale) # [num_scales, 3, 3]
return intrinsics_multi_scale
def pack_pred_depths(pred_dir, test_file):
"""Pack depth predictions as a single .npy file"""
test_images = read_text_lines(test_file)
save_name = 'pred_depth.npy'
output_file = os.path.join(pred_dir, save_name)
img_height = 128
img_width = 416
all_pred = np.zeros((len(test_images), img_height, img_width))
for i, img_path in enumerate(test_images):
npy_path = os.path.join(pred_dir, img_path.replace('png', 'npy'))
depth = np.load(npy_path)
all_pred[i] = np.squeeze(depth)
np.save(output_file, all_pred)
# Depth evaluation utils
# Mostly based on the code written by Clement Godard:
# https://github.com/mrharicot/monodepth/blob/master/utils/evaluation_utils.py
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
# EIGEN split
def read_file_data(files, data_root):
gt_files = []
gt_calib = []
im_sizes = []
im_files = []
cams = []
num_probs = 0
for filename in files:
filename = filename.split()[0]
splits = filename.split('/')
# camera_id = filename[-1] # 2 is left, 3 is right
date = splits[0]
im_id = splits[4][:10]
file_root = '{}/{}'
im = filename
vel = '{}/{}/velodyne_points/data/{}.bin'.format(splits[0], splits[1], im_id)
if os.path.isfile(data_root + im):
gt_files.append(data_root + vel)
gt_calib.append(data_root + date + '/')
im_sizes.append(cv2.imread(data_root + im).shape[:2])
im_files.append(data_root + im)
cams.append(2)
else:
num_probs += 1
print('{} missing'.format(data_root + im))
# print(num_probs, 'files missing')
return gt_files, gt_calib, im_sizes, im_files, cams
def load_velodyne_points(file_name):
# adapted from https://github.com/hunse/kitti
points = np.fromfile(file_name, dtype=np.float32).reshape(-1, 4)
points[:, 3] = 1.0 # homogeneous
return points
def read_calib_file(path):
# taken from https://github.com/hunse/kitti
float_chars = set("0123456789.e+- ")
data = {}
with open(path, 'r') as f:
for line in f.readlines():
key, value = line.split(':', 1)
value = value.strip()
data[key] = value
if float_chars.issuperset(value):
# try to cast to float array
try:
data[key] = np.array(list(map(float, value.split(' '))))
except ValueError:
# casting error: data[key] already eq. value, so pass
pass
return data
def get_focal_length_baseline(calib_dir, cam=2):
cam2cam = read_calib_file(calib_dir + 'calib_cam_to_cam.txt')
P2_rect = cam2cam['P_rect_02'].reshape(3, 4)
P3_rect = cam2cam['P_rect_03'].reshape(3, 4)
# cam 2 is left of camera 0 -6cm
# cam 3 is to the right +54cm
b2 = P2_rect[0, 3] / -P2_rect[0, 0]
b3 = P3_rect[0, 3] / -P3_rect[0, 0]
baseline = b3 - b2
if cam == 2:
focal_length = P2_rect[0, 0]
elif cam == 3:
focal_length = P3_rect[0, 0]
return focal_length, baseline
def sub2ind(matrixSize, rowSub, colSub):
m, n = matrixSize
return rowSub * (n - 1) + colSub - 1
def generate_depth_map(calib_dir, velo_file_name, im_shape, cam=2, interp=False, vel_depth=False):
# load calibration files
cam2cam = read_calib_file(calib_dir + 'calib_cam_to_cam.txt')
velo2cam = read_calib_file(calib_dir + 'calib_velo_to_cam.txt')
velo2cam = np.hstack((velo2cam['R'].reshape(3, 3), velo2cam['T'][..., np.newaxis]))
velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))
# compute projection matrix velodyne->image plane
R_cam2rect = np.eye(4)
R_cam2rect[:3, :3] = cam2cam['R_rect_00'].reshape(3, 3)
P_rect = cam2cam['P_rect_0' + str(cam)].reshape(3, 4)
P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)
# load velodyne points and remove all behind image plane (approximation)
# each row of the velodyne data is forward, left, up, reflectance
velo = load_velodyne_points(velo_file_name)
velo = velo[velo[:, 0] >= 0, :]
# project the points to the camera
velo_pts_im = np.dot(P_velo2im, velo.T).T
velo_pts_im[:, :2] = velo_pts_im[:, :2] / velo_pts_im[:, 2][..., np.newaxis]
if vel_depth:
velo_pts_im[:, 2] = velo[:, 0]
# check if in bounds
# use minus 1 to get the exact same value as KITTI matlab code
velo_pts_im[:, 0] = np.round(velo_pts_im[:, 0]) - 1
velo_pts_im[:, 1] = np.round(velo_pts_im[:, 1]) - 1
val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0)
val_inds = val_inds & (velo_pts_im[:, 0] < im_shape[1]) & (velo_pts_im[:, 1] < im_shape[0])
velo_pts_im = velo_pts_im[val_inds, :]
# project to image
depth = np.zeros(im_shape)
depth[velo_pts_im[:, 1].astype(np.int), velo_pts_im[:, 0].astype(np.int)] = velo_pts_im[:, 2]
# find the duplicate points and choose the closest depth
inds = sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0])
dupe_inds = [item for item, count in Counter(inds).items() if count > 1]
for dd in dupe_inds:
pts = np.where(inds == dd)[0]
x_loc = int(velo_pts_im[pts[0], 0])
y_loc = int(velo_pts_im[pts[0], 1])
depth[y_loc, x_loc] = velo_pts_im[pts, 2].min()
depth[depth < 0] = 0
return depth
| return 'List of %d... %s' % (len(obj), info(obj[0])) | conditional_block |
util.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains common utilities and functions. Based on struct2depth"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from absl import logging
import os
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import cv2
gfile = tf.gfile
CMAP_DEFAULT = 'plasma'
# Defines the cropping that is applied to the Cityscapes dataset with respect to
# the original raw input resolution.
CITYSCAPES_CROP = [256, 768, 192, 1856]
def crop_cityscapes(im, resize=None):
ymin, ymax, xmin, xmax = CITYSCAPES_CROP
im = im[ymin:ymax, xmin:xmax]
if resize is not None:
im = cv2.resize(im, resize)
return im
def gray2rgb(im, cmap=CMAP_DEFAULT):
cmap = plt.get_cmap(cmap)
result_img = cmap(im.astype(np.float32))
if result_img.shape[2] > 3:
result_img = np.delete(result_img, 3, 2)
return result_img
def load_image(img_file, resize=None, interpolation='linear', seg_image=False, transpose=False):
"""Load image from disk. Output value range: [0,1]."""
# im_data = np.fromstring(gfile.Open(img_file).read(), np.uint8) # original
# Ref: https://stackoverflow.com/questions/42339876/error-unicodedecodeerror-utf-8
# -codec-cant-decode-byte-0xff-in-position-0-in/48556203
im_data = np.fromstring(gfile.Open(img_file, 'rb').read(), np.uint8)
im = cv2.imdecode(im_data, cv2.IMREAD_COLOR)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
if transpose:
im = np.transpose(im, (1, 0, 2))
if resize and resize != im.shape[:2]:
ip = cv2.INTER_LINEAR if interpolation == 'linear' else cv2.INTER_NEAREST
im = cv2.resize(im, resize, interpolation=ip)
if seg_image:
# For segmented image, load as uint8
return np.array(im, dtype=np.uint8)
else:
return np.array(im, dtype=np.float32) / 255.0
def save_image(img_file, im, file_extension):
"""Save image from disk. Expected input value range: [0,1]."""
im = (im * 255.0).astype(np.uint8)
with gfile.Open(img_file, 'w') as f:
im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
_, im_data = cv2.imencode('.%s' % file_extension, im)
f.write(im_data.tostring())
def normalize_depth_for_display(depth, pc=95, crop_percent=0, normalizer=None,
cmap=CMAP_DEFAULT):
"""Converts a depth map to an RGB image."""
# Convert to disparity.
disp = 1.0 / (depth + 1e-6)
if normalizer is not None:
disp /= normalizer
else:
disp /= (np.percentile(disp, pc) + 1e-6)
disp = np.clip(disp, 0, 1)
disp = gray2rgb(disp, cmap=cmap)
keep_h = int(disp.shape[0] * (1 - crop_percent))
disp = disp[:keep_h]
return disp
def get_seq_start_end(target_index, seq_length, sample_every=1):
"""Returns absolute seq start and end indices for a given target frame."""
half_offset = int((seq_length - 1) / 2) * sample_every
end_index = target_index + half_offset
start_index = end_index - (seq_length - 1) * sample_every
return start_index, end_index
def get_seq_middle(seq_length):
"""Returns relative index for the middle frame in sequence."""
half_offset = int((seq_length - 1) / 2)
return seq_length - 1 - half_offset
def info(obj):
"""Return info on shape and dtype of a numpy array or TensorFlow tensor."""
if obj is None:
return 'None.'
elif isinstance(obj, list):
if obj:
return 'List of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty list.'
elif isinstance(obj, tuple):
if obj:
return 'Tuple of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty tuple.'
else:
if is_a_numpy_array(obj):
return 'Array with shape: %s, dtype: %s' % (obj.shape, obj.dtype)
else:
return str(obj)
def is_a_numpy_array(obj):
"""Returns true if obj is a numpy array."""
return type(obj).__module__ == np.__name__
def count_parameters(also_print=True):
"""Count the number of parameters in the model.
Args:
also_print: Boolean. If True also print the numbers.
Returns:
The total number of parameters.
"""
total = 0
if also_print:
logging.info('Model Parameters:')
for (_, v) in get_vars_to_save_and_restore().items():
shape = v.get_shape()
if also_print:
logging.info('%s %s: %s', v.op.name, shape,
format_number(shape.num_elements()))
total += shape.num_elements()
if also_print:
logging.info('Total: %s', format_number(total))
return total
def get_vars_to_save_and_restore(ckpt=None):
"""Returns list of variables that should be saved/restored.
Args:
ckpt: Path to existing checkpoint. If present, returns only the subset of
variables that exist in given checkpoint.
Returns:
List of all variables that need to be saved/restored.
"""
model_vars = tf.trainable_variables()
# Add batchnorm variables.
bn_vars = [v for v in tf.global_variables()
if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name or
'mu' in v.op.name or 'sigma' in v.op.name or
'global_scale_var' in v.op.name]
model_vars.extend(bn_vars)
model_vars = sorted(model_vars, key=lambda x: x.op.name)
mapping = {}
if ckpt is not None:
ckpt_var = tf.contrib.framework.list_variables(ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var]
ckpt_var_shapes = [shape for (unused_name, shape) in ckpt_var]
not_loaded = list(ckpt_var_names)
for v in model_vars:
if v.op.name not in ckpt_var_names:
# For backward compatibility, try additional matching.
v_additional_name = v.op.name.replace('egomotion_prediction/', '')
if v_additional_name in ckpt_var_names:
# Check if shapes match.
ind = ckpt_var_names.index(v_additional_name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v_additional_name] = v
not_loaded.remove(v_additional_name)
continue
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
logging.warning('Did not find var %s in checkpoint: %s', v.op.name,
os.path.basename(ckpt))
else:
# Check if shapes match.
ind = ckpt_var_names.index(v.op.name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v.op.name] = v
not_loaded.remove(v.op.name)
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
if not_loaded:
logging.warning('The following variables in the checkpoint were not loaded:')
for varname_not_loaded in not_loaded:
logging.info('%s', varname_not_loaded)
else: # just get model vars.
for v in model_vars:
mapping[v.op.name] = v
return mapping
def get_imagenet_vars_to_restore(imagenet_ckpt):
"""Returns dict of variables to restore from ImageNet-checkpoint."""
vars_to_restore_imagenet = {}
ckpt_var_names = tf.contrib.framework.list_variables(imagenet_ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var_names]
model_vars = tf.global_variables()
for v in model_vars:
if 'global_step' in v.op.name:
continue
mvname_noprefix = v.op.name.replace('depth_prediction/', '')
mvname_noprefix = mvname_noprefix.replace('moving_mean', 'mu')
mvname_noprefix = mvname_noprefix.replace('moving_variance', 'sigma')
if mvname_noprefix in ckpt_var_names:
vars_to_restore_imagenet[mvname_noprefix] = v
else:
logging.info('The following variable will not be restored from '
'pretrained ImageNet-checkpoint: %s', mvname_noprefix)
return vars_to_restore_imagenet
def format_number(n):
"""Formats number with thousands commas."""
# locale.setlocale(locale.LC_ALL, 'en_US') # commented by me
# return locale.format('%d', n, grouping=True)
return n
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
def read_text_lines(filepath):
with tf.gfile.Open(filepath, 'r') as f:
lines = f.readlines()
lines = [l.rstrip() for l in lines]
return lines
def save_flags(FLAGS, save_path):
import json
check_path(save_path)
save_path = os.path.join(save_path, 'flags.json')
with open(save_path, 'w') as f:
json.dump(FLAGS.flag_values_dict(), f, indent=4, sort_keys=False)
def check_path(path):
if not os.path.exists(path):
os.makedirs(path)
def save_command(save_path):
check_path(save_path)
import sys
command = sys.argv
save_file = os.path.join(save_path, 'command.txt')
with open(save_file, 'w') as f:
f.write(' '.join(command))
def make_intrinsics_matrix(fx, fy, cx, cy):
r1 = np.stack([fx, 0, cx])
r2 = np.stack([0, fy, cy])
r3 = np.array([0., 0., 1.])
intrinsics = np.stack([r1, r2, r3])
return intrinsics
def get_multi_scale_intrinsics(intrinsics, num_scales):
"""Returns multiple intrinsic matrices for different scales."""
intrinsics_multi_scale = []
# Scale the intrinsics accordingly for each scale
for s in range(num_scales):
fx = intrinsics[0, 0] / (2 ** s)
fy = intrinsics[1, 1] / (2 ** s)
cx = intrinsics[0, 2] / (2 ** s)
cy = intrinsics[1, 2] / (2 ** s)
intrinsics_multi_scale.append(make_intrinsics_matrix(fx, fy, cx, cy))
intrinsics_multi_scale = np.stack(intrinsics_multi_scale) # [num_scales, 3, 3]
return intrinsics_multi_scale
def pack_pred_depths(pred_dir, test_file):
"""Pack depth predictions as a single .npy file"""
test_images = read_text_lines(test_file)
save_name = 'pred_depth.npy'
output_file = os.path.join(pred_dir, save_name)
img_height = 128
img_width = 416
all_pred = np.zeros((len(test_images), img_height, img_width))
for i, img_path in enumerate(test_images):
npy_path = os.path.join(pred_dir, img_path.replace('png', 'npy'))
depth = np.load(npy_path)
all_pred[i] = np.squeeze(depth)
np.save(output_file, all_pred)
# Depth evaluation utils
# Mostly based on the code written by Clement Godard:
# https://github.com/mrharicot/monodepth/blob/master/utils/evaluation_utils.py
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
# EIGEN split
| gt_files = []
gt_calib = []
im_sizes = []
im_files = []
cams = []
num_probs = 0
for filename in files:
filename = filename.split()[0]
splits = filename.split('/')
# camera_id = filename[-1] # 2 is left, 3 is right
date = splits[0]
im_id = splits[4][:10]
file_root = '{}/{}'
im = filename
vel = '{}/{}/velodyne_points/data/{}.bin'.format(splits[0], splits[1], im_id)
if os.path.isfile(data_root + im):
gt_files.append(data_root + vel)
gt_calib.append(data_root + date + '/')
im_sizes.append(cv2.imread(data_root + im).shape[:2])
im_files.append(data_root + im)
cams.append(2)
else:
num_probs += 1
print('{} missing'.format(data_root + im))
# print(num_probs, 'files missing')
return gt_files, gt_calib, im_sizes, im_files, cams
def load_velodyne_points(file_name):
# adapted from https://github.com/hunse/kitti
points = np.fromfile(file_name, dtype=np.float32).reshape(-1, 4)
points[:, 3] = 1.0 # homogeneous
return points
def read_calib_file(path):
# taken from https://github.com/hunse/kitti
float_chars = set("0123456789.e+- ")
data = {}
with open(path, 'r') as f:
for line in f.readlines():
key, value = line.split(':', 1)
value = value.strip()
data[key] = value
if float_chars.issuperset(value):
# try to cast to float array
try:
data[key] = np.array(list(map(float, value.split(' '))))
except ValueError:
# casting error: data[key] already eq. value, so pass
pass
return data
def get_focal_length_baseline(calib_dir, cam=2):
cam2cam = read_calib_file(calib_dir + 'calib_cam_to_cam.txt')
P2_rect = cam2cam['P_rect_02'].reshape(3, 4)
P3_rect = cam2cam['P_rect_03'].reshape(3, 4)
# cam 2 is left of camera 0 -6cm
# cam 3 is to the right +54cm
b2 = P2_rect[0, 3] / -P2_rect[0, 0]
b3 = P3_rect[0, 3] / -P3_rect[0, 0]
baseline = b3 - b2
if cam == 2:
focal_length = P2_rect[0, 0]
elif cam == 3:
focal_length = P3_rect[0, 0]
return focal_length, baseline
def sub2ind(matrixSize, rowSub, colSub):
m, n = matrixSize
return rowSub * (n - 1) + colSub - 1
def generate_depth_map(calib_dir, velo_file_name, im_shape, cam=2, interp=False, vel_depth=False):
# load calibration files
cam2cam = read_calib_file(calib_dir + 'calib_cam_to_cam.txt')
velo2cam = read_calib_file(calib_dir + 'calib_velo_to_cam.txt')
velo2cam = np.hstack((velo2cam['R'].reshape(3, 3), velo2cam['T'][..., np.newaxis]))
velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))
# compute projection matrix velodyne->image plane
R_cam2rect = np.eye(4)
R_cam2rect[:3, :3] = cam2cam['R_rect_00'].reshape(3, 3)
P_rect = cam2cam['P_rect_0' + str(cam)].reshape(3, 4)
P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)
# load velodyne points and remove all behind image plane (approximation)
# each row of the velodyne data is forward, left, up, reflectance
velo = load_velodyne_points(velo_file_name)
velo = velo[velo[:, 0] >= 0, :]
# project the points to the camera
velo_pts_im = np.dot(P_velo2im, velo.T).T
velo_pts_im[:, :2] = velo_pts_im[:, :2] / velo_pts_im[:, 2][..., np.newaxis]
if vel_depth:
velo_pts_im[:, 2] = velo[:, 0]
# check if in bounds
# use minus 1 to get the exact same value as KITTI matlab code
velo_pts_im[:, 0] = np.round(velo_pts_im[:, 0]) - 1
velo_pts_im[:, 1] = np.round(velo_pts_im[:, 1]) - 1
val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0)
val_inds = val_inds & (velo_pts_im[:, 0] < im_shape[1]) & (velo_pts_im[:, 1] < im_shape[0])
velo_pts_im = velo_pts_im[val_inds, :]
# project to image
depth = np.zeros(im_shape)
depth[velo_pts_im[:, 1].astype(np.int), velo_pts_im[:, 0].astype(np.int)] = velo_pts_im[:, 2]
# find the duplicate points and choose the closest depth
inds = sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0])
dupe_inds = [item for item, count in Counter(inds).items() if count > 1]
for dd in dupe_inds:
pts = np.where(inds == dd)[0]
x_loc = int(velo_pts_im[pts[0], 0])
y_loc = int(velo_pts_im[pts[0], 1])
depth[y_loc, x_loc] = velo_pts_im[pts, 2].min()
depth[depth < 0] = 0
return depth | def read_file_data(files, data_root): | random_line_split |
gogo_fast_api.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: gogo_fast_api.proto
package gogoapi
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
types "github.com/gogo/protobuf/types"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Http .
type Http struct {
Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"`
AnyData *types.Any `protobuf:"bytes,3,opt,name=any_data,json=anyData,proto3" json:"any_data,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Http) Reset() { *m = Http{} }
func (m *Http) String() string { return proto.CompactTextString(m) }
func (*Http) ProtoMessage() {}
func (*Http) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{0}
}
func (m *Http) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Http.Unmarshal(m, b)
}
func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Http.Marshal(b, m, deterministic)
}
func (m *Http) XXX_Merge(src proto.Message) {
xxx_messageInfo_Http.Merge(m, src)
}
func (m *Http) XXX_Size() int {
return xxx_messageInfo_Http.Size(m)
}
func (m *Http) XXX_DiscardUnknown() {
xxx_messageInfo_Http.DiscardUnknown(m)
}
var xxx_messageInfo_Http proto.InternalMessageInfo
func (m *Http) GetRules() []*HttpRule |
func (m *Http) GetFullyDecodeReservedExpansion() bool {
if m != nil {
return m.FullyDecodeReservedExpansion
}
return false
}
func (m *Http) GetAnyData() *types.Any {
if m != nil {
return m.AnyData
}
return nil
}
// HttpRule .
type HttpRule struct {
Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
// Types that are valid to be assigned to Pattern:
// *HttpRule_Get
// *HttpRule_Put
// *HttpRule_Post
// *HttpRule_Delete
// *HttpRule_Patch
// *HttpRule_Custom
Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"`
AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HttpRule) Reset() { *m = HttpRule{} }
func (m *HttpRule) String() string { return proto.CompactTextString(m) }
func (*HttpRule) ProtoMessage() {}
func (*HttpRule) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{1}
}
func (m *HttpRule) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HttpRule.Unmarshal(m, b)
}
func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic)
}
func (m *HttpRule) XXX_Merge(src proto.Message) {
xxx_messageInfo_HttpRule.Merge(m, src)
}
func (m *HttpRule) XXX_Size() int {
return xxx_messageInfo_HttpRule.Size(m)
}
func (m *HttpRule) XXX_DiscardUnknown() {
xxx_messageInfo_HttpRule.DiscardUnknown(m)
}
var xxx_messageInfo_HttpRule proto.InternalMessageInfo
type isHttpRule_Pattern interface {
isHttpRule_Pattern()
}
type HttpRule_Get struct {
Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof" json:"get,omitempty"`
}
type HttpRule_Put struct {
Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof" json:"put,omitempty"`
}
type HttpRule_Post struct {
Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof" json:"post,omitempty"`
}
type HttpRule_Delete struct {
Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof" json:"delete,omitempty"`
}
type HttpRule_Patch struct {
Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof" json:"patch,omitempty"`
}
type HttpRule_Custom struct {
Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof" json:"custom,omitempty"`
}
func (*HttpRule_Get) isHttpRule_Pattern() {}
func (*HttpRule_Put) isHttpRule_Pattern() {}
func (*HttpRule_Post) isHttpRule_Pattern() {}
func (*HttpRule_Delete) isHttpRule_Pattern() {}
func (*HttpRule_Patch) isHttpRule_Pattern() {}
func (*HttpRule_Custom) isHttpRule_Pattern() {}
func (m *HttpRule) GetPattern() isHttpRule_Pattern {
if m != nil {
return m.Pattern
}
return nil
}
func (m *HttpRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *HttpRule) GetGet() string {
if x, ok := m.GetPattern().(*HttpRule_Get); ok {
return x.Get
}
return ""
}
func (m *HttpRule) GetPut() string {
if x, ok := m.GetPattern().(*HttpRule_Put); ok {
return x.Put
}
return ""
}
func (m *HttpRule) GetPost() string {
if x, ok := m.GetPattern().(*HttpRule_Post); ok {
return x.Post
}
return ""
}
func (m *HttpRule) GetDelete() string {
if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
return x.Delete
}
return ""
}
func (m *HttpRule) GetPatch() string {
if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
return x.Patch
}
return ""
}
func (m *HttpRule) GetCustom() *CustomHttpPattern {
if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
return x.Custom
}
return nil
}
func (m *HttpRule) GetBody() string {
if m != nil {
return m.Body
}
return ""
}
func (m *HttpRule) GetResponseBody() string {
if m != nil {
return m.ResponseBody
}
return ""
}
func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
if m != nil {
return m.AdditionalBindings
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*HttpRule) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*HttpRule_Get)(nil),
(*HttpRule_Put)(nil),
(*HttpRule_Post)(nil),
(*HttpRule_Delete)(nil),
(*HttpRule_Patch)(nil),
(*HttpRule_Custom)(nil),
}
}
// CustomHttpPattern .
type CustomHttpPattern struct {
Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
func (*CustomHttpPattern) ProtoMessage() {}
func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{2}
}
func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b)
}
func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic)
}
func (m *CustomHttpPattern) XXX_Merge(src proto.Message) {
xxx_messageInfo_CustomHttpPattern.Merge(m, src)
}
func (m *CustomHttpPattern) XXX_Size() int {
return xxx_messageInfo_CustomHttpPattern.Size(m)
}
func (m *CustomHttpPattern) XXX_DiscardUnknown() {
xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m)
}
var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo
func (m *CustomHttpPattern) GetKind() string {
if m != nil {
return m.Kind
}
return ""
}
func (m *CustomHttpPattern) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func init() {
proto.RegisterType((*Http)(nil), "gogoapi.Http")
proto.RegisterType((*HttpRule)(nil), "gogoapi.HttpRule")
proto.RegisterType((*CustomHttpPattern)(nil), "gogoapi.CustomHttpPattern")
}
func init() { proto.RegisterFile("gogo_fast_api.proto", fileDescriptor_08ec8be04814436d) }
var fileDescriptor_08ec8be04814436d = []byte{
// 438 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x4f, 0x8f, 0xd3, 0x3c,
0x10, 0x87, 0xdf, 0x6c, 0xff, 0xa5, 0xee, 0xbe, 0x87, 0xf5, 0xae, 0x90, 0xa9, 0x90, 0xa8, 0xca,
0x81, 0x9e, 0x12, 0x69, 0xe1, 0x82, 0x7a, 0x22, 0xec, 0x4a, 0x7b, 0x44, 0x39, 0x72, 0x89, 0x26,
0xf1, 0x34, 0xb5, 0x9a, 0xda, 0x56, 0x3c, 0x41, 0xe4, 0x13, 0xf1, 0x85, 0xf8, 0x40, 0x28, 0x8e,
0x5b, 0x0e, 0x88, 0xdb, 0xcc, 0x6f, 0x9e, 0x24, 0x4f, 0xc6, 0x66, 0xf7, 0xb5, 0xa9, 0x4d, 0x71,
0x00, 0x47, 0x05, 0x58, 0x95, 0xd8, 0xd6, 0x90, 0xe1, 0x8b, 0x21, 0x04, 0xab, 0xd6, 0xaf, 0x6b,
0x63, 0xea, 0x06, 0x53, 0x1f, 0x97, 0xdd, 0x21, 0x05, 0xdd, 0x8f, 0xcc, 0xf6, 0x67, 0xc4, 0xa6,
0x2f, 0x44, 0x96, 0xbf, 0x67, 0xb3, 0xb6, 0x6b, 0xd0, 0x89, 0x68, 0x33, 0xd9, 0xad, 0x1e, 0xef,
0x92, 0xf0, 0x70, 0x32, 0x4c, 0xf3, 0xae, 0xc1, 0x7c, 0x9c, 0xf3, 0x67, 0xf6, 0xf6, 0xd0, 0x35,
0x4d, 0x5f, 0x48, 0xac, 0x8c, 0xc4, 0xa2, 0x45, 0x87, 0xed, 0x77, 0x94, 0x05, 0xfe, 0xb0, 0xa0,
0x9d, 0x32, 0x5a, 0xdc, 0x6c, 0xa2, 0x5d, 0x9c, 0xbf, 0xf1, 0xd8, 0x93, 0xa7, 0xf2, 0x00, 0x3d,
0x5f, 0x18, 0x9e, 0xb2, 0x18, 0x74, 0x5f, 0x48, 0x20, 0x10, 0x93, 0x4d, 0xb4, 0x5b, 0x3d, 0x3e,
0x24, 0xa3, 0x66, 0x72, 0xd1, 0x4c, 0x3e, 0xeb, 0x3e, 0x5f, 0x80, 0xee, 0x9f, 0x80, 0x60, 0xfb,
0xeb, 0x86, 0xc5, 0x17, 0x17, 0xbe, 0x66, 0xb1, 0xc3, 0x06, 0x2b, 0x32, 0xad, 0x88, 0x36, 0xd1,
0x6e, 0x99, 0x5f, 0x7b, 0xce, 0xd9, 0xa4, 0x46, 0xf2, 0x12, 0xcb, 0x97, 0xff, 0xf2, 0xa1, 0x19,
0x32, 0xdb, 0x91, 0xff, 0x90, 0xcf, 0x6c, 0x47, 0xfc, 0x81, 0x4d, 0xad, 0x71, 0x24, 0xa6, 0x21,
0xf4, 0x1d, 0x17, 0x6c, 0x2e, 0xb1, 0x41, 0x42, 0x31, 0x0b, 0x79, 0xe8, 0xf9, 0x2b, 0x36, 0xb3,
0x40, 0xd5, 0x51, 0xcc, 0xc3, 0x60, 0x6c, 0xf9, 0x47, 0x36, 0xaf, 0x3a, 0x47, 0xe6, 0x2c, 0x62,
0xff, 0x1f, 0xeb, 0xeb, 0xea, 0xbe, 0xf8, 0x78, 0x90, 0xfe, 0x0a, 0x44, 0xd8, 0xea, 0xe1, 0x6d,
0x23, 0xcb, 0x39, 0x9b, 0x96, 0x46, 0xf6, 0x62, 0xe1, 0xed, 0x7d, 0xcd, 0xdf, 0xb1, 0xff, 0x5b,
0x74, 0xd6, 0x68, 0x87, 0x85, 0x1f, 0xde, 0xfa, 0xe1, 0xed, 0x25, 0xcc, 0x06, 0x28, 0x63, 0xf7,
0x20, 0xa5, 0x22, 0x65, 0x34, 0x34, 0x45, 0xa9, 0xb4, 0x54, 0xba, 0x76, 0x62, 0xf5, 0xaf, 0x63,
0xe3, 0x7f, 0xe8, 0x2c, 0xc0, 0xd9, 0x92, 0x2d, 0xec, 0x68, 0xb4, 0xdd, 0xb3, 0xbb, 0xbf, 0x34,
0x07, 0xb9, 0x93, 0xd2, 0x32, 0xac, 0xd6, 0xd7, 0x43, 0x66, 0x81, 0x8e, 0xe3, 0x5e, 0x73, 0x5f,
0x67, 0xfb, 0x6f, 0x9f, 0x6a, 0x45, 0xc7, 0xae, 0x4c, 0x2a, 0x73, 0x4e, 0xd5, 0x09, 0x54, 0xdd,
0x81, 0xae, 0xaf, 0x17, 0xad, 0x28, 0x51, 0x57, 0xc7, 0x33, 0xb4, 0x27, 0x97, 0xfa, 0x1b, 0x0a,
0x56, 0xed, 0x83, 0x61, 0x39, 0xf7, 0xd4, 0x87, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa4, 0xe6,
0xc4, 0x1c, 0xbc, 0x02, 0x00, 0x00,
}
| {
if m != nil {
return m.Rules
}
return nil
} | identifier_body |
gogo_fast_api.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: gogo_fast_api.proto
package gogoapi
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
types "github.com/gogo/protobuf/types"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Http .
type Http struct {
Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"`
AnyData *types.Any `protobuf:"bytes,3,opt,name=any_data,json=anyData,proto3" json:"any_data,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Http) Reset() { *m = Http{} }
func (m *Http) String() string { return proto.CompactTextString(m) }
func (*Http) ProtoMessage() {}
func (*Http) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{0}
}
func (m *Http) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Http.Unmarshal(m, b)
}
func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Http.Marshal(b, m, deterministic)
}
func (m *Http) XXX_Merge(src proto.Message) {
xxx_messageInfo_Http.Merge(m, src)
}
func (m *Http) XXX_Size() int {
return xxx_messageInfo_Http.Size(m)
}
func (m *Http) XXX_DiscardUnknown() {
xxx_messageInfo_Http.DiscardUnknown(m)
}
var xxx_messageInfo_Http proto.InternalMessageInfo
func (m *Http) GetRules() []*HttpRule {
if m != nil |
return nil
}
func (m *Http) GetFullyDecodeReservedExpansion() bool {
if m != nil {
return m.FullyDecodeReservedExpansion
}
return false
}
func (m *Http) GetAnyData() *types.Any {
if m != nil {
return m.AnyData
}
return nil
}
// HttpRule .
type HttpRule struct {
Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
// Types that are valid to be assigned to Pattern:
// *HttpRule_Get
// *HttpRule_Put
// *HttpRule_Post
// *HttpRule_Delete
// *HttpRule_Patch
// *HttpRule_Custom
Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"`
AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HttpRule) Reset() { *m = HttpRule{} }
func (m *HttpRule) String() string { return proto.CompactTextString(m) }
func (*HttpRule) ProtoMessage() {}
func (*HttpRule) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{1}
}
func (m *HttpRule) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HttpRule.Unmarshal(m, b)
}
func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic)
}
func (m *HttpRule) XXX_Merge(src proto.Message) {
xxx_messageInfo_HttpRule.Merge(m, src)
}
func (m *HttpRule) XXX_Size() int {
return xxx_messageInfo_HttpRule.Size(m)
}
func (m *HttpRule) XXX_DiscardUnknown() {
xxx_messageInfo_HttpRule.DiscardUnknown(m)
}
var xxx_messageInfo_HttpRule proto.InternalMessageInfo
type isHttpRule_Pattern interface {
isHttpRule_Pattern()
}
type HttpRule_Get struct {
Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof" json:"get,omitempty"`
}
type HttpRule_Put struct {
Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof" json:"put,omitempty"`
}
type HttpRule_Post struct {
Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof" json:"post,omitempty"`
}
type HttpRule_Delete struct {
Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof" json:"delete,omitempty"`
}
type HttpRule_Patch struct {
Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof" json:"patch,omitempty"`
}
type HttpRule_Custom struct {
Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof" json:"custom,omitempty"`
}
func (*HttpRule_Get) isHttpRule_Pattern() {}
func (*HttpRule_Put) isHttpRule_Pattern() {}
func (*HttpRule_Post) isHttpRule_Pattern() {}
func (*HttpRule_Delete) isHttpRule_Pattern() {}
func (*HttpRule_Patch) isHttpRule_Pattern() {}
func (*HttpRule_Custom) isHttpRule_Pattern() {}
func (m *HttpRule) GetPattern() isHttpRule_Pattern {
if m != nil {
return m.Pattern
}
return nil
}
func (m *HttpRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *HttpRule) GetGet() string {
if x, ok := m.GetPattern().(*HttpRule_Get); ok {
return x.Get
}
return ""
}
func (m *HttpRule) GetPut() string {
if x, ok := m.GetPattern().(*HttpRule_Put); ok {
return x.Put
}
return ""
}
func (m *HttpRule) GetPost() string {
if x, ok := m.GetPattern().(*HttpRule_Post); ok {
return x.Post
}
return ""
}
func (m *HttpRule) GetDelete() string {
if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
return x.Delete
}
return ""
}
func (m *HttpRule) GetPatch() string {
if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
return x.Patch
}
return ""
}
func (m *HttpRule) GetCustom() *CustomHttpPattern {
if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
return x.Custom
}
return nil
}
func (m *HttpRule) GetBody() string {
if m != nil {
return m.Body
}
return ""
}
func (m *HttpRule) GetResponseBody() string {
if m != nil {
return m.ResponseBody
}
return ""
}
func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
if m != nil {
return m.AdditionalBindings
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*HttpRule) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*HttpRule_Get)(nil),
(*HttpRule_Put)(nil),
(*HttpRule_Post)(nil),
(*HttpRule_Delete)(nil),
(*HttpRule_Patch)(nil),
(*HttpRule_Custom)(nil),
}
}
// CustomHttpPattern .
type CustomHttpPattern struct {
Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
func (*CustomHttpPattern) ProtoMessage() {}
func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{2}
}
func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b)
}
func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic)
}
func (m *CustomHttpPattern) XXX_Merge(src proto.Message) {
xxx_messageInfo_CustomHttpPattern.Merge(m, src)
}
func (m *CustomHttpPattern) XXX_Size() int {
return xxx_messageInfo_CustomHttpPattern.Size(m)
}
func (m *CustomHttpPattern) XXX_DiscardUnknown() {
xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m)
}
var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo
func (m *CustomHttpPattern) GetKind() string {
if m != nil {
return m.Kind
}
return ""
}
func (m *CustomHttpPattern) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func init() {
proto.RegisterType((*Http)(nil), "gogoapi.Http")
proto.RegisterType((*HttpRule)(nil), "gogoapi.HttpRule")
proto.RegisterType((*CustomHttpPattern)(nil), "gogoapi.CustomHttpPattern")
}
func init() { proto.RegisterFile("gogo_fast_api.proto", fileDescriptor_08ec8be04814436d) }
var fileDescriptor_08ec8be04814436d = []byte{
// 438 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x4f, 0x8f, 0xd3, 0x3c,
0x10, 0x87, 0xdf, 0x6c, 0xff, 0xa5, 0xee, 0xbe, 0x87, 0xf5, 0xae, 0x90, 0xa9, 0x90, 0xa8, 0xca,
0x81, 0x9e, 0x12, 0x69, 0xe1, 0x82, 0x7a, 0x22, 0xec, 0x4a, 0x7b, 0x44, 0x39, 0x72, 0x89, 0x26,
0xf1, 0x34, 0xb5, 0x9a, 0xda, 0x56, 0x3c, 0x41, 0xe4, 0x13, 0xf1, 0x85, 0xf8, 0x40, 0x28, 0x8e,
0x5b, 0x0e, 0x88, 0xdb, 0xcc, 0x6f, 0x9e, 0x24, 0x4f, 0xc6, 0x66, 0xf7, 0xb5, 0xa9, 0x4d, 0x71,
0x00, 0x47, 0x05, 0x58, 0x95, 0xd8, 0xd6, 0x90, 0xe1, 0x8b, 0x21, 0x04, 0xab, 0xd6, 0xaf, 0x6b,
0x63, 0xea, 0x06, 0x53, 0x1f, 0x97, 0xdd, 0x21, 0x05, 0xdd, 0x8f, 0xcc, 0xf6, 0x67, 0xc4, 0xa6,
0x2f, 0x44, 0x96, 0xbf, 0x67, 0xb3, 0xb6, 0x6b, 0xd0, 0x89, 0x68, 0x33, 0xd9, 0xad, 0x1e, 0xef,
0x92, 0xf0, 0x70, 0x32, 0x4c, 0xf3, 0xae, 0xc1, 0x7c, 0x9c, 0xf3, 0x67, 0xf6, 0xf6, 0xd0, 0x35,
0x4d, 0x5f, 0x48, 0xac, 0x8c, 0xc4, 0xa2, 0x45, 0x87, 0xed, 0x77, 0x94, 0x05, 0xfe, 0xb0, 0xa0,
0x9d, 0x32, 0x5a, 0xdc, 0x6c, 0xa2, 0x5d, 0x9c, 0xbf, 0xf1, 0xd8, 0x93, 0xa7, 0xf2, 0x00, 0x3d,
0x5f, 0x18, 0x9e, 0xb2, 0x18, 0x74, 0x5f, 0x48, 0x20, 0x10, 0x93, 0x4d, 0xb4, 0x5b, 0x3d, 0x3e,
0x24, 0xa3, 0x66, 0x72, 0xd1, 0x4c, 0x3e, 0xeb, 0x3e, 0x5f, 0x80, 0xee, 0x9f, 0x80, 0x60, 0xfb,
0xeb, 0x86, 0xc5, 0x17, 0x17, 0xbe, 0x66, 0xb1, 0xc3, 0x06, 0x2b, 0x32, 0xad, 0x88, 0x36, 0xd1,
0x6e, 0x99, 0x5f, 0x7b, 0xce, 0xd9, 0xa4, 0x46, 0xf2, 0x12, 0xcb, 0x97, 0xff, 0xf2, 0xa1, 0x19,
0x32, 0xdb, 0x91, 0xff, 0x90, 0xcf, 0x6c, 0x47, 0xfc, 0x81, 0x4d, 0xad, 0x71, 0x24, 0xa6, 0x21,
0xf4, 0x1d, 0x17, 0x6c, 0x2e, 0xb1, 0x41, 0x42, 0x31, 0x0b, 0x79, 0xe8, 0xf9, 0x2b, 0x36, 0xb3,
0x40, 0xd5, 0x51, 0xcc, 0xc3, 0x60, 0x6c, 0xf9, 0x47, 0x36, 0xaf, 0x3a, 0x47, 0xe6, 0x2c, 0x62,
0xff, 0x1f, 0xeb, 0xeb, 0xea, 0xbe, 0xf8, 0x78, 0x90, 0xfe, 0x0a, 0x44, 0xd8, 0xea, 0xe1, 0x6d,
0x23, 0xcb, 0x39, 0x9b, 0x96, 0x46, 0xf6, 0x62, 0xe1, 0xed, 0x7d, 0xcd, 0xdf, 0xb1, 0xff, 0x5b,
0x74, 0xd6, 0x68, 0x87, 0x85, 0x1f, 0xde, 0xfa, 0xe1, 0xed, 0x25, 0xcc, 0x06, 0x28, 0x63, 0xf7,
0x20, 0xa5, 0x22, 0x65, 0x34, 0x34, 0x45, 0xa9, 0xb4, 0x54, 0xba, 0x76, 0x62, 0xf5, 0xaf, 0x63,
0xe3, 0x7f, 0xe8, 0x2c, 0xc0, 0xd9, 0x92, 0x2d, 0xec, 0x68, 0xb4, 0xdd, 0xb3, 0xbb, 0xbf, 0x34,
0x07, 0xb9, 0x93, 0xd2, 0x32, 0xac, 0xd6, 0xd7, 0x43, 0x66, 0x81, 0x8e, 0xe3, 0x5e, 0x73, 0x5f,
0x67, 0xfb, 0x6f, 0x9f, 0x6a, 0x45, 0xc7, 0xae, 0x4c, 0x2a, 0x73, 0x4e, 0xd5, 0x09, 0x54, 0xdd,
0x81, 0xae, 0xaf, 0x17, 0xad, 0x28, 0x51, 0x57, 0xc7, 0x33, 0xb4, 0x27, 0x97, 0xfa, 0x1b, 0x0a,
0x56, 0xed, 0x83, 0x61, 0x39, 0xf7, 0xd4, 0x87, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa4, 0xe6,
0xc4, 0x1c, 0xbc, 0x02, 0x00, 0x00,
}
| {
return m.Rules
} | conditional_block |
gogo_fast_api.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: gogo_fast_api.proto
package gogoapi
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
types "github.com/gogo/protobuf/types"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Http .
type Http struct {
Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"`
AnyData *types.Any `protobuf:"bytes,3,opt,name=any_data,json=anyData,proto3" json:"any_data,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Http) Reset() { *m = Http{} }
func (m *Http) String() string { return proto.CompactTextString(m) }
func (*Http) ProtoMessage() {}
func (*Http) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{0}
}
func (m *Http) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Http.Unmarshal(m, b)
}
func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Http.Marshal(b, m, deterministic)
}
func (m *Http) XXX_Merge(src proto.Message) {
xxx_messageInfo_Http.Merge(m, src)
}
func (m *Http) XXX_Size() int {
return xxx_messageInfo_Http.Size(m)
}
func (m *Http) XXX_DiscardUnknown() {
xxx_messageInfo_Http.DiscardUnknown(m)
}
var xxx_messageInfo_Http proto.InternalMessageInfo
func (m *Http) GetRules() []*HttpRule {
if m != nil {
return m.Rules
}
return nil
}
func (m *Http) GetFullyDecodeReservedExpansion() bool {
if m != nil {
return m.FullyDecodeReservedExpansion
}
return false
}
func (m *Http) GetAnyData() *types.Any {
if m != nil {
return m.AnyData
}
return nil
}
// HttpRule .
type HttpRule struct {
Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
// Types that are valid to be assigned to Pattern:
// *HttpRule_Get
// *HttpRule_Put
// *HttpRule_Post
// *HttpRule_Delete
// *HttpRule_Patch
// *HttpRule_Custom
Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"`
AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HttpRule) Reset() { *m = HttpRule{} }
func (m *HttpRule) String() string { return proto.CompactTextString(m) }
func (*HttpRule) ProtoMessage() {}
func (*HttpRule) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{1}
}
func (m *HttpRule) XXX_Unmarshal(b []byte) error { | func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic)
}
func (m *HttpRule) XXX_Merge(src proto.Message) {
xxx_messageInfo_HttpRule.Merge(m, src)
}
func (m *HttpRule) XXX_Size() int {
return xxx_messageInfo_HttpRule.Size(m)
}
func (m *HttpRule) XXX_DiscardUnknown() {
xxx_messageInfo_HttpRule.DiscardUnknown(m)
}
var xxx_messageInfo_HttpRule proto.InternalMessageInfo
type isHttpRule_Pattern interface {
isHttpRule_Pattern()
}
type HttpRule_Get struct {
Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof" json:"get,omitempty"`
}
type HttpRule_Put struct {
Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof" json:"put,omitempty"`
}
type HttpRule_Post struct {
Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof" json:"post,omitempty"`
}
type HttpRule_Delete struct {
Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof" json:"delete,omitempty"`
}
type HttpRule_Patch struct {
Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof" json:"patch,omitempty"`
}
type HttpRule_Custom struct {
Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof" json:"custom,omitempty"`
}
func (*HttpRule_Get) isHttpRule_Pattern() {}
func (*HttpRule_Put) isHttpRule_Pattern() {}
func (*HttpRule_Post) isHttpRule_Pattern() {}
func (*HttpRule_Delete) isHttpRule_Pattern() {}
func (*HttpRule_Patch) isHttpRule_Pattern() {}
func (*HttpRule_Custom) isHttpRule_Pattern() {}
func (m *HttpRule) GetPattern() isHttpRule_Pattern {
if m != nil {
return m.Pattern
}
return nil
}
func (m *HttpRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *HttpRule) GetGet() string {
if x, ok := m.GetPattern().(*HttpRule_Get); ok {
return x.Get
}
return ""
}
func (m *HttpRule) GetPut() string {
if x, ok := m.GetPattern().(*HttpRule_Put); ok {
return x.Put
}
return ""
}
func (m *HttpRule) GetPost() string {
if x, ok := m.GetPattern().(*HttpRule_Post); ok {
return x.Post
}
return ""
}
func (m *HttpRule) GetDelete() string {
if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
return x.Delete
}
return ""
}
func (m *HttpRule) GetPatch() string {
if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
return x.Patch
}
return ""
}
func (m *HttpRule) GetCustom() *CustomHttpPattern {
if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
return x.Custom
}
return nil
}
func (m *HttpRule) GetBody() string {
if m != nil {
return m.Body
}
return ""
}
func (m *HttpRule) GetResponseBody() string {
if m != nil {
return m.ResponseBody
}
return ""
}
func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
if m != nil {
return m.AdditionalBindings
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*HttpRule) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*HttpRule_Get)(nil),
(*HttpRule_Put)(nil),
(*HttpRule_Post)(nil),
(*HttpRule_Delete)(nil),
(*HttpRule_Patch)(nil),
(*HttpRule_Custom)(nil),
}
}
// CustomHttpPattern .
type CustomHttpPattern struct {
Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
func (*CustomHttpPattern) ProtoMessage() {}
func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{2}
}
func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b)
}
func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic)
}
func (m *CustomHttpPattern) XXX_Merge(src proto.Message) {
xxx_messageInfo_CustomHttpPattern.Merge(m, src)
}
func (m *CustomHttpPattern) XXX_Size() int {
return xxx_messageInfo_CustomHttpPattern.Size(m)
}
func (m *CustomHttpPattern) XXX_DiscardUnknown() {
xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m)
}
var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo
func (m *CustomHttpPattern) GetKind() string {
if m != nil {
return m.Kind
}
return ""
}
func (m *CustomHttpPattern) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func init() {
proto.RegisterType((*Http)(nil), "gogoapi.Http")
proto.RegisterType((*HttpRule)(nil), "gogoapi.HttpRule")
proto.RegisterType((*CustomHttpPattern)(nil), "gogoapi.CustomHttpPattern")
}
func init() { proto.RegisterFile("gogo_fast_api.proto", fileDescriptor_08ec8be04814436d) }
var fileDescriptor_08ec8be04814436d = []byte{
// 438 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x4f, 0x8f, 0xd3, 0x3c,
0x10, 0x87, 0xdf, 0x6c, 0xff, 0xa5, 0xee, 0xbe, 0x87, 0xf5, 0xae, 0x90, 0xa9, 0x90, 0xa8, 0xca,
0x81, 0x9e, 0x12, 0x69, 0xe1, 0x82, 0x7a, 0x22, 0xec, 0x4a, 0x7b, 0x44, 0x39, 0x72, 0x89, 0x26,
0xf1, 0x34, 0xb5, 0x9a, 0xda, 0x56, 0x3c, 0x41, 0xe4, 0x13, 0xf1, 0x85, 0xf8, 0x40, 0x28, 0x8e,
0x5b, 0x0e, 0x88, 0xdb, 0xcc, 0x6f, 0x9e, 0x24, 0x4f, 0xc6, 0x66, 0xf7, 0xb5, 0xa9, 0x4d, 0x71,
0x00, 0x47, 0x05, 0x58, 0x95, 0xd8, 0xd6, 0x90, 0xe1, 0x8b, 0x21, 0x04, 0xab, 0xd6, 0xaf, 0x6b,
0x63, 0xea, 0x06, 0x53, 0x1f, 0x97, 0xdd, 0x21, 0x05, 0xdd, 0x8f, 0xcc, 0xf6, 0x67, 0xc4, 0xa6,
0x2f, 0x44, 0x96, 0xbf, 0x67, 0xb3, 0xb6, 0x6b, 0xd0, 0x89, 0x68, 0x33, 0xd9, 0xad, 0x1e, 0xef,
0x92, 0xf0, 0x70, 0x32, 0x4c, 0xf3, 0xae, 0xc1, 0x7c, 0x9c, 0xf3, 0x67, 0xf6, 0xf6, 0xd0, 0x35,
0x4d, 0x5f, 0x48, 0xac, 0x8c, 0xc4, 0xa2, 0x45, 0x87, 0xed, 0x77, 0x94, 0x05, 0xfe, 0xb0, 0xa0,
0x9d, 0x32, 0x5a, 0xdc, 0x6c, 0xa2, 0x5d, 0x9c, 0xbf, 0xf1, 0xd8, 0x93, 0xa7, 0xf2, 0x00, 0x3d,
0x5f, 0x18, 0x9e, 0xb2, 0x18, 0x74, 0x5f, 0x48, 0x20, 0x10, 0x93, 0x4d, 0xb4, 0x5b, 0x3d, 0x3e,
0x24, 0xa3, 0x66, 0x72, 0xd1, 0x4c, 0x3e, 0xeb, 0x3e, 0x5f, 0x80, 0xee, 0x9f, 0x80, 0x60, 0xfb,
0xeb, 0x86, 0xc5, 0x17, 0x17, 0xbe, 0x66, 0xb1, 0xc3, 0x06, 0x2b, 0x32, 0xad, 0x88, 0x36, 0xd1,
0x6e, 0x99, 0x5f, 0x7b, 0xce, 0xd9, 0xa4, 0x46, 0xf2, 0x12, 0xcb, 0x97, 0xff, 0xf2, 0xa1, 0x19,
0x32, 0xdb, 0x91, 0xff, 0x90, 0xcf, 0x6c, 0x47, 0xfc, 0x81, 0x4d, 0xad, 0x71, 0x24, 0xa6, 0x21,
0xf4, 0x1d, 0x17, 0x6c, 0x2e, 0xb1, 0x41, 0x42, 0x31, 0x0b, 0x79, 0xe8, 0xf9, 0x2b, 0x36, 0xb3,
0x40, 0xd5, 0x51, 0xcc, 0xc3, 0x60, 0x6c, 0xf9, 0x47, 0x36, 0xaf, 0x3a, 0x47, 0xe6, 0x2c, 0x62,
0xff, 0x1f, 0xeb, 0xeb, 0xea, 0xbe, 0xf8, 0x78, 0x90, 0xfe, 0x0a, 0x44, 0xd8, 0xea, 0xe1, 0x6d,
0x23, 0xcb, 0x39, 0x9b, 0x96, 0x46, 0xf6, 0x62, 0xe1, 0xed, 0x7d, 0xcd, 0xdf, 0xb1, 0xff, 0x5b,
0x74, 0xd6, 0x68, 0x87, 0x85, 0x1f, 0xde, 0xfa, 0xe1, 0xed, 0x25, 0xcc, 0x06, 0x28, 0x63, 0xf7,
0x20, 0xa5, 0x22, 0x65, 0x34, 0x34, 0x45, 0xa9, 0xb4, 0x54, 0xba, 0x76, 0x62, 0xf5, 0xaf, 0x63,
0xe3, 0x7f, 0xe8, 0x2c, 0xc0, 0xd9, 0x92, 0x2d, 0xec, 0x68, 0xb4, 0xdd, 0xb3, 0xbb, 0xbf, 0x34,
0x07, 0xb9, 0x93, 0xd2, 0x32, 0xac, 0xd6, 0xd7, 0x43, 0x66, 0x81, 0x8e, 0xe3, 0x5e, 0x73, 0x5f,
0x67, 0xfb, 0x6f, 0x9f, 0x6a, 0x45, 0xc7, 0xae, 0x4c, 0x2a, 0x73, 0x4e, 0xd5, 0x09, 0x54, 0xdd,
0x81, 0xae, 0xaf, 0x17, 0xad, 0x28, 0x51, 0x57, 0xc7, 0x33, 0xb4, 0x27, 0x97, 0xfa, 0x1b, 0x0a,
0x56, 0xed, 0x83, 0x61, 0x39, 0xf7, 0xd4, 0x87, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa4, 0xe6,
0xc4, 0x1c, 0xbc, 0x02, 0x00, 0x00,
} | return xxx_messageInfo_HttpRule.Unmarshal(m, b)
} | random_line_split |
gogo_fast_api.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: gogo_fast_api.proto
package gogoapi
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
types "github.com/gogo/protobuf/types"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Http .
type Http struct {
Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"`
AnyData *types.Any `protobuf:"bytes,3,opt,name=any_data,json=anyData,proto3" json:"any_data,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Http) Reset() { *m = Http{} }
func (m *Http) String() string { return proto.CompactTextString(m) }
func (*Http) ProtoMessage() {}
func (*Http) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{0}
}
func (m *Http) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Http.Unmarshal(m, b)
}
func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Http.Marshal(b, m, deterministic)
}
func (m *Http) XXX_Merge(src proto.Message) {
xxx_messageInfo_Http.Merge(m, src)
}
func (m *Http) XXX_Size() int {
return xxx_messageInfo_Http.Size(m)
}
func (m *Http) XXX_DiscardUnknown() {
xxx_messageInfo_Http.DiscardUnknown(m)
}
var xxx_messageInfo_Http proto.InternalMessageInfo
func (m *Http) GetRules() []*HttpRule {
if m != nil {
return m.Rules
}
return nil
}
func (m *Http) GetFullyDecodeReservedExpansion() bool {
if m != nil {
return m.FullyDecodeReservedExpansion
}
return false
}
func (m *Http) GetAnyData() *types.Any {
if m != nil {
return m.AnyData
}
return nil
}
// HttpRule .
type HttpRule struct {
Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
// Types that are valid to be assigned to Pattern:
// *HttpRule_Get
// *HttpRule_Put
// *HttpRule_Post
// *HttpRule_Delete
// *HttpRule_Patch
// *HttpRule_Custom
Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"`
AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HttpRule) Reset() { *m = HttpRule{} }
func (m *HttpRule) String() string { return proto.CompactTextString(m) }
func (*HttpRule) ProtoMessage() {}
func (*HttpRule) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{1}
}
func (m *HttpRule) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HttpRule.Unmarshal(m, b)
}
func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic)
}
func (m *HttpRule) XXX_Merge(src proto.Message) {
xxx_messageInfo_HttpRule.Merge(m, src)
}
func (m *HttpRule) XXX_Size() int {
return xxx_messageInfo_HttpRule.Size(m)
}
func (m *HttpRule) XXX_DiscardUnknown() {
xxx_messageInfo_HttpRule.DiscardUnknown(m)
}
var xxx_messageInfo_HttpRule proto.InternalMessageInfo
type isHttpRule_Pattern interface {
isHttpRule_Pattern()
}
type HttpRule_Get struct {
Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof" json:"get,omitempty"`
}
type HttpRule_Put struct {
Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof" json:"put,omitempty"`
}
type HttpRule_Post struct {
Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof" json:"post,omitempty"`
}
type HttpRule_Delete struct {
Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof" json:"delete,omitempty"`
}
type HttpRule_Patch struct {
Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof" json:"patch,omitempty"`
}
type HttpRule_Custom struct {
Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof" json:"custom,omitempty"`
}
func (*HttpRule_Get) isHttpRule_Pattern() {}
func (*HttpRule_Put) isHttpRule_Pattern() {}
func (*HttpRule_Post) isHttpRule_Pattern() {}
func (*HttpRule_Delete) isHttpRule_Pattern() {}
func (*HttpRule_Patch) isHttpRule_Pattern() {}
func (*HttpRule_Custom) isHttpRule_Pattern() {}
func (m *HttpRule) GetPattern() isHttpRule_Pattern {
if m != nil {
return m.Pattern
}
return nil
}
func (m *HttpRule) | () string {
if m != nil {
return m.Selector
}
return ""
}
func (m *HttpRule) GetGet() string {
if x, ok := m.GetPattern().(*HttpRule_Get); ok {
return x.Get
}
return ""
}
func (m *HttpRule) GetPut() string {
if x, ok := m.GetPattern().(*HttpRule_Put); ok {
return x.Put
}
return ""
}
func (m *HttpRule) GetPost() string {
if x, ok := m.GetPattern().(*HttpRule_Post); ok {
return x.Post
}
return ""
}
func (m *HttpRule) GetDelete() string {
if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
return x.Delete
}
return ""
}
func (m *HttpRule) GetPatch() string {
if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
return x.Patch
}
return ""
}
func (m *HttpRule) GetCustom() *CustomHttpPattern {
if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
return x.Custom
}
return nil
}
func (m *HttpRule) GetBody() string {
if m != nil {
return m.Body
}
return ""
}
func (m *HttpRule) GetResponseBody() string {
if m != nil {
return m.ResponseBody
}
return ""
}
func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
if m != nil {
return m.AdditionalBindings
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*HttpRule) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*HttpRule_Get)(nil),
(*HttpRule_Put)(nil),
(*HttpRule_Post)(nil),
(*HttpRule_Delete)(nil),
(*HttpRule_Patch)(nil),
(*HttpRule_Custom)(nil),
}
}
// CustomHttpPattern .
type CustomHttpPattern struct {
Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
func (*CustomHttpPattern) ProtoMessage() {}
func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{2}
}
func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b)
}
func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic)
}
func (m *CustomHttpPattern) XXX_Merge(src proto.Message) {
xxx_messageInfo_CustomHttpPattern.Merge(m, src)
}
func (m *CustomHttpPattern) XXX_Size() int {
return xxx_messageInfo_CustomHttpPattern.Size(m)
}
func (m *CustomHttpPattern) XXX_DiscardUnknown() {
xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m)
}
var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo
func (m *CustomHttpPattern) GetKind() string {
if m != nil {
return m.Kind
}
return ""
}
func (m *CustomHttpPattern) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func init() {
proto.RegisterType((*Http)(nil), "gogoapi.Http")
proto.RegisterType((*HttpRule)(nil), "gogoapi.HttpRule")
proto.RegisterType((*CustomHttpPattern)(nil), "gogoapi.CustomHttpPattern")
}
func init() { proto.RegisterFile("gogo_fast_api.proto", fileDescriptor_08ec8be04814436d) }
var fileDescriptor_08ec8be04814436d = []byte{
// 438 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x4f, 0x8f, 0xd3, 0x3c,
0x10, 0x87, 0xdf, 0x6c, 0xff, 0xa5, 0xee, 0xbe, 0x87, 0xf5, 0xae, 0x90, 0xa9, 0x90, 0xa8, 0xca,
0x81, 0x9e, 0x12, 0x69, 0xe1, 0x82, 0x7a, 0x22, 0xec, 0x4a, 0x7b, 0x44, 0x39, 0x72, 0x89, 0x26,
0xf1, 0x34, 0xb5, 0x9a, 0xda, 0x56, 0x3c, 0x41, 0xe4, 0x13, 0xf1, 0x85, 0xf8, 0x40, 0x28, 0x8e,
0x5b, 0x0e, 0x88, 0xdb, 0xcc, 0x6f, 0x9e, 0x24, 0x4f, 0xc6, 0x66, 0xf7, 0xb5, 0xa9, 0x4d, 0x71,
0x00, 0x47, 0x05, 0x58, 0x95, 0xd8, 0xd6, 0x90, 0xe1, 0x8b, 0x21, 0x04, 0xab, 0xd6, 0xaf, 0x6b,
0x63, 0xea, 0x06, 0x53, 0x1f, 0x97, 0xdd, 0x21, 0x05, 0xdd, 0x8f, 0xcc, 0xf6, 0x67, 0xc4, 0xa6,
0x2f, 0x44, 0x96, 0xbf, 0x67, 0xb3, 0xb6, 0x6b, 0xd0, 0x89, 0x68, 0x33, 0xd9, 0xad, 0x1e, 0xef,
0x92, 0xf0, 0x70, 0x32, 0x4c, 0xf3, 0xae, 0xc1, 0x7c, 0x9c, 0xf3, 0x67, 0xf6, 0xf6, 0xd0, 0x35,
0x4d, 0x5f, 0x48, 0xac, 0x8c, 0xc4, 0xa2, 0x45, 0x87, 0xed, 0x77, 0x94, 0x05, 0xfe, 0xb0, 0xa0,
0x9d, 0x32, 0x5a, 0xdc, 0x6c, 0xa2, 0x5d, 0x9c, 0xbf, 0xf1, 0xd8, 0x93, 0xa7, 0xf2, 0x00, 0x3d,
0x5f, 0x18, 0x9e, 0xb2, 0x18, 0x74, 0x5f, 0x48, 0x20, 0x10, 0x93, 0x4d, 0xb4, 0x5b, 0x3d, 0x3e,
0x24, 0xa3, 0x66, 0x72, 0xd1, 0x4c, 0x3e, 0xeb, 0x3e, 0x5f, 0x80, 0xee, 0x9f, 0x80, 0x60, 0xfb,
0xeb, 0x86, 0xc5, 0x17, 0x17, 0xbe, 0x66, 0xb1, 0xc3, 0x06, 0x2b, 0x32, 0xad, 0x88, 0x36, 0xd1,
0x6e, 0x99, 0x5f, 0x7b, 0xce, 0xd9, 0xa4, 0x46, 0xf2, 0x12, 0xcb, 0x97, 0xff, 0xf2, 0xa1, 0x19,
0x32, 0xdb, 0x91, 0xff, 0x90, 0xcf, 0x6c, 0x47, 0xfc, 0x81, 0x4d, 0xad, 0x71, 0x24, 0xa6, 0x21,
0xf4, 0x1d, 0x17, 0x6c, 0x2e, 0xb1, 0x41, 0x42, 0x31, 0x0b, 0x79, 0xe8, 0xf9, 0x2b, 0x36, 0xb3,
0x40, 0xd5, 0x51, 0xcc, 0xc3, 0x60, 0x6c, 0xf9, 0x47, 0x36, 0xaf, 0x3a, 0x47, 0xe6, 0x2c, 0x62,
0xff, 0x1f, 0xeb, 0xeb, 0xea, 0xbe, 0xf8, 0x78, 0x90, 0xfe, 0x0a, 0x44, 0xd8, 0xea, 0xe1, 0x6d,
0x23, 0xcb, 0x39, 0x9b, 0x96, 0x46, 0xf6, 0x62, 0xe1, 0xed, 0x7d, 0xcd, 0xdf, 0xb1, 0xff, 0x5b,
0x74, 0xd6, 0x68, 0x87, 0x85, 0x1f, 0xde, 0xfa, 0xe1, 0xed, 0x25, 0xcc, 0x06, 0x28, 0x63, 0xf7,
0x20, 0xa5, 0x22, 0x65, 0x34, 0x34, 0x45, 0xa9, 0xb4, 0x54, 0xba, 0x76, 0x62, 0xf5, 0xaf, 0x63,
0xe3, 0x7f, 0xe8, 0x2c, 0xc0, 0xd9, 0x92, 0x2d, 0xec, 0x68, 0xb4, 0xdd, 0xb3, 0xbb, 0xbf, 0x34,
0x07, 0xb9, 0x93, 0xd2, 0x32, 0xac, 0xd6, 0xd7, 0x43, 0x66, 0x81, 0x8e, 0xe3, 0x5e, 0x73, 0x5f,
0x67, 0xfb, 0x6f, 0x9f, 0x6a, 0x45, 0xc7, 0xae, 0x4c, 0x2a, 0x73, 0x4e, 0xd5, 0x09, 0x54, 0xdd,
0x81, 0xae, 0xaf, 0x17, 0xad, 0x28, 0x51, 0x57, 0xc7, 0x33, 0xb4, 0x27, 0x97, 0xfa, 0x1b, 0x0a,
0x56, 0xed, 0x83, 0x61, 0x39, 0xf7, 0xd4, 0x87, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa4, 0xe6,
0xc4, 0x1c, 0xbc, 0x02, 0x00, 0x00,
}
| GetSelector | identifier_name |
app.js | /**
* Created by yj on 16/4/29.
*/
/**
* 最小堆
*/
class MinHeap{
constructor(cmp){
this.cmp = cmp;
this.queue = []
}
push(val){
this.queue.push(val);
}
peek(){
if(this.empty()){
throw new Error("Can't peek an empty heap");
}
return this.queue.reduce((min,val) =>{
if(this.cmp(min,val) < 0 ){
return min;
}else{
return val;
}
})
}
pop(){
if(this.empty()){
throw new Error("Can't pop an empty heap");
}
let minIndex = 0;
for(let i=1;i<this.queue.length;i++){
if(this.cmp(this.queue[i],this.queue[minIndex])<0){
minIndex = i;
}
}
return this.queue.splice(minIndex,1)[0];
}
empty(){
return this.queue.length <=0;
}
dump(){
console.log('queue:',this.queue);
}
}
/**
* 工具函数
*/
class Utils {
/**
* 打乱数组,得到一组随机数
* @param a
*/
static shuffle(a) {
for (let i = 1; i < a.length; i++) {
let id = Math.floor(Math.random() * i);
[a[id], a[i]] = [a[i], a[id]];
}
}
/**
* 深拷贝
* @param dst
* @param rest
* @returns {*|{}}
*/
static deepCopy(dst, ...rest) {
dst = dst || {};
for (let src of rest) {
if (!src) return;
for (let key in src) {
if (src.hasOwnProperty(key)) {
if (typeof src[key] === 'object') {
dst[key] = Utils.deepCopy(dst[key], src[key]);
} else {
dst[key] = src[key];
}
}
}
}
return dst;
}
}
class Event{
constructor(){
this.listeners = {}
}
on(type,handler){
(this.listeners[type] || (this.listeners[type] = [])).push(handler);
return this;
}
fire(type,data,context){
let handlers = this.listeners[type];
for(let handler of handlers){
handler.apply(context,data)
}
return this;
}
off(type,handler){
if(type == void 0){
this.listeners = {}
return this
}
if(handler == void 0){
delete this.listeners[type]
return this
}
let handlers = this.listeners[type] || [];
let id = handlers.indexOf(handler);
if(id != -1){
handlers.splice(id,1);
return this;
}
if(handlers.length == 0){
delete this.listeners[type]
}
return this;
}
}
/**
* 游戏地图
*/
class GridMap {
constructor(selector) {
this.$el = document.querySelector(selector);
}
create(rows, cols) {
let html = '';
| (let i = 0; i < rows; i++) {
html += '<tr>';
for (let j = 0; j < cols; j++) {
html += '<td class="map-box" data-type="empty">';
}
html += '</tr>';
}
this.rows = rows;
this.cols = cols;
this.$el.innerHTML = html;
this.$boxes = this.$el.getElementsByTagName('td');
}
clear() {
for (let i = 0; i < this.rows; i++) {
for (let j = 0; j < this.cols; j++) {
this.type([i, j], 'empty');
}
}
}
type([x,y], type) {
if (type == void 0) {
return this.$boxes[y * this.cols + x].dataset.type;
} else {
this.$boxes[y * this.cols + x].dataset.type = type;
}
}
}
class PathFinder {
constructor(map,cfg){
this.map = map;
this.rows = map.rows;
this.cols = map.cols;
this.search_type = cfg.search_type;
this.path = [[0,1],[-1,0],[0,-1],[1,0]]
}
setSearchType(type) {
this.search_type = type;
}
/**
* 返回路径
* @param src
* @param dst
* @returns {*|Array}
*/
find_path(src, dst) {
switch (this.search_type) {
case 'dfs':
return this.dfs(src, dst, {});
case 'bfs':
return this.bfs(src, dst, {});
case 'astar':
return this.astar(src, dst,{});
}
}
/**
* 判断坐标是否在地图内
* @param i
* @param j
* @returns {boolean}
*/
isValid(i, j) {
return i >= 0 && i < this.rows && j >= 0 && j < this.cols;
}
/**
* 深度优先搜索
* @param src
* @param dst
* @param visited
* @returns {*}
*/
dfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y};
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true;
var path = this.dfs(next, dst, visited);
if (path) {
path.unshift(next);
return path;
}
}
}
}
}
/**
* 广度优先搜索
*/
bfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
let queue = [];
queue.push(src);
let path = []
while (queue.length > 0) {
src = queue.shift();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y}
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true
queue.push(next);
}
}
}
}
}
/**
* A* 寻路算法
* @param src
* @param dst
* @param visited
* @returns {*[]}
*/
astar(src,dst,visited){
if(src.x == dst.x && src.y == dst.y){
return [dst]
}
let dist = (s1,s2)=>{
return Math.abs(s1.x - s2.x) + Math.abs(s1.y - s2.y);
};
let hashPos = s => s.x + '-' + s.y;
let cmp = (el1,el2) => dist(el1,dst) - dist(el2,dst);
let heap = new MinHeap(cmp);
heap.push(src);
let path = []
while(!heap.empty()){
src = heap.pop();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for(let [i,j] of this.path){
let x = src.x + i;
let y = src.y + j;
let next = {x,y};
if(this.isValid(x,y) && !visited[x + '-' + y] && this.map.type([x,y]) == 'empty'){
visited[x + '-' + y] = true;
heap.push(next);
}
}
}
}
}
class Character{
constructor(selector){
if(typeof selector == 'string'){
this.$el = document.querySelector(selector)
}else if(selector.nodeType){
this.$el = selector
}else{
this.$el = null
}
}
setPos([x,y]){
this.x = x;
this.y = y;
this.$el.style.left = x * 20 + 'px';
this.$el.style.top = y * 20 + 'px';
}
getPos() {
return {x: this.x, y: this.y};
}
}
/**
* 玩家类
*/
class Player extends Character{
constructor(selector) {
super(selector)
}
/**
* 异步移动,实现动画效果
* @param pos
*/
goto(pos) {
this.x = pos.x;
this.y = pos.y;
this.$el.style.left = this.x * 20 + 'px';
this.$el.style.top = this.y * 20 + 'px';
}
}
class Enemy extends Character{
}
class Target extends Character{
}
/**
* 主游戏类
*/
class Game extends Event{
constructor(cfg) {
super()
//初始化配置
this.cfg = {
rows: 20,
cols: 20,
search_type: 'dfs',
duration: 100
}
Object.assign(this.cfg,cfg);
//初始化数据
this.map = new GridMap('#kingsman-map');
this.player = new Player('#kingsman-player');
this.target = new Target('#kingsman-target');
this.map.create(this.cfg.rows, this.cfg.cols);
this.pathFinder = new PathFinder(this.map,{
search_type: this.cfg.search_type
});
//重置游戏状态
this.reset();
}
/**
* 设置玩家和目标
*/
setPlayerAndTarget() {
let positions = [];
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (this.map.type([i, j]) == 'empty') {
positions.push([i, j]);
}
}
}
let len = positions.length;
if (len < 2) {
throw new Error('map is full');
}
Utils.shuffle(positions);
let player = positions[0];
let target = positions[1];
this.player.setPos(player);
this.target.setPos(target);
}
setEnemy(){
}
/**
* 随机的修建障碍物 //todo 建筑迷宫算法
*/
randBuild() {
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (Math.random() > 0.9) {
this.map.type([i, j], 'wall');
}
}
}
}
/**
* 设置间隔时间
* @param duration
*/
setDuration(duration) {
this.cfg.duration = duration;
this.player.$el.style.transitionDuration = duration + 'ms';
}
/**
* 设置地图尺寸
* @param n
*/
setSize(n) {
this.cfg.rows = this.cfg.cols = n;
this.map.create(this.cfg.cols, this.cfg.rows);
this.randBuild();
}
/**
* 设置寻路算法
* @param search_type
*/
setSearchType(search_type) {
this.pathFinder.setSearchType(search_type);
}
move(pos) {
this.player.goto(pos);
}
/**
* 寻路
* @param target
*/
goto(dst) {
//自动寻路
if (dst == void 0) {
dst = this.target.getPos();
}
let player = this.player.getPos();
let path = this.find_path(player, dst);
for (let next of path) {
this.run_async(this.move, [next])
.catch(err =>{
console.error(err)
})
}
let target = this.target.getPos()
if(dst.x == target.x && dst.y == target.y) {
this.run_async(function(){
this.fire('gameover'); // 游戏结束加载下一关卡
});
}
}
/**
* 寻找路径
* @param src
* @param target
* @returns {*|Array}
*/
find_path(src, target) {
return this.pathFinder.find_path(src, target);
}
/**
* 运行异步函数
* @param handler
* @param args
* @returns {Promise}
*/
run_async(handler, args) {
let promise = new Promise((resolve, reject)=> {
this.queue.push({
handler, args,
callback: function (err, data) {
if (err) {
reject(err);
} else {
resolve(data);
}
}
});
});
if (!this.running) {
this.taskloop();
}
return promise;
}
/**
* 主任务循环
*/
taskloop() {
this.running = true;
if(this.isPaused) return;
let task = this.queue.shift();
if (task) {
try {
let ret = task.handler.apply(this, task.args);
task.callback(null, ret); //成功回调
setTimeout(this.taskloop.bind(this), this.cfg.duration);
} catch (err) {
this.running = false;
this.queue = [];
task.callback(err); //失败回调
}
} else {
this.running = false;
}
}
/**
* 切换到下一关
*/
next_level() {
this.reset();
}
/**
*
* 重置游戏状态
*/
reset() {
this.map.clear();
this.randBuild();
this.setPlayerAndTarget();
this.queue = [];
this.running = false;
this.isPaused = false;
}
/**
* 暂停寻路动画
*/
pause(){
this.isPaused = true
}
/**
* 恢复寻路动画
*/
restart(){
this.isPaused = false
if(this.running){
this.taskloop();
}
}
}
/**
* 控制类
*/
class Application extends Event{
constructor() {
super();
this.$controls = document.getElementById('kingsman-control');
this.$duration = document.getElementById('duration');
this.$search_type = document.getElementById('search_type');
this.$size = document.getElementById('size');
this.$map = document.getElementById('kingsman-map');
this.$bg = document.getElementById('kingsman-bg');
this.$pause_restart = document.getElementById('pause_restart');
this.$game_level = document.getElementById('kingsman-game-state-level')
this.level = 1;
let cfg = this.readConfig();
this.game = new Game(cfg);
this.showGameState()
this.bindUI();
}
/**
* 初始化配置
*/
readConfig(){
let duration = this.$duration.value;
let size = this.$size.value;
let search_type = this.$search_type.value;
return {duration,size,search_type}
}
showGameState(){
this.$game_level.innerHTML = `第${this.level}关`;
}
/**
* 进入下一关
*/
next_level(){
this.level++;
this.showGameState();
this.game.next_level();
}
/**
* 绑定事件
*/
bindUI() {
this.game.on('gameover',this.next_level.bind(this))
this.$controls.addEventListener('change', e => {
switch (e.target.id) {
case 'duration':
let duration = this.$duration.value;
this.game.setDuration(duration);
break;
case 'search_type':
let search_type = this.$search_type.value;
this.game.setSearchType(search_type)
break;
case 'size':
let size = this.$size.value;
this.game.setSize(size);
break;
default:
break;
}
});
this.$controls.addEventListener('click', e => {
switch (e.target.id) {
case 'goto_target':
this.game.goto(); //自动寻路
break;
case 'next_level':
this.next_level();
break;
case 'pause_restart':
let state = e.target.dataset.type
if(state == 'pause'){
e.target.dataset.type = 'restart';
e.target.textContent = '继续';
this.game.pause()
}else{
e.target.dataset.type= 'pause'
e.target.textContent = '暂停'
this.game.restart()
}
}
});
this.$bg.addEventListener('click', e=> {
let x,y;
if (e.target.nodeName.toLowerCase() === 'td') {
let cell = e.target;
x = cell.cellIndex;
y = cell.parentNode.rowIndex;
}else if(e.target.id == 'kingsman-target') {
let pos = this.game.target.getPos()
x = pos.x;
y = pos.y;
}
let target = {x, y};
this.game.goto(target);//手动寻路
});
}
}
var app = new Application();
| for | identifier_name |
app.js | /**
* Created by yj on 16/4/29.
*/
/**
* 最小堆
*/
class MinHeap{
constructor(cmp){
this.cmp = cmp;
this.queue = []
}
push(val){
this.queue.push(val);
}
peek(){
if(this.empty()){
throw new Error("Can't peek an empty heap");
}
return this.queue.reduce((min,val) =>{
if(this.cmp(min,val) < 0 ){
return min;
}else{
return val;
}
})
}
pop(){
if(this.empty()){
throw new Error("Can't pop an empty heap");
}
let minIndex = 0;
for(let i=1;i<this.queue.length;i++){
if(this.cmp(this.queue[i],this.queue[minIndex])<0){
minIndex = i;
}
}
return this.queue.splice(minIndex,1)[0];
}
empty(){
return this.queue.length <=0;
}
dump(){
console.log('queue:',this.queue);
}
}
/**
* 工具函数
*/
class Utils {
/**
* 打乱数组,得到一组随机数
* @param a
*/
static shuffle(a) {
for (let i = 1; i < a.length; i++) {
let id = Math.floor(Math.random() * i);
[a[id], a[i]] = [a[i], a[id]];
}
}
/**
* 深拷贝
* @param dst
* @param rest
* @returns {*|{}}
*/
static deepCopy(dst, ...rest) {
dst = dst || {};
for (let src of rest) {
if (!src) return;
for (let key in src) {
if (src.hasOwnProperty(key)) {
if (typeof src[key] === 'object') {
dst[key] = Utils.deepCopy(dst[key], src[key]);
} else {
dst[key] = src[key];
}
}
}
}
return dst;
}
}
class Event{
constructor(){
this.listeners = {}
}
on(type,handler){
(this.listeners[type] || (this.listeners[type] = [])).push(handler);
return this;
}
fire(type,data,context){
let handlers = this.listeners[type];
for(let handler of handlers){
handler.apply(context,data)
}
return this;
}
off(type,handler){
if(type == void 0){
this.listeners = {}
return this
}
if(handler == void 0){
delete this.listeners[type]
return this
}
let handlers = this.listeners[type] || [];
let id = handlers.indexOf(handler);
if(id != -1){
handlers.splice(id,1);
return this;
}
if(handlers.length == 0){
delete this.listeners[type]
}
return this;
}
}
/**
* 游戏地图
*/
class GridMap {
constructor(selector) {
this.$el = document.querySelector(selector);
}
create(rows, cols) {
let html = '';
for (let i = 0; i < rows; i++) {
html += '<tr>';
for (let j = 0; j < cols; j++) {
html += '<td class="map-box" data-type="empty">';
}
html += '</tr>';
}
this.rows = rows;
this.cols = cols;
this.$el.innerHTML = html;
this.$boxes = this.$el.getElementsByTagName('td');
}
clear() {
for (let i = 0; i < this.rows; i++) {
for (let j = 0; j < this.cols; j++) {
this.type([i, j], 'empty');
}
}
}
type([x,y], type) {
if (type == void 0) {
return this.$boxes[y * this.cols + x].dataset.type;
} else {
this.$boxes[y * this.cols + x].dataset.type = type;
}
}
}
class PathFinder {
constructor(map,cfg){
this.map = map;
this.rows = map.rows;
this.cols = map.cols;
this.search_type = cfg.search_type;
this.path = [[0,1],[-1,0],[0,-1],[1,0]]
}
setSearchType(type) {
this.search_type = type;
}
/**
* 返回路径
* @param src
* @param dst
* @returns {*|Array}
*/
find_path(src, dst) {
switch (this.search_type) {
case 'dfs':
return this.dfs(src, dst, {});
case 'bfs':
return this.bfs(src, dst, {});
case 'astar':
return this.astar(src, dst,{});
}
}
/**
* 判断坐标是否在地图内
* @param i
* @param j
* @returns {boolean}
*/
isValid(i, j) {
return i >= 0 && i < this.rows && j >= 0 && j < this.cols;
}
/**
* 深度优先搜索
* @param src
* @param dst
* @param visited
* @returns {*}
*/
dfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y};
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true;
var path = this.dfs(next, dst, visited);
if (path) {
path.unshift(next);
return path;
}
}
}
}
}
/**
* 广度优先搜索
*/
bfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
let queue = [];
queue.push(src);
let path = []
while (queue.length > 0) {
src = queue.shift();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y}
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true
queue.push(next);
}
}
}
}
}
/**
* A* 寻路算法
* @param src
* @param dst
* @param visited
* @returns {*[]}
*/
astar(src,dst,visited){
if(src.x == dst.x && src.y == dst.y){
return [dst]
}
let dist = (s1,s2)=>{
return Math.abs(s1.x - s2.x) + Math.abs(s1.y - s2.y);
};
let hashPos = s => s.x + '-' + s.y;
let cmp = (el1,el2) => dist(el1,dst) - dist(el2,dst);
let heap = new MinHeap(cmp);
heap.push(src);
let path = []
while(!heap.empty()){
src = heap.pop();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for(let [i,j] of this.path){
let x = src.x + i;
let y = src.y + j;
let next = {x,y};
if(this.isValid(x,y) && !visited[x + '-' + y] && this.map.type([x,y]) == 'empty'){
visited[x + '-' + y] = true;
heap.push(next);
}
}
}
}
}
class Character{
constructor(selector){
if(typeof selector == 'string'){
this.$el = document.querySelector(selector)
}else if(selector.nodeType){
this.$el = selector
}else{
this.$el = null
}
}
setPos([x,y]){
this.x = x;
this.y = y;
|
this.$el.style.top = y * 20 + 'px';
}
getPos() {
return {x: this.x, y: this.y};
}
}
/**
* 玩家类
*/
class Player extends Character{
constructor(selector) {
super(selector)
}
/**
* 异步移动,实现动画效果
* @param pos
*/
goto(pos) {
this.x = pos.x;
this.y = pos.y;
this.$el.style.left = this.x * 20 + 'px';
this.$el.style.top = this.y * 20 + 'px';
}
}
class Enemy extends Character{
}
class Target extends Character{
}
/**
* 主游戏类
*/
class Game extends Event{
constructor(cfg) {
super()
//初始化配置
this.cfg = {
rows: 20,
cols: 20,
search_type: 'dfs',
duration: 100
}
Object.assign(this.cfg,cfg);
//初始化数据
this.map = new GridMap('#kingsman-map');
this.player = new Player('#kingsman-player');
this.target = new Target('#kingsman-target');
this.map.create(this.cfg.rows, this.cfg.cols);
this.pathFinder = new PathFinder(this.map,{
search_type: this.cfg.search_type
});
//重置游戏状态
this.reset();
}
/**
* 设置玩家和目标
*/
setPlayerAndTarget() {
let positions = [];
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (this.map.type([i, j]) == 'empty') {
positions.push([i, j]);
}
}
}
let len = positions.length;
if (len < 2) {
throw new Error('map is full');
}
Utils.shuffle(positions);
let player = positions[0];
let target = positions[1];
this.player.setPos(player);
this.target.setPos(target);
}
setEnemy(){
}
/**
* 随机的修建障碍物 //todo 建筑迷宫算法
*/
randBuild() {
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (Math.random() > 0.9) {
this.map.type([i, j], 'wall');
}
}
}
}
/**
* 设置间隔时间
* @param duration
*/
setDuration(duration) {
this.cfg.duration = duration;
this.player.$el.style.transitionDuration = duration + 'ms';
}
/**
* 设置地图尺寸
* @param n
*/
setSize(n) {
this.cfg.rows = this.cfg.cols = n;
this.map.create(this.cfg.cols, this.cfg.rows);
this.randBuild();
}
/**
* 设置寻路算法
* @param search_type
*/
setSearchType(search_type) {
this.pathFinder.setSearchType(search_type);
}
move(pos) {
this.player.goto(pos);
}
/**
* 寻路
* @param target
*/
goto(dst) {
//自动寻路
if (dst == void 0) {
dst = this.target.getPos();
}
let player = this.player.getPos();
let path = this.find_path(player, dst);
for (let next of path) {
this.run_async(this.move, [next])
.catch(err =>{
console.error(err)
})
}
let target = this.target.getPos()
if(dst.x == target.x && dst.y == target.y) {
this.run_async(function(){
this.fire('gameover'); // 游戏结束加载下一关卡
});
}
}
/**
* 寻找路径
* @param src
* @param target
* @returns {*|Array}
*/
find_path(src, target) {
return this.pathFinder.find_path(src, target);
}
/**
* 运行异步函数
* @param handler
* @param args
* @returns {Promise}
*/
run_async(handler, args) {
let promise = new Promise((resolve, reject)=> {
this.queue.push({
handler, args,
callback: function (err, data) {
if (err) {
reject(err);
} else {
resolve(data);
}
}
});
});
if (!this.running) {
this.taskloop();
}
return promise;
}
/**
* 主任务循环
*/
taskloop() {
this.running = true;
if(this.isPaused) return;
let task = this.queue.shift();
if (task) {
try {
let ret = task.handler.apply(this, task.args);
task.callback(null, ret); //成功回调
setTimeout(this.taskloop.bind(this), this.cfg.duration);
} catch (err) {
this.running = false;
this.queue = [];
task.callback(err); //失败回调
}
} else {
this.running = false;
}
}
/**
* 切换到下一关
*/
next_level() {
this.reset();
}
/**
*
* 重置游戏状态
*/
reset() {
this.map.clear();
this.randBuild();
this.setPlayerAndTarget();
this.queue = [];
this.running = false;
this.isPaused = false;
}
/**
* 暂停寻路动画
*/
pause(){
this.isPaused = true
}
/**
* 恢复寻路动画
*/
restart(){
this.isPaused = false
if(this.running){
this.taskloop();
}
}
}
/**
* 控制类
*/
class Application extends Event{
constructor() {
super();
this.$controls = document.getElementById('kingsman-control');
this.$duration = document.getElementById('duration');
this.$search_type = document.getElementById('search_type');
this.$size = document.getElementById('size');
this.$map = document.getElementById('kingsman-map');
this.$bg = document.getElementById('kingsman-bg');
this.$pause_restart = document.getElementById('pause_restart');
this.$game_level = document.getElementById('kingsman-game-state-level')
this.level = 1;
let cfg = this.readConfig();
this.game = new Game(cfg);
this.showGameState()
this.bindUI();
}
/**
* 初始化配置
*/
readConfig(){
let duration = this.$duration.value;
let size = this.$size.value;
let search_type = this.$search_type.value;
return {duration,size,search_type}
}
showGameState(){
this.$game_level.innerHTML = `第${this.level}关`;
}
/**
* 进入下一关
*/
next_level(){
this.level++;
this.showGameState();
this.game.next_level();
}
/**
* 绑定事件
*/
bindUI() {
this.game.on('gameover',this.next_level.bind(this))
this.$controls.addEventListener('change', e => {
switch (e.target.id) {
case 'duration':
let duration = this.$duration.value;
this.game.setDuration(duration);
break;
case 'search_type':
let search_type = this.$search_type.value;
this.game.setSearchType(search_type)
break;
case 'size':
let size = this.$size.value;
this.game.setSize(size);
break;
default:
break;
}
});
this.$controls.addEventListener('click', e => {
switch (e.target.id) {
case 'goto_target':
this.game.goto(); //自动寻路
break;
case 'next_level':
this.next_level();
break;
case 'pause_restart':
let state = e.target.dataset.type
if(state == 'pause'){
e.target.dataset.type = 'restart';
e.target.textContent = '继续';
this.game.pause()
}else{
e.target.dataset.type= 'pause'
e.target.textContent = '暂停'
this.game.restart()
}
}
});
this.$bg.addEventListener('click', e=> {
let x,y;
if (e.target.nodeName.toLowerCase() === 'td') {
let cell = e.target;
x = cell.cellIndex;
y = cell.parentNode.rowIndex;
}else if(e.target.id == 'kingsman-target') {
let pos = this.game.target.getPos()
x = pos.x;
y = pos.y;
}
let target = {x, y};
this.game.goto(target);//手动寻路
});
}
}
var app = new Application();
| this.$el.style.left = x * 20 + 'px'; | conditional_block |
app.js | /**
* Created by yj on 16/4/29.
*/
/**
* 最小堆
*/
class MinHeap{
constructor(cmp){
this.cmp = cmp;
this.queue = []
}
push(val){
this.queue.push(val);
}
peek(){
if(this.empty()){
throw new Error("Can't peek an empty heap");
}
return this.queue.reduce((min,val) =>{
if(this.cmp(min,val) < 0 ){
return min;
}else{
return val;
}
})
}
pop(){
if(this.empty()){
throw new Error("Can't pop an empty heap");
}
let minIndex = 0;
for(let i=1;i<this.queue.length;i++){
if(this.cmp(this.queue[i],this.queue[minIndex])<0){
minIndex = i;
}
}
return this.queue.splice(minIndex,1)[0];
}
empty(){
return this.queue.length <=0;
}
dump(){
console.log('queue:',this.queue);
}
}
/**
* 工具函数
*/
class Utils {
/**
* 打乱数组,得到一组随机数
* @param a
*/
static shuffle(a) {
for (let i = 1; i < a.length; i++) {
let id = Math.floor(Math.random() * i);
[a[id], a[i]] = [a[i], a[id]];
}
}
/**
* 深拷贝
* @param dst
* @param rest
* @returns {*|{}}
*/
static deepCopy(dst, ...rest) {
dst = dst || {};
for (let src of rest) {
if (!src) return;
for (let key in src) {
if (src.hasOwnProperty(key)) {
if (typeof src[key] === 'object') {
dst[key] = Utils.deepCopy(dst[key], src[key]);
} else {
dst[key] = src[key];
}
}
}
}
return dst;
}
}
class Event{
constructor(){
this.listeners = {}
}
on(type,handler){
(this.listeners[type] || (this.listeners[type] = [])).push(handler);
return this;
}
fire(type,data,context){
let handlers = this.listeners[type];
for(let handler of handlers){
handler.apply(context,data)
}
return this;
}
off(type,handler){
if(type == void 0){
this.listeners = {}
return this
}
if(handler == void 0){
delete this.listeners[type]
return this
}
let handlers = this.listeners[type] || [];
let id = handlers.indexOf(handler);
if(id != -1){
handlers.splice(id,1);
return this;
}
if(handlers.length == 0){
delete this.listeners[type]
}
return this;
}
}
/**
* 游戏地图
*/
class GridMap {
constructor(selector) {
this.$el = document.querySelector(selector);
}
create(rows, cols) {
let html = '';
for (let i = 0; i < rows; i++) {
html += '<tr>';
for (let j = 0; j < cols; j++) {
html += '<td class="map-box" data-type="empty">';
}
html += '</tr>';
}
this.rows = rows;
this.cols = cols;
this.$el.innerHTML = html;
this.$boxes = this.$el.getElementsByTagName('td');
}
clear() {
for (let i = 0; i < this.rows; i++) {
for (let j = 0; j < this.cols; j++) {
this.type([i, j], 'empty');
}
}
}
type([x,y], type) {
if (type == void 0) {
return this.$boxes[y * this.cols + x].dataset.type;
} else {
this.$boxes[y * this.cols + x].dataset.type = type;
}
}
}
class PathFinder {
constructor(map,cfg){
this.map = map;
this.rows = map.rows;
this.cols = map.cols;
this.search_type = cfg.search_type;
this.path = [[0,1],[-1,0],[0,-1],[1,0]]
}
setSearchType(type) {
this.search_type = type;
}
/**
* 返回路径
* @param src
* @param dst
* @returns {*|Array}
*/
find_path(src, dst) {
switch (this.search_type) {
case 'dfs':
return this.dfs(src, dst, {});
case 'bfs':
return this.bfs(src, dst, {});
case 'astar':
return this.astar(src, dst,{});
}
}
/**
* 判断坐标是否在地图内
* @param i
* @param j
* @returns {boolean}
*/
isValid(i, j) {
return i >= 0 && i < this.rows && j >= 0 && j < this.cols;
}
/**
* 深度优先搜索
* @param src
* @param dst
* @param visited
* @returns {*}
*/
dfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y};
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true;
var path = this.dfs(next, dst, visited);
if (path) {
path.unshift(next);
return path;
}
}
}
}
}
/**
* 广度优先搜索
*/
bfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
let queue = [];
queue.push(src);
let path = []
while (queue.length > 0) {
src = queue.shift();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y}
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true
queue.push(next);
}
}
}
}
}
/**
* A* 寻路算法
* @param src
* @param dst
* @param visited
* @returns {*[]}
*/
astar(src,dst,visited){
if(src.x == dst.x && src.y == dst.y){
return [dst]
}
let dist = (s1,s2)=>{
return Math.abs(s1.x - s2.x) + Math.abs(s1.y - s2.y);
};
let hashPos = s => s.x + '-' + s.y;
let cmp = (el1,el2) => dist(el1,dst) - dist(el2,dst);
let heap = new MinHeap(cmp);
heap.push(src);
let path = []
while(!heap.empty()){
src = heap.pop();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for(let [i,j] of this.path){
let x = src.x + i;
let y = src.y + j;
let next = {x,y};
if(this.isValid(x,y) && !visited[x + '-' + y] && this.map.type([x,y]) == 'empty'){
visited[x + '-' + y] = true;
heap.push(next);
}
}
}
}
}
class Character{
constructor(selector){
if(typeof selector == 'string'){
this.$el = document.querySelector(selector)
}else if(selector.nodeType){
this.$el = selector
}else{
this.$el = null
}
}
setPos([x,y]){
this.x = x;
this.y = y;
this.$el.style.left = x * 20 + 'px';
this.$el.style.top = y * 20 + 'px';
}
getPos() {
return {x: this.x, y: this.y};
}
}
/**
* 玩家类
*/
class Player extends Character{
constructor(selector) {
super(selector)
}
/**
* 异步移动,实现动画效果
* @param pos
*/
goto(pos) {
this.x = pos.x;
this.y = pos.y;
this.$el.style.left = this.x * 20 + 'px';
this.$el.style.top = this.y * 20 + 'px';
}
}
class Enemy extends Character{
}
class Target extends Character{
}
/**
* 主游戏类
*/
class Game extends Event{
constructor(cfg) {
super()
//初始化配置
this.cfg = {
rows: 20,
cols: 20,
search_type: 'dfs',
duration: 100
}
Object.assign(this.cfg,cfg);
//初始化数据
this.map = new GridMap('#kingsman-map');
this.player = new Player('#kingsman-player');
this.target = new Target('#kingsman-target');
this.map.create(this.cfg.rows, this.cfg.cols);
this.pathFinder = new PathFinder(this.map,{
search_type: this.cfg.search_type
});
//重置游戏状态
this.reset();
}
/**
* 设置玩家和目标
*/
setPlayerAndTarget() {
let positions = [];
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (this.map.type([i, j]) == 'empty') {
positions.push([i, j]);
}
}
}
let len = positions.length;
if (len < 2) {
throw new Error('map is full');
}
Utils.shuffle(positions);
let player = positions[0];
let target = positions[1];
this.player.setPos(player);
this.target.setPos(target);
}
setEnemy(){
}
/**
* 随机的修建障碍物 //todo 建筑迷宫算法
*/
randBuild() {
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (Math.random() > 0.9) {
this.map.type([i, j], 'wall');
}
}
}
}
/**
* 设置间隔时间
* @param duration
*/
setDuration(duration) {
this.cfg.duration = duration;
this.player.$el.style.transitionDuration = duration + 'ms';
}
/**
* 设置地图尺寸
* @param n
*/
setSize(n) {
this.cfg.rows = this.cfg.cols = n;
this.map.create(this.cfg.cols, this.cfg.rows);
this.randBuild();
}
/**
* 设置寻路算法
* @param search_type
*/
setSearchType(search_type) {
this.pathFinder.setSearchType(search_type);
}
move(pos) {
this.player.goto(pos);
}
/**
* 寻路
* @param target
*/
goto(dst) {
//自动寻路
if (dst == void 0) {
dst = this.target.getPos();
}
let player = this.player.getPos();
let path = this.find_path(player, dst);
for (let next of path) {
this.run_async(this.move, | run_async(handler, args) {
let promise = new Promise((resolve, reject)=> {
this.queue.push({
handler, args,
callback: function (err, data) {
if (err) {
reject(err);
} else {
resolve(data);
}
}
});
});
if (!this.running) {
this.taskloop();
}
return promise;
}
/**
* 主任务循环
*/
taskloop() {
this.running = true;
if(this.isPaused) return;
let task = this.queue.shift();
if (task) {
try {
let ret = task.handler.apply(this, task.args);
task.callback(null, ret); //成功回调
setTimeout(this.taskloop.bind(this), this.cfg.duration);
} catch (err) {
this.running = false;
this.queue = [];
task.callback(err); //失败回调
}
} else {
this.running = false;
}
}
/**
* 切换到下一关
*/
next_level() {
this.reset();
}
/**
*
* 重置游戏状态
*/
reset() {
this.map.clear();
this.randBuild();
this.setPlayerAndTarget();
this.queue = [];
this.running = false;
this.isPaused = false;
}
/**
* 暂停寻路动画
*/
pause(){
this.isPaused = true
}
/**
* 恢复寻路动画
*/
restart(){
this.isPaused = false
if(this.running){
this.taskloop();
}
}
}
/**
* 控制类
*/
class Application extends Event{
constructor() {
super();
this.$controls = document.getElementById('kingsman-control');
this.$duration = document.getElementById('duration');
this.$search_type = document.getElementById('search_type');
this.$size = document.getElementById('size');
this.$map = document.getElementById('kingsman-map');
this.$bg = document.getElementById('kingsman-bg');
this.$pause_restart = document.getElementById('pause_restart');
this.$game_level = document.getElementById('kingsman-game-state-level')
this.level = 1;
let cfg = this.readConfig();
this.game = new Game(cfg);
this.showGameState()
this.bindUI();
}
/**
* 初始化配置
*/
readConfig(){
let duration = this.$duration.value;
let size = this.$size.value;
let search_type = this.$search_type.value;
return {duration,size,search_type}
}
showGameState(){
this.$game_level.innerHTML = `第${this.level}关`;
}
/**
* 进入下一关
*/
next_level(){
this.level++;
this.showGameState();
this.game.next_level();
}
/**
* 绑定事件
*/
bindUI() {
this.game.on('gameover',this.next_level.bind(this))
this.$controls.addEventListener('change', e => {
switch (e.target.id) {
case 'duration':
let duration = this.$duration.value;
this.game.setDuration(duration);
break;
case 'search_type':
let search_type = this.$search_type.value;
this.game.setSearchType(search_type)
break;
case 'size':
let size = this.$size.value;
this.game.setSize(size);
break;
default:
break;
}
});
this.$controls.addEventListener('click', e => {
switch (e.target.id) {
case 'goto_target':
this.game.goto(); //自动寻路
break;
case 'next_level':
this.next_level();
break;
case 'pause_restart':
let state = e.target.dataset.type
if(state == 'pause'){
e.target.dataset.type = 'restart';
e.target.textContent = '继续';
this.game.pause()
}else{
e.target.dataset.type= 'pause'
e.target.textContent = '暂停'
this.game.restart()
}
}
});
this.$bg.addEventListener('click', e=> {
let x,y;
if (e.target.nodeName.toLowerCase() === 'td') {
let cell = e.target;
x = cell.cellIndex;
y = cell.parentNode.rowIndex;
}else if(e.target.id == 'kingsman-target') {
let pos = this.game.target.getPos()
x = pos.x;
y = pos.y;
}
let target = {x, y};
this.game.goto(target);//手动寻路
});
}
}
var app = new Application();
| [next])
.catch(err =>{
console.error(err)
})
}
let target = this.target.getPos()
if(dst.x == target.x && dst.y == target.y) {
this.run_async(function(){
this.fire('gameover'); // 游戏结束加载下一关卡
});
}
}
/**
* 寻找路径
* @param src
* @param target
* @returns {*|Array}
*/
find_path(src, target) {
return this.pathFinder.find_path(src, target);
}
/**
* 运行异步函数
* @param handler
* @param args
* @returns {Promise}
*/
| identifier_body |
app.js | /**
* Created by yj on 16/4/29.
*/
/**
* 最小堆
*/
class MinHeap{
constructor(cmp){
this.cmp = cmp;
this.queue = []
}
push(val){
this.queue.push(val);
}
peek(){
if(this.empty()){
throw new Error("Can't peek an empty heap");
}
return this.queue.reduce((min,val) =>{
if(this.cmp(min,val) < 0 ){
return min;
}else{
return val;
}
})
}
pop(){
if(this.empty()){
throw new Error("Can't pop an empty heap");
}
let minIndex = 0;
for(let i=1;i<this.queue.length;i++){
if(this.cmp(this.queue[i],this.queue[minIndex])<0){
minIndex = i;
}
}
return this.queue.splice(minIndex,1)[0];
}
empty(){
return this.queue.length <=0;
}
dump(){
console.log('queue:',this.queue);
}
}
/**
* 工具函数
*/
class Utils {
/**
* 打乱数组,得到一组随机数
* @param a
*/
static shuffle(a) {
for (let i = 1; i < a.length; i++) {
let id = Math.floor(Math.random() * i);
[a[id], a[i]] = [a[i], a[id]];
}
}
/**
* 深拷贝
* @param dst
* @param rest
* @returns {*|{}}
*/
static deepCopy(dst, ...rest) {
dst = dst || {};
for (let src of rest) {
if (!src) return;
for (let key in src) {
if (src.hasOwnProperty(key)) {
if (typeof src[key] === 'object') {
dst[key] = Utils.deepCopy(dst[key], src[key]);
} else {
dst[key] = src[key];
}
}
}
}
return dst;
}
}
class Event{
constructor(){
this.listeners = {}
}
on(type,handler){
(this.listeners[type] || (this.listeners[type] = [])).push(handler);
return this;
}
fire(type,data,context){
let handlers = this.listeners[type];
for(let handler of handlers){
handler.apply(context,data)
}
return this;
}
off(type,handler){
if(type == void 0){
this.listeners = {}
return this
}
if(handler == void 0){
delete this.listeners[type]
return this
}
let handlers = this.listeners[type] || [];
let id = handlers.indexOf(handler);
if(id != -1){
handlers.splice(id,1);
return this;
}
if(handlers.length == 0){
delete this.listeners[type]
}
return this;
}
}
/**
* 游戏地图
*/
class GridMap {
constructor(selector) {
this.$el = document.querySelector(selector);
}
create(rows, cols) {
let html = '';
for (let i = 0; i < rows; i++) {
html += '<tr>';
for (let j = 0; j < cols; j++) {
html += '<td class="map-box" data-type="empty">';
}
html += '</tr>';
}
this.rows = rows;
this.cols = cols;
this.$el.innerHTML = html;
this.$boxes = this.$el.getElementsByTagName('td');
}
clear() {
for (let i = 0; i < this.rows; i++) {
for (let j = 0; j < this.cols; j++) {
this.type([i, j], 'empty');
}
}
}
type([x,y], type) {
if (type == void 0) {
return this.$boxes[y * this.cols + x].dataset.type;
} else {
this.$boxes[y * this.cols + x].dataset.type = type;
}
}
}
class PathFinder {
constructor(map,cfg){
this.map = map;
this.rows = map.rows;
this.cols = map.cols;
this.search_type = cfg.search_type;
this.path = [[0,1],[-1,0],[0,-1],[1,0]]
}
setSearchType(type) {
this.search_type = type;
}
/**
* 返回路径
* @param src
* @param dst
* @returns {*|Array}
*/
find_path(src, dst) {
switch (this.search_type) {
case 'dfs':
return this.dfs(src, dst, {});
case 'bfs':
return this.bfs(src, dst, {});
case 'astar':
return this.astar(src, dst,{});
}
}
/**
* 判断坐标是否在地图内
* @param i
* @param j
* @returns {boolean}
*/
isValid(i, j) {
return i >= 0 && i < this.rows && j >= 0 && j < this.cols;
}
/**
* 深度优先搜索
* @param src
* @param dst
* @param visited
* @returns {*}
*/
dfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y};
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true;
var path = this.dfs(next, dst, visited);
if (path) {
path.unshift(next);
return path;
}
}
}
}
}
/**
* 广度优先搜索
*/
bfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
let queue = [];
queue.push(src);
let path = []
while (queue.length > 0) {
src = queue.shift();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y}
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true
queue.push(next);
}
}
}
}
}
/**
* A* 寻路算法
* @param src
* @param dst
* @param visited
* @returns {*[]}
*/
astar(src,dst,visited){
if(src.x == dst.x && src.y == dst.y){
return [dst]
}
let dist = (s1,s2)=>{
return Math.abs(s1.x - s2.x) + Math.abs(s1.y - s2.y);
};
let hashPos = s => s.x + '-' + s.y;
let cmp = (el1,el2) => dist(el1,dst) - dist(el2,dst);
let heap = new MinHeap(cmp);
heap.push(src);
let path = []
while(!heap.empty()){
src = heap.pop();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for(let [i,j] of this.path){
let x = src.x + i;
let y = src.y + j;
let next = {x,y};
if(this.isValid(x,y) && !visited[x + '-' + y] && this.map.type([x,y]) == 'empty'){
visited[x + '-' + y] = true;
heap.push(next);
}
}
}
}
}
class Character{
constructor(selector){
if(typeof selector == 'string'){
this.$el = document.querySelector(selector)
}else if(selector.nodeType){
this.$el = selector
}else{
this.$el = null
}
}
setPos([x,y]){
this.x = x;
this.y = y;
this.$el.style.left = x * 20 + 'px';
this.$el.style.top = y * 20 + 'px';
}
getPos() {
return {x: this.x, y: this.y};
}
}
/**
* 玩家类
*/
class Player extends Character{
constructor(selector) {
super(selector)
}
/**
* 异步移动,实现动画效果
* @param pos
*/
goto(pos) {
this.x = pos.x;
this.y = pos.y;
this.$el.style.left = this.x * 20 + 'px';
this.$el.style.top = this.y * 20 + 'px';
}
}
class Enemy extends Character{
}
class Target extends Character{
}
/**
* 主游戏类
*/
class Game extends Event{
constructor(cfg) {
super()
//初始化配置
this.cfg = {
rows: 20,
cols: 20,
search_type: 'dfs',
duration: 100
}
Object.assign(this.cfg,cfg);
//初始化数据
this.map = new GridMap('#kingsman-map');
this.player = new Player('#kingsman-player');
this.target = new Target('#kingsman-target');
this.map.create(this.cfg.rows, this.cfg.cols);
this.pathFinder = new PathFinder(this.map,{
search_type: this.cfg.search_type
});
//重置游戏状态
this.reset();
}
/**
* 设置玩家和目标
*/
setPlayerAndTarget() {
let positions = [];
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (this.map.type([i, j]) == 'empty') {
positions.push([i, j]);
} | throw new Error('map is full');
}
Utils.shuffle(positions);
let player = positions[0];
let target = positions[1];
this.player.setPos(player);
this.target.setPos(target);
}
setEnemy(){
}
/**
* 随机的修建障碍物 //todo 建筑迷宫算法
*/
randBuild() {
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (Math.random() > 0.9) {
this.map.type([i, j], 'wall');
}
}
}
}
/**
* 设置间隔时间
* @param duration
*/
setDuration(duration) {
this.cfg.duration = duration;
this.player.$el.style.transitionDuration = duration + 'ms';
}
/**
* 设置地图尺寸
* @param n
*/
setSize(n) {
this.cfg.rows = this.cfg.cols = n;
this.map.create(this.cfg.cols, this.cfg.rows);
this.randBuild();
}
/**
* 设置寻路算法
* @param search_type
*/
setSearchType(search_type) {
this.pathFinder.setSearchType(search_type);
}
move(pos) {
this.player.goto(pos);
}
/**
* 寻路
* @param target
*/
goto(dst) {
//自动寻路
if (dst == void 0) {
dst = this.target.getPos();
}
let player = this.player.getPos();
let path = this.find_path(player, dst);
for (let next of path) {
this.run_async(this.move, [next])
.catch(err =>{
console.error(err)
})
}
let target = this.target.getPos()
if(dst.x == target.x && dst.y == target.y) {
this.run_async(function(){
this.fire('gameover'); // 游戏结束加载下一关卡
});
}
}
/**
* 寻找路径
* @param src
* @param target
* @returns {*|Array}
*/
find_path(src, target) {
return this.pathFinder.find_path(src, target);
}
/**
* 运行异步函数
* @param handler
* @param args
* @returns {Promise}
*/
run_async(handler, args) {
let promise = new Promise((resolve, reject)=> {
this.queue.push({
handler, args,
callback: function (err, data) {
if (err) {
reject(err);
} else {
resolve(data);
}
}
});
});
if (!this.running) {
this.taskloop();
}
return promise;
}
/**
* 主任务循环
*/
taskloop() {
this.running = true;
if(this.isPaused) return;
let task = this.queue.shift();
if (task) {
try {
let ret = task.handler.apply(this, task.args);
task.callback(null, ret); //成功回调
setTimeout(this.taskloop.bind(this), this.cfg.duration);
} catch (err) {
this.running = false;
this.queue = [];
task.callback(err); //失败回调
}
} else {
this.running = false;
}
}
/**
* 切换到下一关
*/
next_level() {
this.reset();
}
/**
*
* 重置游戏状态
*/
reset() {
this.map.clear();
this.randBuild();
this.setPlayerAndTarget();
this.queue = [];
this.running = false;
this.isPaused = false;
}
/**
* 暂停寻路动画
*/
pause(){
this.isPaused = true
}
/**
* 恢复寻路动画
*/
restart(){
this.isPaused = false
if(this.running){
this.taskloop();
}
}
}
/**
* 控制类
*/
class Application extends Event{
constructor() {
super();
this.$controls = document.getElementById('kingsman-control');
this.$duration = document.getElementById('duration');
this.$search_type = document.getElementById('search_type');
this.$size = document.getElementById('size');
this.$map = document.getElementById('kingsman-map');
this.$bg = document.getElementById('kingsman-bg');
this.$pause_restart = document.getElementById('pause_restart');
this.$game_level = document.getElementById('kingsman-game-state-level')
this.level = 1;
let cfg = this.readConfig();
this.game = new Game(cfg);
this.showGameState()
this.bindUI();
}
/**
* 初始化配置
*/
readConfig(){
let duration = this.$duration.value;
let size = this.$size.value;
let search_type = this.$search_type.value;
return {duration,size,search_type}
}
showGameState(){
this.$game_level.innerHTML = `第${this.level}关`;
}
/**
* 进入下一关
*/
next_level(){
this.level++;
this.showGameState();
this.game.next_level();
}
/**
* 绑定事件
*/
bindUI() {
this.game.on('gameover',this.next_level.bind(this))
this.$controls.addEventListener('change', e => {
switch (e.target.id) {
case 'duration':
let duration = this.$duration.value;
this.game.setDuration(duration);
break;
case 'search_type':
let search_type = this.$search_type.value;
this.game.setSearchType(search_type)
break;
case 'size':
let size = this.$size.value;
this.game.setSize(size);
break;
default:
break;
}
});
this.$controls.addEventListener('click', e => {
switch (e.target.id) {
case 'goto_target':
this.game.goto(); //自动寻路
break;
case 'next_level':
this.next_level();
break;
case 'pause_restart':
let state = e.target.dataset.type
if(state == 'pause'){
e.target.dataset.type = 'restart';
e.target.textContent = '继续';
this.game.pause()
}else{
e.target.dataset.type= 'pause'
e.target.textContent = '暂停'
this.game.restart()
}
}
});
this.$bg.addEventListener('click', e=> {
let x,y;
if (e.target.nodeName.toLowerCase() === 'td') {
let cell = e.target;
x = cell.cellIndex;
y = cell.parentNode.rowIndex;
}else if(e.target.id == 'kingsman-target') {
let pos = this.game.target.getPos()
x = pos.x;
y = pos.y;
}
let target = {x, y};
this.game.goto(target);//手动寻路
});
}
}
var app = new Application(); | }
}
let len = positions.length;
if (len < 2) { | random_line_split |
parse_tweet_data.py | # ______________________________________________________________________________________________________________________
# this code is **************************************(U) UNCLASSIFIED***************************************************
# ______________________________________________________________________________________________________________________
# coding=utf-8
# ----------------------------------------------------------------------------------------------------------------------
# program name: parse_tweet_data
# major version: 1.1
# program purpose: This program converts the updated ~10M tweet dataset and uses various functions to parse,
# inventory, and prep the data for topic modelling and metadata analysis.
# python version: 3.6
#
# Author: Emily Parrish
# major version created:20200602
# last modification: 20200602 Created all major functions for inventoring and parsing
# 20200612 Adjusted directory structure for outputs
# ----------------------------------------------------------------------------------------------------------------------
import os
import sys
import string
import csv
import operator
import pandas as pd
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import collections
import re
from helpers import *
# global paths
path = r'E:\Twitter\Russia\Russia_1906'
path_split = path.split('\\')
# current date
today = '20' + datetime.now().strftime('%m%d')
def convert_source(infile):
''' Imports a CSV file into Python Pandas and outputs a Pandas Data Frame Object. It then saves this object to a
pkl file for more efficient import of other processes.
Inputs: *.csv file
Outputs: *.pkl file in the input directory
'''
# construct absolute path
filepath = os.path.join(path, infile)
# import *.csv file to data frame
df = pd.read_csv(filepath)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save data frame to *.pkl file of same name
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
def convert_comb(files):
'''Takes a list of *.csv file inputs and creates a single data frames from each of them.
Inputs: Input *.csv files to be combined into a single data frame
Outputs: A data frame for each input and a single output combined data frame
'''
dfs = list()
for file in files:
ext_path = os.path.join(path, '1_DataFrames')
df = pd.read_pickle(os.path.join(ext_path, file))
dfs.append(df)
new_df = merge_df(dfs)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
new_df.to_pickle(os.path.join(ext_path, outfile))
return new_df
def sort_df(df, field='tweet_time'):
''' Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It
also adds a column called "unique_id_ida" with formatted ID numbers for each tweet.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)
Outputs: *.pkl file in the input directory with "sorted" label sorted, containing additional column "unique_id_ida"
with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)
'''
# turn any time fields into datetime objects
if field == 'tweet_time':
df['tweet_time'] = pd.to_datetime(df.tweet_time)
# sort data frame by field column
df = df.sort_values(by=field)
# generate list of unique ID numbers with 7-digits (leading zeros for smaller numbers)
in_list = list()
for i in range(0, len(df.index)):
i = str(i)
while len(i) != 7:
i = "0" + i
in_list.append(i)
# add column ID numbers to data frame
df['unique_id_ida'] = np.array(in_list)
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
return df
def split_df(df, num=30):
'''Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls
the sort function to sort the data frame by the default (date).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Number of inventories to split into. Users discresction depending on size of the data set.
Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a
way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are
sorted by date with the ranges of dates in each inventory in the file name
(i.e. AA_Twitter10M_090509_130214.csv)
'''
df = sort_df(df)
alphabets = string.ascii_lowercase
a_list = list()
for i in alphabets:
for j in alphabets:
a_list.append(i.upper() + j.upper())
# splits data set into 30 different data frames of equal size, which will each represent an individual inventory.
df_split = np.array_split(df, num)
subpath = os.path.join(path, '2_Inventories')
alpha_index = 0
last_i = len(df_split) - 1
for item in df_split:
df_sub = pd.concat([item.head(1), item.tail(1)])
date_bounds = pd.Series(df_sub['tweet_time'].tolist())
date_bounds_format = (date_bounds.dt.strftime('%Y%m%d')).tolist()
to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds[1].strftime('%Y%m%d')]
if alpha_index == 0:
comb_df = to_file
elif alpha_index == last_i:
comb_df = pd.concat([extra_rows, item], axis=0)
else:
comb_df = pd.concat([extra_rows, to_file], axis=0)
prevdate = str(int(date_bounds_format[1]) - 1)
filename = a_list[alpha_index] + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_format[0][2:] + '_' + prevdate[2:] + '.csv'
print(filename)
filepath = os.path.join(subpath,filename)
comb_df.to_csv(filepath)
extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') == date_bounds[1].strftime('%Y%m%d')]
alpha_index += 1
def get_lang(df, lang):
'''Takes an input data frame and generates a data frame with only a specific language's tweets (user specified).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language code for language of interest
Outputs: Pandas data frame with a subset of tweets from that specific language
'''
lang_df = df.loc[df['tweet_language'] == lang]
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
lang_df.to_pickle(os.path.join(ext_path, outfile))
return lang_df
def strip_formatting(df, lim, lang='allLang'):
'''Takes an imput data frame and removes emojis, punctuation, HTML entities like &, links, handles, and emojis.
Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-
data frame.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Character limit for parsing after strip functionality implemented
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removind entities
of interest
'''
tweets = df['tweet_text'].to_list()
edit_tweets = list()
include = list()
for tweet in tweets:
strip_tweet = strip_accounts(remove_punctuation(strip_html_entities(strip_links(strip_emoji(tweet)))))
edit_tweets.append(strip_tweet)
if is_length(strip_tweet, lim):
include.append('1')
else:
|
df['stripped_tweet'] = edit_tweets
df['tweet_length'] = df['tweet_text'].str.len()
df['include_topic_model'] = include
df['stripped_tweet_length'] = df['include_topic_model'].str.len()
sub_df = df.loc[df['include_topic_model'] == '1']
outfile = path_split[1] + '_' + path_split[3] + '_sorted_strip_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
sub_df.to_pickle(os.path.join(ext_path, outfile))
return sub_df
def extract_content(df, label='All_Languages'):
'''Takes an input data frame and extracts the individual Tweets and places it in chronological directories
incremented by intervals based on a month.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content
only in the file. Each file is named accordingly like the following example:
'''
date_bounds = pd.Series(df['tweet_time'].tolist())
date_bounds_ymd = (date_bounds.dt.strftime('%Y%m%d')).tolist()
date_bounds_hms = (date_bounds.dt.strftime('%H%M')).tolist()
content = pd.Series(df['stripped_tweet'].tolist())
unid = pd.Series(df['unique_id_ida'].tolist())
print('Total Files to process: ' + str(len(date_bounds_ymd)))
parentdir = os.path.join(path, label)
os.mkdir(parentdir)
for i in range(0, len(date_bounds_ymd)):
dir = date_bounds_ymd[i][:4] + '-' + date_bounds_ymd[i][4:6]
fulldir = os.path.join(parentdir, dir)
filename = str(unid[i]) + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_ymd[i][2:] + '_' + date_bounds_hms[i][:4] + '.txt'
outpath = os.path.join(fulldir, filename)
if os.path.exists(outpath):
pass
else:
if os.path.isdir(fulldir):
pass
else:
os.mkdir(fulldir)
if int(i) % 10000 == 0:
print('Files up to ' + str(i) + ' processed.')
f = open(outpath, 'w', encoding='utf-8')
f.write(content[i])
f.close()
def generate_freq(df):
'''Takes an input data frame and generates a histogram of number of tweets binned by month.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Input parameter called "increment", which determined by what time interval the tweets are organized
Outputs: Histogram
'''
date_bounds = pd.Series(df['tweet_time'].tolist())
date_bounds_ym = (date_bounds.dt.strftime('%Y-%m')).tolist()
df['date_md'] = np.array(date_bounds_ym)
sort = df.sort_values(by=['date_md'])
frq = sort['date_md'].value_counts().to_dict()
frq_df = sort['date_md'].value_counts()
od = collections.OrderedDict(sorted(frq.items()))
rf_dates = list()
for item in list(od.keys()):
date_rf = date_reformat(item)
rf_dates.append(date_rf)
data = {"Date": rf_dates, "Freq": list(od.values())}
graph_frame = pd.dataframe(data=data)
frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))
ax = graph_frame.plot.bar(x="Date", y="Freq", rot=45)
plt.show()
def main():
print('Start time: ' + str(datetime.now()))
infile = 'Twitter_Russia_1906_sorted_strip_en_200929.pkl'
inpath = os.path.join(path, '1_DataFrames')
infilepath = os.path.join(inpath, infile)
stripped_en = pd.read_pickle(infilepath)
print(stripped_en.head()['unique_id_ida'])
# sorted = sort_df(df)
# split_df(sorted, 1)
#
# en_df = get_lang(sorted, 'en')
# stripped_en = strip_formatting(en_df, 10, 'en')
#
# ru_df = get_lang(sorted, 'ru')
# stripped_ru = strip_formatting(ru_df, 12, 'ru')
#
# zh_df = get_lang(sorted, 'zh')
# stripped_zh = strip_formatting(zh_df, 2, 'zh')
extract_content(stripped_en, 'English')
print('End time: ' + str(datetime.now()))
if __name__ == '__main__':
main()
# this code is **************************************(U) UNCLASSIFIED*************************************************** | include.append('0') | conditional_block |
parse_tweet_data.py | # ______________________________________________________________________________________________________________________
# this code is **************************************(U) UNCLASSIFIED***************************************************
# ______________________________________________________________________________________________________________________
# coding=utf-8
# ----------------------------------------------------------------------------------------------------------------------
# program name: parse_tweet_data
# major version: 1.1
# program purpose: This program converts the updated ~10M tweet dataset and uses various functions to parse,
# inventory, and prep the data for topic modelling and metadata analysis.
# python version: 3.6
#
# Author: Emily Parrish
# major version created:20200602
# last modification: 20200602 Created all major functions for inventoring and parsing
# 20200612 Adjusted directory structure for outputs
# ----------------------------------------------------------------------------------------------------------------------
import os
import sys
import string
import csv
import operator
import pandas as pd
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import collections
import re
from helpers import *
# global paths
path = r'E:\Twitter\Russia\Russia_1906'
path_split = path.split('\\')
# current date
today = '20' + datetime.now().strftime('%m%d')
def convert_source(infile):
''' Imports a CSV file into Python Pandas and outputs a Pandas Data Frame Object. It then saves this object to a
pkl file for more efficient import of other processes.
Inputs: *.csv file
Outputs: *.pkl file in the input directory
'''
# construct absolute path
filepath = os.path.join(path, infile)
# import *.csv file to data frame
df = pd.read_csv(filepath)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save data frame to *.pkl file of same name
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
def convert_comb(files):
'''Takes a list of *.csv file inputs and creates a single data frames from each of them.
Inputs: Input *.csv files to be combined into a single data frame
Outputs: A data frame for each input and a single output combined data frame
'''
dfs = list()
for file in files:
ext_path = os.path.join(path, '1_DataFrames')
df = pd.read_pickle(os.path.join(ext_path, file))
dfs.append(df)
new_df = merge_df(dfs)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
new_df.to_pickle(os.path.join(ext_path, outfile))
return new_df
def sort_df(df, field='tweet_time'):
''' Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It
also adds a column called "unique_id_ida" with formatted ID numbers for each tweet.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)
Outputs: *.pkl file in the input directory with "sorted" label sorted, containing additional column "unique_id_ida"
with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)
'''
# turn any time fields into datetime objects
if field == 'tweet_time':
df['tweet_time'] = pd.to_datetime(df.tweet_time)
# sort data frame by field column
df = df.sort_values(by=field)
# generate list of unique ID numbers with 7-digits (leading zeros for smaller numbers)
in_list = list()
for i in range(0, len(df.index)):
i = str(i)
while len(i) != 7:
i = "0" + i
in_list.append(i)
# add column ID numbers to data frame
df['unique_id_ida'] = np.array(in_list)
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
return df
def | (df, num=30):
'''Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls
the sort function to sort the data frame by the default (date).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Number of inventories to split into. Users discresction depending on size of the data set.
Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a
way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are
sorted by date with the ranges of dates in each inventory in the file name
(i.e. AA_Twitter10M_090509_130214.csv)
'''
df = sort_df(df)
alphabets = string.ascii_lowercase
a_list = list()
for i in alphabets:
for j in alphabets:
a_list.append(i.upper() + j.upper())
# splits data set into 30 different data frames of equal size, which will each represent an individual inventory.
df_split = np.array_split(df, num)
subpath = os.path.join(path, '2_Inventories')
alpha_index = 0
last_i = len(df_split) - 1
for item in df_split:
df_sub = pd.concat([item.head(1), item.tail(1)])
date_bounds = pd.Series(df_sub['tweet_time'].tolist())
date_bounds_format = (date_bounds.dt.strftime('%Y%m%d')).tolist()
to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds[1].strftime('%Y%m%d')]
if alpha_index == 0:
comb_df = to_file
elif alpha_index == last_i:
comb_df = pd.concat([extra_rows, item], axis=0)
else:
comb_df = pd.concat([extra_rows, to_file], axis=0)
prevdate = str(int(date_bounds_format[1]) - 1)
filename = a_list[alpha_index] + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_format[0][2:] + '_' + prevdate[2:] + '.csv'
print(filename)
filepath = os.path.join(subpath,filename)
comb_df.to_csv(filepath)
extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') == date_bounds[1].strftime('%Y%m%d')]
alpha_index += 1
def get_lang(df, lang):
'''Takes an input data frame and generates a data frame with only a specific language's tweets (user specified).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language code for language of interest
Outputs: Pandas data frame with a subset of tweets from that specific language
'''
lang_df = df.loc[df['tweet_language'] == lang]
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
lang_df.to_pickle(os.path.join(ext_path, outfile))
return lang_df
def strip_formatting(df, lim, lang='allLang'):
'''Takes an imput data frame and removes emojis, punctuation, HTML entities like &, links, handles, and emojis.
Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-
data frame.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Character limit for parsing after strip functionality implemented
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removind entities
of interest
'''
tweets = df['tweet_text'].to_list()
edit_tweets = list()
include = list()
for tweet in tweets:
strip_tweet = strip_accounts(remove_punctuation(strip_html_entities(strip_links(strip_emoji(tweet)))))
edit_tweets.append(strip_tweet)
if is_length(strip_tweet, lim):
include.append('1')
else:
include.append('0')
df['stripped_tweet'] = edit_tweets
df['tweet_length'] = df['tweet_text'].str.len()
df['include_topic_model'] = include
df['stripped_tweet_length'] = df['include_topic_model'].str.len()
sub_df = df.loc[df['include_topic_model'] == '1']
outfile = path_split[1] + '_' + path_split[3] + '_sorted_strip_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
sub_df.to_pickle(os.path.join(ext_path, outfile))
return sub_df
def extract_content(df, label='All_Languages'):
'''Takes an input data frame and extracts the individual Tweets and places it in chronological directories
incremented by intervals based on a month.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content
only in the file. Each file is named accordingly like the following example:
'''
date_bounds = pd.Series(df['tweet_time'].tolist())
date_bounds_ymd = (date_bounds.dt.strftime('%Y%m%d')).tolist()
date_bounds_hms = (date_bounds.dt.strftime('%H%M')).tolist()
content = pd.Series(df['stripped_tweet'].tolist())
unid = pd.Series(df['unique_id_ida'].tolist())
print('Total Files to process: ' + str(len(date_bounds_ymd)))
parentdir = os.path.join(path, label)
os.mkdir(parentdir)
for i in range(0, len(date_bounds_ymd)):
dir = date_bounds_ymd[i][:4] + '-' + date_bounds_ymd[i][4:6]
fulldir = os.path.join(parentdir, dir)
filename = str(unid[i]) + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_ymd[i][2:] + '_' + date_bounds_hms[i][:4] + '.txt'
outpath = os.path.join(fulldir, filename)
if os.path.exists(outpath):
pass
else:
if os.path.isdir(fulldir):
pass
else:
os.mkdir(fulldir)
if int(i) % 10000 == 0:
print('Files up to ' + str(i) + ' processed.')
f = open(outpath, 'w', encoding='utf-8')
f.write(content[i])
f.close()
def generate_freq(df):
'''Takes an input data frame and generates a histogram of number of tweets binned by month.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Input parameter called "increment", which determined by what time interval the tweets are organized
Outputs: Histogram
'''
date_bounds = pd.Series(df['tweet_time'].tolist())
date_bounds_ym = (date_bounds.dt.strftime('%Y-%m')).tolist()
df['date_md'] = np.array(date_bounds_ym)
sort = df.sort_values(by=['date_md'])
frq = sort['date_md'].value_counts().to_dict()
frq_df = sort['date_md'].value_counts()
od = collections.OrderedDict(sorted(frq.items()))
rf_dates = list()
for item in list(od.keys()):
date_rf = date_reformat(item)
rf_dates.append(date_rf)
data = {"Date": rf_dates, "Freq": list(od.values())}
graph_frame = pd.dataframe(data=data)
frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))
ax = graph_frame.plot.bar(x="Date", y="Freq", rot=45)
plt.show()
def main():
print('Start time: ' + str(datetime.now()))
infile = 'Twitter_Russia_1906_sorted_strip_en_200929.pkl'
inpath = os.path.join(path, '1_DataFrames')
infilepath = os.path.join(inpath, infile)
stripped_en = pd.read_pickle(infilepath)
print(stripped_en.head()['unique_id_ida'])
# sorted = sort_df(df)
# split_df(sorted, 1)
#
# en_df = get_lang(sorted, 'en')
# stripped_en = strip_formatting(en_df, 10, 'en')
#
# ru_df = get_lang(sorted, 'ru')
# stripped_ru = strip_formatting(ru_df, 12, 'ru')
#
# zh_df = get_lang(sorted, 'zh')
# stripped_zh = strip_formatting(zh_df, 2, 'zh')
extract_content(stripped_en, 'English')
print('End time: ' + str(datetime.now()))
if __name__ == '__main__':
main()
# this code is **************************************(U) UNCLASSIFIED*************************************************** | split_df | identifier_name |
parse_tweet_data.py | # ______________________________________________________________________________________________________________________
# this code is **************************************(U) UNCLASSIFIED***************************************************
# ______________________________________________________________________________________________________________________
# coding=utf-8
# ----------------------------------------------------------------------------------------------------------------------
# program name: parse_tweet_data
# major version: 1.1
# program purpose: This program converts the updated ~10M tweet dataset and uses various functions to parse,
# inventory, and prep the data for topic modelling and metadata analysis.
# python version: 3.6
#
# Author: Emily Parrish
# major version created:20200602
# last modification: 20200602 Created all major functions for inventoring and parsing
# 20200612 Adjusted directory structure for outputs
# ----------------------------------------------------------------------------------------------------------------------
import os
import sys
import string
import csv
import operator
import pandas as pd
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import collections
import re
from helpers import *
# global paths
path = r'E:\Twitter\Russia\Russia_1906'
path_split = path.split('\\')
# current date
today = '20' + datetime.now().strftime('%m%d')
def convert_source(infile):
''' Imports a CSV file into Python Pandas and outputs a Pandas Data Frame Object. It then saves this object to a
pkl file for more efficient import of other processes.
Inputs: *.csv file
Outputs: *.pkl file in the input directory
'''
# construct absolute path
filepath = os.path.join(path, infile)
# import *.csv file to data frame
df = pd.read_csv(filepath)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save data frame to *.pkl file of same name
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
def convert_comb(files):
'''Takes a list of *.csv file inputs and creates a single data frames from each of them.
Inputs: Input *.csv files to be combined into a single data frame
Outputs: A data frame for each input and a single output combined data frame
'''
dfs = list()
for file in files:
ext_path = os.path.join(path, '1_DataFrames')
df = pd.read_pickle(os.path.join(ext_path, file))
dfs.append(df)
new_df = merge_df(dfs)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
new_df.to_pickle(os.path.join(ext_path, outfile))
return new_df
def sort_df(df, field='tweet_time'):
''' Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It
also adds a column called "unique_id_ida" with formatted ID numbers for each tweet.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)
Outputs: *.pkl file in the input directory with "sorted" label sorted, containing additional column "unique_id_ida"
with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)
'''
# turn any time fields into datetime objects
if field == 'tweet_time':
df['tweet_time'] = pd.to_datetime(df.tweet_time)
# sort data frame by field column
df = df.sort_values(by=field)
# generate list of unique ID numbers with 7-digits (leading zeros for smaller numbers)
in_list = list()
for i in range(0, len(df.index)):
i = str(i)
while len(i) != 7:
i = "0" + i
in_list.append(i)
# add column ID numbers to data frame
df['unique_id_ida'] = np.array(in_list)
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
return df
def split_df(df, num=30):
'''Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls
the sort function to sort the data frame by the default (date).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Number of inventories to split into. Users discresction depending on size of the data set.
Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a
way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are
sorted by date with the ranges of dates in each inventory in the file name
(i.e. AA_Twitter10M_090509_130214.csv)
'''
df = sort_df(df)
alphabets = string.ascii_lowercase
a_list = list()
for i in alphabets:
for j in alphabets:
a_list.append(i.upper() + j.upper())
# splits data set into 30 different data frames of equal size, which will each represent an individual inventory.
df_split = np.array_split(df, num)
subpath = os.path.join(path, '2_Inventories')
alpha_index = 0
last_i = len(df_split) - 1
for item in df_split:
df_sub = pd.concat([item.head(1), item.tail(1)])
date_bounds = pd.Series(df_sub['tweet_time'].tolist())
date_bounds_format = (date_bounds.dt.strftime('%Y%m%d')).tolist()
to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds[1].strftime('%Y%m%d')]
if alpha_index == 0:
comb_df = to_file
elif alpha_index == last_i:
comb_df = pd.concat([extra_rows, item], axis=0)
else:
comb_df = pd.concat([extra_rows, to_file], axis=0)
prevdate = str(int(date_bounds_format[1]) - 1)
filename = a_list[alpha_index] + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_format[0][2:] + '_' + prevdate[2:] + '.csv'
print(filename)
filepath = os.path.join(subpath,filename)
comb_df.to_csv(filepath)
extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') == date_bounds[1].strftime('%Y%m%d')]
alpha_index += 1
def get_lang(df, lang):
'''Takes an input data frame and generates a data frame with only a specific language's tweets (user specified).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language code for language of interest
Outputs: Pandas data frame with a subset of tweets from that specific language
'''
lang_df = df.loc[df['tweet_language'] == lang]
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
lang_df.to_pickle(os.path.join(ext_path, outfile))
return lang_df
def strip_formatting(df, lim, lang='allLang'):
'''Takes an imput data frame and removes emojis, punctuation, HTML entities like &, links, handles, and emojis.
Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-
data frame.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Character limit for parsing after strip functionality implemented
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removind entities
of interest
'''
tweets = df['tweet_text'].to_list()
edit_tweets = list()
include = list()
for tweet in tweets:
strip_tweet = strip_accounts(remove_punctuation(strip_html_entities(strip_links(strip_emoji(tweet)))))
edit_tweets.append(strip_tweet)
if is_length(strip_tweet, lim):
include.append('1')
else:
include.append('0')
df['stripped_tweet'] = edit_tweets
df['tweet_length'] = df['tweet_text'].str.len()
df['include_topic_model'] = include | outfile = path_split[1] + '_' + path_split[3] + '_sorted_strip_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
sub_df.to_pickle(os.path.join(ext_path, outfile))
return sub_df
def extract_content(df, label='All_Languages'):
'''Takes an input data frame and extracts the individual Tweets and places it in chronological directories
incremented by intervals based on a month.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content
only in the file. Each file is named accordingly like the following example:
'''
date_bounds = pd.Series(df['tweet_time'].tolist())
date_bounds_ymd = (date_bounds.dt.strftime('%Y%m%d')).tolist()
date_bounds_hms = (date_bounds.dt.strftime('%H%M')).tolist()
content = pd.Series(df['stripped_tweet'].tolist())
unid = pd.Series(df['unique_id_ida'].tolist())
print('Total Files to process: ' + str(len(date_bounds_ymd)))
parentdir = os.path.join(path, label)
os.mkdir(parentdir)
for i in range(0, len(date_bounds_ymd)):
dir = date_bounds_ymd[i][:4] + '-' + date_bounds_ymd[i][4:6]
fulldir = os.path.join(parentdir, dir)
filename = str(unid[i]) + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_ymd[i][2:] + '_' + date_bounds_hms[i][:4] + '.txt'
outpath = os.path.join(fulldir, filename)
if os.path.exists(outpath):
pass
else:
if os.path.isdir(fulldir):
pass
else:
os.mkdir(fulldir)
if int(i) % 10000 == 0:
print('Files up to ' + str(i) + ' processed.')
f = open(outpath, 'w', encoding='utf-8')
f.write(content[i])
f.close()
def generate_freq(df):
'''Takes an input data frame and generates a histogram of number of tweets binned by month.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Input parameter called "increment", which determined by what time interval the tweets are organized
Outputs: Histogram
'''
date_bounds = pd.Series(df['tweet_time'].tolist())
date_bounds_ym = (date_bounds.dt.strftime('%Y-%m')).tolist()
df['date_md'] = np.array(date_bounds_ym)
sort = df.sort_values(by=['date_md'])
frq = sort['date_md'].value_counts().to_dict()
frq_df = sort['date_md'].value_counts()
od = collections.OrderedDict(sorted(frq.items()))
rf_dates = list()
for item in list(od.keys()):
date_rf = date_reformat(item)
rf_dates.append(date_rf)
data = {"Date": rf_dates, "Freq": list(od.values())}
graph_frame = pd.dataframe(data=data)
frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))
ax = graph_frame.plot.bar(x="Date", y="Freq", rot=45)
plt.show()
def main():
print('Start time: ' + str(datetime.now()))
infile = 'Twitter_Russia_1906_sorted_strip_en_200929.pkl'
inpath = os.path.join(path, '1_DataFrames')
infilepath = os.path.join(inpath, infile)
stripped_en = pd.read_pickle(infilepath)
print(stripped_en.head()['unique_id_ida'])
# sorted = sort_df(df)
# split_df(sorted, 1)
#
# en_df = get_lang(sorted, 'en')
# stripped_en = strip_formatting(en_df, 10, 'en')
#
# ru_df = get_lang(sorted, 'ru')
# stripped_ru = strip_formatting(ru_df, 12, 'ru')
#
# zh_df = get_lang(sorted, 'zh')
# stripped_zh = strip_formatting(zh_df, 2, 'zh')
extract_content(stripped_en, 'English')
print('End time: ' + str(datetime.now()))
if __name__ == '__main__':
main()
# this code is **************************************(U) UNCLASSIFIED*************************************************** | df['stripped_tweet_length'] = df['include_topic_model'].str.len()
sub_df = df.loc[df['include_topic_model'] == '1']
| random_line_split |
parse_tweet_data.py | # ______________________________________________________________________________________________________________________
# this code is **************************************(U) UNCLASSIFIED***************************************************
# ______________________________________________________________________________________________________________________
# coding=utf-8
# ----------------------------------------------------------------------------------------------------------------------
# program name: parse_tweet_data
# major version: 1.1
# program purpose: This program converts the updated ~10M tweet dataset and uses various functions to parse,
# inventory, and prep the data for topic modelling and metadata analysis.
# python version: 3.6
#
# Author: Emily Parrish
# major version created:20200602
# last modification: 20200602 Created all major functions for inventoring and parsing
# 20200612 Adjusted directory structure for outputs
# ----------------------------------------------------------------------------------------------------------------------
import os
import sys
import string
import csv
import operator
import pandas as pd
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import collections
import re
from helpers import *
# global paths
path = r'E:\Twitter\Russia\Russia_1906'
path_split = path.split('\\')
# current date
today = '20' + datetime.now().strftime('%m%d')
def convert_source(infile):
''' Imports a CSV file into Python Pandas and outputs a Pandas Data Frame Object. It then saves this object to a
pkl file for more efficient import of other processes.
Inputs: *.csv file
Outputs: *.pkl file in the input directory
'''
# construct absolute path
filepath = os.path.join(path, infile)
# import *.csv file to data frame
df = pd.read_csv(filepath)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save data frame to *.pkl file of same name
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
def convert_comb(files):
'''Takes a list of *.csv file inputs and creates a single data frames from each of them.
Inputs: Input *.csv files to be combined into a single data frame
Outputs: A data frame for each input and a single output combined data frame
'''
dfs = list()
for file in files:
ext_path = os.path.join(path, '1_DataFrames')
df = pd.read_pickle(os.path.join(ext_path, file))
dfs.append(df)
new_df = merge_df(dfs)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
new_df.to_pickle(os.path.join(ext_path, outfile))
return new_df
def sort_df(df, field='tweet_time'):
''' Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It
also adds a column called "unique_id_ida" with formatted ID numbers for each tweet.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)
Outputs: *.pkl file in the input directory with "sorted" label sorted, containing additional column "unique_id_ida"
with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)
'''
# turn any time fields into datetime objects
if field == 'tweet_time':
df['tweet_time'] = pd.to_datetime(df.tweet_time)
# sort data frame by field column
df = df.sort_values(by=field)
# generate list of unique ID numbers with 7-digits (leading zeros for smaller numbers)
in_list = list()
for i in range(0, len(df.index)):
i = str(i)
while len(i) != 7:
i = "0" + i
in_list.append(i)
# add column ID numbers to data frame
df['unique_id_ida'] = np.array(in_list)
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
return df
def split_df(df, num=30):
'''Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls
the sort function to sort the data frame by the default (date).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Number of inventories to split into. Users discresction depending on size of the data set.
Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a
way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are
sorted by date with the ranges of dates in each inventory in the file name
(i.e. AA_Twitter10M_090509_130214.csv)
'''
df = sort_df(df)
alphabets = string.ascii_lowercase
a_list = list()
for i in alphabets:
for j in alphabets:
a_list.append(i.upper() + j.upper())
# splits data set into 30 different data frames of equal size, which will each represent an individual inventory.
df_split = np.array_split(df, num)
subpath = os.path.join(path, '2_Inventories')
alpha_index = 0
last_i = len(df_split) - 1
for item in df_split:
df_sub = pd.concat([item.head(1), item.tail(1)])
date_bounds = pd.Series(df_sub['tweet_time'].tolist())
date_bounds_format = (date_bounds.dt.strftime('%Y%m%d')).tolist()
to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds[1].strftime('%Y%m%d')]
if alpha_index == 0:
comb_df = to_file
elif alpha_index == last_i:
comb_df = pd.concat([extra_rows, item], axis=0)
else:
comb_df = pd.concat([extra_rows, to_file], axis=0)
prevdate = str(int(date_bounds_format[1]) - 1)
filename = a_list[alpha_index] + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_format[0][2:] + '_' + prevdate[2:] + '.csv'
print(filename)
filepath = os.path.join(subpath,filename)
comb_df.to_csv(filepath)
extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') == date_bounds[1].strftime('%Y%m%d')]
alpha_index += 1
def get_lang(df, lang):
'''Takes an input data frame and generates a data frame with only a specific language's tweets (user specified).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language code for language of interest
Outputs: Pandas data frame with a subset of tweets from that specific language
'''
lang_df = df.loc[df['tweet_language'] == lang]
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
lang_df.to_pickle(os.path.join(ext_path, outfile))
return lang_df
def strip_formatting(df, lim, lang='allLang'):
'''Takes an imput data frame and removes emojis, punctuation, HTML entities like &, links, handles, and emojis.
Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-
data frame.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Character limit for parsing after strip functionality implemented
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removind entities
of interest
'''
tweets = df['tweet_text'].to_list()
edit_tweets = list()
include = list()
for tweet in tweets:
strip_tweet = strip_accounts(remove_punctuation(strip_html_entities(strip_links(strip_emoji(tweet)))))
edit_tweets.append(strip_tweet)
if is_length(strip_tweet, lim):
include.append('1')
else:
include.append('0')
df['stripped_tweet'] = edit_tweets
df['tweet_length'] = df['tweet_text'].str.len()
df['include_topic_model'] = include
df['stripped_tweet_length'] = df['include_topic_model'].str.len()
sub_df = df.loc[df['include_topic_model'] == '1']
outfile = path_split[1] + '_' + path_split[3] + '_sorted_strip_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
sub_df.to_pickle(os.path.join(ext_path, outfile))
return sub_df
def extract_content(df, label='All_Languages'):
'''Takes an input data frame and extracts the individual Tweets and places it in chronological directories
incremented by intervals based on a month.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content
only in the file. Each file is named accordingly like the following example:
'''
date_bounds = pd.Series(df['tweet_time'].tolist())
date_bounds_ymd = (date_bounds.dt.strftime('%Y%m%d')).tolist()
date_bounds_hms = (date_bounds.dt.strftime('%H%M')).tolist()
content = pd.Series(df['stripped_tweet'].tolist())
unid = pd.Series(df['unique_id_ida'].tolist())
print('Total Files to process: ' + str(len(date_bounds_ymd)))
parentdir = os.path.join(path, label)
os.mkdir(parentdir)
for i in range(0, len(date_bounds_ymd)):
dir = date_bounds_ymd[i][:4] + '-' + date_bounds_ymd[i][4:6]
fulldir = os.path.join(parentdir, dir)
filename = str(unid[i]) + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_ymd[i][2:] + '_' + date_bounds_hms[i][:4] + '.txt'
outpath = os.path.join(fulldir, filename)
if os.path.exists(outpath):
pass
else:
if os.path.isdir(fulldir):
pass
else:
os.mkdir(fulldir)
if int(i) % 10000 == 0:
print('Files up to ' + str(i) + ' processed.')
f = open(outpath, 'w', encoding='utf-8')
f.write(content[i])
f.close()
def generate_freq(df):
|
def main():
print('Start time: ' + str(datetime.now()))
infile = 'Twitter_Russia_1906_sorted_strip_en_200929.pkl'
inpath = os.path.join(path, '1_DataFrames')
infilepath = os.path.join(inpath, infile)
stripped_en = pd.read_pickle(infilepath)
print(stripped_en.head()['unique_id_ida'])
# sorted = sort_df(df)
# split_df(sorted, 1)
#
# en_df = get_lang(sorted, 'en')
# stripped_en = strip_formatting(en_df, 10, 'en')
#
# ru_df = get_lang(sorted, 'ru')
# stripped_ru = strip_formatting(ru_df, 12, 'ru')
#
# zh_df = get_lang(sorted, 'zh')
# stripped_zh = strip_formatting(zh_df, 2, 'zh')
extract_content(stripped_en, 'English')
print('End time: ' + str(datetime.now()))
if __name__ == '__main__':
main()
# this code is **************************************(U) UNCLASSIFIED*************************************************** | '''Takes an input data frame and generates a histogram of number of tweets binned by month.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Input parameter called "increment", which determined by what time interval the tweets are organized
Outputs: Histogram
'''
date_bounds = pd.Series(df['tweet_time'].tolist())
date_bounds_ym = (date_bounds.dt.strftime('%Y-%m')).tolist()
df['date_md'] = np.array(date_bounds_ym)
sort = df.sort_values(by=['date_md'])
frq = sort['date_md'].value_counts().to_dict()
frq_df = sort['date_md'].value_counts()
od = collections.OrderedDict(sorted(frq.items()))
rf_dates = list()
for item in list(od.keys()):
date_rf = date_reformat(item)
rf_dates.append(date_rf)
data = {"Date": rf_dates, "Freq": list(od.values())}
graph_frame = pd.dataframe(data=data)
frq_df.to_csv(os.path.join(path, 'tweet_freq_' + today + '.csv'))
ax = graph_frame.plot.bar(x="Date", y="Freq", rot=45)
plt.show() | identifier_body |
09_XGBC_img.py | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 28 19:17:34 2019
@author: Logan Rowe
"""
import numpy as np
import os
import sys
import pandas as pd
from pandas.plotting import scatter_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline,FeatureUnion
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_val_score,GridSearchCV,RandomizedSearchCV
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.externals import joblib
from scipy.stats import expon,reciprocal
import data_prep as dp
from imp import reload
reload(dp)
import matplotlib.pyplot as plt
import xgboost
################################################
# LOAD DATA
################################################
data_dir='C:\\Users\\Logan Rowe\\Desktop\\bowtie-defect-identification\\Wafer_Images\\bowtie-training-data'
X_raw=np.load(data_dir+'\\std0_std45_sh0-arr_sh45-arr_bow-bool_train.npy')
################################################
# ADD COLUMN NAMES AND CONVERT TO PANDAS DF
################################################
c1=['std0','std45']
c2=['sh0_{0}'.format(str(i)) for i in range(64)]
c3=['sh45_{0}'.format(str(i)) for i in range(64)]
c4=['bowties']
column_names=c1+c2+c3+c4
seeking=True
if seeking:
|
# =============================================================================
# Run again with more estimators and early stopping to check for over fitting
# =============================================================================
params['n_estimators']=100
clf=xgboost.XGBClassifier(**params)
eval_set=[(X_train_trans,y_train),(X_val_trans,y_val),(X_test_trans,y_test)]
eval_metric=['error','logloss']
clf.fit(X_train_trans,y_train,eval_metric=eval_metric,eval_set=eval_set,verbose=10)
evals_result=clf.evals_result()
#Errors
train_errors=evals_result['validation_0']['error']
val_errors=evals_result['validation_1']['error']
test_errors=evals_result['validation_2']['error']
#Logloss Errors
train_errors_log=evals_result['validation_0']['logloss']
val_errors_log=evals_result['validation_1']['logloss']
test_errors_log=evals_result['validation_2']['logloss']
N=np.linspace(1,params['n_estimators'],params['n_estimators'])
plt.close('all')
#Plot error
plt.figure(1)
plt.plot(N,val_errors,'b-')
plt.plot(N,train_errors,'r-')
plt.plot(N,test_errors,'g-')
plt.legend(['Validation','Training','Testing'])
plt.xlabel('Number of Estimators')
plt.ylabel('Error')
#Plot logloss error
plt.figure(2)
plt.plot(N,val_errors_log,'b-')
plt.plot(N,train_errors_log,'r-')
plt.plot(N,test_errors_log,'g-')
plt.legend(['Validation','Training','Testing'])
plt.xlabel('Number of Estimators')
plt.ylabel('Logloss Error')
y_preds=clf.predict(X_test)
F_CV=grid_search.best_score_
P,R,F=precision_score(y_test,y_preds),recall_score(y_test,y_preds),f1_score(y_test,y_preds)
print(P,R,F,F_CV,params)
# =============================================================================
# Based on the logloss curves the optimal number of estimators is
# between 46 and 58 so we will run it for 70 and use early stopping
# =============================================================================
params['n_estimators']=70
final_params_selected=True
if final_params_selected:
# =============================================================================
# Combine training and validation sets to increase training data
# =============================================================================
X_train_full=pd.concat([X_train_trans,X_val_trans])
y_train_full=pd.concat([y_train,y_val])
clf=xgboost.XGBClassifier(**params)
eval_set=[(X_train_trans,y_train),(X_test_trans,y_test)]
eval_metric=['error','logloss']
clf.fit(X_train_full,y_train_full,eval_metric=eval_metric,eval_set=eval_set,verbose=5,early_stopping_rounds=5)
evals_result=clf.evals_result()
#Logloss Errors
train_errors_log_2=evals_result['validation_0']['logloss']
test_errors_log_2=evals_result['validation_1']['logloss']
N_2=np.linspace(1,len(test_errors_log_2),len(test_errors_log_2))
#Plot logloss error
plt.figure(3)
plt.plot(N_2,train_errors_log_2,'r-')
plt.plot(N_2,test_errors_log_2,'g-')
plt.legend(['Training','Testing'])
plt.xlabel('Number of Estimators')
plt.ylabel('Logloss Error')
y_preds=clf.predict(X_test)
F_CV=grid_search.best_score_
P,R,F=precision_score(y_test,y_preds),recall_score(y_test,y_preds),f1_score(y_test,y_preds)
print(P,R,F,F_CV,params)
joblib.dump(clf,"C:\\Users\\Logan Rowe\\Desktop\\bowtie-defect-identification\\classifiers\\XGBC_img_classifier_.pkl")
final_test_results=True
if final_test_results:
y_pred_proba=clf.predict_proba(X_test)
export_full_transformed_dataset=False
if export_full_transformed_dataset:
processed_data_dir='C:\\Users\\Logan Rowe\\Desktop\\bowtie-defect-identification\\preprocessed_datasets'
#Training Data Set
training_full=X_train_full
training_full['bowties']=y_train_full
joblib.dump(training_full,processed_data_dir+'\\XGBC_img_train.pkl')
#Testing Data Set
testing_full=test
joblib.dump(testing_full,processed_data_dir+'\\XGBC_img_test.pkl')
| X=dp.numpy_to_pd(X_raw,column_names)
# =============================================================================
# SPLIT DATA INTO TEST AND TRAIN BOTH BALANCED
# WITH RESPECT TO (NON)BOWTIES
# =============================================================================
split=StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)
for train_index, test_index in split.split(X,X['bowties']):
train=X.loc[train_index]
test=X.loc[test_index]
# =========================================================================
# Split the training set into training and validation subsets
# =========================================================================
split=StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)
for train_index, test_index in split.split(train,train['bowties']):
train=X.loc[train_index]
train_val=X.loc[test_index]
y_train=train['bowties']
X_train=train.drop(columns='bowties')
y_test=test['bowties']
X_test=test.drop(columns='bowties')
y_val=train_val['bowties']
X_val=train_val.drop(columns='bowties')
pipeline=Pipeline([('Imputer',SimpleImputer(strategy='mean'))])
X_train_trans=pipeline.fit_transform(X_train)
X_val_trans=pipeline.fit_transform(X_val)
X_test_trans=pipeline.fit_transform(X_test)
# =========================================================================
# Convert back to panda dataframe because XGBoost and Scipy dont play nice
# =========================================================================
column_names_xgb=['f{}'.format(int(i)) for i in range(130)]
X_train_trans=dp.numpy_to_pd(X_train_trans,column_names_xgb)
X_val_trans=dp.numpy_to_pd(X_val_trans,column_names_xgb)
X_test_trans=dp.numpy_to_pd(X_test_trans,column_names_xgb)
param_grid={ 'gamma':[0.05],
'learning_rate':[0.1],
'max_depth':[7],
'min_child_weight':[1],
'n_estimators':[50],
'n_jobs':[-1],
'objective':['binary:logistic'],
'random_state':[42],
'reg_alpha':[0],
'reg_lambda':[1],
'scale_pos_weight':[1],
'subsample':[1],
'verbosity':[1]
}
xgb_clf=xgboost.XGBClassifier()
grid_search=GridSearchCV(xgb_clf,param_grid=param_grid,cv=5,scoring='f1',verbose=2,n_jobs=-1,iid=True)
grid_search.fit(X_train_trans,y_train)
params=grid_search.best_params_
#clf=xgboost.XGBRFClassifier(**params,n_estimators=100,n_jobs=-1,random_state=42)
clf=xgboost.XGBClassifier(**params)
clf.fit(X_train_trans,y_train,early_stopping_rounds=10,eval_set=[(X_val_trans,y_val)])
y_test=test['bowties']
X_test=test.drop(columns='bowties')
X_test=pipeline.fit_transform(X_test)
y_preds=clf.predict(X_test)
F_CV=grid_search.best_score_
P,R,F=precision_score(y_test,y_preds),recall_score(y_test,y_preds),f1_score(y_test,y_preds)
print(P,R,F,F_CV,params)
#0.8374384236453202 0.8808290155440415 0.8585858585858585 0.8345152519028066 {'_Booster': None, 'base_score': 0.5, 'colsample_bylevel': 1, 'colsample_bynode': 0.8, 'colsample_bytree': 1, 'gamma': 0, 'importance_type': 'gain', 'learning_rate': 0.05, 'max_delta_step': 0, 'max_depth': 7, 'min_child_weight': 5, 'n_estimators': 100, 'n_jobs': 1, 'nthread': None, 'objective': 'binary:logistic', 'random_state': 42, 'reg_alpha': 0, 'reg_lambda': 1, 'scale_pos_weight': 1, 'silent': None, 'subsample': 0.8, 'verbosity': 1}
#0.8366336633663366 0.8756476683937824 0.8556962025316456 0.8352625965436676 {'_Booster': None, 'base_score': 0.5, 'colsample_bylevel': 1, 'colsample_bynode': 0.8, 'colsample_bytree': 1, 'gamma': 0, 'importance_type': 'gain', 'learning_rate': 0.05, 'max_delta_step': 0, 'max_depth': 6, 'min_child_weight': 5, 'n_estimators': 100, 'n_jobs': 1, 'nthread': None, 'objective': 'binary:logistic', 'random_state': 42, 'reg_alpha': 0, 'reg_lambda': 1, 'scale_pos_weight': 1, 'silent': None, 'subsample': 0.8, 'verbosity': 1}
#0.8439024390243902 0.8963730569948186 0.8693467336683416 0.8497798540354795 {'_Booster': None, 'base_score': 0.5, 'colsample_bylevel': 1, 'colsample_bynode': 0.2, 'colsample_bytree': 1, 'gamma': 0, 'importance_type': 'gain', 'learning_rate': 0.05, 'max_delta_step': 0, 'max_depth': 7, 'min_child_weight': 5, 'n_estimators': 100, 'n_jobs': 1, 'nthread': None, 'objective': 'binary:logistic', 'random_state': 42, 'reg_alpha': 0, 'reg_lambda': 1, 'scale_pos_weight': 1, 'silent': None, 'subsample': 0.8, 'verbosity': 1} | conditional_block |
09_XGBC_img.py | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 28 19:17:34 2019
@author: Logan Rowe
"""
import numpy as np
import os
import sys
import pandas as pd
from pandas.plotting import scatter_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline,FeatureUnion
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_val_score,GridSearchCV,RandomizedSearchCV
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.externals import joblib
from scipy.stats import expon,reciprocal
import data_prep as dp
from imp import reload
reload(dp)
import matplotlib.pyplot as plt
import xgboost
################################################
# LOAD DATA
################################################
data_dir='C:\\Users\\Logan Rowe\\Desktop\\bowtie-defect-identification\\Wafer_Images\\bowtie-training-data'
X_raw=np.load(data_dir+'\\std0_std45_sh0-arr_sh45-arr_bow-bool_train.npy')
################################################
# ADD COLUMN NAMES AND CONVERT TO PANDAS DF
################################################
c1=['std0','std45']
c2=['sh0_{0}'.format(str(i)) for i in range(64)]
c3=['sh45_{0}'.format(str(i)) for i in range(64)]
c4=['bowties'] |
seeking=True
if seeking:
X=dp.numpy_to_pd(X_raw,column_names)
# =============================================================================
# SPLIT DATA INTO TEST AND TRAIN BOTH BALANCED
# WITH RESPECT TO (NON)BOWTIES
# =============================================================================
split=StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)
for train_index, test_index in split.split(X,X['bowties']):
train=X.loc[train_index]
test=X.loc[test_index]
# =========================================================================
# Split the training set into training and validation subsets
# =========================================================================
split=StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)
for train_index, test_index in split.split(train,train['bowties']):
train=X.loc[train_index]
train_val=X.loc[test_index]
y_train=train['bowties']
X_train=train.drop(columns='bowties')
y_test=test['bowties']
X_test=test.drop(columns='bowties')
y_val=train_val['bowties']
X_val=train_val.drop(columns='bowties')
pipeline=Pipeline([('Imputer',SimpleImputer(strategy='mean'))])
X_train_trans=pipeline.fit_transform(X_train)
X_val_trans=pipeline.fit_transform(X_val)
X_test_trans=pipeline.fit_transform(X_test)
# =========================================================================
# Convert back to panda dataframe because XGBoost and Scipy dont play nice
# =========================================================================
column_names_xgb=['f{}'.format(int(i)) for i in range(130)]
X_train_trans=dp.numpy_to_pd(X_train_trans,column_names_xgb)
X_val_trans=dp.numpy_to_pd(X_val_trans,column_names_xgb)
X_test_trans=dp.numpy_to_pd(X_test_trans,column_names_xgb)
param_grid={ 'gamma':[0.05],
'learning_rate':[0.1],
'max_depth':[7],
'min_child_weight':[1],
'n_estimators':[50],
'n_jobs':[-1],
'objective':['binary:logistic'],
'random_state':[42],
'reg_alpha':[0],
'reg_lambda':[1],
'scale_pos_weight':[1],
'subsample':[1],
'verbosity':[1]
}
xgb_clf=xgboost.XGBClassifier()
grid_search=GridSearchCV(xgb_clf,param_grid=param_grid,cv=5,scoring='f1',verbose=2,n_jobs=-1,iid=True)
grid_search.fit(X_train_trans,y_train)
params=grid_search.best_params_
#clf=xgboost.XGBRFClassifier(**params,n_estimators=100,n_jobs=-1,random_state=42)
clf=xgboost.XGBClassifier(**params)
clf.fit(X_train_trans,y_train,early_stopping_rounds=10,eval_set=[(X_val_trans,y_val)])
y_test=test['bowties']
X_test=test.drop(columns='bowties')
X_test=pipeline.fit_transform(X_test)
y_preds=clf.predict(X_test)
F_CV=grid_search.best_score_
P,R,F=precision_score(y_test,y_preds),recall_score(y_test,y_preds),f1_score(y_test,y_preds)
print(P,R,F,F_CV,params)
#0.8374384236453202 0.8808290155440415 0.8585858585858585 0.8345152519028066 {'_Booster': None, 'base_score': 0.5, 'colsample_bylevel': 1, 'colsample_bynode': 0.8, 'colsample_bytree': 1, 'gamma': 0, 'importance_type': 'gain', 'learning_rate': 0.05, 'max_delta_step': 0, 'max_depth': 7, 'min_child_weight': 5, 'n_estimators': 100, 'n_jobs': 1, 'nthread': None, 'objective': 'binary:logistic', 'random_state': 42, 'reg_alpha': 0, 'reg_lambda': 1, 'scale_pos_weight': 1, 'silent': None, 'subsample': 0.8, 'verbosity': 1}
#0.8366336633663366 0.8756476683937824 0.8556962025316456 0.8352625965436676 {'_Booster': None, 'base_score': 0.5, 'colsample_bylevel': 1, 'colsample_bynode': 0.8, 'colsample_bytree': 1, 'gamma': 0, 'importance_type': 'gain', 'learning_rate': 0.05, 'max_delta_step': 0, 'max_depth': 6, 'min_child_weight': 5, 'n_estimators': 100, 'n_jobs': 1, 'nthread': None, 'objective': 'binary:logistic', 'random_state': 42, 'reg_alpha': 0, 'reg_lambda': 1, 'scale_pos_weight': 1, 'silent': None, 'subsample': 0.8, 'verbosity': 1}
#0.8439024390243902 0.8963730569948186 0.8693467336683416 0.8497798540354795 {'_Booster': None, 'base_score': 0.5, 'colsample_bylevel': 1, 'colsample_bynode': 0.2, 'colsample_bytree': 1, 'gamma': 0, 'importance_type': 'gain', 'learning_rate': 0.05, 'max_delta_step': 0, 'max_depth': 7, 'min_child_weight': 5, 'n_estimators': 100, 'n_jobs': 1, 'nthread': None, 'objective': 'binary:logistic', 'random_state': 42, 'reg_alpha': 0, 'reg_lambda': 1, 'scale_pos_weight': 1, 'silent': None, 'subsample': 0.8, 'verbosity': 1}
# =============================================================================
# Run again with more estimators and early stopping to check for over fitting
# =============================================================================
params['n_estimators']=100
clf=xgboost.XGBClassifier(**params)
eval_set=[(X_train_trans,y_train),(X_val_trans,y_val),(X_test_trans,y_test)]
eval_metric=['error','logloss']
clf.fit(X_train_trans,y_train,eval_metric=eval_metric,eval_set=eval_set,verbose=10)
evals_result=clf.evals_result()
#Errors
train_errors=evals_result['validation_0']['error']
val_errors=evals_result['validation_1']['error']
test_errors=evals_result['validation_2']['error']
#Logloss Errors
train_errors_log=evals_result['validation_0']['logloss']
val_errors_log=evals_result['validation_1']['logloss']
test_errors_log=evals_result['validation_2']['logloss']
N=np.linspace(1,params['n_estimators'],params['n_estimators'])
plt.close('all')
#Plot error
plt.figure(1)
plt.plot(N,val_errors,'b-')
plt.plot(N,train_errors,'r-')
plt.plot(N,test_errors,'g-')
plt.legend(['Validation','Training','Testing'])
plt.xlabel('Number of Estimators')
plt.ylabel('Error')
#Plot logloss error
plt.figure(2)
plt.plot(N,val_errors_log,'b-')
plt.plot(N,train_errors_log,'r-')
plt.plot(N,test_errors_log,'g-')
plt.legend(['Validation','Training','Testing'])
plt.xlabel('Number of Estimators')
plt.ylabel('Logloss Error')
y_preds=clf.predict(X_test)
F_CV=grid_search.best_score_
P,R,F=precision_score(y_test,y_preds),recall_score(y_test,y_preds),f1_score(y_test,y_preds)
print(P,R,F,F_CV,params)
# =============================================================================
# Based on the logloss curves the optimal number of estimators is
# between 46 and 58 so we will run it for 70 and use early stopping
# =============================================================================
params['n_estimators']=70
final_params_selected=True
if final_params_selected:
# =============================================================================
# Combine training and validation sets to increase training data
# =============================================================================
X_train_full=pd.concat([X_train_trans,X_val_trans])
y_train_full=pd.concat([y_train,y_val])
clf=xgboost.XGBClassifier(**params)
eval_set=[(X_train_trans,y_train),(X_test_trans,y_test)]
eval_metric=['error','logloss']
clf.fit(X_train_full,y_train_full,eval_metric=eval_metric,eval_set=eval_set,verbose=5,early_stopping_rounds=5)
evals_result=clf.evals_result()
#Logloss Errors
train_errors_log_2=evals_result['validation_0']['logloss']
test_errors_log_2=evals_result['validation_1']['logloss']
N_2=np.linspace(1,len(test_errors_log_2),len(test_errors_log_2))
#Plot logloss error
plt.figure(3)
plt.plot(N_2,train_errors_log_2,'r-')
plt.plot(N_2,test_errors_log_2,'g-')
plt.legend(['Training','Testing'])
plt.xlabel('Number of Estimators')
plt.ylabel('Logloss Error')
y_preds=clf.predict(X_test)
F_CV=grid_search.best_score_
P,R,F=precision_score(y_test,y_preds),recall_score(y_test,y_preds),f1_score(y_test,y_preds)
print(P,R,F,F_CV,params)
joblib.dump(clf,"C:\\Users\\Logan Rowe\\Desktop\\bowtie-defect-identification\\classifiers\\XGBC_img_classifier_.pkl")
final_test_results=True
if final_test_results:
y_pred_proba=clf.predict_proba(X_test)
export_full_transformed_dataset=False
if export_full_transformed_dataset:
processed_data_dir='C:\\Users\\Logan Rowe\\Desktop\\bowtie-defect-identification\\preprocessed_datasets'
#Training Data Set
training_full=X_train_full
training_full['bowties']=y_train_full
joblib.dump(training_full,processed_data_dir+'\\XGBC_img_train.pkl')
#Testing Data Set
testing_full=test
joblib.dump(testing_full,processed_data_dir+'\\XGBC_img_test.pkl') | column_names=c1+c2+c3+c4
| random_line_split |
data_tensorboard.py | import tensorflow as tf
from tensorflow import keras
import numpy as np
import os
import cv2
from tensorflow.keras.preprocessing.image import ImageDataGenerator
"""
crop_top takes as an input img which is an array of shape (x, y, 3) and
percentage of picture to crop
"""
def crop_top(img, percent=0.15):
offset = int(img.shape[0] * percent) #cut the top portion of image
return img[offset:] # return image
"""
central_crop takes as an input img which is an array of shape (x, y, 3)
"""
def central_crop(img):
size = min(img.shape[0], img.shape[1]) # min of x and y
offset_h = int((img.shape[0] - size) / 2) # horizontal len
offset_w = int((img.shape[1] - size) / 2) # vertical
return img[offset_h:offset_h + size, offset_w:offset_w + size] # makes square image and centered
"""
process_image_file take as an input path of the photo for example data/train/1-s2.0-S0929664620300449-gr2_lrg-a.jpg,
top_percante for e.g top_percante = 0.08, and size of on axis of the image. In our case it will be 480
"""
def process_image_file(filepath, top_percent, size):
img = cv2.imread(filepath) # load image as array of shape (x, y , 3)
img = crop_top(img, percent=top_percent) # use function define above
img = central_crop(img) # use function define above
img = cv2.resize(img, (size, size)) # resize image from (min(x, y), min(x,y)) to (480,480). noticed that it remains
# of the shape with 3 chanels (480,480,3)
return img
"""
random_ratio_resize takes as an input image path, prob ,the probability of rotation if the random value is bigger than
prob do nothing and delta set as default to 0.1. As this function is used in code after central_crop which squares the image
ration will be 1. So we take then random value between segmet form [1-dleta, 1+ dleta]
"""
def random_ratio_resize(img, prob=0.3, delta=0.1):
if np.random.rand() >= prob: # bigger do nothing
return img
ratio = img.shape[0] / img.shape[1] # in our case 1
ratio = np.random.uniform(max(ratio - delta, 0.01), ratio + delta) # random value form [1-delta, 1+delta]. if delta
# change we prevent from left end of segment being non positve
if ratio * img.shape[1] <= img.shape[1]:
size = (int(img.shape[1] * ratio), img.shape[1]) # e.g shape of (474, 480) after this operation
else:
size = (img.shape[0], int(img.shape[0] / ratio)) #e.g shape of (480, 472) after this operation
dh = img.shape[0] - size[1] # could be zero or (480 - less number than 480)
top, bot = dh // 2, dh - dh // 2 # could be zeros ot the sum up to dh e.g dh = 9 then top = 4, bot = 5
dw = img.shape[1] - size[0] # similar to above
left, right = dw // 2, dw - dw // 2
if size[0] > 480 or size[1] > 480: #should not happen casue one of the coordinates should always be 480
print(img.shape, size, ratio)
img = cv2.resize(img, size) # resize image
img = cv2.copyMakeBorder(img, top, bot, left, right, cv2.BORDER_CONSTANT,
(0, 0, 0)) # this function makes image back to shape of (480, 480, 3) however it add black famre
# around the image
if img.shape[0] != 480 or img.shape[1] != 480: # should have happned since the ouput shape after copyMakeBorder supposed to be
# (480,480, 3)
raise ValueError(img.shape, size) # in case of error raise exception
return img
_augmentation_transform = ImageDataGenerator(
featurewise_center=False, # Boolean. Set input mean to 0 over the dataset, feature-wise.
featurewise_std_normalization=False, # Boolean. Divide inputs by std of the dataset, feature-wise.
rotation_range=10, # Int. Degree range for random rotations.
width_shift_range=0.1, # Float, 1-D array-like or int
# float: fraction of total width, if < 1, or pixels if >= 1.
# 1-D array-like: random elements from the array.
# int: integer number of pixels from interval (-width_shift_range, +width_shift_range)
# With width_shift_range=2 possible values are integers [-1, 0, +1], same as
# with width_shift_range=[-1, 0, +1], while with width_shift_range=1.0 possible values are
# floats in the interval [-1.0, +1.0).
height_shift_range=0.1, # Float, 1-D array-like or int
# float: fraction of total height, if < 1, or pixels if >= 1.
# 1-D array-like: random elements from the array.
# int: integer number of pixels from interval (-height_shift_range, +height_shift_range)
#With height_shift_range=2 possible values are integers [-1, 0, +1], same as
# with height_shift_range=[-1, 0, +1], while with height_shift_range=1.0 possible values are
# floats in the interval [-1.0, +1.0).
horizontal_flip=True, # Boolean. Randomly flip inputs horizontally.
brightness_range=(0.9, 1.1), # Tuple or list of two floats. Range for picking a brightness shift value from.
zoom_range=(0.85, 1.15), # Float or [lower, upper]. Range for random zoom. If a float, [lower, upper] = [1-zoom_range, 1+zoom_range].
fill_mode='constant', # One of {"constant", "nearest", "reflect" or "wrap"}. Default is 'nearest'. Points outside
# the boundaries of the input are filled according to the given mode:
#'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
#'nearest': aaaaaaaa|abcd|dddddddd
#'reflect': abcddcba|abcd|dcbaabcd
#'wrap': abcdabcd|abcd|abcdabcd
cval=0., # Float or Int. Value used for points outside the boundaries when fill_mode = "constant".
)
"""
apply_augmentation takes as input img which is an array of shape (x, y, 3)
"""
def apply_augmentation(img):
img = random_ratio_resize(img) #defina above
img = _augmentation_transform.random_transform(img) # Applies a random transformation to an image.
return img
"""
_process_csv_file take as an input a file in our case these are train_split.txt and test_split.txt (names may differ)
"""
def _process_csv_file(file):
with open(file, 'r') as fr: # open file
files = fr.readlines() # read lines
return files
class BalanceCovidDataset(keras.utils.Sequence):
'Generates data for Keras'
def __init__(
self,
data_dir,
csv_file,
is_training=True,
batch_size=8,
input_shape=(224, 224), # here default shape is (224, 224) becasue these values were for former models,
# In another file we set it for (480, 480)
n_classes=3, # normal, pneunomia, COVID-19
num_channels=3, # depth of image. Although the images are grey we keep this chanel (with possibility to delete this)
mapping={
'normal': 0,
'pneumonia': 1,
'COVID-19': 2
},
shuffle=True,
augmentation=apply_augmentation,
covid_percent=0.3,
class_weights=[1., 1., 6.], # default weight of classes. The less numbered class gets is more worthy than the others
# in this case COVID_19
top_percent=0.08 # here set to 0.08, though in above functions was set to 0.15
):
'Initialization' # seeting values in constructor
self.datadir = data_dir
self.dataset = _process_csv_file(csv_file)
self.is_training = is_training
self.batch_size = batch_size
self.N = len(self.dataset)
self.input_shape = input_shape
self.n_classes = n_classes
self.num_channels = num_channels
self.mapping = mapping
self.shuffle = True
self.covid_percent = covid_percent
self.class_weights = class_weights
self.n = 0
self.augmentation = augmentation
self.top_percent = top_percent
datasets = {'normal': [], 'pneumonia': [], 'COVID-19': []} #dictionary for classes
for l in self.dataset: # iterate for dataset
datasets[l.split()[2]].append(l) # the second argument describes the name of the class e.g l.split()[2] - normal
# append the whole line to dictionary.
self.datasets = [
datasets['normal'] + datasets['pneumonia'],
datasets['COVID-19'],
] # set dataset to list of list where the first one is the conctaenation of lists 'normal' and 'pneumonia', and the
# second one is COVID_19
print(len(self.datasets[0]), len(self.datasets[1]))
self.on_epoch_end() # is triggered once at the very beginning as well as at the end of each epoch.
# If the shuffle parameter is set to True, we will get a new order of exploration at
# each pass (or just keep a linear exploration scheme otherwise).
def __next__(self): # mothod that need to be implement, keras generator
# Get one batch of data
batch_x, batch_y, weights = self.__getitem__(self.n) # if the numer of batch is less than number of batches we call the
# __getitem__ methdd
# Batch index
self.n += 1
# If we have processed the entire dataset then
if self.n >= self.__len__(): # it means that we fed all of our training exmaples in this epoch
|
return batch_x, batch_y, weights
def __len__(self):
return int(np.ceil(len(self.datasets[0]) / float(self.batch_size))) # returns the numer of batches that we will have
def on_epoch_end(self):
'Updates indexes after each epoch'
if self.shuffle == True:
for v in self.datasets:
np.random.shuffle(v) #shuffle afetr each epoch. This is done becouse we want our model to generalzie as much as we can
# it shuffles the concatenated list of normal and pneunomia, and shuffles the COVID_19 list
def __getitem__(self, idx):
batch_x, batch_y = np.zeros(
(self.batch_size, *self.input_shape,
self.num_channels)), np.zeros(self.batch_size) # batch_x has a shape of (8, 480, 480. 3) and batch_y = (8), the * is used beacuse
# the input_shape is a tuple which len don't have to be 2 (in our case yes, but the python
# languages requries this)
batch_files = self.datasets[0][idx * self.batch_size:(idx + 1) *
self.batch_size] # we take a batch_size of training examples from concatenated list of normal and pneunomia
# len of batch files is equal to batch_size = 8
# upsample covid cases
covid_size = max(int(len(batch_files) * self.covid_percent), 1) # we would like to have at lest 1 exmaple of COVID-19 in our batch_size
# setting the proper value of covid_percent gives us more. In case of
# covid_percent = 0.3 and batch_size = 8, it returns int(2.4) = 2
covid_inds = np.random.choice(np.arange(len(batch_files)), # chose covid_size (2) random indexes from range 0 to batch_files - 1.
size=covid_size,
replace=False)
covid_files = np.random.choice(self.datasets[1], # chose random COVID_19 examples (in our case 2) from list of COVID_19 records
size=covid_size,
replace=False)
for i in range(covid_size):
batch_files[covid_inds[i]] = covid_files[i] # change chosen examples with those COVID-19 ones. Noticed that in case of batch_size = 8
# we have 2 COVID-19 exmaples but the rest 6 is in unkown ratio. eg it could be 6:0, 1:5 etc
for i in range(len(batch_files)):
sample = batch_files[i].split() # take sample form batch
if self.is_training: # if is_training = true this i an training sample. We do not make augmentation for test set
folder = 'train'
else:
folder = 'test'
x = process_image_file(os.path.join(self.datadir, folder, sample[1]), # preprocess an image
self.top_percent,
self.input_shape[0])
if self.is_training and hasattr(self, 'augmentation'): # if traing sample we do augmentation
x = self.augmentation(x)
x = x.astype('float32') / 255.0 # we normalized the values to be [0,1] the format is png and jpeg not dicom
y = self.mapping[sample[2]] # label sample second argument in sample is class name
batch_x[i] = x # bulid X batch
batch_y[i] = y # build y batch
class_weights = self.class_weights # use class weight to denote its importance
weights = np.take(class_weights, batch_y.astype('int64')) # e.g we have a y batch of np.array([2, 2, 0, 1, 0, 0, 1, 0]) and
# class_weights = [1,1,6] we get for each sample te result of
# array([6, 6, 1, 1, 1, 1, 1, 1])
return batch_x, keras.utils.to_categorical(batch_y, num_classes=self.n_classes), weights # to categorcial makes one_hot_encding in our case
"""
Worthy note: The class ImageDataGenerertor wroks like this. We feed it e.g with 8 samples of our training set(batch). Then we use our define
transofrmation. As a result we get new 8 samples of that have never been seen by our model. We feed it with this not with the orgnial 8 examples.
The motivation behind this is to genelrize the model. In each epoch we feed model with difent batches with slitly chanhes to images caused by our
trasformations. Every Sequence must implement the __getitem__ and the __len__ methods. If you want to modify your dataset between epochs you may
implement on_epoch_end. The method __getitem__ should return a complete batch.
"""
| self.on_epoch_end() # schuffle traing set
self.n = 0 # set to zero | conditional_block |
data_tensorboard.py | import tensorflow as tf
from tensorflow import keras
import numpy as np
import os
import cv2
from tensorflow.keras.preprocessing.image import ImageDataGenerator
"""
crop_top takes as an input img which is an array of shape (x, y, 3) and
percentage of picture to crop
"""
def | (img, percent=0.15):
offset = int(img.shape[0] * percent) #cut the top portion of image
return img[offset:] # return image
"""
central_crop takes as an input img which is an array of shape (x, y, 3)
"""
def central_crop(img):
size = min(img.shape[0], img.shape[1]) # min of x and y
offset_h = int((img.shape[0] - size) / 2) # horizontal len
offset_w = int((img.shape[1] - size) / 2) # vertical
return img[offset_h:offset_h + size, offset_w:offset_w + size] # makes square image and centered
"""
process_image_file take as an input path of the photo for example data/train/1-s2.0-S0929664620300449-gr2_lrg-a.jpg,
top_percante for e.g top_percante = 0.08, and size of on axis of the image. In our case it will be 480
"""
def process_image_file(filepath, top_percent, size):
img = cv2.imread(filepath) # load image as array of shape (x, y , 3)
img = crop_top(img, percent=top_percent) # use function define above
img = central_crop(img) # use function define above
img = cv2.resize(img, (size, size)) # resize image from (min(x, y), min(x,y)) to (480,480). noticed that it remains
# of the shape with 3 chanels (480,480,3)
return img
"""
random_ratio_resize takes as an input image path, prob ,the probability of rotation if the random value is bigger than
prob do nothing and delta set as default to 0.1. As this function is used in code after central_crop which squares the image
ration will be 1. So we take then random value between segmet form [1-dleta, 1+ dleta]
"""
def random_ratio_resize(img, prob=0.3, delta=0.1):
if np.random.rand() >= prob: # bigger do nothing
return img
ratio = img.shape[0] / img.shape[1] # in our case 1
ratio = np.random.uniform(max(ratio - delta, 0.01), ratio + delta) # random value form [1-delta, 1+delta]. if delta
# change we prevent from left end of segment being non positve
if ratio * img.shape[1] <= img.shape[1]:
size = (int(img.shape[1] * ratio), img.shape[1]) # e.g shape of (474, 480) after this operation
else:
size = (img.shape[0], int(img.shape[0] / ratio)) #e.g shape of (480, 472) after this operation
dh = img.shape[0] - size[1] # could be zero or (480 - less number than 480)
top, bot = dh // 2, dh - dh // 2 # could be zeros ot the sum up to dh e.g dh = 9 then top = 4, bot = 5
dw = img.shape[1] - size[0] # similar to above
left, right = dw // 2, dw - dw // 2
if size[0] > 480 or size[1] > 480: #should not happen casue one of the coordinates should always be 480
print(img.shape, size, ratio)
img = cv2.resize(img, size) # resize image
img = cv2.copyMakeBorder(img, top, bot, left, right, cv2.BORDER_CONSTANT,
(0, 0, 0)) # this function makes image back to shape of (480, 480, 3) however it add black famre
# around the image
if img.shape[0] != 480 or img.shape[1] != 480: # should have happned since the ouput shape after copyMakeBorder supposed to be
# (480,480, 3)
raise ValueError(img.shape, size) # in case of error raise exception
return img
_augmentation_transform = ImageDataGenerator(
featurewise_center=False, # Boolean. Set input mean to 0 over the dataset, feature-wise.
featurewise_std_normalization=False, # Boolean. Divide inputs by std of the dataset, feature-wise.
rotation_range=10, # Int. Degree range for random rotations.
width_shift_range=0.1, # Float, 1-D array-like or int
# float: fraction of total width, if < 1, or pixels if >= 1.
# 1-D array-like: random elements from the array.
# int: integer number of pixels from interval (-width_shift_range, +width_shift_range)
# With width_shift_range=2 possible values are integers [-1, 0, +1], same as
# with width_shift_range=[-1, 0, +1], while with width_shift_range=1.0 possible values are
# floats in the interval [-1.0, +1.0).
height_shift_range=0.1, # Float, 1-D array-like or int
# float: fraction of total height, if < 1, or pixels if >= 1.
# 1-D array-like: random elements from the array.
# int: integer number of pixels from interval (-height_shift_range, +height_shift_range)
#With height_shift_range=2 possible values are integers [-1, 0, +1], same as
# with height_shift_range=[-1, 0, +1], while with height_shift_range=1.0 possible values are
# floats in the interval [-1.0, +1.0).
horizontal_flip=True, # Boolean. Randomly flip inputs horizontally.
brightness_range=(0.9, 1.1), # Tuple or list of two floats. Range for picking a brightness shift value from.
zoom_range=(0.85, 1.15), # Float or [lower, upper]. Range for random zoom. If a float, [lower, upper] = [1-zoom_range, 1+zoom_range].
fill_mode='constant', # One of {"constant", "nearest", "reflect" or "wrap"}. Default is 'nearest'. Points outside
# the boundaries of the input are filled according to the given mode:
#'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
#'nearest': aaaaaaaa|abcd|dddddddd
#'reflect': abcddcba|abcd|dcbaabcd
#'wrap': abcdabcd|abcd|abcdabcd
cval=0., # Float or Int. Value used for points outside the boundaries when fill_mode = "constant".
)
"""
apply_augmentation takes as input img which is an array of shape (x, y, 3)
"""
def apply_augmentation(img):
img = random_ratio_resize(img) #defina above
img = _augmentation_transform.random_transform(img) # Applies a random transformation to an image.
return img
"""
_process_csv_file take as an input a file in our case these are train_split.txt and test_split.txt (names may differ)
"""
def _process_csv_file(file):
with open(file, 'r') as fr: # open file
files = fr.readlines() # read lines
return files
class BalanceCovidDataset(keras.utils.Sequence):
'Generates data for Keras'
def __init__(
self,
data_dir,
csv_file,
is_training=True,
batch_size=8,
input_shape=(224, 224), # here default shape is (224, 224) becasue these values were for former models,
# In another file we set it for (480, 480)
n_classes=3, # normal, pneunomia, COVID-19
num_channels=3, # depth of image. Although the images are grey we keep this chanel (with possibility to delete this)
mapping={
'normal': 0,
'pneumonia': 1,
'COVID-19': 2
},
shuffle=True,
augmentation=apply_augmentation,
covid_percent=0.3,
class_weights=[1., 1., 6.], # default weight of classes. The less numbered class gets is more worthy than the others
# in this case COVID_19
top_percent=0.08 # here set to 0.08, though in above functions was set to 0.15
):
'Initialization' # seeting values in constructor
self.datadir = data_dir
self.dataset = _process_csv_file(csv_file)
self.is_training = is_training
self.batch_size = batch_size
self.N = len(self.dataset)
self.input_shape = input_shape
self.n_classes = n_classes
self.num_channels = num_channels
self.mapping = mapping
self.shuffle = True
self.covid_percent = covid_percent
self.class_weights = class_weights
self.n = 0
self.augmentation = augmentation
self.top_percent = top_percent
datasets = {'normal': [], 'pneumonia': [], 'COVID-19': []} #dictionary for classes
for l in self.dataset: # iterate for dataset
datasets[l.split()[2]].append(l) # the second argument describes the name of the class e.g l.split()[2] - normal
# append the whole line to dictionary.
self.datasets = [
datasets['normal'] + datasets['pneumonia'],
datasets['COVID-19'],
] # set dataset to list of list where the first one is the conctaenation of lists 'normal' and 'pneumonia', and the
# second one is COVID_19
print(len(self.datasets[0]), len(self.datasets[1]))
self.on_epoch_end() # is triggered once at the very beginning as well as at the end of each epoch.
# If the shuffle parameter is set to True, we will get a new order of exploration at
# each pass (or just keep a linear exploration scheme otherwise).
def __next__(self): # mothod that need to be implement, keras generator
# Get one batch of data
batch_x, batch_y, weights = self.__getitem__(self.n) # if the numer of batch is less than number of batches we call the
# __getitem__ methdd
# Batch index
self.n += 1
# If we have processed the entire dataset then
if self.n >= self.__len__(): # it means that we fed all of our training exmaples in this epoch
self.on_epoch_end() # schuffle traing set
self.n = 0 # set to zero
return batch_x, batch_y, weights
def __len__(self):
return int(np.ceil(len(self.datasets[0]) / float(self.batch_size))) # returns the numer of batches that we will have
def on_epoch_end(self):
'Updates indexes after each epoch'
if self.shuffle == True:
for v in self.datasets:
np.random.shuffle(v) #shuffle afetr each epoch. This is done becouse we want our model to generalzie as much as we can
# it shuffles the concatenated list of normal and pneunomia, and shuffles the COVID_19 list
def __getitem__(self, idx):
    """Assemble batch ``idx``: images, one-hot labels, and per-sample weights."""
    # batch_x: (batch_size, *input_shape, num_channels); batch_y: (batch_size,).
    # The * unpacks input_shape, which is a tuple of arbitrary length.
    batch_x, batch_y = np.zeros(
        (self.batch_size, *self.input_shape,
         self.num_channels)), np.zeros(self.batch_size)
    # Slice batch_size dataset lines from the normal+pneumonia pool.
    batch_files = self.datasets[0][idx * self.batch_size:(idx + 1) *
                                   self.batch_size]
    # upsample covid cases
    # Guarantee at least one COVID-19 sample per batch; covid_percent sets the
    # fraction, e.g. covid_percent=0.3 with batch_size=8 gives int(2.4) = 2.
    covid_size = max(int(len(batch_files) * self.covid_percent), 1)
    # Pick covid_size distinct batch positions to overwrite...
    covid_inds = np.random.choice(np.arange(len(batch_files)),
                                  size=covid_size,
                                  replace=False)
    # ...and covid_size distinct COVID-19 lines to put there. The remaining
    # slots keep whatever normal/pneumonia mix the slice happened to contain.
    covid_files = np.random.choice(self.datasets[1],
                                   size=covid_size,
                                   replace=False)
    for i in range(covid_size):
        batch_files[covid_inds[i]] = covid_files[i]
    for i in range(len(batch_files)):
        # Each line is whitespace-separated; sample[1] is the image file name
        # and sample[2] the class name (see __init__'s bucketing).
        sample = batch_files[i].split()
        if self.is_training:
            folder = 'train'
        else:
            folder = 'test'
        # Crop/resize the image to (input_shape[0], input_shape[0]).
        x = process_image_file(os.path.join(self.datadir, folder, sample[1]),
                               self.top_percent,
                               self.input_shape[0])
        # Augment training samples only. NOTE(review): hasattr is always true
        # because __init__ always sets self.augmentation.
        if self.is_training and hasattr(self, 'augmentation'):
            x = self.augmentation(x)
        # Normalize pixel values to [0, 1] (inputs are png/jpeg, not DICOM).
        x = x.astype('float32') / 255.0
        y = self.mapping[sample[2]]
        batch_x[i] = x
        batch_y[i] = y
    class_weights = self.class_weights
    # Map each label to its class weight, e.g. labels [2, 2, 0, 1] with
    # weights [1, 1, 6] -> [6, 6, 1, 1].
    weights = np.take(class_weights, batch_y.astype('int64'))
    # to_categorical turns the integer labels into one-hot vectors.
    return batch_x, keras.utils.to_categorical(batch_y, num_classes=self.n_classes), weights
"""
Worthy note: the Keras ImageDataGenerator works like this: we feed it e.g. 8 samples of our training set (a batch) and apply our defined
transformations. The result is 8 new samples that have never been seen by our model, and those — not the original 8 — are what we feed it.
The motivation behind this is to help the model generalize: in each epoch the model sees different batches with slight changes to the images
caused by our transformations. Every Sequence must implement the __getitem__ and __len__ methods. If you want to modify your dataset between
epochs you may implement on_epoch_end. The method __getitem__ should return a complete batch.
"""
| crop_top | identifier_name |
data_tensorboard.py | import tensorflow as tf
from tensorflow import keras
import numpy as np
import os
import cv2
from tensorflow.keras.preprocessing.image import ImageDataGenerator
"""
crop_top takes as an input img which is an array of shape (x, y, 3) and
percentage of picture to crop
"""
def crop_top(img, percent=0.15):
    """Remove the top `percent` fraction of rows from `img`.

    `img` is an array of shape (h, w, ...); the returned view keeps all rows
    from the cut point down.
    """
    rows_to_drop = int(img.shape[0] * percent)
    cropped = img[rows_to_drop:]
    return cropped
"""
central_crop takes as an input img which is an array of shape (x, y, 3)
"""
def central_crop(img):
    """Return the largest centered square crop of `img` (shape (h, w, ...))."""
    height, width = img.shape[0], img.shape[1]
    side = min(height, width)
    top = int((height - side) / 2)
    left = int((width - side) / 2)
    # Square window of `side` pixels, centered in both dimensions.
    return img[top:top + side, left:left + side]
"""
process_image_file take as an input path of the photo for example data/train/1-s2.0-S0929664620300449-gr2_lrg-a.jpg,
top_percante for e.g top_percante = 0.08, and size of on axis of the image. In our case it will be 480
"""
def process_image_file(filepath, top_percent, size):
    """Load an image from `filepath` and prepare it for the model.

    The pipeline is: read -> drop the top `top_percent` of rows -> take the
    centered square -> resize to (size, size). The result keeps 3 channels,
    so its shape is (size, size, 3).
    """
    image = cv2.imread(filepath)                    # array of shape (h, w, 3)
    image = crop_top(image, percent=top_percent)    # cut the top portion
    image = central_crop(image)                     # square, centered crop
    return cv2.resize(image, (size, size))          # e.g. (480, 480, 3)
"""
random_ratio_resize takes as an input image path, prob ,the probability of rotation if the random value is bigger than
prob do nothing and delta set as default to 0.1. As this function is used in code after central_crop which squares the image
ration will be 1. So we take then random value between segmet form [1-dleta, 1+ dleta]
"""
def random_ratio_resize(img, prob=0.3, delta=0.1):
    """With probability `prob`, randomly perturb the aspect ratio of `img`.

    The image is resized along one axis by a ratio drawn from
    [ratio - delta, ratio + delta] and then padded back with a black border.
    Assumes square 480x480 inputs (the size checks below are hard-coded to
    480 — TODO confirm before reusing at other resolutions).
    """
    # With probability (1 - prob) leave the image untouched.
    if np.random.rand() >= prob:
        return img
    ratio = img.shape[0] / img.shape[1]  # 1.0 for square inputs
    # Draw a new ratio; the max() keeps the lower bound positive even for
    # large delta values.
    ratio = np.random.uniform(max(ratio - delta, 0.01), ratio + delta)

    # Shrink one dimension according to the drawn ratio (the other stays).
    if ratio * img.shape[1] <= img.shape[1]:
        size = (int(img.shape[1] * ratio), img.shape[1])
    else:
        size = (img.shape[0], int(img.shape[0] / ratio))

    # Split the removed pixels into (nearly) equal borders on each side.
    dh = img.shape[0] - size[1]
    top, bot = dh // 2, dh - dh // 2
    dw = img.shape[1] - size[0]
    left, right = dw // 2, dw - dw // 2
    if size[0] > 480 or size[1] > 480:  # should not happen for 480x480 input
        print(img.shape, size, ratio)

    img = cv2.resize(img, size)
    # BUGFIX: the 7th positional argument of cv2.copyMakeBorder is `dst`, not
    # the border colour — the fill colour must be passed via the `value`
    # keyword, otherwise the (0, 0, 0) tuple is misinterpreted.
    img = cv2.copyMakeBorder(img, top, bot, left, right, cv2.BORDER_CONSTANT,
                             value=(0, 0, 0))

    # Padding must restore the original 480x480 footprint.
    if img.shape[0] != 480 or img.shape[1] != 480:
        raise ValueError(img.shape, size)
    return img
# Random-augmentation pipeline applied to each training image (used by
# apply_augmentation). Parameters follow the Keras ImageDataGenerator API.
_augmentation_transform = ImageDataGenerator(
    featurewise_center=False,             # do not zero-center over the dataset
    featurewise_std_normalization=False,  # do not divide by the dataset std
    rotation_range=10,                    # random rotations up to +/-10 degrees
    width_shift_range=0.1,                # horizontal shift, fraction of width (< 1)
    height_shift_range=0.1,               # vertical shift, fraction of height (< 1)
    horizontal_flip=True,                 # randomly flip inputs horizontally
    brightness_range=(0.9, 1.1),          # random brightness scaling range
    zoom_range=(0.85, 1.15),              # random zoom range [lower, upper]
    fill_mode='constant',                 # pixels exposed by the transform are
                                          # filled with the constant cval...
    cval=0.,                              # ...i.e. black
)
"""
apply_augmentation takes as input img which is an array of shape (x, y, 3)
"""
def apply_augmentation(img):
|
"""
_process_csv_file take as an input a file in our case these are train_split.txt and test_split.txt (names may differ)
"""
def _process_csv_file(file):
with open(file, 'r') as fr: # open file
files = fr.readlines() # read lines
return files
class BalanceCovidDataset(keras.utils.Sequence):
    """Keras Sequence yielding class-balanced batches of chest X-ray images.

    Each batch is sliced from the normal+pneumonia pool, then a fraction of
    its slots (`covid_percent`, but always at least one) is overwritten with
    randomly chosen COVID-19 samples, so every batch contains COVID-19 cases.
    Returns (batch_x, one-hot batch_y, per-sample class weights).
    """

    def __init__(
            self,
            data_dir,
            csv_file,
            is_training=True,
            batch_size=8,
            input_shape=(224, 224),  # older models used 224; callers override to (480, 480)
            n_classes=3,             # normal, pneumonia, COVID-19
            num_channels=3,          # images are grayscale but kept as 3 channels
            # NOTE: mutable defaults (dict/list) are shared between instances;
            # safe here because they are never mutated.
            mapping={
                'normal': 0,
                'pneumonia': 1,
                'COVID-19': 2
            },
            shuffle=True,
            augmentation=apply_augmentation,
            covid_percent=0.3,
            class_weights=[1., 1., 6.],  # minority class (COVID-19) weighted highest
            top_percent=0.08):
        'Initialization'
        self.datadir = data_dir
        self.dataset = _process_csv_file(csv_file)
        self.is_training = is_training
        self.batch_size = batch_size
        self.N = len(self.dataset)
        self.input_shape = input_shape
        self.n_classes = n_classes
        self.num_channels = num_channels
        self.mapping = mapping
        # BUGFIX: honour the `shuffle` argument instead of hard-coding True.
        self.shuffle = shuffle
        self.covid_percent = covid_percent
        self.class_weights = class_weights
        self.n = 0  # batch cursor used by __next__
        self.augmentation = augmentation
        self.top_percent = top_percent

        # Bucket every CSV line by its class name (third whitespace field).
        datasets = {'normal': [], 'pneumonia': [], 'COVID-19': []}
        for l in self.dataset:
            datasets[l.split()[2]].append(l)
        # datasets[0]: normal + pneumonia pool; datasets[1]: COVID-19 pool.
        self.datasets = [
            datasets['normal'] + datasets['pneumonia'],
            datasets['COVID-19'],
        ]
        print(len(self.datasets[0]), len(self.datasets[1]))
        # Shuffle once up front; Keras also calls this after every epoch.
        self.on_epoch_end()

    def __next__(self):
        """Return the next batch as (batch_x, batch_y, weights)."""
        batch_x, batch_y, weights = self.__getitem__(self.n)
        self.n += 1
        # After the last batch of the epoch, reshuffle and restart.
        if self.n >= self.__len__():
            self.on_epoch_end()
            self.n = 0
        return batch_x, batch_y, weights

    def __len__(self):
        # Number of batches per epoch, over the normal+pneumonia pool only;
        # COVID-19 samples are injected per batch in __getitem__.
        return int(np.ceil(len(self.datasets[0]) / float(self.batch_size)))

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        # In-place shuffle of both pools so every epoch sees a new order.
        if self.shuffle:
            for v in self.datasets:
                np.random.shuffle(v)

    def __getitem__(self, idx):
        """Assemble batch `idx`: images, one-hot labels, per-sample weights."""
        # batch_x: (batch_size, *input_shape, num_channels); batch_y: (batch_size,)
        batch_x, batch_y = np.zeros(
            (self.batch_size, *self.input_shape,
             self.num_channels)), np.zeros(self.batch_size)
        batch_files = self.datasets[0][idx * self.batch_size:(idx + 1) *
                                       self.batch_size]

        # Upsample COVID-19 cases: guarantee at least one per batch; e.g.
        # covid_percent=0.3 with batch_size=8 gives int(2.4) = 2 slots.
        covid_size = max(int(len(batch_files) * self.covid_percent), 1)
        covid_inds = np.random.choice(np.arange(len(batch_files)),
                                      size=covid_size,
                                      replace=False)
        covid_files = np.random.choice(self.datasets[1],
                                       size=covid_size,
                                       replace=False)
        # Overwrite the chosen slots; the remaining slots keep whatever
        # normal/pneumonia mix the slice happened to contain.
        for i in range(covid_size):
            batch_files[covid_inds[i]] = covid_files[i]

        for i in range(len(batch_files)):
            # sample[1] is the image file name, sample[2] the class name.
            sample = batch_files[i].split()
            if self.is_training:
                folder = 'train'
            else:
                folder = 'test'
            x = process_image_file(os.path.join(self.datadir, folder, sample[1]),
                                   self.top_percent,
                                   self.input_shape[0])
            # Augment training samples only. BUGFIX: the original checked
            # hasattr(self, 'augmentation'), which is always true and would
            # crash if augmentation=None was passed.
            if self.is_training and self.augmentation is not None:
                x = self.augmentation(x)
            # Normalize pixel values to [0, 1] (inputs are png/jpeg, not DICOM).
            x = x.astype('float32') / 255.0
            y = self.mapping[sample[2]]
            batch_x[i] = x
            batch_y[i] = y

        class_weights = self.class_weights
        # Map labels to weights, e.g. [2, 2, 0, 1] with [1, 1, 6] -> [6, 6, 1, 1].
        weights = np.take(class_weights, batch_y.astype('int64'))
        return batch_x, keras.utils.to_categorical(batch_y, num_classes=self.n_classes), weights
"""
Worthy note: the Keras ImageDataGenerator works like this: we feed it e.g. 8 samples of our training set (a batch) and apply our defined
transformations. The result is 8 new samples that have never been seen by our model, and those — not the original 8 — are what we feed it.
The motivation behind this is to help the model generalize: in each epoch the model sees different batches with slight changes to the images
caused by our transformations. Every Sequence must implement the __getitem__ and __len__ methods. If you want to modify your dataset between
epochs you may implement on_epoch_end. The method __getitem__ should return a complete batch.
"""
| img = random_ratio_resize(img) #defina above
img = _augmentation_transform.random_transform(img) # Applies a random transformation to an image.
return img | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.