space_group.py
class Translation(Permutation):
r"""
`Permutation` that represents a lattice permutation.
Stores translation lattice vector and generates a sensible name from it.
The product of two `Translation`s carries the appropriate displacement vector.
"""
def __init__(self, permutation: Array, displacement: Array):
r"""
Creates a `Translation` from a permutation array and a displacement vector
Arguments:
permutation: a 1D array listing :math:`g^{-1}(x)` for all
:math:`0\le x < N` (i.e., `V[permutation]` permutes the
elements of `V` as desired)
displacement: displacement vector in units of lattice basis vectors
Returns:
a `Translation` object encoding the same information
"""
super().__init__(permutation)
self._vector = np.asarray(displacement)
@property
def _name(self):
return f"Translation({self._vector.tolist()})"
@dispatch
def product(p: Translation, q: Translation):
return Translation(p(np.asarray(q)), p._vector + q._vector)
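# Illustrative sketch (not part of the original file): how two `Translation`s
# compose on a 1D chain of 4 sites with periodic boundaries. The permutation
# array lists g^{-1}(x), so a shift by +1 is [3, 0, 1, 2]; the composition
# relies on the `Permutation` base class being callable on arrays, exactly as
# the `product` implementation above requires.
def _translation_product_example():
    shift_by_one = Translation(np.array([3, 0, 1, 2]), np.array([1]))
    shift_by_two = product(shift_by_one, shift_by_one)
    # displacement vectors add, so the generated name reflects the total shift
    assert shift_by_two._vector.tolist() == [2]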
def _ensure_iterable(x):
"""Extracts iterables given in varargs"""
if isinstance(x[0], Iterable):
if len(x) > 1:
raise TypeError("Either Iterable or variable argument list expected")
return x[0]
else:
return x
@struct.dataclass
class SpaceGroupBuilder:
"""
Class to handle the space group symmetries of `Lattice`.
Constructs `PermutationGroup`s that represent the action on a `Lattice` of
* a geometrical point group given as a constructor argument,
* its rotational subgroup (i.e. point group symmetries with determinant +1)
* the translation group of the same lattice
* and the space group that is generated as the semidirect product of
the supplied point group and the translation group.
Also generates space group irreps for symmetrising wave functions.
"""
lattice: Lattice
point_group_: PointGroup
def __post_init__(self):
object.__setattr__(
self,
"point_group_",
self.point_group_.replace(unit_cell=self.lattice.basis_vectors),
)
# TODO describe ordering of group elements here and later in docstring
@struct.property_cached
def point_group(self) -> PermutationGroup:
"""
The point group as a `PermutationGroup` acting on the sites of `self.lattice`.
"""
perms = []
for p in self.point_group_:
if isinstance(p, Identity):
perms.append(Identity())
else:
# note that we need the preimages in the permutation
perm = self.lattice.id_from_position(p.preimage(self.lattice.positions))
perms.append(Permutation(perm, name=str(p)))
return PermutationGroup(perms, degree=self.lattice.n_nodes)
@struct.property_cached
def rotation_group(self) -> PermutationGroup:
"""The group of rotations (i.e. point group symmetries with determinant +1)
as a `PermutationGroup` acting on the sites of `self.lattice`."""
perms = []
for p in self.point_group_.rotation_group():
if isinstance(p, Identity):
perms.append(Identity())
else:
# note that we need the preimages in the permutation
perm = self.lattice.id_from_position(p.preimage(self.lattice.positions))
perms.append(Permutation(perm, name=str(p)))
return PermutationGroup(perms, degree=self.lattice.n_nodes)
def _translations_along_axis(self, axis: int) -> PermutationGroup:
"""
The group of valid translations along an axis as a `PermutationGroup`
acting on the sites of `self.lattice`.
"""
if self.lattice._pbc[axis]:
trans_list = [Identity()]
# note that we need the preimages in the permutation
trans_perm = self.lattice.id_from_position(
self.lattice.positions - self.lattice.basis_vectors[axis]
)
vector = np.zeros(self.lattice.ndim, dtype=int)
vector[axis] = 1
trans_by_one = Translation(trans_perm, vector)
for _ in range(1, self.lattice.extent[axis]):
trans_list.append(trans_list[-1] @ trans_by_one)
return PermutationGroup(trans_list, degree=self.lattice.n_nodes)
else:
return PermutationGroup([Identity()], degree=self.lattice.n_nodes)
@struct.property_cached
def _full_translation_group(self) -> PermutationGroup:
"""
The group of valid translations of `self.lattice` as a `PermutationGroup`
acting on the sites of the same.
"""
return reduce(
PermutationGroup.__matmul__,
[self._translations_along_axis(i) for i in range(self.lattice.ndim)],
)
def translation_group(
self, axes: Optional[Union[int, Sequence[int]]] = None
) -> PermutationGroup:
"""
The group of valid translations of `self.lattice` as a `PermutationGroup`
acting on the sites of the same.
"""
if axes is None:
return self._full_translation_group
elif isinstance(axes, int):
return self._translations_along_axis(axes)
else:
return reduce(
PermutationGroup.__matmul__,
[self._translations_along_axis(i) for i in axes],
)
@struct.property_cached
def space_group(self) -> PermutationGroup:
"""
The space group generated by `self.point_group` and `self.translation_group`.
"""
return self._full_translation_group @ self.point_group
def _little_group_index(self, k: Array) -> Array:
"""
Returns the indices of the elements of the little group corresponding to
wave vector `k`.
"""
# calculate k' = p(k) for all p in the point group
big_star = np.tensordot(self.point_group_.matrices(), k, axes=1)
big_star = self.lattice.to_reciprocal_lattice(big_star) % self.lattice.extent
# should test for pbc before taking the modulus, but the only valid wave
# vector for non-pbc axes is 0 and 0 % anything == 0
# assumes point_group_[0] is the identity
is_in_little_group = np.all(big_star == big_star[0], axis=1)
return np.arange(len(self.point_group_))[is_in_little_group]
def little_group(self, *k: Array) -> PointGroup:
"""
Returns the little co-group corresponding to wave vector *k*.
This is the subgroup of the point group that leaves *k* invariant.
Arguments:
k: the wave vector in Cartesian axes
Returns:
the little co-group as a `PointGroup`
"""
k = _ensure_iterable(k)
return PointGroup(
[self.point_group_[i] for i in self._little_group_index(k)],
ndim=self.point_group_.ndim,
unit_cell=self.lattice.basis_vectors,
)
def _little_group_irreps(self, k: Array, divide: bool = False) -> Array:
"""
Returns the character table of the little group embedded in the full point
group. Symmetries outside the little group get 0.
If `divide` is `True`, the result gets divided by the size of the little group.
This is convenient when calculating space group irreps.
"""
idx = self._little_group_index(k)
CT = self.little_group(k).character_table()
CT_full = np.zeros((CT.shape[0], len(self.point_group_)))
CT_full[:, idx] = CT
return CT_full / idx.size if divide else CT_full
def space_group_irreps(self, *k: Array) -> Array:
"""
Returns the portion of the character table of the full space group corresponding
to the star of the wave vector *k*.
Arguments:
k: the wave vector in Cartesian axes
Returns:
An array `CT` listing the characters for a number of irreps of the
space group.
`CT[i]` for each `i` gives a distinct irrep, each corresponding to
`self.little_group(k).character_table()[i]`.
`CT[i,j]` gives the character of `self.space_group[j]` in the same.
"""
k = _ensure_iterable(k)
# Wave vectors
big_star_Cart = np.tensordot(self.point_group_.matrices(), k, axes=1)
big_star = self.lattice.to_reciprocal_lattice(big_star_Cart) * (
2 * pi / self.lattice.extent
)
# Little-group-irrep factors
# Conjugacy_table[g,p] lists p^{-1}gp, so point_group_factors[i,:,p]
# of irrep #i for the little group of p(k) is the equivalent
# Phase factor for non-symmorphic symmetries is exp(-i w_g . p(k))
point_group_factors = self._little_group_irreps(k, divide=True)[
:, self.point_group_.conjugacy_table
] * np.exp(
-1j
* np.tensordot(
self.point_group_.translations(), big_star_Cart, axes=(-1, -1)
)
)
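Taken together, the builder above turns a geometric point group into permutation groups acting on lattice sites. The following is a minimal usage sketch, not part of the original file; `my_lattice` and `my_point_group` stand in for a periodic `Lattice` and a compatible `PointGroup` constructed elsewhere in the library.

builder = SpaceGroupBuilder(lattice=my_lattice, point_group_=my_point_group)
site_point_group = builder.point_group          # point group as site permutations
rotations = builder.rotation_group              # determinant +1 subgroup
translations = builder.translation_group()      # all lattice translations
full_space_group = builder.space_group          # semidirect product of the two
# characters used to symmetrise wave functions at wave vector k = (0, 0):
characters = builder.space_group_irreps(0.0, 0.0)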
basic_model.py
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from chu_liu_edmonds import *
from os import path
# NOTE: get_vocabs and PosDataset come from a project data module that is not
# shown in this snippet; HYPER_PARAMETER_TUNING and hyper_parameter_tuning()
# are likewise defined elsewhere.
# taken from the paper
MLP_HIDDEN_DIM = 100
EPOCHS = 150
WORD_EMBEDDING_DIM = 100
POS_EMBEDDING_DIM = 25
HIDDEN_DIM = 125
LEARNING_RATE = 0.01
EARLY_STOPPING = 10 # num epochs with no validation acc improvement to stop training
PATH = "./basic_model_best_params"
cross_entropy_loss = nn.CrossEntropyLoss(reduction='mean')
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# class not_efficientMLP(nn.Module):
# def __init__(self, lstm_dim, mlp_hidden_dim):
# super(not_efficientMLP, self).__init__()
# self.first_linear = nn.Linear(2 * lstm_dim, mlp_hidden_dim)
# self.non_linearity = nn.ReLU()
# self.second_mlp = nn.Linear(mlp_hidden_dim, 1, bias=True) # will output a score of a pair
#
# def forward(self, lstm_out):
# sentence_length = lstm_out.shape[0]
# scores = torch.zeros(size=(sentence_length, sentence_length)).to(device)
# for i, v_i in enumerate(lstm_out):
# for j, v_j in enumerate(lstm_out):
# if i == j:
# scores[i, j] = 0
# else:
# a = torch.cat((v_i, v_j), dim=0)
# x = self.first_linear(a)
# y = self.non_linearity(x)
# scores[i, j] = self.second_mlp(y)
# return scores
class SplittedMLP(nn.Module):
def __init__(self, lstm_dim, mlp_hidden_dim):
super(SplittedMLP, self).__init__()
self.fc_h = nn.Linear(lstm_dim, mlp_hidden_dim, bias=True)  # fully-connected layer for head representations
self.fc_m = nn.Linear(lstm_dim, mlp_hidden_dim, bias=False)  # fully-connected layer for modifier representations
def forward(self, lstm_out):
heads_hidden = self.fc_h(lstm_out)
mods_hidden = self.fc_m(lstm_out)
return heads_hidden, mods_hidden
class MLP(nn.Module):
def __init__(self, lstm_dim, mlp_hidden_dim):
super(MLP, self).__init__()
self.first_mlp = SplittedMLP(lstm_dim, mlp_hidden_dim)
self.non_linearity = nn.Tanh()
self.second_mlp = nn.Linear(mlp_hidden_dim, 1, bias=True) # will output a score of a pair
def forward(self, lstm_out):
sentence_length = lstm_out.shape[0]
heads_hidden, mods_hidden = self.first_mlp(lstm_out)
scores = torch.zeros(size=(sentence_length, sentence_length)).to(device)
# we will fill the table row by row, using broadcasting
for mod in range(sentence_length):
mod_hidden = mods_hidden[mod]
summed_values = mod_hidden + heads_hidden # a single mod with all heads possibilities
x = self.non_linearity(summed_values)
scores[:, mod] = torch.flatten(self.second_mlp(x))
scores[mod, mod] = -np.inf  # a word can't be its own head
return scores
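# Illustrative check (not part of the original file): the MLP above maps an
# LSTM output of shape [sentence_length, 2*hidden_dim] to a
# [sentence_length, sentence_length] table of arc scores, with the diagonal
# masked to -inf so a word can never be selected as its own head.
def _mlp_shape_example():
    scorer = MLP(2 * HIDDEN_DIM, MLP_HIDDEN_DIM).to(device)
    dummy_lstm_out = torch.randn(5, 2 * HIDDEN_DIM).to(device)
    arc_scores = scorer(dummy_lstm_out)
    assert arc_scores.shape == (5, 5)
    assert torch.isinf(arc_scores.diag()).all()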
class DnnDependencyParser(nn.Module):
def __init__(self, word_embedding_dim, pos_embedding_dim, hidden_dim, word_vocab_size, tag_vocab_size):
super(DnnDependencyParser, self).__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# get a tensor of size word_vocab_size and return a word embedding
self.word_embedding = nn.Embedding(word_vocab_size, word_embedding_dim)
# get a tensor of size tag_vocab_size and return a pos embedding
self.pos_embedding = nn.Embedding(tag_vocab_size, pos_embedding_dim)
self.lstm = nn.LSTM(input_size=word_embedding_dim + pos_embedding_dim, hidden_size=hidden_dim, num_layers=2,
bidirectional=True, batch_first=False)
self.mlp = MLP(2*hidden_dim, MLP_HIDDEN_DIM)
# self.mlp = not_efficientMLP(2*hidden_dim, MLP_HIDDEN_DIM)
def forward(self, word_idx_tensor, pos_idx_tensor):
# get x = concat(e(w), e(p))
e_w = self.word_embedding(word_idx_tensor.to(self.device)) # [batch_size, seq_length, e_w]
e_p = self.pos_embedding(pos_idx_tensor.to(self.device)) # [batch_size, seq_length, e_p]
embeds = torch.cat((e_w, e_p), dim=2).to(self.device) # [batch_size, seq_length, e_w + e_p]
# assert embeds.shape[0] == 1 and embeds.shape[2] == POS_EMBEDDING_DIM + WORD_EMBEDDING_DIM
lstm_out, _ = self.lstm(embeds.view(embeds.shape[1], 1, -1)) # [seq_length, batch_size, 2*hidden_dim]
# Turns the output into one big tensor, each line is rep of a word in the sentence
lstm_out = lstm_out.view(lstm_out.shape[0], -1) # [seq_length, 2*hidden_dim]
out = self.mlp(lstm_out)
return out
def NLLL_function(scores, true_tree):
"""
Parameters
----------
scores - a matrix of size (sentence_length x sentence_length)
true_tree - the ground truth dependency tree (head index per token)
Returns
-------
the loss
"""
clean_scores = scores[:, 1:]  # ROOT can't be a modifier
clean_true_tree = true_tree[1:]
sentence_length = clean_scores.shape[1] # without root
loss = 0
for mod in range(sentence_length):
loss += cross_entropy_loss(clean_scores[:, mod].unsqueeze(dim=0), clean_true_tree[mod:mod+1])
return (1.0/sentence_length) * loss
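# Illustrative sketch (not part of the original file): the loss above is a
# per-modifier cross-entropy averaged over the sentence. Column m of `scores`
# holds the head scores for word m, and index 0 (ROOT) is never a modifier.
def _nlll_example():
    toy_scores = torch.tensor([[0.0, 2.0, 0.1],
                               [0.0, 0.5, 3.0],
                               [0.0, 0.3, 0.2]])
    toy_tree = torch.tensor([0, 0, 1])  # word 1 headed by ROOT, word 2 by word 1
    loss = NLLL_function(toy_scores, toy_tree)
    assert loss.item() > 0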
# def NLLL(output, target):
# """
# :param output: The table of MLP scores of each word pair
# :param target: The ground truth of the actual arcs
# :return:
# """
# # loss = -1/|Y|*[S_gt - sum(log(sum(exp(s_j_m))))]
# S_gt = 0
# mod_score = 0
# for idx, head in enumerate(target[0]):
# if idx == 0:
# continue
# head_idx = head.item()
# mod_idx = idx
# S_gt += output[head_idx, mod_idx]
# #
# S_j_m = output[:, mod_idx]
# mod_score += torch.log(torch.sum(torch.exp(S_j_m)))
# Y_i = target[0].shape[0]
# final_loss = (-1./Y_i)*(S_gt - mod_score)
# return final_loss
#
#
# def get_acc_measurements(GT, energy_table):
# predicted_mst, _ = decode_mst(energy=energy_table, length=energy_table.shape[0], has_labels=False)
# y_pred = torch.from_numpy(predicted_mst[1:])
# y_true = GT[1:]
# print("y_pred", y_pred)
# print("y_true = ", y_true)
# print((y_pred == y_true).sum())
# acc = (y_pred == y_true).sum()/float(y_true.shape[0])
# return acc.item()
def accuracy(ground_truth, energy_table):
predicted_mst, _ = decode_mst(energy=energy_table.detach(), length=energy_table.shape[0], has_labels=False)
# first one is the HEAD of root so we avoid taking it into account
y_pred = torch.from_numpy(predicted_mst[1:])
y_true = ground_truth[1:]
acc = (y_pred == y_true).sum()/float(y_true.shape[0])
return acc.item()
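# Illustrative sketch (not part of the original file): `accuracy` decodes the
# maximum spanning tree from the score table with Chu-Liu/Edmonds and computes
# UAS against the gold heads, skipping index 0 (the artificial ROOT token).
def _uas_example():
    toy_energy = torch.tensor([[0.0, 5.0, 1.0],
                               [0.0, 0.0, 4.0],
                               [0.0, 2.0, 0.0]])
    gold_heads = torch.tensor([0, 0, 1])  # the entry for ROOT itself is ignored
    uas = accuracy(gold_heads, toy_energy)
    assert 0.0 <= uas <= 1.0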
def evaluate(model, data_loader):
val_acc = 0
val_size = 0
for batch_idx, input_data in enumerate(data_loader):
val_size += 1
with torch.no_grad():
words_idx_tensor, pos_idx_tensor, heads_tensor = input_data
tag_scores = model(words_idx_tensor, pos_idx_tensor)
val_acc += (accuracy(heads_tensor[0].cpu(), tag_scores.cpu()))
return val_acc / val_size
def main():
# sanity check
data_dir = "HW2-files/"
path_train = data_dir + "train.labeled"
print("path_train -", path_train)
path_test = data_dir + "test.labeled"
print("path_test -", path_test)
paths_list = [path_train, path_test]
word_cnt, word_dict, pos_dict = get_vocabs(paths_list)
train = PosDataset(word_cnt, word_dict, pos_dict, data_dir, 'train')
# split into validation
train_set, val_set = torch.utils.data.random_split(train, [4000, 1000])
train_dataloader = DataLoader(train_set, shuffle=False) # TODO return to true after debugging
val_dataloader = DataLoader(val_set, shuffle=False)
test = PosDataset(word_cnt, word_dict, pos_dict, data_dir, 'test')
test_dataloader = DataLoader(test, shuffle=False)
a = next(iter(train_dataloader))
# a[0] -> word - idx of a sentence
# a[1] -> pos - idx of a sentence
# a[2] -> head token per sentence
assert len(a[0]) == len(a[1]) == len(a[2])
word_vocab_size = len(train.word2idx)
print(word_vocab_size)
tag_vocab_size = len(train.pos_idx_mappings)
print(tag_vocab_size)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
model = DnnDependencyParser(WORD_EMBEDDING_DIM, POS_EMBEDDING_DIM, HIDDEN_DIM, word_vocab_size, tag_vocab_size).to(device)
if use_cuda:
model.cuda()
# Define the loss function as the Negative Log Likelihood loss (NLLLoss)
loss_function = nn.NLLLoss()
# We will be using the Adam optimizer to minimize the loss function
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
acumulate_grad_steps = 128 # This is the actual batch_size, while we officially use batch_size=1
# Training start
print("Training Started")
epoch_loss_list = []
epoch_train_acc_list = []
epoch_test_acc_list = []
best_val_acc = 0
num_epochs_wo_improvement = 0
for epoch in range(EPOCHS):
val_acc = evaluate(model, val_dataloader)
print("EPOCH = ", epoch)
print("EPOCH val acc = ", val_acc)
if val_acc < best_val_acc: # no improvement
num_epochs_wo_improvement += 1
if num_epochs_wo_improvement >= EARLY_STOPPING:
print("STOPPED TRAINING DUE TO EARLY STOPPING")
return
else: # improvement
print("saving model since it improved on validation :)")
torch.save(model.state_dict(), PATH)
num_epochs_wo_improvement = 0
best_val_acc = val_acc
fig = plt.figure()
plt.subplot(3, 1, 1)
plt.plot(epoch_loss_list)
plt.title("loss")
plt.subplot(3, 1, 2)
plt.plot(epoch_train_acc_list)
plt.title("train UAS")
plt.subplot(3, 1, 3)
plt.plot(epoch_test_acc_list)
plt.title("test UAS")
print(epoch_train_acc_list)
plt.savefig('./basic_model_graphs.png')
# train
acc = 0 # to keep track of accuracy
printable_loss = 0 # To keep track of the loss value
i = 0
batch_loss = 0
batch_acc = 0
epoch_loss = 0
for batch_idx, input_data in enumerate(train_dataloader):
i += 1
words_idx_tensor, pos_idx_tensor, heads_tensor = input_data
tag_scores = model(words_idx_tensor, pos_idx_tensor)
loss = NLLL_function(tag_scores, heads_tensor[0].to(device))
# epoch statistics
epoch_loss += loss
#
loss = loss / acumulate_grad_steps
loss.backward()
batch_loss += loss
acc = (accuracy(heads_tensor[0].cpu(), tag_scores.cpu())) / acumulate_grad_steps
batch_acc += acc
if i % acumulate_grad_steps == 0:
optimizer.step()
model.zero_grad()
print("batch_loss = ", batch_loss.item())
print("batch_acc = ", batch_acc)
batch_loss = 0
batch_acc = 0
# end of epoch - get statistics
epoch_loss_list.append(epoch_loss / i)
epoch_train_acc_list.append(evaluate(model, train_dataloader))
epoch_test_acc_list.append(evaluate(model, test_dataloader))
# end of train - plot the two graphs
fig = plt.figure()
plt.subplot(3, 1, 1)
plt.plot(epoch_loss_list)
plt.title("loss")
plt.subplot(3, 1, 2)
plt.plot(epoch_train_acc_list)
plt.title("train UAS")
plt.subplot(3, 1, 3)
plt.plot(epoch_test_acc_list)
plt.title("test UAS")
plt.show()
plt.savefig('basic_model_graphs.png')
if __name__ == "__main__":
if HYPER_PARAMETER_TUNING:
hyper_parameter_tuning()
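After training, the best checkpoint saved to PATH can be restored for later evaluation. This is a minimal sketch, not part of the original file; it assumes the vocabularies and test_dataloader are rebuilt exactly as in main(), so word_vocab_size and tag_vocab_size match the values used at training time.

best_model = DnnDependencyParser(WORD_EMBEDDING_DIM, POS_EMBEDDING_DIM, HIDDEN_DIM,
                                 word_vocab_size, tag_vocab_size)
best_model.load_state_dict(torch.load(PATH))
best_model.eval()
print("test UAS of best checkpoint:", evaluate(best_model, test_dataloader))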
main.go | .UserService.ReadSession(&t)
if err != nil {
respond(w, rsp, err)
return
}
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: rsp.Session.UserId,
})
respond(w, map[string]interface{}{
"session": rsp.Session,
"account": readRsp.Account,
}, err)
}
func post(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t PostRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
if t.Post.Sub == "" || t.Post.Title == "" {
respond(w, nil, fmt.Errorf("both title and sub are required"))
return
}
if t.Post.Url == "" && t.Post.Content == "" {
respond(w, nil, fmt.Errorf("url or content required"))
return
}
if len(t.Post.Title) > 200 || len(t.Post.Url) > 200 {
respond(w, nil, fmt.Errorf("post url or title too long"))
return
}
if len(t.Post.Sub) > 50 {
respond(w, nil, fmt.Errorf("post sub too long"))
return
}
if len(t.Post.Content) > 3000 {
respond(w, nil, fmt.Errorf("post content too long"))
return
}
userID := ""
userName := ""
if t.SessionID != "" {
rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
respond(w, rsp, err)
return
}
userID = rsp.Session.UserId
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: userID,
})
if err != nil {
respond(w, rsp, err)
return
}
userName = readRsp.Account.Username
}
client.DbService.Create(&db.CreateRequest{
Table: "posts",
Record: map[string]interface{}{
"id": uuid.NewV4(),
"userId": userID,
"userName": userName,
"content": t.Post.Content,
"url": t.Post.Url,
"upvotes": float64(0),
"downvotes": float64(0),
"score": float64(0),
"sub": t.Post.Sub,
"title": t.Post.Title,
"created": time.Now(),
},
})
}
func comment(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t CommentRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
userID := ""
userName := ""
// get user if available
if t.SessionID != "" {
rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
respond(w, rsp, err)
return
}
userID = rsp.Session.UserId
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: userID,
})
if err != nil {
respond(w, rsp, err)
return
}
userName = readRsp.Account.Username
}
if t.Comment.PostId == "" {
respond(w, nil, fmt.Errorf("no post id"))
return
}
// get post to update comment counter
readRsp, err := client.DbService.Read(&db.ReadRequest{
Table: "posts",
Id: t.Comment.PostId,
})
if err != nil {
respond(w, nil, err)
return
}
if readRsp == nil || len(readRsp.Records) == 0 {
respond(w, nil, fmt.Errorf("post not found"))
return
}
if len(readRsp.Records) > 1 {
respond(w, nil, fmt.Errorf("multiple posts found"))
return
}
// create comment
_, err = client.DbService.Create(&db.CreateRequest{
Table: "comments",
Record: map[string]interface{}{
"id": uuid.NewV4(),
"userId": userID,
"userName": userName,
"content": t.Comment.Content,
"parent": t.Comment.Parent,
"postId": t.Comment.PostId,
"upvotes": float64(0),
"downvotes": float64(0),
"score": float64(0),
"created": time.Now(),
},
})
if err != nil {
respond(w, nil, err)
return
}
// update counter
oldCount, ok := readRsp.Records[0]["commentCount"].(float64)
if !ok {
oldCount = 0
}
oldCount++
readRsp.Records[0]["commentCount"] = oldCount
_, err = client.DbService.Update(&db.UpdateRequest{
Table: "posts",
Id: t.Comment.PostId,
Record: readRsp.Records[0],
})
respond(w, nil, err)
}
func score(m map[string]interface{}) float64 {
score, ok := m["score"].(float64)
if !ok {
return -10000
}
sign := float64(1)
if score == 0 {
sign = 0
}
if score < 0 {
sign = -1
}
order := math.Log10(math.Max(math.Abs(score), 1))
var created int64
switch v := m["created"].(type) {
case string:
t, err := time.Parse(time.RFC3339, v)
if err != nil {
fmt.Println(err)
}
created = t.Unix()
case float64:
created = int64(v)
case int64:
created = v
}
seconds := created - 1134028003
return sign*order + float64(seconds)/45000
}
func posts(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
var t PostsRequest
decoder := json.NewDecoder(req.Body)
err := decoder.Decode(&t)
r := &db.ReadRequest{
Table: "posts",
Order: "desc",
OrderBy: "created",
Limit: 1000,
}
query := ""
// @TODO this should be != 0 but that causes an empty new page
if t.Min > 0 {
query += "score >= " + fmt.Sprintf("%v", t.Min)
}
if t.Max > 0 {
if query != "" {
query += " and "
}
query += "score <= " + fmt.Sprintf("%v", t.Max)
}
if t.Sub != "all" && t.Sub != "" {
if query != "" {
query += " and "
}
query += fmt.Sprintf("sub == '%v'", t.Sub)
}
if query != "" {
r.Query = query
}
rsp, err := client.DbService.Read(r)
sort.Slice(rsp.Records, func(i, j int) bool {
return score(rsp.Records[i]) > score(rsp.Records[j])
})
respond(w, rsp, err)
}
func comments(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
var t CommentsRequest
decoder := json.NewDecoder(req.Body)
err := decoder.Decode(&t)
if err != nil {
fmt.Fprintf(w, fmt.Sprintf("%v", err.Error()))
}
rsp, err := client.DbService.Read(&db.ReadRequest{
Table: "comments",
Order: "desc",
Query: "postId == '" + t.PostId + "'",
OrderBy: "created",
})
sort.Slice(rsp.Records, func(i, j int) bool {
return score(rsp.Records[i]) > score(rsp.Records[j])
})
respond(w, rsp, err)
}
// Utils
func cors(w http.ResponseWriter, req *http.Request) bool {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "*")
w.Header().Set("Access-Control-Allow-Headers", "*")
w.Header().Set("Content-Type", "application/json")
if req.Method == "OPTIONS" {
w.WriteHeader(http.StatusOK)
return true
}
return false
}
func respond(w http.ResponseWriter, i interface{}, err error) | {
if err != nil {
w.WriteHeader(500)
fmt.Println(err)
}
if i == nil {
i = map[string]interface{}{}
}
if err != nil {
i = map[string]interface{}{
"error": err.Error(),
}
}
bs, _ := json.Marshal(i)
fmt.Fprintf(w, fmt.Sprintf("%v", string(bs)))
} | identifier_body | |
main.go | sub"`
}
// Endpoints
// upvote or downvote a post or a comment
func vote(w http.ResponseWriter, req *http.Request, upvote bool, isComment bool, t VoteRequest) error {
if t.Id == "" {
return fmt.Errorf("missing post id")
}
table := "posts"
if isComment {
table = "comments"
}
rsp, err := client.DbService.Read(&db.ReadRequest{
Table: table,
Id: t.Id,
})
if err != nil {
return err
}
if len(rsp.Records) == 0 {
return fmt.Errorf("post or comment not found")
}
// auth
sessionRsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
return err
}
if sessionRsp.Session.UserId == "" {
return fmt.Errorf("user id not found")
}
// prevent double votes
checkTable := table + "votecheck"
checkId := t.Id + sessionRsp.Session.UserId
checkRsp, err := client.DbService.Read(&db.ReadRequest{
Table: checkTable,
Id: checkId,
})
mod := isMod(sessionRsp.Session.UserId, mods)
if err == nil && (checkRsp != nil && len(checkRsp.Records) > 0) {
if !mod {
return fmt.Errorf("already voted")
}
}
val := float64(1)
if mod {
rand.Seed(time.Now().UnixNano())
val = float64(rand.Intn(17-4) + 4)
}
if !mod {
_, err = client.DbService.Create(&db.CreateRequest{
Table: checkTable,
Record: map[string]interface{}{
"id": checkId,
},
})
if err != nil {
return err
}
}
obj := rsp.Records[0]
key := "upvotes"
if !upvote {
key = "downvotes"
}
if _, ok := obj["upvotes"].(float64); !ok {
obj["upvotes"] = float64(0)
}
if _, ok := obj["downvotes"].(float64); !ok {
obj["downvotes"] = float64(0)
}
obj[key] = obj[key].(float64) + val
obj["score"] = obj["upvotes"].(float64) - obj["downvotes"].(float64)
_, err = client.DbService.Update(&db.UpdateRequest{
Table: table,
Id: t.Id,
Record: obj,
})
return err
}
func isMod(userId, s string) bool {
arr := strings.Split(s, ",")
for _, v := range arr {
if v == userId {
return true
}
}
return false
}
func voteWrapper(upvote bool, isComment bool) func(w http.ResponseWriter, req *http.Request) {
return func(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t VoteRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
err = vote(w, req, upvote, isComment, t)
respond(w, nil, err)
}
}
func login(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t LoginRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, err, err)
return
}
_, err = client.UserService.Read(&user.ReadRequest{
Username: t.Username,
})
if err != nil {
createRsp, err := client.UserService.Create(&user.CreateRequest{
Username: t.Username,
Email: t.Username + "@" + t.Username + ".com",
Password: t.Password,
})
if err != nil {
respond(w, createRsp, err)
return
}
}
logRsp, err := client.UserService.Login(&user.LoginRequest{
Username: t.Username,
Password: t.Password,
})
respond(w, logRsp, err)
}
func readSession(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t user.ReadSessionRequest
err := decoder.Decode(&t)
if err != nil {
fmt.Fprintf(w, fmt.Sprintf("%v", err.Error()))
}
rsp, err := client.UserService.ReadSession(&t)
if err != nil {
respond(w, rsp, err)
return
}
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: rsp.Session.UserId,
})
respond(w, map[string]interface{}{
"session": rsp.Session,
"account": readRsp.Account, |
func post(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t PostRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
if t.Post.Sub == "" || t.Post.Title == "" {
respond(w, nil, fmt.Errorf("both title and sub are required"))
return
}
if t.Post.Url == "" && t.Post.Content == "" {
respond(w, nil, fmt.Errorf("url or content required"))
return
}
if len(t.Post.Title) > 200 || len(t.Post.Url) > 200 {
respond(w, nil, fmt.Errorf("post url or title too long"))
return
}
if len(t.Post.Sub) > 50 {
respond(w, nil, fmt.Errorf("post sub too long"))
return
}
if len(t.Post.Content) > 3000 {
respond(w, nil, fmt.Errorf("post content too long"))
return
}
userID := ""
userName := ""
if t.SessionID != "" {
rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
respond(w, rsp, err)
return
}
userID = rsp.Session.UserId
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: userID,
})
if err != nil {
respond(w, rsp, err)
return
}
userName = readRsp.Account.Username
}
client.DbService.Create(&db.CreateRequest{
Table: "posts",
Record: map[string]interface{}{
"id": uuid.NewV4(),
"userId": userID,
"userName": userName,
"content": t.Post.Content,
"url": t.Post.Url,
"upvotes": float64(0),
"downvotes": float64(0),
"score": float64(0),
"sub": t.Post.Sub,
"title": t.Post.Title,
"created": time.Now(),
},
})
}
func comment(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t CommentRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
userID := ""
userName := ""
// get user if available
if t.SessionID != "" {
rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
respond(w, rsp, err)
return
}
userID = rsp.Session.UserId
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: userID,
})
if err != nil {
respond(w, rsp, err)
return
}
userName = readRsp.Account.Username
}
if t.Comment.PostId == "" {
respond(w, nil, fmt.Errorf("no post id"))
return
}
// get post to update comment counter
readRsp, err := client.DbService.Read(&db.ReadRequest{
Table: "posts",
Id: t.Comment.PostId,
})
if err != nil {
respond(w, nil, err)
return
}
if readRsp == nil || len(readRsp.Records) == 0 {
respond(w, nil, fmt.Errorf("post not found"))
return
}
if len(readRsp.Records) > 1 {
respond(w, nil, fmt.Errorf("multiple posts found"))
return
}
// create comment
_, err = client.DbService.Create(&db.CreateRequest{
Table: "comments",
Record: map[string]interface{}{
"id": uuid.NewV4(),
"userId": userID,
"userName": userName,
"content": t.Comment.Content,
"parent": | }, err)
} | random_line_split |
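The vote handler above reads the stored record, bumps `upvotes` or `downvotes`, and recomputes `score` as their difference, treating missing counters as zero. A small Python sketch of that bookkeeping, with a plain dict standing in for the db-service record and a made-up function name:

```python
def apply_vote(record: dict, upvote: bool, amount: float = 1.0) -> dict:
    """Bump the matching counter and recompute the net score."""
    for k in ("upvotes", "downvotes"):
        if not isinstance(record.get(k), (int, float)):
            record[k] = 0.0  # mirrors the float64 type assertions with a zero fallback
    key = "upvotes" if upvote else "downvotes"
    record[key] += amount
    record["score"] = record["upvotes"] - record["downvotes"]
    return record
```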
main.go | sp.Session.UserId, mods)
if err == nil && (checkRsp != nil && len(checkRsp.Records) > 0) {
if !mod {
return fmt.Errorf("already voted")
}
}
val := float64(1)
if mod {
rand.Seed(time.Now().UnixNano())
val = float64(rand.Intn(17-4) + 4)
}
if !mod {
_, err = client.DbService.Create(&db.CreateRequest{
Table: checkTable,
Record: map[string]interface{}{
"id": checkId,
},
})
if err != nil {
return err
}
}
obj := rsp.Records[0]
key := "upvotes"
if !upvote {
key = "downvotes"
}
if _, ok := obj["upvotes"].(float64); !ok {
obj["upvotes"] = float64(0)
}
if _, ok := obj["downvotes"].(float64); !ok {
obj["downvotes"] = float64(0)
}
obj[key] = obj[key].(float64) + val
obj["score"] = obj["upvotes"].(float64) - obj["downvotes"].(float64)
_, err = client.DbService.Update(&db.UpdateRequest{
Table: table,
Id: t.Id,
Record: obj,
})
return err
}
func isMod(userId, s string) bool {
arr := strings.Split(s, ",")
for _, v := range arr {
if v == userId {
return true
}
}
return false
}
func voteWrapper(upvote bool, isComment bool) func(w http.ResponseWriter, req *http.Request) {
return func(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t VoteRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
err = vote(w, req, upvote, isComment, t)
respond(w, nil, err)
}
}
func login(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t LoginRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, err, err)
return
}
_, err = client.UserService.Read(&user.ReadRequest{
Username: t.Username,
})
if err != nil {
createRsp, err := client.UserService.Create(&user.CreateRequest{
Username: t.Username,
Email: t.Username + "@" + t.Username + ".com",
Password: t.Password,
})
if err != nil {
respond(w, createRsp, err)
return
}
}
logRsp, err := client.UserService.Login(&user.LoginRequest{
Username: t.Username,
Password: t.Password,
})
respond(w, logRsp, err)
}
func readSession(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t user.ReadSessionRequest
err := decoder.Decode(&t)
if err != nil {
fmt.Fprintf(w, fmt.Sprintf("%v", err.Error()))
}
rsp, err := client.UserService.ReadSession(&t)
if err != nil {
respond(w, rsp, err)
return
}
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: rsp.Session.UserId,
})
respond(w, map[string]interface{}{
"session": rsp.Session,
"account": readRsp.Account,
}, err)
}
func post(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t PostRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
if t.Post.Sub == "" || t.Post.Title == "" {
respond(w, nil, fmt.Errorf("both title and sub are required"))
return
}
if t.Post.Url == "" && t.Post.Content == "" {
respond(w, nil, fmt.Errorf("url or content required"))
return
}
if len(t.Post.Title) > 200 || len(t.Post.Url) > 200 {
respond(w, nil, fmt.Errorf("post url or title too long"))
return
}
if len(t.Post.Sub) > 50 {
respond(w, nil, fmt.Errorf("post sub too long"))
return
}
if len(t.Post.Content) > 3000 {
respond(w, nil, fmt.Errorf("post content too long"))
return
}
userID := ""
userName := ""
if t.SessionID != "" {
rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
respond(w, rsp, err)
return
}
userID = rsp.Session.UserId
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: userID,
})
if err != nil {
respond(w, rsp, err)
return
}
userName = readRsp.Account.Username
}
client.DbService.Create(&db.CreateRequest{
Table: "posts",
Record: map[string]interface{}{
"id": uuid.NewV4(),
"userId": userID,
"userName": userName,
"content": t.Post.Content,
"url": t.Post.Url,
"upvotes": float64(0),
"downvotes": float64(0),
"score": float64(0),
"sub": t.Post.Sub,
"title": t.Post.Title,
"created": time.Now(),
},
})
}
func comment(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t CommentRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
userID := ""
userName := ""
// get user if available
if t.SessionID != "" {
rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
respond(w, rsp, err)
return
}
userID = rsp.Session.UserId
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: userID,
})
if err != nil {
respond(w, rsp, err)
return
}
userName = readRsp.Account.Username
}
if t.Comment.PostId == "" {
respond(w, nil, fmt.Errorf("no post id"))
return
}
// get post to update comment counter
readRsp, err := client.DbService.Read(&db.ReadRequest{
Table: "posts",
Id: t.Comment.PostId,
})
if err != nil {
respond(w, nil, err)
return
}
if readRsp == nil || len(readRsp.Records) == 0 {
respond(w, nil, fmt.Errorf("post not found"))
return
}
if len(readRsp.Records) > 1 {
respond(w, nil, fmt.Errorf("multiple posts found"))
return
}
// create comment
_, err = client.DbService.Create(&db.CreateRequest{
Table: "comments",
Record: map[string]interface{}{
"id": uuid.NewV4(),
"userId": userID,
"userName": userName,
"content": t.Comment.Content,
"parent": t.Comment.Parent,
"postId": t.Comment.PostId,
"upvotes": float64(0),
"downvotes": float64(0),
"score": float64(0),
"created": time.Now(),
},
})
if err != nil {
respond(w, nil, err)
return
}
// update counter
oldCount, ok := readRsp.Records[0]["commentCount"].(float64)
if !ok {
oldCount = 0
}
oldCount++
readRsp.Records[0]["commentCount"] = oldCount
_, err = client.DbService.Update(&db.UpdateRequest{
Table: "posts",
Id: t.Comment.PostId,
Record: readRsp.Records[0],
})
respond(w, nil, err)
}
func score(m map[string]interface{}) float64 {
score, ok := m["score"].(float64)
if !ok {
return -10000
}
sign := float64(1)
if score == 0 {
sign = 0
}
if score < 0 | {
sign = -1
} | conditional_block | |
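`score()` above is a hot-style ranking: the sign of the net score, the base-10 log of its magnitude, and an age term counted from a fixed epoch and divided by 45000. A hedged Python sketch of the same formula, taking a Unix timestamp directly; the epoch constant is copied from the Go code:

```python
import math

RANK_EPOCH = 1134028003  # seconds; the constant score() subtracts from the creation time

def hot_rank(net_score: float, created_unix: int) -> float:
    sign = 1.0 if net_score > 0 else (-1.0 if net_score < 0 else 0.0)
    order = math.log10(max(abs(net_score), 1.0))
    seconds = created_unix - RANK_EPOCH
    return sign * order + seconds / 45000.0
```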
main.go | Password: t.Password,
})
respond(w, logRsp, err)
}
func readSession(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t user.ReadSessionRequest
err := decoder.Decode(&t)
if err != nil {
fmt.Fprintf(w, fmt.Sprintf("%v", err.Error()))
}
rsp, err := client.UserService.ReadSession(&t)
if err != nil {
respond(w, rsp, err)
return
}
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: rsp.Session.UserId,
})
respond(w, map[string]interface{}{
"session": rsp.Session,
"account": readRsp.Account,
}, err)
}
func post(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t PostRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
if t.Post.Sub == "" || t.Post.Title == "" {
respond(w, nil, fmt.Errorf("both title and sub are required"))
return
}
if t.Post.Url == "" && t.Post.Content == "" {
respond(w, nil, fmt.Errorf("url or content required"))
return
}
if len(t.Post.Title) > 200 || len(t.Post.Url) > 200 {
respond(w, nil, fmt.Errorf("post url or title too long"))
return
}
if len(t.Post.Sub) > 50 {
respond(w, nil, fmt.Errorf("post sub too long"))
return
}
if len(t.Post.Content) > 3000 {
respond(w, nil, fmt.Errorf("post content too long"))
return
}
userID := ""
userName := ""
if t.SessionID != "" {
rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
respond(w, rsp, err)
return
}
userID = rsp.Session.UserId
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: userID,
})
if err != nil {
respond(w, rsp, err)
return
}
userName = readRsp.Account.Username
}
client.DbService.Create(&db.CreateRequest{
Table: "posts",
Record: map[string]interface{}{
"id": uuid.NewV4(),
"userId": userID,
"userName": userName,
"content": t.Post.Content,
"url": t.Post.Url,
"upvotes": float64(0),
"downvotes": float64(0),
"score": float64(0),
"sub": t.Post.Sub,
"title": t.Post.Title,
"created": time.Now(),
},
})
}
func comment(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
decoder := json.NewDecoder(req.Body)
var t CommentRequest
err := decoder.Decode(&t)
if err != nil {
respond(w, nil, err)
return
}
userID := ""
userName := ""
// get user if available
if t.SessionID != "" {
rsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{
SessionId: t.SessionID,
})
if err != nil {
respond(w, rsp, err)
return
}
userID = rsp.Session.UserId
readRsp, err := client.UserService.Read(&user.ReadRequest{
Id: userID,
})
if err != nil {
respond(w, rsp, err)
return
}
userName = readRsp.Account.Username
}
if t.Comment.PostId == "" {
respond(w, nil, fmt.Errorf("no post id"))
return
}
// get post to update comment counter
readRsp, err := client.DbService.Read(&db.ReadRequest{
Table: "posts",
Id: t.Comment.PostId,
})
if err != nil {
respond(w, nil, err)
return
}
if readRsp == nil || len(readRsp.Records) == 0 {
respond(w, nil, fmt.Errorf("post not found"))
return
}
if len(readRsp.Records) > 1 {
respond(w, nil, fmt.Errorf("multiple posts found"))
return
}
// create comment
_, err = client.DbService.Create(&db.CreateRequest{
Table: "comments",
Record: map[string]interface{}{
"id": uuid.NewV4(),
"userId": userID,
"userName": userName,
"content": t.Comment.Content,
"parent": t.Comment.Parent,
"postId": t.Comment.PostId,
"upvotes": float64(0),
"downvotes": float64(0),
"score": float64(0),
"created": time.Now(),
},
})
if err != nil {
respond(w, nil, err)
return
}
// update counter
oldCount, ok := readRsp.Records[0]["commentCount"].(float64)
if !ok {
oldCount = 0
}
oldCount++
readRsp.Records[0]["commentCount"] = oldCount
_, err = client.DbService.Update(&db.UpdateRequest{
Table: "posts",
Id: t.Comment.PostId,
Record: readRsp.Records[0],
})
respond(w, nil, err)
}
func score(m map[string]interface{}) float64 {
score, ok := m["score"].(float64)
if !ok {
return -10000
}
sign := float64(1)
if score == 0 {
sign = 0
}
if score < 0 {
sign = -1
}
order := math.Log10(math.Max(math.Abs(score), 1))
var created int64
switch v := m["created"].(type) {
case string:
t, err := time.Parse(time.RFC3339, v)
if err != nil {
fmt.Println(err)
}
created = t.Unix()
case float64:
created = int64(v)
case int64:
created = v
}
seconds := created - 1134028003
return sign*order + float64(seconds)/45000
}
func posts(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
var t PostsRequest
decoder := json.NewDecoder(req.Body)
err := decoder.Decode(&t)
r := &db.ReadRequest{
Table: "posts",
Order: "desc",
OrderBy: "created",
Limit: 1000,
}
query := ""
// @TODO this should be != 0 but that causes an empty new page
if t.Min > 0 {
query += "score >= " + fmt.Sprintf("%v", t.Min)
}
if t.Max > 0 {
if query != "" {
query += " and "
}
query += "score <= " + fmt.Sprintf("%v", t.Max)
}
if t.Sub != "all" && t.Sub != "" {
if query != "" {
query += " and "
}
query += fmt.Sprintf("sub == '%v'", t.Sub)
}
if query != "" {
r.Query = query
}
rsp, err := client.DbService.Read(r)
sort.Slice(rsp.Records, func(i, j int) bool {
return score(rsp.Records[i]) > score(rsp.Records[j])
})
respond(w, rsp, err)
}
func comments(w http.ResponseWriter, req *http.Request) {
if cors(w, req) {
return
}
var t CommentsRequest
decoder := json.NewDecoder(req.Body)
err := decoder.Decode(&t)
if err != nil {
fmt.Fprintf(w, fmt.Sprintf("%v", err.Error()))
}
rsp, err := client.DbService.Read(&db.ReadRequest{
Table: "comments",
Order: "desc",
Query: "postId == '" + t.PostId + "'",
OrderBy: "created",
})
sort.Slice(rsp.Records, func(i, j int) bool {
return score(rsp.Records[i]) > score(rsp.Records[j])
})
respond(w, rsp, err)
}
// Utils
func cors(w http.ResponseWriter, req *http.Request) bool {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "*")
w.Header().Set("Access-Control-Allow-Headers", "*")
w.Header().Set("Content-Type", "application/json")
if req.Method == "OPTIONS" {
w.WriteHeader(http.StatusOK)
return true
}
return false
}
func | respond | identifier_name | |
tcp.rs | , waiting for reply (ACK or FIN)
FinWait2, // sent FIN acked, waiting for FIN from peer
Closing, // Waiting for ACK of FIN (FIN sent and received)
TimeWait, // Waiting for timeout after local close
ForceClose, // RST received, waiting for user close
CloseWait, // FIN received, waiting for user to close (error set, wait for node close)
LastAck, // FIN sent and received, waiting for ACK
Finished,
}
impl Connection
{
/// Create a new connection from the ACK in a SYN-SYN,ACK-ACK
fn new_inbound(hdr: &PktHeader) -> Self
{
Connection {
state: ConnectionState::Established,
next_rx_seq: hdr.sequence_number,
last_rx_ack: hdr.sequence_number,
rx_buffer_seq: hdr.sequence_number,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: hdr.acknowledgement_number,
tx_buffer: RingBuf::new(2048),//hdr.window_size as usize),
tx_bytes_sent: 0,
tx_window_size: hdr.window_size as u32,
}
}
fn new_outbound(quad: &Quad, sequence_number: u32) -> Self
{
log_trace!("Connection::new_outbound({:?}, {:#x})", quad, sequence_number);
let mut rv = Connection {
state: ConnectionState::SynSent,
next_rx_seq: 0,
last_rx_ack: 0,
rx_buffer_seq: 0,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: sequence_number,
tx_buffer: RingBuf::new(2048),
tx_bytes_sent: 0,
tx_window_size: 0,//hdr.window_size as u32,
};
rv.send_packet(quad, FLAG_SYN, &[]);
rv
}
/// Handle inbound data
fn handle(&mut self, quad: &Quad, hdr: &PktHeader, mut pkt: ::nic::PacketReader)
{
match self.state
{
//ConnectionState::Closed => return,
ConnectionState::Finished => return,
_ => {},
}
// Synchronisation request
if hdr.flags & FLAG_SYN != 0 {
// TODO: Send an ACK of the last received byte (should this be conditional?)
if self.last_rx_ack != self.next_rx_seq {
}
//self.next_rx_seq = hdr.sequence_number;
}
// ACK of sent data
if hdr.flags & FLAG_ACK != 0 {
let in_flight = (self.last_tx_seq - hdr.acknowledgement_number) as usize;
if in_flight > self.tx_buffer.len() {
// TODO: Error, something funky has happened
}
else {
let n_bytes = self.tx_buffer.len() - in_flight;
log_debug!("{:?} ACK {} bytes", quad, n_bytes);
for _ in 0 .. n_bytes {
self.tx_buffer.pop_front();
}
}
}
// Update the window size if it changes
if self.tx_window_size != hdr.window_size as u32 {
self.tx_window_size = hdr.window_size as u32;
}
let new_state = match self.state
{
//ConnectionState::Closed => return,
// SYN sent by local, waiting for SYN-ACK
ConnectionState::SynSent => {
if hdr.flags & FLAG_SYN != 0 {
self.next_rx_seq += 1;
if hdr.flags & FLAG_ACK != 0 {
// Now established
// TODO: Send ACK back
self.send_ack(quad, "SYN-ACK");
ConnectionState::Established
}
else {
// Why did we get a plain SYN in this state?
self.state
}
}
else {
// Ignore non-SYN
self.state
}
},
ConnectionState::Established =>
if hdr.flags & FLAG_RST != 0 {
// RST received, do an unclean close (reset by peer)
// TODO: Signal to user that the connection is closing (error)
ConnectionState::ForceClose
}
else if hdr.flags & FLAG_FIN != 0 {
// FIN received, start a clean shutdown
self.next_rx_seq += 1;
// TODO: Signal to user that the connection is closing (EOF)
ConnectionState::CloseWait
}
else {
if pkt.remain() == 0 {
// Pure ACK, no change
if hdr.flags == FLAG_ACK {
log_trace!("{:?} ACK only", quad);
}
else if self.next_rx_seq != hdr.sequence_number {
log_trace!("{:?} Empty packet, unexpected seqeunce number {:x} != {:x}", quad, hdr.sequence_number, self.next_rx_seq);
}
else {
// Counts as one byte
self.next_rx_seq += 1;
self.send_ack(quad, "Empty");
}
}
else if hdr.sequence_number - self.next_rx_seq + pkt.remain() as u32 > MAX_WINDOW_SIZE {
// Completely out of sequence
}
else {
// In sequence.
let mut start_ofs = (hdr.sequence_number - self.next_rx_seq) as i32;
while start_ofs < 0 {
pkt.read_u8().unwrap();
start_ofs += 1;
}
let mut ofs = start_ofs as usize;
while let Ok(b) = pkt.read_u8() {
match self.rx_buffer.insert( (self.next_rx_seq - self.rx_buffer_seq) as usize + ofs, &[b])
{
Ok(_) => {},
Err(e) => {
log_error!("{:?} RX buffer push {:?}", quad, e);
break;
},
}
ofs += 1;
}
// Better idea: Have an ACK point, and a window point. Buffer is double the window
// Once the window point reaches 25% of the window from the ACK point
if start_ofs == 0 {
self.next_rx_seq += ofs as u32;
// Calculate a maximum window size based on how much space is left in the buffer
let buffered_len = self.next_rx_seq - self.rx_buffer_seq; // How much data the user has buffered
let cur_max_window = 2*self.rx_window_size_max - buffered_len; // NOTE: 2* for some flex so the window can stay at max size
if cur_max_window < self.rx_window_size {
// Reduce the window size and send an ACQ (with the updated size)
while cur_max_window < self.rx_window_size {
self.rx_window_size /= 2;
}
self.send_ack(quad, "Constrain window");
}
else if self.next_rx_seq - self.last_rx_ack > self.rx_window_size/2 {
// Send an ACK now, we've received a burst of data
self.send_ack(quad, "Data burst");
}
else {
// TODO: Schedule an ACK in a few hundred milliseconds
}
}
if hdr.flags & FLAG_PSH != 0 {
// TODO: Prod the user that there's new data?
}
}
self.state
},
ConnectionState::CloseWait => {
// Ignore all packets while waiting for the user to complete teardown
self.state
},
ConnectionState::LastAck => // Waiting for ACK in FIN,FIN/ACK,ACK
if hdr.flags & FLAG_ACK != 0 {
ConnectionState::Finished
}
else {
self.state
},
ConnectionState::FinWait1 => // FIN sent, waiting for reply (ACK or FIN)
if hdr.flags & FLAG_FIN != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
self.send_ack(quad, "SYN-ACK");
ConnectionState::Closing
}
else if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::FinWait2
}
else {
self.state
},
ConnectionState::FinWait2 =>
if hdr.flags & FLAG_FIN != 0 { // Got a FIN after the ACK, close
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::Closing =>
if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::ForceClose => self.state, | ConnectionState::TimeWait => self.state,
| random_line_split | |
tcp.rs | : RST
/// 3: PSH
/// 4: ACK
/// 5: URG
/// 6: ECE
/// 7: CWR
flags: u8,
window_size: u16,
checksum: u16,
urgent_pointer: u16,
//options: [u8],
}
const FLAG_FIN: u8 = 1 << 0;
const FLAG_SYN: u8 = 1 << 1;
const FLAG_RST: u8 = 1 << 2;
const FLAG_PSH: u8 = 1 << 3;
const FLAG_ACK: u8 = 1 << 4;
impl PktHeader
{
fn read(reader: &mut ::nic::PacketReader) -> Result<Self, ()>
{
Ok(PktHeader {
source_port: reader.read_u16n()?,
dest_port: reader.read_u16n()?,
sequence_number: reader.read_u32n()?,
acknowledgement_number: reader.read_u32n()?,
data_offset: reader.read_u8()?,
flags: reader.read_u8()?,
window_size: reader.read_u16n()?,
checksum: reader.read_u16n()?,
urgent_pointer: reader.read_u16n()?,
})
// TODO: Check checksum?
}
fn get_header_size(&self) -> usize {
(self.data_offset >> 4) as usize * 4
}
fn as_bytes(&self) -> [u8; 5*4]
{
[
(self.source_port >> 8) as u8,
(self.source_port >> 0) as u8,
(self.dest_port >> 8) as u8,
(self.dest_port >> 0) as u8,
(self.sequence_number >> 24) as u8,
(self.sequence_number >> 16) as u8,
(self.sequence_number >> 8) as u8,
(self.sequence_number >> 0) as u8,
(self.acknowledgement_number >> 24) as u8,
(self.acknowledgement_number >> 16) as u8,
(self.acknowledgement_number >> 8) as u8,
(self.acknowledgement_number >> 0) as u8,
self.data_offset,
self.flags,
(self.window_size >> 8) as u8,
(self.window_size >> 0) as u8,
(self.checksum >> 8) as u8,
(self.checksum >> 0) as u8,
(self.urgent_pointer >> 8) as u8,
(self.urgent_pointer >> 0) as u8,
]
}
fn as_u16s(&self) -> [u16; 5*2] {
[
self.source_port,
self.dest_port,
(self.sequence_number >> 16) as u16,
(self.sequence_number >> 0) as u16,
(self.acknowledgement_number >> 16) as u16,
(self.acknowledgement_number >> 0) as u16,
(self.data_offset as u16) << 8 | (self.flags as u16),
self.window_size,
self.checksum,
self.urgent_pointer,
]
}
fn checksum(&self) -> u16 {
::ipv4::calculate_checksum(self.as_u16s().iter().cloned())
}
}
struct Connection
{
state: ConnectionState,
/// Sequence number of the next expected remote byte
next_rx_seq: u32,
/// Last ACKed sequence number
last_rx_ack: u32,
/// Received bytes
rx_buffer: RxBuffer,
/// Sequence number of the first byte in the RX buffer
rx_buffer_seq: u32,
rx_window_size_max: u32,
rx_window_size: u32,
/// Sequence number of last transmitted byte
last_tx_seq: u32,
/// Buffer of transmitted but not ACKed bytes
tx_buffer: RingBuf<u8>,
/// Offset of bytes actually sent (not just buffered)
tx_bytes_sent: usize,
/// Last received transmit window size
tx_window_size: u32,
}
#[derive(Copy,Clone,Debug,PartialEq)]
enum ConnectionState
{
//Closed, // Unused
SynSent, // SYN sent by local, waiting for SYN-ACK
//SynReceived, // Server only, handled by PROTO_CONNECTIONS
Established,
FinWait1, // FIN sent, waiting for reply (ACK or FIN)
FinWait2, // sent FIN acked, waiting for FIN from peer
Closing, // Waiting for ACK of FIN (FIN sent and received)
TimeWait, // Waiting for timeout after local close
ForceClose, // RST received, waiting for user close
CloseWait, // FIN received, waiting for user to close (error set, wait for node close)
LastAck, // FIN sent and received, waiting for ACK
Finished,
}
impl Connection
{
/// Create a new connection from the ACK in a SYN-SYN,ACK-ACK
fn new_inbound(hdr: &PktHeader) -> Self
{
Connection {
state: ConnectionState::Established,
next_rx_seq: hdr.sequence_number,
last_rx_ack: hdr.sequence_number,
rx_buffer_seq: hdr.sequence_number,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: hdr.acknowledgement_number,
tx_buffer: RingBuf::new(2048),//hdr.window_size as usize),
tx_bytes_sent: 0,
tx_window_size: hdr.window_size as u32,
}
}
fn new_outbound(quad: &Quad, sequence_number: u32) -> Self
{
log_trace!("Connection::new_outbound({:?}, {:#x})", quad, sequence_number);
let mut rv = Connection {
state: ConnectionState::SynSent,
next_rx_seq: 0,
last_rx_ack: 0,
rx_buffer_seq: 0,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: sequence_number,
tx_buffer: RingBuf::new(2048),
tx_bytes_sent: 0,
tx_window_size: 0,//hdr.window_size as u32,
};
rv.send_packet(quad, FLAG_SYN, &[]);
rv
}
/// Handle inbound data
fn handle(&mut self, quad: &Quad, hdr: &PktHeader, mut pkt: ::nic::PacketReader)
{
match self.state
{
//ConnectionState::Closed => return,
ConnectionState::Finished => return,
_ => {},
}
// Synchronisation request
if hdr.flags & FLAG_SYN != 0 {
// TODO: Send an ACK of the last received byte (should this be conditional?)
if self.last_rx_ack != self.next_rx_seq {
}
//self.next_rx_seq = hdr.sequence_number;
}
// ACK of sent data
if hdr.flags & FLAG_ACK != 0 {
let in_flight = (self.last_tx_seq - hdr.acknowledgement_number) as usize;
if in_flight > self.tx_buffer.len() {
// TODO: Error, something funky has happened
}
else {
let n_bytes = self.tx_buffer.len() - in_flight;
log_debug!("{:?} ACK {} bytes", quad, n_bytes);
for _ in 0 .. n_bytes {
self.tx_buffer.pop_front();
}
}
}
// Update the window size if it changes
if self.tx_window_size != hdr.window_size as u32 {
self.tx_window_size = hdr.window_size as u32;
}
let new_state = match self.state
{
//ConnectionState::Closed => return,
// SYN sent by local, waiting for SYN-ACK
ConnectionState::SynSent => {
if hdr.flags & FLAG_SYN != 0 {
self.next_rx_seq += 1;
if hdr.flags & FLAG_ACK != 0 {
// Now established
// TODO: Send ACK back
self.send_ack(quad, "SYN-ACK");
ConnectionState::Established
}
else {
// Why did we get a plain SYN in this state?
self.state
}
}
else {
// Ignore non-SYN
self.state
}
},
ConnectionState::Established =>
if hdr.flags & FLAG_RST != 0 | {
// RST received, do an unclean close (reset by peer)
// TODO: Signal to user that the connection is closing (error)
ConnectionState::ForceClose
} | conditional_block | |
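One detail of the receive path in `handle()` above: an ACK is forced as soon as the unacknowledged span grows past half of the advertised window; otherwise it is left for a (still unimplemented) delayed ACK. A minimal Python sketch of that check, assuming 32-bit wrapping sequence numbers:

```python
def should_ack_now(next_rx_seq: int, last_rx_ack: int, rx_window_size: int) -> bool:
    """Force an immediate ACK once more than half the advertised window is outstanding."""
    outstanding = (next_rx_seq - last_rx_ack) & 0xFFFFFFFF  # sequence arithmetic wraps at 2**32
    return outstanding > rx_window_size // 2
```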
tcp.rs | drop
}
#[derive(Copy,Clone,PartialOrd,PartialEq,Ord,Eq)]
struct Quad
{
local_addr: Address,
local_port: u16,
remote_addr: Address,
remote_port: u16,
}
impl ::core::fmt::Debug for Quad
{
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "Quad({:?}:{} -> {:?}:{})", self.local_addr, self.local_port, self.remote_addr, self.remote_port)
}
}
impl Quad
{
fn | (local_addr: Address, local_port: u16, remote_addr: Address, remote_port: u16) -> Quad
{
Quad {
local_addr, local_port, remote_addr, remote_port
}
}
fn send_packet(&self, seq: u32, ack: u32, flags: u8, window_size: u16, data: &[u8])
{
// Make a header
// TODO: Any options required?
let options_bytes = &[];
let opts_len_rounded = ((options_bytes.len() + 3) / 4) * 4;
let hdr = PktHeader {
source_port: self.local_port,
dest_port: self.remote_port,
sequence_number: seq,
acknowledgement_number: ack,
data_offset: ((5 + opts_len_rounded/4) << 4) as u8 | 0,
flags: flags,
window_size: window_size,
checksum: 0, // To be filled afterwards
urgent_pointer: 0,
}.as_bytes();
// Calculate checksum
// Create sparse packet chain
let data_pkt = SparsePacket::new_root(data);
// - Padding required to make the header a multiple of 4 bytes long
let opt_pad_pkt = SparsePacket::new_chained(&[0; 3][.. opts_len_rounded - options_bytes.len()], &data_pkt);
let opt_pkt = SparsePacket::new_chained(options_bytes, &opt_pad_pkt);
let hdr_pkt = SparsePacket::new_chained(&hdr, &opt_pkt);
// Pass packet downstream
match self.local_addr
{
Address::Ipv4(a) => crate::ipv4::send_packet(a, self.remote_addr.unwrap_ipv4(), IPV4_PROTO_TCP, hdr_pkt),
}
}
}
#[derive(Debug)]
struct PktHeader
{
source_port: u16,
dest_port: u16,
sequence_number: u32,
acknowledgement_number: u32,
/// Packed: top 4 bits are header size in 4byte units, bottom 4 are reserved
data_offset: u8,
/// Bitfield:
/// 0: FIN
/// 1: SYN
/// 2: RST
/// 3: PSH
/// 4: ACK
/// 5: URG
/// 6: ECE
/// 7: CWR
flags: u8,
window_size: u16,
checksum: u16,
urgent_pointer: u16,
//options: [u8],
}
const FLAG_FIN: u8 = 1 << 0;
const FLAG_SYN: u8 = 1 << 1;
const FLAG_RST: u8 = 1 << 2;
const FLAG_PSH: u8 = 1 << 3;
const FLAG_ACK: u8 = 1 << 4;
impl PktHeader
{
fn read(reader: &mut ::nic::PacketReader) -> Result<Self, ()>
{
Ok(PktHeader {
source_port: reader.read_u16n()?,
dest_port: reader.read_u16n()?,
sequence_number: reader.read_u32n()?,
acknowledgement_number: reader.read_u32n()?,
data_offset: reader.read_u8()?,
flags: reader.read_u8()?,
window_size: reader.read_u16n()?,
checksum: reader.read_u16n()?,
urgent_pointer: reader.read_u16n()?,
})
// TODO: Check checksum?
}
fn get_header_size(&self) -> usize {
(self.data_offset >> 4) as usize * 4
}
fn as_bytes(&self) -> [u8; 5*4]
{
[
(self.source_port >> 8) as u8,
(self.source_port >> 0) as u8,
(self.dest_port >> 8) as u8,
(self.dest_port >> 0) as u8,
(self.sequence_number >> 24) as u8,
(self.sequence_number >> 16) as u8,
(self.sequence_number >> 8) as u8,
(self.sequence_number >> 0) as u8,
(self.acknowledgement_number >> 24) as u8,
(self.acknowledgement_number >> 16) as u8,
(self.acknowledgement_number >> 8) as u8,
(self.acknowledgement_number >> 0) as u8,
self.data_offset,
self.flags,
(self.window_size >> 8) as u8,
(self.window_size >> 0) as u8,
(self.checksum >> 8) as u8,
(self.checksum >> 0) as u8,
(self.urgent_pointer >> 8) as u8,
(self.urgent_pointer >> 0) as u8,
]
}
fn as_u16s(&self) -> [u16; 5*2] {
[
self.source_port,
self.dest_port,
(self.sequence_number >> 16) as u16,
(self.sequence_number >> 0) as u16,
(self.acknowledgement_number >> 16) as u16,
(self.acknowledgement_number >> 0) as u16,
(self.data_offset as u16) << 8 | (self.flags as u16),
self.window_size,
self.checksum,
self.urgent_pointer,
]
}
fn checksum(&self) -> u16 {
::ipv4::calculate_checksum(self.as_u16s().iter().cloned())
}
}
struct Connection
{
state: ConnectionState,
/// Sequence number of the next expected remote byte
next_rx_seq: u32,
/// Last ACKed sequence number
last_rx_ack: u32,
/// Received bytes
rx_buffer: RxBuffer,
/// Sequence number of the first byte in the RX buffer
rx_buffer_seq: u32,
rx_window_size_max: u32,
rx_window_size: u32,
/// Sequence number of last transmitted byte
last_tx_seq: u32,
/// Buffer of transmitted but not ACKed bytes
tx_buffer: RingBuf<u8>,
/// Offset of bytes actually sent (not just buffered)
tx_bytes_sent: usize,
/// Last received transmit window size
tx_window_size: u32,
}
#[derive(Copy,Clone,Debug,PartialEq)]
enum ConnectionState
{
//Closed, // Unused
SynSent, // SYN sent by local, waiting for SYN-ACK
//SynReceived, // Server only, handled by PROTO_CONNECTIONS
Established,
FinWait1, // FIN sent, waiting for reply (ACK or FIN)
FinWait2, // sent FIN acked, waiting for FIN from peer
Closing, // Waiting for ACK of FIN (FIN sent and received)
TimeWait, // Waiting for timeout after local close
ForceClose, // RST received, waiting for user close
CloseWait, // FIN received, waiting for user to close (error set, wait for node close)
LastAck, // FIN sent and received, waiting for ACK
Finished,
}
impl Connection
{
/// Create a new connection from the ACK in a SYN-SYN,ACK-ACK
fn new_inbound(hdr: &PktHeader) -> Self
{
Connection {
state: ConnectionState::Established,
next_rx_seq: hdr.sequence_number,
last_rx_ack: hdr.sequence_number,
rx_buffer_seq: hdr.sequence_number,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: hdr.acknowledgement_number,
tx_buffer: RingBuf::new(2048),//hdr.window_size as usize),
tx_bytes_sent: 0,
tx_window_size: hdr.window_size as u32,
}
}
fn new_outbound(quad: &Quad, sequence_number: u32) -> Self
{
log_trace!("Connection::new_outbound({:?}, {:#x})", quad, sequence_number);
let mut rv = Connection {
state: ConnectionState::SynSent,
next_rx_seq: 0,
last_rx_ack: 0,
rx_buffer_seq: 0,
rx_buffer: RxBuffer::new(2* | new | identifier_name |
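`PktHeader::checksum()` above hands the header, split into 16-bit words, to the ipv4 module's `calculate_checksum`. The conventional Internet checksum is a ones'-complement sum that is then complemented; a rough Python equivalent is sketched below, though the real `calculate_checksum` (for example any pseudo-header handling) may differ in detail:

```python
def internet_checksum(words) -> int:
    """Ones'-complement sum of 16-bit words, folded and complemented (RFC 1071 style)."""
    total = 0
    for w in words:
        total += w & 0xFFFF
        total = (total & 0xFFFF) + (total >> 16)  # fold any carry back into the low 16 bits
    return (~total) & 0xFFFF
```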
tcp.rs | drop
}
#[derive(Copy,Clone,PartialOrd,PartialEq,Ord,Eq)]
struct Quad
{
local_addr: Address,
local_port: u16,
remote_addr: Address,
remote_port: u16,
}
impl ::core::fmt::Debug for Quad
{
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "Quad({:?}:{} -> {:?}:{})", self.local_addr, self.local_port, self.remote_addr, self.remote_port)
}
}
impl Quad
{
fn new(local_addr: Address, local_port: u16, remote_addr: Address, remote_port: u16) -> Quad
|
fn send_packet(&self, seq: u32, ack: u32, flags: u8, window_size: u16, data: &[u8])
{
// Make a header
// TODO: Any options required?
let options_bytes = &[];
let opts_len_rounded = ((options_bytes.len() + 3) / 4) * 4;
let hdr = PktHeader {
source_port: self.local_port,
dest_port: self.remote_port,
sequence_number: seq,
acknowledgement_number: ack,
data_offset: ((5 + opts_len_rounded/4) << 4) as u8 | 0,
flags: flags,
window_size: window_size,
checksum: 0, // To be filled afterwards
urgent_pointer: 0,
}.as_bytes();
// Calculate checksum
// Create sparse packet chain
let data_pkt = SparsePacket::new_root(data);
// - Padding required to make the header a multiple of 4 bytes long
let opt_pad_pkt = SparsePacket::new_chained(&[0; 3][.. opts_len_rounded - options_bytes.len()], &data_pkt);
let opt_pkt = SparsePacket::new_chained(options_bytes, &opt_pad_pkt);
let hdr_pkt = SparsePacket::new_chained(&hdr, &opt_pkt);
// Pass packet downstream
match self.local_addr
{
Address::Ipv4(a) => crate::ipv4::send_packet(a, self.remote_addr.unwrap_ipv4(), IPV4_PROTO_TCP, hdr_pkt),
}
}
}
#[derive(Debug)]
struct PktHeader
{
source_port: u16,
dest_port: u16,
sequence_number: u32,
acknowledgement_number: u32,
/// Packed: top 4 bits are header size in 4byte units, bottom 4 are reserved
data_offset: u8,
/// Bitfield:
/// 0: FIN
/// 1: SYN
/// 2: RST
/// 3: PSH
/// 4: ACK
/// 5: URG
/// 6: ECE
/// 7: CWR
flags: u8,
window_size: u16,
checksum: u16,
urgent_pointer: u16,
//options: [u8],
}
const FLAG_FIN: u8 = 1 << 0;
const FLAG_SYN: u8 = 1 << 1;
const FLAG_RST: u8 = 1 << 2;
const FLAG_PSH: u8 = 1 << 3;
const FLAG_ACK: u8 = 1 << 4;
impl PktHeader
{
fn read(reader: &mut ::nic::PacketReader) -> Result<Self, ()>
{
Ok(PktHeader {
source_port: reader.read_u16n()?,
dest_port: reader.read_u16n()?,
sequence_number: reader.read_u32n()?,
acknowledgement_number: reader.read_u32n()?,
data_offset: reader.read_u8()?,
flags: reader.read_u8()?,
window_size: reader.read_u16n()?,
checksum: reader.read_u16n()?,
urgent_pointer: reader.read_u16n()?,
})
// TODO: Check checksum?
}
fn get_header_size(&self) -> usize {
(self.data_offset >> 4) as usize * 4
}
fn as_bytes(&self) -> [u8; 5*4]
{
[
(self.source_port >> 8) as u8,
(self.source_port >> 0) as u8,
(self.dest_port >> 8) as u8,
(self.dest_port >> 0) as u8,
(self.sequence_number >> 24) as u8,
(self.sequence_number >> 16) as u8,
(self.sequence_number >> 8) as u8,
(self.sequence_number >> 0) as u8,
(self.acknowledgement_number >> 24) as u8,
(self.acknowledgement_number >> 16) as u8,
(self.acknowledgement_number >> 8) as u8,
(self.acknowledgement_number >> 0) as u8,
self.data_offset,
self.flags,
(self.window_size >> 8) as u8,
(self.window_size >> 0) as u8,
(self.checksum >> 8) as u8,
(self.checksum >> 0) as u8,
(self.urgent_pointer >> 8) as u8,
(self.urgent_pointer >> 0) as u8,
]
}
fn as_u16s(&self) -> [u16; 5*2] {
[
self.source_port,
self.dest_port,
(self.sequence_number >> 16) as u16,
(self.sequence_number >> 0) as u16,
(self.acknowledgement_number >> 16) as u16,
(self.acknowledgement_number >> 0) as u16,
(self.data_offset as u16) << 8 | (self.flags as u16),
self.window_size,
self.checksum,
self.urgent_pointer,
]
}
fn checksum(&self) -> u16 {
::ipv4::calculate_checksum(self.as_u16s().iter().cloned())
}
}
struct Connection
{
state: ConnectionState,
/// Sequence number of the next expected remote byte
next_rx_seq: u32,
/// Last ACKed sequence number
last_rx_ack: u32,
/// Received bytes
rx_buffer: RxBuffer,
/// Sequence number of the first byte in the RX buffer
rx_buffer_seq: u32,
rx_window_size_max: u32,
rx_window_size: u32,
/// Sequence number of last transmitted byte
last_tx_seq: u32,
/// Buffer of transmitted but not ACKed bytes
tx_buffer: RingBuf<u8>,
/// Offset of bytes actually sent (not just buffered)
tx_bytes_sent: usize,
/// Last received transmit window size
tx_window_size: u32,
}
#[derive(Copy,Clone,Debug,PartialEq)]
enum ConnectionState
{
//Closed, // Unused
SynSent, // SYN sent by local, waiting for SYN-ACK
//SynReceived, // Server only, handled by PROTO_CONNECTIONS
Established,
FinWait1, // FIN sent, waiting for reply (ACK or FIN)
FinWait2, // sent FIN acked, waiting for FIN from peer
Closing, // Waiting for ACK of FIN (FIN sent and received)
TimeWait, // Waiting for timeout after local close
ForceClose, // RST received, waiting for user close
CloseWait, // FIN received, waiting for user to close (error set, wait for node close)
LastAck, // FIN sent and received, waiting for ACK
Finished,
}
impl Connection
{
/// Create a new connection from the ACK in a SYN-SYN,ACK-ACK
fn new_inbound(hdr: &PktHeader) -> Self
{
Connection {
state: ConnectionState::Established,
next_rx_seq: hdr.sequence_number,
last_rx_ack: hdr.sequence_number,
rx_buffer_seq: hdr.sequence_number,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: hdr.acknowledgement_number,
tx_buffer: RingBuf::new(2048),//hdr.window_size as usize),
tx_bytes_sent: 0,
tx_window_size: hdr.window_size as u32,
}
}
fn new_outbound(quad: &Quad, sequence_number: u32) -> Self
{
log_trace!("Connection::new_outbound({:?}, {:#x})", quad, sequence_number);
let mut rv = Connection {
state: ConnectionState::SynSent,
next_rx_seq: 0,
last_rx_ack: 0,
rx_buffer_seq: 0,
rx_buffer: RxBuffer::new(2* | {
Quad {
local_addr, local_port, remote_addr, remote_port
}
} | identifier_body |
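The ACK branch of `handle()` shown earlier trims the retransmission buffer by working out how many bytes are still in flight and popping everything older. A compact Python sketch of that trimming, assuming 32-bit wrapping sequence numbers and a deque standing in for the `RingBuf`:

```python
from collections import deque

def trim_acked(tx_buffer: deque, last_tx_seq: int, ack: int) -> int:
    """Drop bytes the peer has acknowledged; returns how many were dropped."""
    in_flight = (last_tx_seq - ack) & 0xFFFFFFFF
    if in_flight > len(tx_buffer):
        return 0  # ACK covers data we never sent; the Rust code treats this as an error case
    n_acked = len(tx_buffer) - in_flight
    for _ in range(n_acked):
        tx_buffer.popleft()
    return n_acked
```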
kegweblib.py | ictures"] = picture_or_pictures
c["thumb_size"] = thumb_size
c["gallery_id"] = gallery_id
return c
@register.inclusion_tag("kegweb/badge.html")
def badge(amount, caption, style="", is_volume=False, do_pluralize=False):
if is_volume:
amount = mark_safe(VolumeNode.format(amount, "mL"))
if do_pluralize:
caption += pluralize(amount)
return {
"badge_amount": amount,
"badge_caption": caption,
"badge_style": style,
}
@register.inclusion_tag("kegweb/includes/progress_bar.html")
def progress_bar(progress_int, extra_css=""):
c = {}
try:
progress_int = max(int(progress_int), 0)
except ValueError:
progress_int = 0
progress_int = min(progress_int, 100)
c["progress_int"] = progress_int
c["extra_css"] = extra_css
if progress_int < 10:
bar_type = "bar-danger"
elif progress_int < 25:
bar_type = "bar-warning"
else:
bar_type = "bar-success"
c["bar_type"] = bar_type
return c
# navitem
@register.tag("navitem")
def navitem(parser, token):
"""{% navitem <viewname> <title> [exact] %}"""
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("%s requires at least 3 tokens" % tokens[0])
return NavitemNode(*tokens[1:])
class NavitemNode(Node):
def __init__(self, *args):
self._viewname = args[0]
self._title = args[1]
self._exact = "exact" in args[2:]
def render(self, context):
viewname = Variable(self._viewname).resolve(context)
title = Variable(self._title).resolve(context)
if viewname.startswith("/"):
urlbase = viewname
else:
urlbase = reverse(viewname)
request_path = context["request_path"]
if self._exact:
active = request_path == urlbase
else:
active = request_path.startswith(urlbase)
if active:
res = '<li class="active">'
else:
res = "<li>"
res += '<a href="%s">%s</a></li>' % (urlbase, title)
return res
# timeago
@register.tag("timeago")
def | (parser, token):
"""{% timeago <timestamp> %}"""
tokens = token.contents.split()
if len(tokens) != 2:
raise TemplateSyntaxError("%s requires 2 tokens" % tokens[0])
return TimeagoNode(tokens[1])
class TimeagoNode(Node):
def __init__(self, timestamp_varname):
self._timestamp_varname = timestamp_varname
def render(self, context):
tv = Variable(self._timestamp_varname)
ts = tv.resolve(context)
# Try to set time zone information.
if settings.TIME_ZONE and not settings.USE_TZ:
try:
tz = pytz.timezone(settings.TIME_ZONE)
ts = tz.localize(ts)
except pytz.UnknownTimeZoneError:
pass
iso = ts.isoformat()
alt = timezone.localtime(ts).strftime("%A, %B %d, %Y %I:%M%p")
return '<abbr class="timeago" title="%s">%s</abbr>' % (iso, alt)
# temperature
@register.tag("temperature")
def temperature_tag(parser, token):
"""{% temperature <temp_c> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return TemperatureNode(tokens[1])
class TemperatureNode(Node):
TEMPLATE = "%(amount)s° %(unit)s"
def __init__(self, varname):
self.varname = varname
def render(self, context):
v = Variable(self.varname)
try:
amount = v.resolve(context)
except (VariableDoesNotExist, ValueError):
amount = "unknown"
unit = "C"
kbsite = models.KegbotSite.get()
if kbsite.temperature_display_units == "f":
unit = "F"
amount = CtoF(amount)
return self.TEMPLATE % {"amount": amount, "unit": unit}
# volume
@register.tag("volume")
def volumetag(parser, token):
"""{% volume <amount> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return VolumeNode(tokens[1], tokens[2:])
class VolumeNode(Node):
TEMPLATE = """
<span class="hmeasure %(extra_css)s" title="%(title)s">
<span class="num">%(amount)s</span>
<span class="unit">%(units)s</span>
</span>""".strip()
def __init__(self, volume_varname, extra_args):
self._volume_varname = volume_varname
self._extra_args = extra_args
def render(self, context):
tv = Variable(self._volume_varname)
try:
num = float(tv.resolve(context))
except (VariableDoesNotExist, ValueError):
num = "unknown"
unit = "mL"
make_badge = "badge" in self._extra_args
return self.format(num, unit, make_badge)
@classmethod
def format(cls, amount, units, make_badge=False):
if amount < 0:
amount = 0
ctx = {
"units": units,
"amount": amount,
"title": "%s %s" % (amount, units),
"extra_css": "badge " if make_badge else "",
}
return cls.TEMPLATE % ctx
# drinker
@register.tag("drinker_name")
def drinker_name_tag(parser, token):
"""{% drinker_name <drink_or_user_obj> [nolink] %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return DrinkerNameNode(tokens[1], tokens[2:])
class DrinkerNameNode(Node):
def __init__(self, drink_varname, extra_args):
self._varname = drink_varname
self._extra_args = extra_args
def render(self, context):
obj = Variable(self._varname)
try:
obj = obj.resolve(context)
except (VariableDoesNotExist, ValueError):
obj = None
user = None
if obj:
if isinstance(obj, models.Drink) or isinstance(obj, models.SystemEvent):
user = obj.user
elif isinstance(obj, models.User):
user = obj
if user:
if "nolink" in self._extra_args:
return user.get_full_name()
else:
return '<a href="%s">%s</a>' % (
reverse("kb-drinker", args=[user.username]),
user.get_full_name(),
)
return context["guest_info"]["name"]
# chart
@register.tag("chart")
def chart(parser, tokens):
"""{% chart <charttype> <obj> width height %}"""
tokens = tokens.contents.split()
if len(tokens) < 4:
raise TemplateSyntaxError("chart requires at least 4 arguments")
charttype = tokens[1]
try:
width = int(tokens[-2])
height = int(tokens[-1])
except ValueError:
raise TemplateSyntaxError("invalid width or height")
args = tokens[2:-2]
return ChartNode(charttype, width, height, args)
class ChartNode(Node):
CHART_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox"></div>
<script type="text/javascript">
var chart_%(chart_id)s;
$(document).ready(function() {
var chart_data = %(chart_data)s;
chart_%(chart_id)s = new Highcharts.Chart(chart_data);
});
</script>
<!-- end chart %(chart_id)s -->
"""
ERROR_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox-error">
%(error_str)s
</div>
<!-- end chart %(chart_id)s -->
"""
def __init__(self, charttype, width, height, args):
self._charttype = charttype
self._width = width
self._height = height
self._args = args
self._chart_fn = getattr(charts, "chart_%s" % (self._charttype,), None)
def _get_chart_id(self, context):
# TODO(mikey): Is there a better way to store _CHART_ID?
if not hasattr(context, "_CHART_ID"):
context._CHART_ID = 0
context._CHART_ID += 1
return context._CHART | timeago | identifier_name |
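The kegweb tags above all follow the same Django pattern: a compile function registered with `@register.tag` splits the token, validates it, and returns a `Node` whose `render()` resolves variables against the context. A stripped-down sketch of that pattern; the `shout` tag itself is invented for illustration and is not part of kegweb:

```python
from django import template

register = template.Library()

@register.tag("shout")
def shout(parser, token):
    """{% shout <varname> %} -- upper-cases the resolved variable."""
    tokens = token.split_contents()
    if len(tokens) != 2:
        raise template.TemplateSyntaxError("%s requires 2 tokens" % tokens[0])
    return ShoutNode(tokens[1])

class ShoutNode(template.Node):
    def __init__(self, varname):
        self.varname = varname

    def render(self, context):
        try:
            value = template.Variable(self.varname).resolve(context)
        except template.VariableDoesNotExist:
            value = ""
        return str(value).upper()
```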
kegweblib.py | ictures"] = picture_or_pictures
c["thumb_size"] = thumb_size
c["gallery_id"] = gallery_id
return c
@register.inclusion_tag("kegweb/badge.html")
def badge(amount, caption, style="", is_volume=False, do_pluralize=False):
if is_volume:
amount = mark_safe(VolumeNode.format(amount, "mL"))
if do_pluralize:
caption += pluralize(amount)
return {
"badge_amount": amount,
"badge_caption": caption,
"badge_style": style,
}
@register.inclusion_tag("kegweb/includes/progress_bar.html")
def progress_bar(progress_int, extra_css=""):
c = {}
try:
progress_int = max(int(progress_int), 0)
except ValueError:
progress_int = 0
progress_int = min(progress_int, 100)
c["progress_int"] = progress_int
c["extra_css"] = extra_css
if progress_int < 10:
bar_type = "bar-danger"
elif progress_int < 25:
bar_type = "bar-warning"
else:
bar_type = "bar-success"
c["bar_type"] = bar_type
return c
# navitem
@register.tag("navitem")
def navitem(parser, token):
"""{% navitem <viewname> <title> [exact] %}"""
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("%s requires at least 3 tokens" % tokens[0])
return NavitemNode(*tokens[1:])
class NavitemNode(Node):
def __init__(self, *args):
self._viewname = args[0]
self._title = args[1]
self._exact = "exact" in args[2:]
def render(self, context):
viewname = Variable(self._viewname).resolve(context)
title = Variable(self._title).resolve(context)
if viewname.startswith("/"):
urlbase = viewname
else:
urlbase = reverse(viewname)
request_path = context["request_path"]
if self._exact:
active = request_path == urlbase
else:
active = request_path.startswith(urlbase)
if active:
res = '<li class="active">'
else:
res = "<li>"
res += '<a href="%s">%s</a></li>' % (urlbase, title)
return res
# timeago
@register.tag("timeago")
def timeago(parser, token):
"""{% timeago <timestamp> %}"""
tokens = token.contents.split()
if len(tokens) != 2:
raise TemplateSyntaxError("%s requires 2 tokens" % tokens[0])
return TimeagoNode(tokens[1])
class TimeagoNode(Node):
def __init__(self, timestamp_varname):
self._timestamp_varname = timestamp_varname
def render(self, context):
tv = Variable(self._timestamp_varname)
ts = tv.resolve(context)
# Try to set time zone information.
if settings.TIME_ZONE and not settings.USE_TZ:
try:
tz = pytz.timezone(settings.TIME_ZONE)
ts = tz.localize(ts)
except pytz.UnknownTimeZoneError:
pass
iso = ts.isoformat()
alt = timezone.localtime(ts).strftime("%A, %B %d, %Y %I:%M%p")
return '<abbr class="timeago" title="%s">%s</abbr>' % (iso, alt)
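# Hedged standalone sketch of the localization step above: with USE_TZ off, the
# timestamp may be naive, and tz.localize() attaches zone info without shifting the
# wall-clock time. The zone name and datetime below are illustrative assumptions.
import datetime
import pytz
naive_ts = datetime.datetime(2014, 7, 4, 21, 30)    # no tzinfo attached
tz = pytz.timezone("America/Los_Angeles")
aware_ts = tz.localize(naive_ts)
print(aware_ts.isoformat())                          # 2014-07-04T21:30:00-07:00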
# temperature
@register.tag("temperature")
def temperature_tag(parser, token):
"""{% temperature <temp_c> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return TemperatureNode(tokens[1])
class TemperatureNode(Node):
TEMPLATE = "%(amount)s° %(unit)s"
def __init__(self, varname):
self.varname = varname
def render(self, context):
v = Variable(self.varname)
try:
amount = v.resolve(context)
except (VariableDoesNotExist, ValueError):
            amount = "unknown"
unit = "C"
kbsite = models.KegbotSite.get()
if kbsite.temperature_display_units == "f":
unit = "F"
amount = CtoF(amount)
return self.TEMPLATE % {"amount": amount, "unit": unit}
# volume
@register.tag("volume")
def volumetag(parser, token):
"""{% volume <amount> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return VolumeNode(tokens[1], tokens[2:])
class VolumeNode(Node):
TEMPLATE = """
<span class="hmeasure %(extra_css)s" title="%(title)s">
<span class="num">%(amount)s</span>
<span class="unit">%(units)s</span>
</span>""".strip()
def __init__(self, volume_varname, extra_args):
self._volume_varname = volume_varname
self._extra_args = extra_args
def render(self, context):
|
@classmethod
def format(cls, amount, units, make_badge=False):
if amount < 0:
amount = 0
ctx = {
"units": units,
"amount": amount,
"title": "%s %s" % (amount, units),
"extra_css": "badge " if make_badge else "",
}
return cls.TEMPLATE % ctx
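# Hedged usage sketch of VolumeNode.format() above; this is the same call badge()
# makes, and the output in the comment is indicative of TEMPLATE, not captured output.
html = VolumeNode.format(500, "mL", make_badge=True)
# roughly: <span class="hmeasure badge " title="500 mL"><span class="num">500</span><span class="unit">mL</span></span>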
# drinker
@register.tag("drinker_name")
def drinker_name_tag(parser, token):
"""{% drinker_name <drink_or_user_obj> [nolink] %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return DrinkerNameNode(tokens[1], tokens[2:])
class DrinkerNameNode(Node):
def __init__(self, drink_varname, extra_args):
self._varname = drink_varname
self._extra_args = extra_args
def render(self, context):
obj = Variable(self._varname)
try:
obj = obj.resolve(context)
except (VariableDoesNotExist, ValueError):
obj = None
user = None
if obj:
if isinstance(obj, models.Drink) or isinstance(obj, models.SystemEvent):
user = obj.user
elif isinstance(obj, models.User):
user = obj
if user:
if "nolink" in self._extra_args:
return user.get_full_name()
else:
return '<a href="%s">%s</a>' % (
reverse("kb-drinker", args=[user.username]),
user.get_full_name(),
)
return context["guest_info"]["name"]
# chart
@register.tag("chart")
def chart(parser, tokens):
"""{% chart <charttype> <obj> width height %}"""
tokens = tokens.contents.split()
if len(tokens) < 4:
raise TemplateSyntaxError("chart requires at least 4 arguments")
charttype = tokens[1]
try:
width = int(tokens[-2])
height = int(tokens[-1])
except ValueError:
raise TemplateSyntaxError("invalid width or height")
args = tokens[2:-2]
return ChartNode(charttype, width, height, args)
class ChartNode(Node):
CHART_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox"></div>
<script type="text/javascript">
var chart_%(chart_id)s;
$(document).ready(function() {
var chart_data = %(chart_data)s;
chart_%(chart_id)s = new Highcharts.Chart(chart_data);
});
</script>
<!-- end chart %(chart_id)s -->
"""
ERROR_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox-error">
%(error_str)s
</div>
<!-- end chart %(chart_id)s -->
"""
def __init__(self, charttype, width, height, args):
self._charttype = charttype
self._width = width
self._height = height
self._args = args
self._chart_fn = getattr(charts, "chart_%s" % (self._charttype,), None)
def _get_chart_id(self, context):
# TODO(mikey): Is there a better way to store _CHART_ID?
if not hasattr(context, "_CHART_ID"):
context._CHART_ID = 0
context._CHART_ID += 1
return context._CHART | tv = Variable(self._volume_varname)
try:
num = float(tv.resolve(context))
except (VariableDoesNotExist, ValueError):
num = "unknown"
unit = "mL"
make_badge = "badge" in self._extra_args
return self.format(num, unit, make_badge) | identifier_body |
kegweblib.py | ictures"] = picture_or_pictures
c["thumb_size"] = thumb_size
c["gallery_id"] = gallery_id
return c
@register.inclusion_tag("kegweb/badge.html")
def badge(amount, caption, style="", is_volume=False, do_pluralize=False):
if is_volume:
amount = mark_safe(VolumeNode.format(amount, "mL"))
if do_pluralize:
caption += pluralize(amount)
return {
"badge_amount": amount,
"badge_caption": caption,
"badge_style": style,
}
@register.inclusion_tag("kegweb/includes/progress_bar.html")
def progress_bar(progress_int, extra_css=""):
c = {}
try:
progress_int = max(int(progress_int), 0)
except ValueError:
progress_int = 0
progress_int = min(progress_int, 100)
c["progress_int"] = progress_int
c["extra_css"] = extra_css
if progress_int < 10:
bar_type = "bar-danger"
elif progress_int < 25:
bar_type = "bar-warning"
else:
bar_type = "bar-success"
c["bar_type"] = bar_type
return c
# navitem
@register.tag("navitem")
def navitem(parser, token):
"""{% navitem <viewname> <title> [exact] %}"""
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("%s requires at least 3 tokens" % tokens[0])
return NavitemNode(*tokens[1:])
class NavitemNode(Node):
def __init__(self, *args):
self._viewname = args[0]
self._title = args[1]
self._exact = "exact" in args[2:]
def render(self, context):
viewname = Variable(self._viewname).resolve(context)
title = Variable(self._title).resolve(context)
if viewname.startswith("/"):
urlbase = viewname
else:
urlbase = reverse(viewname)
request_path = context["request_path"]
if self._exact:
active = request_path == urlbase
else:
active = request_path.startswith(urlbase)
if active:
res = '<li class="active">'
else:
res = "<li>"
res += '<a href="%s">%s</a></li>' % (urlbase, title)
return res
# timeago
@register.tag("timeago")
def timeago(parser, token):
"""{% timeago <timestamp> %}"""
tokens = token.contents.split()
if len(tokens) != 2:
raise TemplateSyntaxError("%s requires 2 tokens" % tokens[0])
return TimeagoNode(tokens[1])
class TimeagoNode(Node):
def __init__(self, timestamp_varname):
self._timestamp_varname = timestamp_varname
def render(self, context):
tv = Variable(self._timestamp_varname)
ts = tv.resolve(context)
# Try to set time zone information.
if settings.TIME_ZONE and not settings.USE_TZ:
try:
tz = pytz.timezone(settings.TIME_ZONE)
ts = tz.localize(ts)
except pytz.UnknownTimeZoneError:
pass
iso = ts.isoformat()
alt = timezone.localtime(ts).strftime("%A, %B %d, %Y %I:%M%p")
return '<abbr class="timeago" title="%s">%s</abbr>' % (iso, alt)
# temperature
@register.tag("temperature")
def temperature_tag(parser, token):
"""{% temperature <temp_c> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return TemperatureNode(tokens[1])
class TemperatureNode(Node):
TEMPLATE = "%(amount)s° %(unit)s"
def __init__(self, varname):
self.varname = varname
def render(self, context):
v = Variable(self.varname)
try:
amount = v.resolve(context)
except (VariableDoesNotExist, ValueError):
            amount = "unknown"
unit = "C"
kbsite = models.KegbotSite.get()
if kbsite.temperature_display_units == "f":
unit = "F"
amount = CtoF(amount)
return self.TEMPLATE % {"amount": amount, "unit": unit}
# volume
@register.tag("volume")
def volumetag(parser, token):
"""{% volume <amount> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return VolumeNode(tokens[1], tokens[2:])
class VolumeNode(Node):
TEMPLATE = """
<span class="hmeasure %(extra_css)s" title="%(title)s">
<span class="num">%(amount)s</span>
<span class="unit">%(units)s</span>
</span>""".strip()
def __init__(self, volume_varname, extra_args):
self._volume_varname = volume_varname
self._extra_args = extra_args
def render(self, context):
tv = Variable(self._volume_varname)
try:
num = float(tv.resolve(context))
except (VariableDoesNotExist, ValueError):
num = "unknown"
unit = "mL"
make_badge = "badge" in self._extra_args
return self.format(num, unit, make_badge)
@classmethod
def format(cls, amount, units, make_badge=False):
if amount < 0:
amount = 0
ctx = {
"units": units,
"amount": amount,
"title": "%s %s" % (amount, units),
"extra_css": "badge " if make_badge else "",
}
return cls.TEMPLATE % ctx
# drinker
@register.tag("drinker_name")
def drinker_name_tag(parser, token):
"""{% drinker_name <drink_or_user_obj> [nolink] %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return DrinkerNameNode(tokens[1], tokens[2:])
class DrinkerNameNode(Node):
def __init__(self, drink_varname, extra_args):
self._varname = drink_varname
self._extra_args = extra_args
def render(self, context):
obj = Variable(self._varname)
try:
obj = obj.resolve(context)
except (VariableDoesNotExist, ValueError):
obj = None
user = None
if obj:
if isinstance(obj, models.Drink) or isinstance(obj, models.SystemEvent):
user = obj.user
elif isinstance(obj, models.User):
user = obj
if user:
if "nolink" in self._extra_args:
return user.get_full_name()
else:
|
return context["guest_info"]["name"]
# chart
@register.tag("chart")
def chart(parser, tokens):
"""{% chart <charttype> <obj> width height %}"""
tokens = tokens.contents.split()
if len(tokens) < 4:
raise TemplateSyntaxError("chart requires at least 4 arguments")
charttype = tokens[1]
try:
width = int(tokens[-2])
height = int(tokens[-1])
except ValueError:
raise TemplateSyntaxError("invalid width or height")
args = tokens[2:-2]
return ChartNode(charttype, width, height, args)
class ChartNode(Node):
CHART_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox"></div>
<script type="text/javascript">
var chart_%(chart_id)s;
$(document).ready(function() {
var chart_data = %(chart_data)s;
chart_%(chart_id)s = new Highcharts.Chart(chart_data);
});
</script>
<!-- end chart %(chart_id)s -->
"""
ERROR_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox-error">
%(error_str)s
</div>
<!-- end chart %(chart_id)s -->
"""
def __init__(self, charttype, width, height, args):
self._charttype = charttype
self._width = width
self._height = height
self._args = args
self._chart_fn = getattr(charts, "chart_%s" % (self._charttype,), None)
def _get_chart_id(self, context):
# TODO(mikey): Is there a better way to store _CHART_ID?
if not hasattr(context, "_CHART_ID"):
context._CHART_ID = 0
context._CHART_ID += 1
return context._CHART | return '<a href="%s">%s</a>' % (
reverse("kb-drinker", args=[user.username]),
user.get_full_name(),
) | conditional_block |
kegweblib.py | c = {}
if not hasattr(picture_or_pictures, "__iter__"):
c["gallery_pictures"] = [picture_or_pictures]
else:
c["gallery_pictures"] = picture_or_pictures
c["thumb_size"] = thumb_size
c["gallery_id"] = gallery_id
return c
@register.inclusion_tag("kegweb/badge.html")
def badge(amount, caption, style="", is_volume=False, do_pluralize=False):
if is_volume:
amount = mark_safe(VolumeNode.format(amount, "mL"))
if do_pluralize:
caption += pluralize(amount)
return {
"badge_amount": amount,
"badge_caption": caption,
"badge_style": style,
}
@register.inclusion_tag("kegweb/includes/progress_bar.html")
def progress_bar(progress_int, extra_css=""):
c = {}
try:
progress_int = max(int(progress_int), 0)
except ValueError:
progress_int = 0
progress_int = min(progress_int, 100)
c["progress_int"] = progress_int
c["extra_css"] = extra_css
if progress_int < 10:
bar_type = "bar-danger"
elif progress_int < 25:
bar_type = "bar-warning"
else:
bar_type = "bar-success"
c["bar_type"] = bar_type
return c
# navitem
@register.tag("navitem")
def navitem(parser, token):
"""{% navitem <viewname> <title> [exact] %}"""
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("%s requires at least 3 tokens" % tokens[0])
return NavitemNode(*tokens[1:])
class NavitemNode(Node):
def __init__(self, *args):
self._viewname = args[0]
self._title = args[1]
self._exact = "exact" in args[2:]
def render(self, context):
viewname = Variable(self._viewname).resolve(context)
title = Variable(self._title).resolve(context)
if viewname.startswith("/"):
urlbase = viewname
else:
urlbase = reverse(viewname)
request_path = context["request_path"]
if self._exact:
active = request_path == urlbase
else:
active = request_path.startswith(urlbase)
if active:
res = '<li class="active">'
else:
res = "<li>"
res += '<a href="%s">%s</a></li>' % (urlbase, title)
return res
# timeago
@register.tag("timeago")
def timeago(parser, token):
"""{% timeago <timestamp> %}"""
tokens = token.contents.split()
if len(tokens) != 2:
raise TemplateSyntaxError("%s requires 2 tokens" % tokens[0])
return TimeagoNode(tokens[1])
class TimeagoNode(Node):
def __init__(self, timestamp_varname):
self._timestamp_varname = timestamp_varname
def render(self, context):
tv = Variable(self._timestamp_varname)
ts = tv.resolve(context)
# Try to set time zone information.
if settings.TIME_ZONE and not settings.USE_TZ:
try:
tz = pytz.timezone(settings.TIME_ZONE)
ts = tz.localize(ts)
except pytz.UnknownTimeZoneError:
pass
iso = ts.isoformat()
alt = timezone.localtime(ts).strftime("%A, %B %d, %Y %I:%M%p")
return '<abbr class="timeago" title="%s">%s</abbr>' % (iso, alt)
# temperature
@register.tag("temperature")
def temperature_tag(parser, token):
"""{% temperature <temp_c> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return TemperatureNode(tokens[1])
class TemperatureNode(Node):
TEMPLATE = "%(amount)s° %(unit)s"
def __init__(self, varname):
self.varname = varname
def render(self, context):
v = Variable(self.varname)
try:
amount = v.resolve(context)
except (VariableDoesNotExist, ValueError):
            amount = "unknown"
unit = "C"
kbsite = models.KegbotSite.get()
if kbsite.temperature_display_units == "f":
unit = "F"
amount = CtoF(amount)
return self.TEMPLATE % {"amount": amount, "unit": unit}
# volume
@register.tag("volume")
def volumetag(parser, token):
"""{% volume <amount> %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return VolumeNode(tokens[1], tokens[2:])
class VolumeNode(Node):
TEMPLATE = """
<span class="hmeasure %(extra_css)s" title="%(title)s">
<span class="num">%(amount)s</span>
<span class="unit">%(units)s</span>
</span>""".strip()
def __init__(self, volume_varname, extra_args):
self._volume_varname = volume_varname
self._extra_args = extra_args
def render(self, context):
tv = Variable(self._volume_varname)
try:
num = float(tv.resolve(context))
except (VariableDoesNotExist, ValueError):
num = "unknown"
unit = "mL"
make_badge = "badge" in self._extra_args
return self.format(num, unit, make_badge)
@classmethod
def format(cls, amount, units, make_badge=False):
if amount < 0:
amount = 0
ctx = {
"units": units,
"amount": amount,
"title": "%s %s" % (amount, units),
"extra_css": "badge " if make_badge else "",
}
return cls.TEMPLATE % ctx
# drinker
@register.tag("drinker_name")
def drinker_name_tag(parser, token):
"""{% drinker_name <drink_or_user_obj> [nolink] %}"""
tokens = token.contents.split()
if len(tokens) < 2:
raise TemplateSyntaxError("%s requires at least 2 tokens" % tokens[0])
return DrinkerNameNode(tokens[1], tokens[2:])
class DrinkerNameNode(Node):
def __init__(self, drink_varname, extra_args):
self._varname = drink_varname
self._extra_args = extra_args
def render(self, context):
obj = Variable(self._varname)
try:
obj = obj.resolve(context)
except (VariableDoesNotExist, ValueError):
obj = None
user = None
if obj:
if isinstance(obj, models.Drink) or isinstance(obj, models.SystemEvent):
user = obj.user
elif isinstance(obj, models.User):
user = obj
if user:
if "nolink" in self._extra_args:
return user.get_full_name()
else:
return '<a href="%s">%s</a>' % (
reverse("kb-drinker", args=[user.username]),
user.get_full_name(),
)
return context["guest_info"]["name"]
# chart
@register.tag("chart")
def chart(parser, tokens):
"""{% chart <charttype> <obj> width height %}"""
tokens = tokens.contents.split()
if len(tokens) < 4:
raise TemplateSyntaxError("chart requires at least 4 arguments")
charttype = tokens[1]
try:
width = int(tokens[-2])
height = int(tokens[-1])
except ValueError:
raise TemplateSyntaxError("invalid width or height")
args = tokens[2:-2]
return ChartNode(charttype, width, height, args)
class ChartNode(Node):
CHART_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox"></div>
<script type="text/javascript">
var chart_%(chart_id)s;
$(document).ready(function() {
var chart_data = %(chart_data)s;
chart_%(chart_id)s = new Highcharts.Chart(chart_data);
});
</script>
<!-- end chart %(chart_id)s -->
"""
ERROR_TMPL = """
<!-- begin chart %(chart_id)s -->
<div id="chart-%(chart_id)s-container"
style="height: %(height)spx; width: %(width)spx;"
class="kb-chartbox-error">
%(error_str)s
</div>
<!-- end chart %(chart_id)s -->
"""
def __init__(self, charttype, width, height, args):
self._charttype = charttype
self._width = width
self._height = height
self._args = args
self._chart_fn = getattr(charts, "chart_%s" % (self._ | @register.inclusion_tag("kegweb/picture-gallery.html")
def gallery(picture_or_pictures, thumb_size="span2", gallery_id=""): | random_line_split | |
ddpg-per.py | kids
cr_idx = cl_idx + 1
if cl_idx >= len(self.tree): # reach bottom, end search
leaf_idx = parent_idx
break
else: # downward search, always search for a higher priority node
if v <= self.tree[cl_idx]:
parent_idx = cl_idx
else:
v -= self.tree[cl_idx]
parent_idx = cr_idx
data_idx = leaf_idx - self.capacity + 1
return leaf_idx, self.tree[leaf_idx], self.data[data_idx]
@property
def total_p(self):
return self.tree[0] # the root
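# Hedged worked example of the layout described in the comments above (capacity 4,
# priorities 3, 10, 12, 4; the stored transition is just the priority itself).
# After the four adds, example_tree.tree == [29, 13, 16, 3, 10, 12, 4]: parents first, leaves last.
example_tree = SumTree(4)
for p in (3, 10, 12, 4):
    example_tree.add(p, p)
assert example_tree.total_p == 29
# get_leaf(24.0): 24 > 13 so descend right with v = 11; 11 <= 12 so stop at the leaf
# holding priority 12 (tree index 5, data index 2).
leaf_idx, priority, data = example_tree.get_leaf(24.0)
assert (leaf_idx, priority, data) == (5, 12.0, 12)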
class Memory(object): # stored as ( s, a, r, s_ ) in SumTree
epsilon = 0.001 # small amount to avoid zero priority
alpha = 0.5 # [0~1] convert the importance of TD error to priority
beta = 0.5 # importance-sampling, from initial value increasing to 1
beta_increment_per_sampling = 0.01
abs_err_upper = 1. # clipped abs error
def __init__(self, capacity):
self.tree = SumTree(capacity)
self.full_flag = False
def store(self, transition):
max_p = np.max(self.tree.tree[-self.tree.capacity:])
if max_p == 0:
max_p = self.abs_err_upper
self.tree.add(max_p, transition) # set the max p for new p
def sample(self, n):
b_idx, b_memory, ISWeights = np.empty((n,), dtype=np.int32), \
np.empty((n, self.tree.data[0].size)), \
np.empty((n, 1))
pri_seg = self.tree.total_p / n # priority segment
self.beta = np.min([1., self.beta + self.beta_increment_per_sampling]) # max = 1
min_prob = np.min(self.tree.tree[-self.tree.capacity:]) / self.tree.total_p # for later calculate ISweight
if min_prob == 0:
min_prob = 0.00001
for i in range(n):
a, b = pri_seg * i, pri_seg * (i + 1)
v = np.random.uniform(a, b)
idx, p, data = self.tree.get_leaf(v)
prob = p / self.tree.total_p
ISWeights[i, 0] = np.power(prob/min_prob, -self.beta)
b_idx[i], b_memory[i, :] = idx, data
return b_idx, b_memory, ISWeights
def batch_update(self, tree_idx, abs_errors):
abs_errors += self.epsilon # convert to abs and avoid 0
clipped_errors = np.minimum(abs_errors, self.abs_err_upper)
ps = np.power(clipped_errors, self.alpha)
for ti, p in zip(tree_idx, ps):
self.tree.update(ti, p)
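# Hedged numeric sketch of the two formulas above; epsilon/alpha/beta are the class
# constants, the TD error and leaf probabilities are illustrative assumptions.
_eps, _alpha, _beta = 0.001, 0.5, 0.5
_priority = np.power(min(abs(0.5) + _eps, 1.0), _alpha)     # batch_update(): ~0.708
_prob, _min_prob = 0.02, 0.005                              # hypothetical leaf probabilities
_is_weight = np.power(_prob / _min_prob, -_beta)            # sample(): 0.5
# the rarest transition (prob == min_prob) keeps weight 1.0; frequently sampled ones shrink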
###############################DDPG####################################
class DDPG(object):
def __init__(self, a_dim, s_dim, a_bound, train_dir="./ddpg_models", batch_size=32, MEMORY_SIZE=10000):
self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,
self.memory = Memory(capacity=MEMORY_SIZE)
self.pointer = 0
self.per_batch_size = batch_size
self.learn_step = 0
self.explore_noise = OU_noise(self.a_dim)
self.sess = tf.Session()
self.train_dir = train_dir
if not os.path.isdir(self.train_dir):
os.mkdir(self.train_dir)
self.actor_lr = tf.placeholder(tf.float32, shape=[], name='actor_lr')
self.critic_lr = tf.placeholder(tf.float32, shape=[], name='critic_lr')
self.S = tf.placeholder(tf.float32, [None, s_dim], 's')
self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')
self.R = tf.placeholder(tf.float32, [None, 1], 'r')
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('Actor'):
self.a = self._build_a(self.S, scope='eval', trainable=True)
a_ = self._build_a(self.S_, scope='target', trainable=False)
with tf.variable_scope('Critic'):
# assign self.a = a in memory when calculating q for td_error,
# otherwise the self.a is from Actor when updating Actor
q = self._build_c(self.S, self.a, scope='eval', trainable=True)
q_ = self._build_c(self.S_, a_, scope='target', trainable=False)
# networks parameters
self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')
self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')
# target net replacement
self.soft_replace = [tf.assign(t, (1 - TAU) * t + TAU * e)
for t, e in zip(self.at_params + self.ct_params,
self.ae_params + self.ce_params)]
q_target = self.R + GAMMA * q_
# in the feed_dic for the td_error, the self.a should change to actions in memory
# td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)
self.abs_errors = tf.reduce_sum(tf.abs(q_target - q), axis=1) # for updating Sumtree
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(q_target, q))
self.ctrain = tf.train.AdamOptimizer(self.critic_lr).minimize(self.loss, var_list=self.ce_params)
a_loss = - tf.reduce_mean(q) # maximize the q
self.atrain = tf.train.AdamOptimizer(self.actor_lr).minimize(a_loss, var_list=self.ae_params)
self.sess.run(tf.global_variables_initializer())
def choose_action(self, s, with_noise):
action = self.sess.run(self.a, {self.S: s[np.newaxis, :]})[0]
if with_noise:
noise = self.explore_noise.add_noise(action)
action = action + noise
return action
def learn(self, actor_lr_input, critic_lr_input, per_flag=True):
if per_flag:
tree_idx, batch_memory, ISWeights = self.memory.sample(self.per_batch_size) # sample for learning
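            # columns of each stored row: s = [0:3], a = [3:4], r = [4], s_ = [5:8];
            # these hard-coded slices assume s_dim == 3 and a_dim == 1 (e.g. Pendulum-v0)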
batch_states = batch_memory[:,0:3]
batch_actions = batch_memory[:,3:4]
batch_rewards = [data[4] for data in batch_memory]
batch_states_ = batch_memory[:,5:8]
bs = np.array(batch_states)
ba = np.array(batch_actions)
br = np.array(batch_rewards)
bs_ = np.array(batch_states_)
            br = br[:, np.newaxis]  # reshape rewards from shape (n,) to (n, 1)
self.sess.run(self.atrain, {self.S: bs, self.actor_lr: actor_lr_input})
_, abs_errors, cost = self.sess.run([self.ctrain, self.abs_errors, self.loss],
{self.S: bs, self.a: ba, self.R: br, self.S_: bs_,
self.critic_lr: critic_lr_input,
self.ISWeights: ISWeights})
self.memory.batch_update(tree_idx, abs_errors) # update priority
self.learn_step += 1
def store_transition(self, s, a, r, s_):
transition = np.hstack((s, a, r, s_))
self.memory.store(transition)
self.pointer += 1
def _build_a(self, s, scope, trainable):
with tf.variable_scope(scope):
net = tf.layers.dense(s, 30, activation=tf.nn.relu, name='l1', trainable=trainable)
# new_actor_layer = tf.layers.dense(net, 20, activation=tf.nn.relu, name='new_actor_layer', trainable=trainable)
a = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, name='a', trainable=trainable)
return tf.multiply(a, self.a_bound, name='scaled_a')
def _build_c(self, s, a, scope, trainable):
with tf.vari | able_scope(scope):
n_l1 = 30
w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)
w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)
net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
# new_critic_layer = tf.layers.dense(net, 300, activation=tf.nn.relu, name='new_critic_layer',
# trainable=trainable)
return tf.layers.dense(net, 1, trainable=trainable) # Q(s,a)
def up | identifier_body | |
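The DDPG constructor in the row above wires up two update rules: the soft target replacement theta_target <- (1 - TAU) * theta_target + TAU * theta_eval, and the one-step TD target q_target = r + GAMMA * Q'(s', a'). A hedged plain-numpy restatement with illustrative values (TAU/GAMMA here are stand-ins, not read from the module constants):
import numpy as np
TAU, GAMMA = 0.01, 0.9
theta_eval = np.array([1.0, -2.0])
theta_target = np.array([0.5, 0.5])
theta_target = (1 - TAU) * theta_target + TAU * theta_eval   # soft replace -> [0.505, 0.475]
q_target = 1.0 + GAMMA * 3.0                                  # TD target for r = 1, Q' = 3 -> 3.7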
ddpg-per.py | _pointer = 0
def __init__(self, capacity):
self.capacity = capacity # for all priority values
self.tree = np.zeros(2 * capacity - 1)
        # [--------------Parent nodes-------------][-------leaves to record priority-------]
# size: capacity - 1 size: capacity
self.data = list(np.zeros(capacity, dtype=object)) # for all transitions
# [--------------data frame-------------]
# size: capacity
def add(self, p, transition):
tree_idx = self.data_pointer + self.capacity - 1
self.data[self.data_pointer] = transition # update data_frame
self.update(tree_idx, p) # update tree_frame
self.b = np.array(self.data)
self.data_pointer += 1
if self.data_pointer >= self.capacity: # replace when exceed the capacity
self.data_pointer = 0
def update(self, tree_idx, p):
change = p - self.tree[tree_idx]
self.tree[tree_idx] = p
# then propagate the change through tree
while tree_idx != 0: # this method is faster than the recursive loop in the reference code
tree_idx = (tree_idx - 1) // 2
self.tree[tree_idx] += change
def get_leaf(self, v):
parent_idx = 0
while True: # the while loop is faster than the method in the reference code
cl_idx = 2 * parent_idx + 1 # this leaf's left and right kids
cr_idx = cl_idx + 1
if cl_idx >= len(self.tree): # reach bottom, end search
leaf_idx = parent_idx
break
else: # downward search, always search for a higher priority node
if v <= self.tree[cl_idx]:
parent_idx = cl_idx
else:
v -= self.tree[cl_idx]
parent_idx = cr_idx
data_idx = leaf_idx - self.capacity + 1
return leaf_idx, self.tree[leaf_idx], self.data[data_idx]
@property
def total_p(self):
return self.tree[0] # the root
class Memory(object): # stored as ( s, a, r, s_ ) in SumTree
epsilon = 0.001 # small amount to avoid zero priority
alpha = 0.5 # [0~1] convert the importance of TD error to priority
beta = 0.5 # importance-sampling, from initial value increasing to 1
beta_increment_per_sampling = 0.01
abs_err_upper = 1. # clipped abs error
def __init__(self, capacity):
self.tree = SumTree(capacity)
self.full_flag = False
def store(self, transition):
max_p = np.max(self.tree.tree[-self.tree.capacity:])
if max_p == 0:
max_p = self.abs_err_upper
self.tree.add(max_p, transition) # set the max p for new p
def sample(self, n):
b_idx, b_memory, ISWeights = np.empty((n,), dtype=np.int32), \
np.empty((n, self.tree.data[0].size)), \
np.empty((n, 1))
pri_seg = self.tree.total_p / n # priority segment
self.beta = np.min([1., self.beta + self.beta_increment_per_sampling]) # max = 1
min_prob = np.min(self.tree.tree[-self.tree.capacity:]) / self.tree.total_p # for later calculate ISweight
if min_prob == 0:
min_prob = 0.00001
for i in range(n):
a, b = pri_seg * i, pri_seg * (i + 1)
v = np.random.uniform(a, b)
idx, p, data = self.tree.get_leaf(v)
prob = p / self.tree.total_p
ISWeights[i, 0] = np.power(prob/min_prob, -self.beta)
b_idx[i], b_memory[i, :] = idx, data
return b_idx, b_memory, ISWeights
def batch_update(self, tree_idx, abs_errors):
abs_errors += self.epsilon # convert to abs and avoid 0
clipped_errors = np.minimum(abs_errors, self.abs_err_upper)
ps = np.power(clipped_errors, self.alpha)
for ti, p in zip(tree_idx, ps):
self.tree.update(ti, p)
###############################DDPG####################################
class DDPG(object):
def __init__(self, a_dim, s_dim, a_bound, train_dir="./ddpg_models", batch_size=32, MEMORY_SIZE=10000):
self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,
self.memory = Memory(capacity=MEMORY_SIZE)
self.pointer = 0
self.per_batch_size = batch_size
self.learn_step = 0
self.explore_noise = OU_noise(self.a_dim)
self.sess = tf.Session()
self.train_dir = train_dir
if not os.path.isdir(self.train_dir):
os.mkdir(self.train_dir)
self.actor_lr = tf.placeholder(tf.float32, shape=[], name='actor_lr')
self.critic_lr = tf.placeholder(tf.float32, shape=[], name='critic_lr')
self.S = tf.placeholder(tf.float32, [None, s_dim], 's')
self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')
self.R = tf.placeholder(tf.float32, [None, 1], 'r')
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('Actor'):
self.a = self._build_a(self.S, scope='eval', trainable=True)
a_ = self._build_a(self.S_, scope='target', trainable=False)
with tf.variable_scope('Critic'):
# assign self.a = a in memory when calculating q for td_error,
# otherwise the self.a is from Actor when updating Actor
q = self._build_c(self.S, self.a, scope='eval', trainable=True)
q_ = self._build_c(self.S_, a_, scope='target', trainable=False)
# networks parameters
self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')
self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')
# target net replacement
self.soft_replace = [tf.assign(t, (1 - TAU) * t + TAU * e)
for t, e in zip(self.at_params + self.ct_params,
self.ae_params + self.ce_params)]
q_target = self.R + GAMMA * q_
# in the feed_dic for the td_error, the self.a should change to actions in memory
# td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)
self.abs_errors = tf.reduce_sum(tf.abs(q_target - q), axis=1) # for updating Sumtree
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(q_target, q))
self.ctrain = tf.train.AdamOptimizer(self.critic_lr).minimize(self.loss, var_list=self.ce_params)
a_loss = - tf.reduce_mean(q) # maximize the q
self.atrain = tf.train.AdamOptimizer(self.actor_lr).minimize(a_loss, var_list=self.ae_params)
self.sess.run(tf.global_variables_initializer())
def choose_action(self, s, with_noise):
action = self.sess.run(self.a, {self.S: s[np.newaxis, :]})[0]
if with_noise:
noise = self.explore_noise.add_noise(action)
action = action + noise
return action
def learn(self, actor_lr_input, critic_lr_input, per_flag=True):
if per_flag:
tree_idx, batch_memory, ISWeights = self.memory.sample(self.per_batch_size) # sample for learning
batch_states = batch_memory[:,0:3]
batch_actions = batch_memory[:,3:4]
batch_rewards = [data[4] for data in batch_memory]
batch_states_ = batch_memory[:,5:8]
bs = np.array(batch_states)
ba = np.array(batch_actions)
br = np.array(batch_rewards)
bs_ = np.array(batch_states_)
            br = br[:, np.newaxis]  # reshape rewards from shape (n,) to (n, 1)
self.sess.run(self.atrain, {self.S: bs, self.actor_lr: actor_lr_input})
_, abs_errors, cost = self.sess.run([self.ctrain, self.abs_errors, self.loss],
{self.S: bs, self.a: ba, self.R: br, self.S_: bs_,
self.critic_lr: critic_lr_input,
self.ISWeights: ISWeights})
self.memory.batch_update(tree_idx, abs_errors) # update priority
self.learn_step += 1
def store_transi | tion(self, s, a, | identifier_name | |
ddpg-per.py | transition):
max_p = np.max(self.tree.tree[-self.tree.capacity:])
if max_p == 0:
max_p = self.abs_err_upper
self.tree.add(max_p, transition) # set the max p for new p
def sample(self, n):
b_idx, b_memory, ISWeights = np.empty((n,), dtype=np.int32), \
np.empty((n, self.tree.data[0].size)), \
np.empty((n, 1))
pri_seg = self.tree.total_p / n # priority segment
self.beta = np.min([1., self.beta + self.beta_increment_per_sampling]) # max = 1
min_prob = np.min(self.tree.tree[-self.tree.capacity:]) / self.tree.total_p # for later calculate ISweight
if min_prob == 0:
min_prob = 0.00001
for i in range(n):
a, b = pri_seg * i, pri_seg * (i + 1)
v = np.random.uniform(a, b)
idx, p, data = self.tree.get_leaf(v)
prob = p / self.tree.total_p
ISWeights[i, 0] = np.power(prob/min_prob, -self.beta)
b_idx[i], b_memory[i, :] = idx, data
return b_idx, b_memory, ISWeights
def batch_update(self, tree_idx, abs_errors):
abs_errors += self.epsilon # convert to abs and avoid 0
clipped_errors = np.minimum(abs_errors, self.abs_err_upper)
ps = np.power(clipped_errors, self.alpha)
for ti, p in zip(tree_idx, ps):
self.tree.update(ti, p)
###############################DDPG####################################
class DDPG(object):
def __init__(self, a_dim, s_dim, a_bound, train_dir="./ddpg_models", batch_size=32, MEMORY_SIZE=10000):
self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,
self.memory = Memory(capacity=MEMORY_SIZE)
self.pointer = 0
self.per_batch_size = batch_size
self.learn_step = 0
self.explore_noise = OU_noise(self.a_dim)
self.sess = tf.Session()
self.train_dir = train_dir
if not os.path.isdir(self.train_dir):
os.mkdir(self.train_dir)
self.actor_lr = tf.placeholder(tf.float32, shape=[], name='actor_lr')
self.critic_lr = tf.placeholder(tf.float32, shape=[], name='critic_lr')
self.S = tf.placeholder(tf.float32, [None, s_dim], 's')
self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')
self.R = tf.placeholder(tf.float32, [None, 1], 'r')
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('Actor'):
self.a = self._build_a(self.S, scope='eval', trainable=True)
a_ = self._build_a(self.S_, scope='target', trainable=False)
with tf.variable_scope('Critic'):
# assign self.a = a in memory when calculating q for td_error,
# otherwise the self.a is from Actor when updating Actor
q = self._build_c(self.S, self.a, scope='eval', trainable=True)
q_ = self._build_c(self.S_, a_, scope='target', trainable=False)
# networks parameters
self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')
self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')
# target net replacement
self.soft_replace = [tf.assign(t, (1 - TAU) * t + TAU * e)
for t, e in zip(self.at_params + self.ct_params,
self.ae_params + self.ce_params)]
q_target = self.R + GAMMA * q_
# in the feed_dic for the td_error, the self.a should change to actions in memory
# td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)
self.abs_errors = tf.reduce_sum(tf.abs(q_target - q), axis=1) # for updating Sumtree
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(q_target, q))
self.ctrain = tf.train.AdamOptimizer(self.critic_lr).minimize(self.loss, var_list=self.ce_params)
a_loss = - tf.reduce_mean(q) # maximize the q
self.atrain = tf.train.AdamOptimizer(self.actor_lr).minimize(a_loss, var_list=self.ae_params)
self.sess.run(tf.global_variables_initializer())
def choose_action(self, s, with_noise):
action = self.sess.run(self.a, {self.S: s[np.newaxis, :]})[0]
if with_noise:
noise = self.explore_noise.add_noise(action)
action = action + noise
return action
def learn(self, actor_lr_input, critic_lr_input, per_flag=True):
if per_flag:
tree_idx, batch_memory, ISWeights = self.memory.sample(self.per_batch_size) # sample for learning
batch_states = batch_memory[:,0:3]
batch_actions = batch_memory[:,3:4]
batch_rewards = [data[4] for data in batch_memory]
batch_states_ = batch_memory[:,5:8]
bs = np.array(batch_states)
ba = np.array(batch_actions)
br = np.array(batch_rewards)
bs_ = np.array(batch_states_)
            br = br[:, np.newaxis]  # reshape rewards from shape (n,) to (n, 1)
self.sess.run(self.atrain, {self.S: bs, self.actor_lr: actor_lr_input})
_, abs_errors, cost = self.sess.run([self.ctrain, self.abs_errors, self.loss],
{self.S: bs, self.a: ba, self.R: br, self.S_: bs_,
self.critic_lr: critic_lr_input,
self.ISWeights: ISWeights})
self.memory.batch_update(tree_idx, abs_errors) # update priority
self.learn_step += 1
def store_transition(self, s, a, r, s_):
transition = np.hstack((s, a, r, s_))
self.memory.store(transition)
self.pointer += 1
def _build_a(self, s, scope, trainable):
with tf.variable_scope(scope):
net = tf.layers.dense(s, 30, activation=tf.nn.relu, name='l1', trainable=trainable)
# new_actor_layer = tf.layers.dense(net, 20, activation=tf.nn.relu, name='new_actor_layer', trainable=trainable)
a = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, name='a', trainable=trainable)
return tf.multiply(a, self.a_bound, name='scaled_a')
def _build_c(self, s, a, scope, trainable):
with tf.variable_scope(scope):
n_l1 = 30
w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)
w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)
net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
# new_critic_layer = tf.layers.dense(net, 300, activation=tf.nn.relu, name='new_critic_layer',
# trainable=trainable)
return tf.layers.dense(net, 1, trainable=trainable) # Q(s,a)
def update_target_q_network(self, episode):
# update target Q netowrk by soft_replace
if episode % REPLACE_TARGET_FREQ == 0:
self.sess.run(self.soft_replace)
# print('episode '+str(episode) +', target Q network params replaced!')
def load_network(self, saver, load_path):
checkpoint = tf.train.get_checkpoint_state(load_path)
if checkpoint and checkpoint.model_checkpoint_path:
# self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
saver.restore(self.sess, tf.train.latest_checkpoint(load_path))
print("Successfully loaded:", checkpoint.model_checkpoint_path)
self.learn_step = int(checkpoint.model_checkpoint_path.split('-')[-1])
else:
print("Could not find old network weights")
def save_network(self, time_step, saver, save_path):
saver.save(self.sess, save_path + 'network', global_step=time_step,
write_meta_graph=False)
############################### training ####################################
def main():
env = gym.make(ENV_NAME)
env = env.unwrapped
env.seed(1)
s_dim = env.observation_space.shape[0]
a_dim = env.action_space.shape[0]
a_bound = env.action_space.high |
agent = DDPG(a_dim, s_dim, a_bound)
total_steps = 0
var = 3 | random_line_split | |
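main() in this row is cut off right after the environment setup, so here is a hedged sketch of how the pieces above are typically driven; the episode/step counts, the reward scaling, the warm-up threshold, and the learning rates are assumptions rather than values from the truncated code:
for episode in range(200):
    s = env.reset()
    for step in range(200):
        a = agent.choose_action(s, with_noise=True)
        s_, r, done, _ = env.step(a)
        agent.store_transition(s, a, r / 10, s_)   # scaled reward, a common Pendulum convention
        if agent.pointer > 1000:                   # start learning once the buffer has samples
            agent.learn(actor_lr_input=1e-4, critic_lr_input=2e-3)
        s = s_
    agent.update_target_q_network(episode)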
ddpg-per.py | + state, self.action_low, self.action_high)
class SumTree(object):
data_pointer = 0
def __init__(self, capacity):
self.capacity = capacity # for all priority values
self.tree = np.zeros(2 * capacity - 1)
        # [--------------Parent nodes-------------][-------leaves to record priority-------]
# size: capacity - 1 size: capacity
self.data = list(np.zeros(capacity, dtype=object)) # for all transitions
# [--------------data frame-------------]
# size: capacity
def add(self, p, transition):
tree_idx = self.data_pointer + self.capacity - 1
self.data[self.data_pointer] = transition # update data_frame
self.update(tree_idx, p) # update tree_frame
self.b = np.array(self.data)
self.data_pointer += 1
if self.data_pointer >= self.capacity: # replace when exceed the capacity
self.data_pointer = 0
def update(self, tree_idx, p):
change = p - self.tree[tree_idx]
self.tree[tree_idx] = p
# then propagate the change through tree
while tree_idx != 0: # this method is faster than the recursive loop in the reference code
tree_idx = (tree_idx - 1) // 2
self.tree[tree_idx] += change
def get_leaf(self, v):
parent_idx = 0
while True: # the while loop is faster than the method in the reference code
cl_idx = 2 * parent_idx + 1 # this leaf's left and right kids
cr_idx = cl_idx + 1
if cl_idx >= len(self.tree): # reach bottom, end search
leaf_idx = parent_idx
break
else: # downward search, always search for a higher priority node
if v <= self.tree[cl_idx]:
parent_idx = cl_idx
else:
v -= self.tree[cl_idx]
parent_idx = cr_idx
data_idx = leaf_idx - self.capacity + 1
return leaf_idx, self.tree[leaf_idx], self.data[data_idx]
@property
def total_p(self):
return self.tree[0] # the root
class Memory(object): # stored as ( s, a, r, s_ ) in SumTree
epsilon = 0.001 # small amount to avoid zero priority
alpha = 0.5 # [0~1] convert the importance of TD error to priority
beta = 0.5 # importance-sampling, from initial value increasing to 1
beta_increment_per_sampling = 0.01
abs_err_upper = 1. # clipped abs error
def __init__(self, capacity):
self.tree = SumTree(capacity)
self.full_flag = False
def store(self, transition):
max_p = np.max(self.tree.tree[-self.tree.capacity:])
if max_p == 0:
max_p = self.abs_err_upper
self.tree.add(max_p, transition) # set the max p for new p
def sample(self, n):
b_idx, b_memory, ISWeights = np.empty((n,), dtype=np.int32), \
np.empty((n, self.tree.data[0].size)), \
np.empty((n, 1))
pri_seg = self.tree.total_p / n # priority segment
self.beta = np.min([1., self.beta + self.beta_increment_per_sampling]) # max = 1
min_prob = np.min(self.tree.tree[-self.tree.capacity:]) / self.tree.total_p # for later calculate ISweight
if min_prob == 0:
min_prob = 0.00001
for i in range(n):
a, b = pri_seg * i, pri_seg * (i + 1)
v = np.random.uniform(a, b)
idx, p, data = self.tree.get_leaf(v)
prob = p / self.tree.total_p
ISWeights[i, 0] = np.power(prob/min_prob, -self.beta)
b_idx[i], b_memory[i, :] = idx, data
return b_idx, b_memory, ISWeights
def batch_update(self, tree_idx, abs_errors):
abs_errors += self.epsilon # convert to abs and avoid 0
clipped_errors = np.minimum(abs_errors, self.abs_err_upper)
ps = np.power(clipped_errors, self.alpha)
for ti, p in zip(tree_idx, ps):
self.tree.update(ti, p)
###############################DDPG####################################
class DDPG(object):
def __init__(self, a_dim, s_dim, a_bound, train_dir="./ddpg_models", batch_size=32, MEMORY_SIZE=10000):
self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,
self.memory = Memory(capacity=MEMORY_SIZE)
self.pointer = 0
self.per_batch_size = batch_size
self.learn_step = 0
self.explore_noise = OU_noise(self.a_dim)
self.sess = tf.Session()
self.train_dir = train_dir
if not os.path.isdir(self.train_dir):
os.mkdir(self.train_dir)
self.actor_lr = tf.placeholder(tf.float32, shape=[], name='actor_lr')
self.critic_lr = tf.placeholder(tf.float32, shape=[], name='critic_lr')
self.S = tf.placeholder(tf.float32, [None, s_dim], 's')
self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')
self.R = tf.placeholder(tf.float32, [None, 1], 'r')
self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')
with tf.variable_scope('Actor'):
self.a = self._build_a(self.S, scope='eval', trainable=True)
a_ = self._build_a(self.S_, scope='target', trainable=False)
with tf.variable_scope('Critic'):
# assign self.a = a in memory when calculating q for td_error,
# otherwise the self.a is from Actor when updating Actor
q = self._build_c(self.S, self.a, scope='eval', trainable=True)
q_ = self._build_c(self.S_, a_, scope='target', trainable=False)
# networks parameters
self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')
self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')
# target net replacement
self.soft_replace = [tf.assign(t, (1 - TAU) * t + TAU * e)
for t, e in zip(self.at_params + self.ct_params,
self.ae_params + self.ce_params)]
q_target = self.R + GAMMA * q_
# in the feed_dic for the td_error, the self.a should change to actions in memory
# td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)
self.abs_errors = tf.reduce_sum(tf.abs(q_target - q), axis=1) # for updating Sumtree
self.loss = tf.reduce_mean(self.ISWeights * tf.squared_difference(q_target, q))
self.ctrain = tf.train.AdamOptimizer(self.critic_lr).minimize(self.loss, var_list=self.ce_params)
a_loss = - tf.reduce_mean(q) # maximize the q
self.atrain = tf.train.AdamOptimizer(self.actor_lr).minimize(a_loss, var_list=self.ae_params)
self.sess.run(tf.global_variables_initializer())
def choose_action(self, s, with_noise):
action = self.sess.run(self.a, {self.S: s[np.newaxis, :]})[0]
if with_noise:
noise = self.explore_noise.add_noise(action)
action = action + noise
return action
def learn(self, actor_lr_input, critic_lr_input, per_flag=True):
if per_flag:
tree_idx, ba | tch_memory, ISWeights = self.memory.sample(self.per_batch_size) # sample for learning
batch_states = batch_memory[:,0:3]
batch_actions = batch_memory[:,3:4]
batch_rewards = [data[4] for data in batch_memory]
batch_states_ = batch_memory[:,5:8]
bs = np.array(batch_states)
ba = np.array(batch_actions)
br = np.array(batch_rewards)
bs_ = np.array(batch_states_)
            br = br[:, np.newaxis]  # reshape rewards from shape (n,) to (n, 1)
self.sess.run(self.atrain, {self.S: bs, self.actor_lr: actor_lr_input})
_, abs_errors, cost = self.sess.run([self.ctrain, self.abs_errors, self.loss],
{self.S: bs, self.a: ba, self.R: br, self.S_: bs_,
self.critic_lr: critic_lr_input,
self.ISWeights: ISWeights})
self.memory.batch_update(tree_idx, abs_errors) # update priority
| conditional_block | |
bot.js | client.join(client.CFG.JOIN_CHANNELS);
client.capReq(":twitch.tv/tags twitch.tv/commands twitch.tv/membership");
console.log("Bot: Ready!");
client.send("#supibot", "@Supinic I'm back MrDestructoid");
});
client.on("message", (evt) => {
const user = evt.user.getNick().toLowerCase();
const chan = evt.channel.getName();
const msg = evt.message;
const now = Date.now();
if (chan === "#supinic") {
SUPINIC_CHANNEL.message(
user,
msg,
client.CFG.USER_LEVELS[user] <= -1e6,
client.CFG.USER_LEVELS[user] < 1e6 && (client.GLOBAL_COOLDOWNS[chan] && now <= client.GLOBAL_COOLDOWNS[chan])
);
}
// Skip banned users
if (client.CFG.USER_LEVELS[user] <= -1e6) {
return;
}
// Declare AFK people as non AFK - silently, if necessary
checkAFK(user, chan, evt);
// If it's a stealth channel, skip everything
if (client.CFG.STEALTH_CHANNELS.indexOf(chan) !== -1) {
return;
}
// Mirror messages to discord, if it's a linked channel
if (chan === client.CFG.CHAN.CEREBOT && client.CFG.DISCORD_LINK_ENABLED) {
client.DiscordClient && client.DiscordClient.send(user, msg, chan, evt.tags);
}
// Return if global cooldown did not pass. Does not apply to supermods
if (client.CFG.USER_LEVELS[user] < 1e6 && (client.GLOBAL_COOLDOWNS[chan] && now <= client.GLOBAL_COOLDOWNS[chan])) {
return;
}
if (msg.indexOf("$debug") === 0 && client.CFG.USER_LEVELS[user] >= DEBUG.level) {
DEBUG.exec(user, msg.split("$debug")[1].split(" "), evt);
}
else if (msg === "bot" || msg.indexOf("!afk") === 0) {
let silent = false;
if (msg === "bot") {
evt.reply("smol bot made by @supinic supiniL my commands start with $ - try $help for a list of commands");
}
else if (msg.indexOf("!afk") === 0) {
silent = true;
AFK.exec(user, msg.split(" ").splice(1), evt, true);
}
if (!silent) {
client.GLOBAL_COOLDOWNS[chan] = client.GLOBAL_COOLDOWNS[chan] || 0;
client.GLOBAL_COOLDOWNS[chan] = now + (client.CFG.CHANNEL_GLOBAL_COOLDOWNS[chan] || client.CFG.DEFAULT_GLOBAL_COOLDOWN);
}
}
else if (chan === "#forsen" && (user === "forsenai" || user === "snusbot")) {
if (
(user === "forsenai" && msg.indexOf("forsenThink") !== -1) ||
(user === "snusbot" && msg.indexOf("question/hint/clue") !== -1)
) {
let query = msg
.replace(" forsenThink", "")
.replace(/.*clue is(.*)" OMGScoots(.*)/, "$1")
.replace(/ /g, "+");
let url = "http://www.j-archive.com/search.php?submit=Search&search=" + query;
request(url, (err, data, body) => {
let parsedData = body.match(/class="search_correct_response">(.*?)<\/span>/);
let answer = (parsedData && Utils.removeHTML(parsedData[1])) || null;
if (answer) {
client.latestTriviaAnswer = answer;
client.AUTO_TRIVIA && evt.reply(answer);
// console.log("[" + new Date().simpleDateTime() + "] Answer: ", answer);
}
else {
client.latestTriviaAnswer = "eShrug idk kev";
// console.log("idk");
}
});
}
}
else if (chan === "#forsen" && user === "gazatu2" && msg.has("question:")) {
const question = (msg.match(/question: (.*)/) || [])[1];
COMMANDS.autoGazatu(question)
.then(answer => {
if (client.AUTO_GAZ && answer) {
evt.reply(answer);
}
// console.log(`GAZATU TRIVIA [${answer || "<no answer found>"}] <- ${question}`);
})
.catch(err => console.log("[GAZATU TRIVIA ERROR] ", err));
}
});
client.on("command", (evt) => {
if (!evt.channel) {
console.log("An event with no channel?", evt);
return;
}
const args = (evt.args || []).map(i => i.replace(new RegExp(client.CFG.BAN_EVASION_CHARACTER, "g"), "").trim());
const cmd = evt.cmd.toLowerCase(); // @todo remove this, it is just temporary
const chan = evt.channel.getName().toLowerCase();
const user = evt.user.getNick().toLowerCase();
const now = Date.now();
client.CFG.USER_LEVELS[user] = client.CFG.USER_LEVELS[user] || 1;
const command = COMMANDS.find(i =>
cmd === i.name.toLowerCase() || (Array.isArray(i.aliases) && i.aliases.some(j => cmd === j.toLowerCase()))
);
if (!command) {
return;
}
// Skip own commands, if that would ever happen for some reason.
if (user === "supibot") return;
console.log(`CMD REQUEST (${chan}) [${new Date().simpleDateTime()}] <${user}>: ${client.CFG.COMMAND_PREFIX}${cmd} ${(args && args.join(" ")) || ""}`);
// Skip banned users
if (client.CFG.USER_LEVELS[user] <= -1e6) {
console.log("CMD REQUEST FAILED - BANNED");
return;
}
// Declare AFK people as non AFK - silently, if necessary
// checkAFK(user, chan, evt);
// If it's a stealth channel, skip everything
if (client.CFG.STEALTH_CHANNELS.indexOf(chan) !== -1) {
console.log("CMD REQUEST FAILED - STEALTH CHANNEL");
return;
}
// Skip if global cooldown hasn't passed yet. Doesn't apply to supermods.
// Also doesn't apply to read-only commands, those never reply - no global cooldown is needed.
if (!command.readOnly && client.CFG.USER_LEVELS[user] < 1e6 && now <= client.GLOBAL_COOLDOWNS[chan]) {
console.log("CMD REQUEST FAILED - GLOBAL COOLDOWN", (client.GLOBAL_COOLDOWNS[chan] - now));
return;
}
client.USER_COOLDOWNS[user] = client.USER_COOLDOWNS[user] || {};
// Skip execution if the user cooldown isn't expired
if (client.CFG.USER_LEVELS[user] < 1e6 && now <= client.USER_COOLDOWNS[user][command.name]) {
const time = (client.USER_COOLDOWNS[user][command.name] - now) / 1000;
client.send("#supibot",
".w " + user + " " +
"Your cooldown for " + client.CFG.COMMAND_PREFIX + cmd + " " +
"has not expired yet: " + time + " seconds remaining."
);
console.log("CMD REQUEST FAILED - USER COOLDOWN", (client.USER_COOLDOWNS[user][command.name] - now));
return;
}
// Set the global cooldown in all cases
client.GLOBAL_COOLDOWNS[chan] = now + (client.CFG.CHANNEL_GLOBAL_COOLDOWNS[chan] || client.CFG.DEFAULT_GLOBAL_COOLDOWN);
const msgLimit = client.CFG.CHANNEL_MSG_LIMIT[chan] || client.CFG.DEFAULT_MSG_LIMIT || 450;
// If it's a protected channel, pajbot-check it. This is done by overwriting the reply function with a call to the snusbot API, and checking its result
if (client.CFG.PAJLADIFIED_CHANNELS.indexOf(chan) !== -1) {
evt.reply = pajladify(user, chan, msgLimit, command).bind(evt);
}
// If it isn't, modify the reply function so that we always send the ban-evasion character and do some basic banphrase checking.
else {
evt.reply = (function (msg) {
const isDiscord = (Object.values(client.CFG.DISCORD_LINK).indexOf(chan) !== -1);
let ping = "";
if (client.CFG.PING_CHANNELS.has(chan) && !client.CFG.PING_EXCLUDED_COMMANDS.has(command.name)) {
ping = user + ", ";
}
client.BAN_EVASION_FLAGS[chan] = !client.BAN_EVASION_FLAGS[chan];
msg = ping
+ msg + " " | random_line_split | ||
bot.js | }
// Declare AFK people as non AFK - silently, if necessary
checkAFK(user, chan, evt);
// If it's a stealth channel, skip everything
if (client.CFG.STEALTH_CHANNELS.indexOf(chan) !== -1) {
return;
}
// Mirror messages to discord, if it's a linked channel
if (chan === client.CFG.CHAN.CEREBOT && client.CFG.DISCORD_LINK_ENABLED) {
client.DiscordClient && client.DiscordClient.send(user, msg, chan, evt.tags);
}
// Return if global cooldown did not pass. Does not apply to supermods
if (client.CFG.USER_LEVELS[user] < 1e6 && (client.GLOBAL_COOLDOWNS[chan] && now <= client.GLOBAL_COOLDOWNS[chan])) {
return;
}
if (msg.indexOf("$debug") === 0 && client.CFG.USER_LEVELS[user] >= DEBUG.level) {
DEBUG.exec(user, msg.split("$debug")[1].split(" "), evt);
}
else if (msg === "bot" || msg.indexOf("!afk") === 0) {
let silent = false;
if (msg === "bot") {
evt.reply("smol bot made by @supinic supiniL my commands start with $ - try $help for a list of commands");
}
else if (msg.indexOf("!afk") === 0) {
silent = true;
AFK.exec(user, msg.split(" ").splice(1), evt, true);
}
if (!silent) {
client.GLOBAL_COOLDOWNS[chan] = client.GLOBAL_COOLDOWNS[chan] || 0;
client.GLOBAL_COOLDOWNS[chan] = now + (client.CFG.CHANNEL_GLOBAL_COOLDOWNS[chan] || client.CFG.DEFAULT_GLOBAL_COOLDOWN);
}
}
else if (chan === "#forsen" && (user === "forsenai" || user === "snusbot")) {
if (
(user === "forsenai" && msg.indexOf("forsenThink") !== -1) ||
(user === "snusbot" && msg.indexOf("question/hint/clue") !== -1)
) {
let query = msg
.replace(" forsenThink", "")
.replace(/.*clue is(.*)" OMGScoots(.*)/, "$1")
.replace(/ /g, "+");
let url = "http://www.j-archive.com/search.php?submit=Search&search=" + query;
request(url, (err, data, body) => {
let parsedData = body.match(/class="search_correct_response">(.*?)<\/span>/);
let answer = (parsedData && Utils.removeHTML(parsedData[1])) || null;
if (answer) {
client.latestTriviaAnswer = answer;
client.AUTO_TRIVIA && evt.reply(answer);
// console.log("[" + new Date().simpleDateTime() + "] Answer: ", answer);
}
else {
client.latestTriviaAnswer = "eShrug idk kev";
// console.log("idk");
}
});
}
}
else if (chan === "#forsen" && user === "gazatu2" && msg.has("question:")) {
const question = (msg.match(/question: (.*)/) || [])[1];
COMMANDS.autoGazatu(question)
.then(answer => {
if (client.AUTO_GAZ && answer) {
evt.reply(answer);
}
// console.log(`GAZATU TRIVIA [${answer || "<no answer found>"}] <- ${question}`);
})
.catch(err => console.log("[GAZATU TRIVIA ERROR] ", err));
}
});
client.on("command", (evt) => {
if (!evt.channel) {
console.log("An event with no channel?", evt);
return;
}
const args = (evt.args || []).map(i => i.replace(new RegExp(client.CFG.BAN_EVASION_CHARACTER, "g"), "").trim());
const cmd = evt.cmd.toLowerCase(); // @todo remove this, it is just temporary
const chan = evt.channel.getName().toLowerCase();
const user = evt.user.getNick().toLowerCase();
const now = Date.now();
client.CFG.USER_LEVELS[user] = client.CFG.USER_LEVELS[user] || 1;
const command = COMMANDS.find(i =>
cmd === i.name.toLowerCase() || (Array.isArray(i.aliases) && i.aliases.some(j => cmd === j.toLowerCase()))
);
if (!command) {
return;
}
// Skip own commands, if that would ever happen for some reason.
if (user === "supibot") return;
console.log(`CMD REQUEST (${chan}) [${new Date().simpleDateTime()}] <${user}>: ${client.CFG.COMMAND_PREFIX}${cmd} ${(args && args.join(" ")) || ""}`);
// Skip banned users
if (client.CFG.USER_LEVELS[user] <= -1e6) {
console.log("CMD REQUEST FAILED - BANNED");
return;
}
// Declare AFK people as non AFK - silently, if necessary
// checkAFK(user, chan, evt);
// If it's a stealth channel, skip everything
if (client.CFG.STEALTH_CHANNELS.indexOf(chan) !== -1) {
console.log("CMD REQUEST FAILED - STEALTH CHANNEL");
return;
}
// Skip if global cooldown hasn't passed yet. Doesn't apply to supermods.
// Also doesn't apply to read-only commands, those never reply - no global cooldown is needed.
if (!command.readOnly && client.CFG.USER_LEVELS[user] < 1e6 && now <= client.GLOBAL_COOLDOWNS[chan]) {
console.log("CMD REQUEST FAILED - GLOBAL COOLDOWN", (client.GLOBAL_COOLDOWNS[chan] - now));
return;
}
client.USER_COOLDOWNS[user] = client.USER_COOLDOWNS[user] || {};
// Skip execution if the user cooldown isn't expired
if (client.CFG.USER_LEVELS[user] < 1e6 && now <= client.USER_COOLDOWNS[user][command.name]) {
const time = (client.USER_COOLDOWNS[user][command.name] - now) / 1000;
client.send("#supibot",
".w " + user + " " +
"Your cooldown for " + client.CFG.COMMAND_PREFIX + cmd + " " +
"has not expired yet: " + time + " seconds remaining."
);
console.log("CMD REQUEST FAILED - USER COOLDOWN", (client.USER_COOLDOWNS[user][command.name] - now));
return;
}
// Set the global cooldown in all cases
client.GLOBAL_COOLDOWNS[chan] = now + (client.CFG.CHANNEL_GLOBAL_COOLDOWNS[chan] || client.CFG.DEFAULT_GLOBAL_COOLDOWN);
const msgLimit = client.CFG.CHANNEL_MSG_LIMIT[chan] || client.CFG.DEFAULT_MSG_LIMIT || 450;
// If it's a protected channel, pajbot-check it. This is done by overwriting the reply function with a call to the snusbot API, and checking its result
if (client.CFG.PAJLADIFIED_CHANNELS.indexOf(chan) !== -1) {
evt.reply = pajladify(user, chan, msgLimit, command).bind(evt);
}
// If it isn't, modify the reply function so that we always send the ban-evasion character and do some basic banphrase checking.
else {
evt.reply = (function (msg) {
const isDiscord = (Object.values(client.CFG.DISCORD_LINK).indexOf(chan) !== -1);
let ping = "";
if (client.CFG.PING_CHANNELS.has(chan) && !client.CFG.PING_EXCLUDED_COMMANDS.has(command.name)) {
ping = user + ", ";
}
client.BAN_EVASION_FLAGS[chan] = !client.BAN_EVASION_FLAGS[chan];
msg = ping
+ msg + " "
+ (client.BAN_EVASION_FLAGS[chan] ? client.CFG.BAN_EVASION_CHARACTER : "");
if (Utils.globalCheck(msg, client.CFG.GLOBAL_BANPHRASES)) {
for (const phrase of client.CFG.GLOBAL_BANPHRASES) {
msg = msg.replace(new RegExp(phrase, "gi"), "[REDACTED]");
}
}
this._reply("send", Utils.safeWrap(msg, msgLimit));
(isDiscord) && setTimeout(() =>
client.DiscordClient.send(user, " used " + client.CFG.COMMAND_PREFIX + cmd + ": " + msg, chan, evt.tags),
500
);
}).bind(evt);
}
if (args.join(" ").length > 400) {
ev | t.reply(":z message too long.");
console.log("CMD REQUEST FAILED - MESSAGE TOO LONG", args.join(" ").length);
}
else | conditional_block | |
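The bot code in the row above gates command execution behind a per-channel global cooldown and a per-user, per-command cooldown, with supermods exempt and read-only commands skipping the global check. A minimal Python sketch of that gating logic; the dictionaries, level cut-off and cooldown value are illustrative stand-ins rather than values from the original bot:

```python
import time

# Illustrative stand-ins for the bot's cooldown bookkeeping.
GLOBAL_COOLDOWNS = {}          # channel -> timestamp until which the channel is on cooldown
USER_COOLDOWNS = {}            # user -> {command -> timestamp}
DEFAULT_GLOBAL_COOLDOWN = 1.5  # seconds (hypothetical default)

def may_execute(channel, user, command, user_level, read_only=False, now=None):
    """Mirror of the global/user cooldown gates seen in the row above."""
    now = time.time() if now is None else now
    # Supermods (very high user level) bypass the cooldowns entirely.
    if user_level >= 1_000_000:
        return True
    # Per-channel global cooldown; read-only commands never reply, so they skip it.
    if not read_only and now <= GLOBAL_COOLDOWNS.get(channel, 0):
        return False
    # Per-user, per-command cooldown.
    if now <= USER_COOLDOWNS.get(user, {}).get(command, 0):
        return False
    # Passing the gates re-arms the global cooldown, as the original does.
    GLOBAL_COOLDOWNS[channel] = now + DEFAULT_GLOBAL_COOLDOWN
    return True

print(may_execute("#forsen", "someuser", "afk", user_level=1))
```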
jpt_location.py | plot_grid
from pycram.plan_failures import PlanFailure
class JPTCostmapLocation(pycram.designators.location_designator.CostmapLocation):
"""Costmap Locations using Joint Probability Trees (JPTs).
JPT costmaps are trained to model the dependency between the robot position relative to the object, the robot's type,
the object's type, the robot torso height, and the grasp parameters.
Solutions to the problem definitions are chosen in such a way that the success probability is highest.
"""
@dataclasses.dataclass
class Location(pycram.designators.location_designator.LocationDesignatorDescription.Location):
pose: Tuple[List[float], List[float]]
reachable_arm: str
torso_height: float
grasp: str
def __init__(self, target, reachable_for=None, reachable_arm=None,
model: Optional[jpt.trees.JPT] = None, path: Optional[str] = None, resolver=None):
"""
Create a JPT Costmap
:param target: The target object
:param reachable_for: The robot to grab the object with
:param reachable_arm: The arm to use
:param model: The JPT model as a loaded tree in memory, either model or path must be set
:param path: The path to the JPT model, either model or path must be set
"""
super().__init__(target, reachable_for, None, reachable_arm, resolver)
# check if arguments are plausible
if (not model and not path) or (model and path):
raise ValueError("Either model or path must be set.")
# set model
if model:
self.model = model
# load model from path
if path:
self.model = jpt.trees.JPT.load(path)
# initialize member for visualized objects
self.visual_ids: List[int] = []
def evidence_from_occupancy_costmap(self) -> List[jpt.variables.LabelAssignment]:
"""
Create a list of boxes that can be used as evidence for a JPT. The list of boxes describes areas where the
robot can stand.
:return: List of evidences describing the found boxes
"""
# create Occupancy costmap for the target object
position, orientation = self.target.get_position_and_orientation()
position = list(position)
position[-1] = 0
ocm = OccupancyCostmap(distance_to_obstacle=0.3, from_ros=False, size=200, resolution=0.02,
origin=(position, orientation))
# ocm.visualize()
# working on a copy of the costmap, since found rectangles are deleted
map = np.copy(ocm.map)
# initialize result
queries = []
origin = np.array([ocm.height/2, ocm.width/2])
# for every index pair (i, j) in the occupancy map
for i in range(0, map.shape[0]):
for j in range(0, map.shape[1]):
# if this index has not been used yet
if map[i][j] > 0:
# get consecutive box
width = ocm._find_consectuive_line((i, j), map)
height = ocm._find_max_box_height((i, j), width, map)
# mark box as used
map[i:i+height, j:j+width] = 0
# calculate the coordinates relative to the object's pose
pose = np.array([i, j])
lower_corner = (pose - origin) * ocm.resolution
upper_corner = (pose - origin + np.array([height, width])) * ocm.resolution
rectangle = np.array([lower_corner, upper_corner]).T
# transform to jpt query
query = self.model.bind({"x": list(rectangle[0]), "y": list(rectangle[1])})
queries.append(query)
return queries
def create_evidence(self, use_success=True) -> jpt.variables.LabelAssignment:
"""
Create evidence usable for JPTs where type and status are set if wanted.
:param use_success: Whether to set success or not
:return: The usable label-assignment
"""
evidence = dict()
evidence["type"] = {self.target.type}
if use_success:
evidence["status"] = {"SUCCEEDED"}
return self.model.bind(evidence)
def sample(self, amount: int = 1) -> np.ndarray:
"""
Sample from the locations that fit the CostMap and are not occupied.
:param amount: The amount of samples to draw
:return: A numpy array containing the samples drawn from the tree.
"""
evidence = self.create_evidence()
locations = self.evidence_from_occupancy_costmap()
solutions = []
for location in locations:
for variable, value in evidence.items():
location[variable] = value
for leaf in self.model.apply(location):
if leaf.probability(location) == 0:
continue
altered_leaf = leaf.conditional_leaf(location)
success_probability = altered_leaf.probability(location)
_, mpe_state = altered_leaf.mpe(self.model.minimal_distances)
location["grasp"] = mpe_state["grasp"]
location["arm"] = mpe_state["arm"]
location["relative torso height"] = mpe_state["relative torso height"]
location["x"] = mpe_state["x"]
location["y"] = mpe_state["y"]
solutions.append((location, success_probability, leaf.prior))
solutions = sorted(solutions, key=lambda x: x[1], reverse=True)
best_solution = solutions[0]
conditional_model = self.model.conditional_jpt(best_solution[0])
# conditional_model.plot(plotvars=conditional_model.variables)
return conditional_model.sample(amount)
def | (self, sample: np.ndarray) -> Location:
"""
Convert a numpy array sampled from the JPT to a costmap-location
:param sample: The drawn sample
:return: The usable costmap-location
"""
sample_dict = {variable.name: value for variable, value in zip(self.model.variables, sample)}
target_x, target_y, target_z = self.target.pose
pose = [target_x + sample_dict["x"], target_y + sample_dict["y"], 0]
angle = np.arctan2(pose[1] - target_y, pose[0] - target_x) + np.pi
orientation = list(tf.transformations.quaternion_from_euler(0, 0, angle, axes="sxyz"))
torso_height = np.clip(target_z - sample_dict["relative torso height"], 0, 0.33)
result = self.Location((pose, orientation), sample_dict["arm"], torso_height, sample_dict["grasp"])
return result
def __iter__(self):
samples = self.sample(200)
for sample in samples:
yield self.sample_to_location(sample)
def visualize(self):
"""
Plot the possible areas to stand in the BulletWorld. The opacity is the probability of success.
"""
evidence = self.create_evidence(use_success=False)
conditional_model = self.model.conditional_jpt(evidence)
for leaf in conditional_model.leaves.values():
success = leaf.distributions["status"].p({"SUCCEEDED"})
if success == 0:
continue
x_intervals = leaf.distributions["x"].cdf.intervals
y_intervals = leaf.distributions["y"].cdf.intervals
x_range = np.array([x_intervals[0].upper, x_intervals[-1].lower])
y_range = np.array([y_intervals[0].upper, y_intervals[-1].lower])
center = np.array([sum(x_range) / 2, sum(y_range) / 2])
visual = pybullet.createVisualShape(pybullet.GEOM_BOX,
halfExtents=[(x_range[1] - x_range[0]) / 2,
(y_range[1] - y_range[0]) / 2, 0.001],
rgbaColor=[1, 0, 0, success],
visualFramePosition=[*center, 0])
self.visual_ids.append(visual)
for id_list in np.array_split(np.array(self.visual_ids), np.ceil(len(self.visual_ids) / 127)):
# Dummy parameters since these are needed to spawn visual shapes as a multibody.
link_poses = [[0, 0, 0] for c in id_list]
link_orientations = [[0, 0, 0, 1] for c in id_list]
link_masses = [1.0 for c in id_list]
link_parent = [0 for c in id_list]
link_joints = [pybullet.JOINT_FIXED for c in id_list]
link_collision = [-1 for c in id_list]
link_joint_axis = [[1, 0, 0] for c in id_list]
# The position at which the multibody will be spawned. Offset such that
# the origin refers to the centre of the costmap.
origin_pose = self.target.get_position_and_orientation()
base_position = list(origin_pose[0])
base_position[2] = 0
map_obj = pybullet.createMultiBody(baseVisualShapeIndex=-1, linkVisualShapeIndices=id_list,
basePosition=base_position, baseOrientation | sample_to_location | identifier_name |
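sample_to_location in the row above turns a sampled (x, y) offset into a standing pose that faces the target object and a clamped torso height. A self-contained sketch of the same arithmetic with made-up numbers; the yaw-only quaternion is written out by hand instead of calling tf.transformations.quaternion_from_euler:

```python
import numpy as np

# Made-up target position and sample, standing in for self.target.pose and a JPT sample.
target = np.array([1.0, 2.0, 0.95])
sample = {"x": 0.4, "y": -0.3, "relative torso height": 0.6}

pose = [target[0] + sample["x"], target[1] + sample["y"], 0.0]
# Face back towards the target (note the + pi, as in the original).
angle = np.arctan2(pose[1] - target[1], pose[0] - target[0]) + np.pi
# Yaw-only quaternion (x, y, z, w), equivalent to quaternion_from_euler(0, 0, angle).
orientation = [0.0, 0.0, np.sin(angle / 2), np.cos(angle / 2)]
# Torso height is clamped to the robot's feasible range.
torso_height = np.clip(target[2] - sample["relative torso height"], 0, 0.33)

print(pose, orientation, torso_height)
```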
jpt_location.py | plot_grid
from pycram.plan_failures import PlanFailure
class JPTCostmapLocation(pycram.designators.location_designator.CostmapLocation):
"""Costmap Locations using Joint Probability Trees (JPTs).
JPT costmaps are trained to model the dependency between the robot position relative to the object, the robot's type,
the object's type, the robot torso height, and the grasp parameters.
Solutions to the problem definitions are chosen in such a way that the success probability is highest.
"""
@dataclasses.dataclass
class Location(pycram.designators.location_designator.LocationDesignatorDescription.Location):
pose: Tuple[List[float], List[float]]
reachable_arm: str
torso_height: float
grasp: str
def __init__(self, target, reachable_for=None, reachable_arm=None,
model: Optional[jpt.trees.JPT] = None, path: Optional[str] = None, resolver=None):
"""
Create a JPT Costmap
:param target: The target object
:param reachable_for: The robot to grab the object with
:param reachable_arm: The arm to use
:param model: The JPT model as a loaded tree in memory, either model or path must be set
:param path: The path to the JPT model, either model or path must be set
"""
super().__init__(target, reachable_for, None, reachable_arm, resolver)
# check if arguments are plausible
if (not model and not path) or (model and path):
raise ValueError("Either model or path must be set.")
# set model
if model:
|
# load model from path
if path:
self.model = jpt.trees.JPT.load(path)
# initialize member for visualized objects
self.visual_ids: List[int] = []
def evidence_from_occupancy_costmap(self) -> List[jpt.variables.LabelAssignment]:
"""
Create a list of boxes that can be used as evidence for a JPT. The list of boxes describes areas where the
robot can stand.
:return: List of evidences describing the found boxes
"""
# create Occupancy costmap for the target object
position, orientation = self.target.get_position_and_orientation()
position = list(position)
position[-1] = 0
ocm = OccupancyCostmap(distance_to_obstacle=0.3, from_ros=False, size=200, resolution=0.02,
origin=(position, orientation))
# ocm.visualize()
# working on a copy of the costmap, since found rectangles are deleted
map = np.copy(ocm.map)
# initialize result
queries = []
origin = np.array([ocm.height/2, ocm.width/2])
# for every index pair (i, j) in the occupancy map
for i in range(0, map.shape[0]):
for j in range(0, map.shape[1]):
# if this index has not been used yet
if map[i][j] > 0:
# get consecutive box
width = ocm._find_consectuive_line((i, j), map)
height = ocm._find_max_box_height((i, j), width, map)
# mark box as used
map[i:i+height, j:j+width] = 0
# calculate the coordinates relative to the object's pose
pose = np.array([i, j])
lower_corner = (pose - origin) * ocm.resolution
upper_corner = (pose - origin + np.array([height, width])) * ocm.resolution
rectangle = np.array([lower_corner, upper_corner]).T
# transform to jpt query
query = self.model.bind({"x": list(rectangle[0]), "y": list(rectangle[1])})
queries.append(query)
return queries
def create_evidence(self, use_success=True) -> jpt.variables.LabelAssignment:
"""
Create evidence usable for JPTs where type and status are set if wanted.
:param use_success: Whether to set success or not
:return: The usable label-assignment
"""
evidence = dict()
evidence["type"] = {self.target.type}
if use_success:
evidence["status"] = {"SUCCEEDED"}
return self.model.bind(evidence)
def sample(self, amount: int = 1) -> np.ndarray:
"""
Sample from the locations that fit the CostMap and are not occupied.
:param amount: The amount of samples to draw
:return: A numpy array containing the samples drawn from the tree.
"""
evidence = self.create_evidence()
locations = self.evidence_from_occupancy_costmap()
solutions = []
for location in locations:
for variable, value in evidence.items():
location[variable] = value
for leaf in self.model.apply(location):
if leaf.probability(location) == 0:
continue
altered_leaf = leaf.conditional_leaf(location)
success_probability = altered_leaf.probability(location)
_, mpe_state = altered_leaf.mpe(self.model.minimal_distances)
location["grasp"] = mpe_state["grasp"]
location["arm"] = mpe_state["arm"]
location["relative torso height"] = mpe_state["relative torso height"]
location["x"] = mpe_state["x"]
location["y"] = mpe_state["y"]
solutions.append((location, success_probability, leaf.prior))
solutions = sorted(solutions, key=lambda x: x[1], reverse=True)
best_solution = solutions[0]
conditional_model = self.model.conditional_jpt(best_solution[0])
# conditional_model.plot(plotvars=conditional_model.variables)
return conditional_model.sample(amount)
def sample_to_location(self, sample: np.ndarray) -> Location:
"""
Convert a numpy array sampled from the JPT to a costmap-location
:param sample: The drawn sample
:return: The usable costmap-location
"""
sample_dict = {variable.name: value for variable, value in zip(self.model.variables, sample)}
target_x, target_y, target_z = self.target.pose
pose = [target_x + sample_dict["x"], target_y + sample_dict["y"], 0]
angle = np.arctan2(pose[1] - target_y, pose[0] - target_x) + np.pi
orientation = list(tf.transformations.quaternion_from_euler(0, 0, angle, axes="sxyz"))
torso_height = np.clip(target_z - sample_dict["relative torso height"], 0, 0.33)
result = self.Location((pose, orientation), sample_dict["arm"], torso_height, sample_dict["grasp"])
return result
def __iter__(self):
samples = self.sample(200)
for sample in samples:
yield self.sample_to_location(sample)
def visualize(self):
"""
Plot the possible areas to stand in the BulletWorld. The opacity is the probability of success.
"""
evidence = self.create_evidence(use_success=False)
conditional_model = self.model.conditional_jpt(evidence)
for leaf in conditional_model.leaves.values():
success = leaf.distributions["status"].p({"SUCCEEDED"})
if success == 0:
continue
x_intervals = leaf.distributions["x"].cdf.intervals
y_intervals = leaf.distributions["y"].cdf.intervals
x_range = np.array([x_intervals[0].upper, x_intervals[-1].lower])
y_range = np.array([y_intervals[0].upper, y_intervals[-1].lower])
center = np.array([sum(x_range) / 2, sum(y_range) / 2])
visual = pybullet.createVisualShape(pybullet.GEOM_BOX,
halfExtents=[(x_range[1] - x_range[0]) / 2,
(y_range[1] - y_range[0]) / 2, 0.001],
rgbaColor=[1, 0, 0, success],
visualFramePosition=[*center, 0])
self.visual_ids.append(visual)
for id_list in np.array_split(np.array(self.visual_ids), np.ceil(len(self.visual_ids) / 127)):
# Dummy parameters since these are needed to spawn visual shapes as a multibody.
link_poses = [[0, 0, 0] for c in id_list]
link_orientations = [[0, 0, 0, 1] for c in id_list]
link_masses = [1.0 for c in id_list]
link_parent = [0 for c in id_list]
link_joints = [pybullet.JOINT_FIXED for c in id_list]
link_collision = [-1 for c in id_list]
link_joint_axis = [[1, 0, 0] for c in id_list]
# The position at which the multibody will be spawned. Offset such that
# the origin refers to the centre of the costmap.
origin_pose = self.target.get_position_and_orientation()
base_position = list(origin_pose[0])
base_position[2] = 0
map_obj = pybullet.createMultiBody(baseVisualShapeIndex=-1, linkVisualShapeIndices=id_list,
basePosition=base_position, baseOrientation= | self.model = model | conditional_block |
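evidence_from_occupancy_costmap in the row above decomposes the free space of an occupancy grid into axis-aligned boxes: it grows a run of free cells to the right, extends that run downwards as far as possible, marks the cells as used, and converts the box corners into metric coordinates around the map centre. A simplified, self-contained sketch of that greedy decomposition; the two helpers are stand-ins for the private OccupancyCostmap methods called in the original:

```python
import numpy as np

def find_consecutive_line(start, grid):
    """Length of the run of free cells to the right of start."""
    i, j = start
    width = 0
    while j + width < grid.shape[1] and grid[i, j + width] > 0:
        width += 1
    return width

def find_max_box_height(start, width, grid):
    """How many rows the full-width run can be extended downwards."""
    i, j = start
    height = 0
    while i + height < grid.shape[0] and np.all(grid[i + height, j:j + width] > 0):
        height += 1
    return height

def boxes_from_grid(grid, resolution=0.02):
    """Greedily cover the free cells (value > 0) with axis-aligned boxes."""
    grid = np.copy(grid)                      # work on a copy, as the original does
    origin = np.array(grid.shape) / 2
    boxes = []
    for i in range(grid.shape[0]):
        for j in range(grid.shape[1]):
            if grid[i, j] > 0:
                width = find_consecutive_line((i, j), grid)
                height = find_max_box_height((i, j), width, grid)
                grid[i:i + height, j:j + width] = 0   # mark the box as used
                lower = (np.array([i, j]) - origin) * resolution
                upper = (np.array([i, j]) - origin + np.array([height, width])) * resolution
                boxes.append((lower, upper))
    return boxes

demo = np.zeros((6, 6))
demo[1:4, 1:5] = 1                            # a single free rectangle
print(boxes_from_grid(demo))                  # one box covering it
```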
jpt_location.py | plot_grid
from pycram.plan_failures import PlanFailure
class JPTCostmapLocation(pycram.designators.location_designator.CostmapLocation):
"""Costmap Locations using Joint Probability Trees (JPTs).
JPT costmaps are trained to model the dependency between the robot position relative to the object, the robot's type,
the object's type, the robot torso height, and the grasp parameters.
Solutions to the problem definitions are chosen in such a way that the success probability is highest.
"""
@dataclasses.dataclass
class Location(pycram.designators.location_designator.LocationDesignatorDescription.Location):
pose: Tuple[List[float], List[float]]
reachable_arm: str
torso_height: float
grasp: str
def __init__(self, target, reachable_for=None, reachable_arm=None,
model: Optional[jpt.trees.JPT] = None, path: Optional[str] = None, resolver=None):
"""
Create a JPT Costmap
:param target: The target object
:param reachable_for: The robot to grab the object with
:param reachable_arm: The arm to use
:param model: The JPT model as a loaded tree in memory, either model or path must be set
:param path: The path to the JPT model, either model or path must be set
"""
super().__init__(target, reachable_for, None, reachable_arm, resolver)
# check if arguments are plausible
if (not model and not path) or (model and path):
raise ValueError("Either model or path must be set.")
# set model
if model:
self.model = model
# load model from path
if path:
self.model = jpt.trees.JPT.load(path)
# initialize member for visualized objects
self.visual_ids: List[int] = []
def evidence_from_occupancy_costmap(self) -> List[jpt.variables.LabelAssignment]:
"""
Create a list of boxes that can be used as evidence for a JPT. The list of boxes describes areas where the
robot can stand.
:return: List of evidences describing the found boxes
"""
# create Occupancy costmap for the target object
position, orientation = self.target.get_position_and_orientation()
position = list(position)
position[-1] = 0
ocm = OccupancyCostmap(distance_to_obstacle=0.3, from_ros=False, size=200, resolution=0.02,
origin=(position, orientation))
# ocm.visualize()
# working on a copy of the costmap, since found rectangles are deleted
map = np.copy(ocm.map)
# initialize result
queries = []
origin = np.array([ocm.height/2, ocm.width/2])
# for every index pair (i, j) in the occupancy map
for i in range(0, map.shape[0]):
for j in range(0, map.shape[1]):
# if this index has not been used yet
if map[i][j] > 0:
# get consecutive box
width = ocm._find_consectuive_line((i, j), map)
height = ocm._find_max_box_height((i, j), width, map)
# mark box as used
map[i:i+height, j:j+width] = 0
# calculate the coordinates relative to the object's pose
pose = np.array([i, j])
lower_corner = (pose - origin) * ocm.resolution
upper_corner = (pose - origin + np.array([height, width])) * ocm.resolution
rectangle = np.array([lower_corner, upper_corner]).T
# transform to jpt query
query = self.model.bind({"x": list(rectangle[0]), "y": list(rectangle[1])})
queries.append(query)
return queries
def create_evidence(self, use_success=True) -> jpt.variables.LabelAssignment:
|
def sample(self, amount: int = 1) -> np.ndarray:
"""
Sample from the locations that fit the CostMap and are not occupied.
:param amount: The amount of samples to draw
:return: A numpy array containing the samples drawn from the tree.
"""
evidence = self.create_evidence()
locations = self.evidence_from_occupancy_costmap()
solutions = []
for location in locations:
for variable, value in evidence.items():
location[variable] = value
for leaf in self.model.apply(location):
if leaf.probability(location) == 0:
continue
altered_leaf = leaf.conditional_leaf(location)
success_probability = altered_leaf.probability(location)
_, mpe_state = altered_leaf.mpe(self.model.minimal_distances)
location["grasp"] = mpe_state["grasp"]
location["arm"] = mpe_state["arm"]
location["relative torso height"] = mpe_state["relative torso height"]
location["x"] = mpe_state["x"]
location["y"] = mpe_state["y"]
solutions.append((location, success_probability, leaf.prior))
solutions = sorted(solutions, key=lambda x: x[1], reverse=True)
best_solution = solutions[0]
conditional_model = self.model.conditional_jpt(best_solution[0])
# conditional_model.plot(plotvars=conditional_model.variables)
return conditional_model.sample(amount)
def sample_to_location(self, sample: np.ndarray) -> Location:
"""
Convert a numpy array sampled from the JPT to a costmap-location
:param sample: The drawn sample
:return: The usable costmap-location
"""
sample_dict = {variable.name: value for variable, value in zip(self.model.variables, sample)}
target_x, target_y, target_z = self.target.pose
pose = [target_x + sample_dict["x"], target_y + sample_dict["y"], 0]
angle = np.arctan2(pose[1] - target_y, pose[0] - target_x) + np.pi
orientation = list(tf.transformations.quaternion_from_euler(0, 0, angle, axes="sxyz"))
torso_height = np.clip(target_z - sample_dict["relative torso height"], 0, 0.33)
result = self.Location((pose, orientation), sample_dict["arm"], torso_height, sample_dict["grasp"])
return result
def __iter__(self):
samples = self.sample(200)
for sample in samples:
yield self.sample_to_location(sample)
def visualize(self):
"""
Plot the possible areas to stand in the BulletWorld. The opacity is the probability of success.
"""
evidence = self.create_evidence(use_success=False)
conditional_model = self.model.conditional_jpt(evidence)
for leaf in conditional_model.leaves.values():
success = leaf.distributions["status"].p({"SUCCEEDED"})
if success == 0:
continue
x_intervals = leaf.distributions["x"].cdf.intervals
y_intervals = leaf.distributions["y"].cdf.intervals
x_range = np.array([x_intervals[0].upper, x_intervals[-1].lower])
y_range = np.array([y_intervals[0].upper, y_intervals[-1].lower])
center = np.array([sum(x_range) / 2, sum(y_range) / 2])
visual = pybullet.createVisualShape(pybullet.GEOM_BOX,
halfExtents=[(x_range[1] - x_range[0]) / 2,
(y_range[1] - y_range[0]) / 2, 0.001],
rgbaColor=[1, 0, 0, success],
visualFramePosition=[*center, 0])
self.visual_ids.append(visual)
for id_list in np.array_split(np.array(self.visual_ids), np.ceil(len(self.visual_ids) / 127)):
# Dummy parameters since these are needed to spawn visual shapes as a multibody.
link_poses = [[0, 0, 0] for c in id_list]
link_orientations = [[0, 0, 0, 1] for c in id_list]
link_masses = [1.0 for c in id_list]
link_parent = [0 for c in id_list]
link_joints = [pybullet.JOINT_FIXED for c in id_list]
link_collision = [-1 for c in id_list]
link_joint_axis = [[1, 0, 0] for c in id_list]
# The position at which the multibody will be spawned. Offset such that
# the origin refers to the centre of the costmap.
origin_pose = self.target.get_position_and_orientation()
base_position = list(origin_pose[0])
base_position[2] = 0
map_obj = pybullet.createMultiBody(baseVisualShapeIndex=-1, linkVisualShapeIndices=id_list,
basePosition=base_position, baseOrientation | """
Create evidence usable for JPTs where type and status are set if wanted.
:param use_success: Whether to set success or not
:return: The usable label-assignment
"""
evidence = dict()
evidence["type"] = {self.target.type}
if use_success:
evidence["status"] = {"SUCCEEDED"}
return self.model.bind(evidence) | identifier_body |
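visualize() in the rows above creates one translucent box per JPT leaf and then attaches the visual shapes to multibodies in batches, splitting the id list so that no batch holds more than 127 shapes. A tiny sketch of that batching step; the ids are made up, and 127 simply mirrors the constant used above (presumably a per-multibody link limit in pybullet):

```python
import numpy as np

visual_ids = np.arange(300)   # made-up ids standing in for the created visual shapes
n_batches = int(np.ceil(len(visual_ids) / 127))
for id_list in np.array_split(visual_ids, n_batches):
    print(len(id_list))       # 100, 100, 100 -- never more than 127 per batch
```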
jpt_location.py | import pycram.designators.location_designator
import pycram.task
from pycram.costmaps import OccupancyCostmap, plot_grid
from pycram.plan_failures import PlanFailure
class JPTCostmapLocation(pycram.designators.location_designator.CostmapLocation):
"""Costmap Locations using Joint Probability Trees (JPTs).
JPT costmaps are trained to model the dependency between the robot position relative to the object, the robot's type,
the object's type, the robot torso height, and the grasp parameters.
Solutions to the problem definitions are chosen in such a way that the success probability is highest.
"""
@dataclasses.dataclass
class Location(pycram.designators.location_designator.LocationDesignatorDescription.Location):
pose: Tuple[List[float], List[float]]
reachable_arm: str
torso_height: float
grasp: str
def __init__(self, target, reachable_for=None, reachable_arm=None,
model: Optional[jpt.trees.JPT] = None, path: Optional[str] = None, resolver=None):
"""
Create a JPT Costmap
:param target: The target object
:param reachable_for: The robot to grab the object with
:param reachable_arm: The arm to use
:param model: The JPT model as a loaded tree in memory, either model or path must be set
:param path: The path to the JPT model, either model or path must be set
"""
super().__init__(target, reachable_for, None, reachable_arm, resolver)
# check if arguments are plausible
if (not model and not path) or (model and path):
raise ValueError("Either model or path must be set.")
# set model
if model:
self.model = model
# load model from path
if path:
self.model = jpt.trees.JPT.load(path)
# initialize member for visualized objects
self.visual_ids: List[int] = []
def evidence_from_occupancy_costmap(self) -> List[jpt.variables.LabelAssignment]:
"""
Create a list of boxes that can be used as evidence for a JPT. The list of boxes describes areas where the
robot can stand.
:return: List of evidences describing the found boxes
"""
# create Occupancy costmap for the target object
position, orientation = self.target.get_position_and_orientation()
position = list(position)
position[-1] = 0
ocm = OccupancyCostmap(distance_to_obstacle=0.3, from_ros=False, size=200, resolution=0.02,
origin=(position, orientation))
# ocm.visualize()
# working on a copy of the costmap, since found rectangles are deleted
map = np.copy(ocm.map)
# initialize result
queries = []
origin = np.array([ocm.height/2, ocm.width/2])
# for every index pair (i, j) in the occupancy map
for i in range(0, map.shape[0]):
for j in range(0, map.shape[1]):
# if this index has not been used yet
if map[i][j] > 0:
# get consecutive box
width = ocm._find_consectuive_line((i, j), map)
height = ocm._find_max_box_height((i, j), width, map)
# mark box as used
map[i:i+height, j:j+width] = 0
# calculate the coordinates relative to the object's pose
pose = np.array([i, j])
lower_corner = (pose - origin) * ocm.resolution
upper_corner = (pose - origin + np.array([height, width])) * ocm.resolution
rectangle = np.array([lower_corner, upper_corner]).T
# transform to jpt query
query = self.model.bind({"x": list(rectangle[0]), "y": list(rectangle[1])})
queries.append(query)
return queries
def create_evidence(self, use_success=True) -> jpt.variables.LabelAssignment:
"""
Create evidence usable for JPTs where type and status are set if wanted.
:param use_success: Whether to set success or not
:return: The usable label-assignment
"""
evidence = dict()
evidence["type"] = {self.target.type}
if use_success:
evidence["status"] = {"SUCCEEDED"}
return self.model.bind(evidence)
def sample(self, amount: int = 1) -> np.ndarray:
"""
Sample from the locations that fit the CostMap and are not occupied.
:param amount: The amount of samples to draw
:return: A numpy array containing the samples drawn from the tree.
"""
evidence = self.create_evidence()
locations = self.evidence_from_occupancy_costmap()
solutions = []
for location in locations:
for variable, value in evidence.items():
location[variable] = value
for leaf in self.model.apply(location):
if leaf.probability(location) == 0:
continue
altered_leaf = leaf.conditional_leaf(location)
success_probability = altered_leaf.probability(location)
_, mpe_state = altered_leaf.mpe(self.model.minimal_distances)
location["grasp"] = mpe_state["grasp"]
location["arm"] = mpe_state["arm"]
location["relative torso height"] = mpe_state["relative torso height"]
location["x"] = mpe_state["x"]
location["y"] = mpe_state["y"]
solutions.append((location, success_probability, leaf.prior))
solutions = sorted(solutions, key=lambda x: x[1], reverse=True)
best_solution = solutions[0]
conditional_model = self.model.conditional_jpt(best_solution[0])
# conditional_model.plot(plotvars=conditional_model.variables)
return conditional_model.sample(amount)
def sample_to_location(self, sample: np.ndarray) -> Location:
"""
Convert a numpy array sampled from the JPT to a costmap-location
:param sample: The drawn sample
:return: The usable costmap-location
"""
sample_dict = {variable.name: value for variable, value in zip(self.model.variables, sample)}
target_x, target_y, target_z = self.target.pose
pose = [target_x + sample_dict["x"], target_y + sample_dict["y"], 0]
angle = np.arctan2(pose[1] - target_y, pose[0] - target_x) + np.pi
orientation = list(tf.transformations.quaternion_from_euler(0, 0, angle, axes="sxyz"))
torso_height = np.clip(target_z - sample_dict["relative torso height"], 0, 0.33)
result = self.Location((pose, orientation), sample_dict["arm"], torso_height, sample_dict["grasp"])
return result
def __iter__(self):
samples = self.sample(200)
for sample in samples:
yield self.sample_to_location(sample)
def visualize(self):
"""
Plot the possible areas to stand in the BulletWorld. The opacity is the probability of success.
"""
evidence = self.create_evidence(use_success=False)
conditional_model = self.model.conditional_jpt(evidence)
for leaf in conditional_model.leaves.values():
success = leaf.distributions["status"].p({"SUCCEEDED"})
if success == 0:
continue
x_intervals = leaf.distributions["x"].cdf.intervals
y_intervals = leaf.distributions["y"].cdf.intervals
x_range = np.array([x_intervals[0].upper, x_intervals[-1].lower])
y_range = np.array([y_intervals[0].upper, y_intervals[-1].lower])
center = np.array([sum(x_range) / 2, sum(y_range) / 2])
visual = pybullet.createVisualShape(pybullet.GEOM_BOX,
halfExtents=[(x_range[1] - x_range[0]) / 2,
(y_range[1] - y_range[0]) / 2, 0.001],
rgbaColor=[1, 0, 0, success],
visualFramePosition=[*center, 0])
self.visual_ids.append(visual)
for id_list in np.array_split(np.array(self.visual_ids), np.ceil(len(self.visual_ids) / 127)):
# Dummy parameters since these are needed to spawn visual shapes as a multibody.
link_poses = [[0, 0, 0] for c in id_list]
link_orientations = [[0, 0, 0, 1] for c in id_list]
link_masses = [1.0 for c in id_list]
link_parent = [0 for c in id_list]
link_joints = [pybullet.JOINT_FIXED for c in id_list]
link_collision = [-1 for c in id_list]
link_joint_axis = [[1, 0, 0] for c in id_list]
# The position at which the multibody will be spawned. Offset such that
# the origin refers to the centre of the costmap.
origin_pose = self.target.get_position_and_orientation()
base_position = list(origin_pose[0])
base_position | import numpy as np
import pybullet
import tf
| random_line_split | |
main.go | BookService := orderbook.NewService(db, addressRepository, liquidityPoolRepository, contextLogger)
return &Extender{
Metrics: metrics.New(),
env: env,
nodeApi: nodeApi,
blockService: block.NewBlockService(blockRepository, validatorRepository, broadcastService),
eventService: eventService,
blockRepository: blockRepository,
validatorService: validatorService,
transactionService: transaction.NewService(env, transactionRepository, addressRepository, validatorRepository, coinRepository, coinService, broadcastService, contextLogger, validatorService.GetUnbondSaverJobChannel(), liquidityPoolRepository, validatorService.GetMoveStakeJobChannel()),
addressService: addressService,
validatorRepository: validatorRepository,
balanceService: balanceService,
coinService: coinService,
broadcastService: broadcastService,
orderBookService: orderBookService,
chasingMode: false,
currentNodeHeight: 0,
startBlockHeight: nodeStatus.InitialHeight + 1,
log: contextLogger,
lpSnapshotChannel: make(chan *api_pb.BlockResponse),
lpWorkerChannel: make(chan *api_pb.BlockResponse),
orderBookChannel: make(chan *api_pb.BlockResponse),
}
}
func (ext *Extender) GetInfo() {
fmt.Printf("%s v%s\n", "Minter Explorer Extender", Version)
}
func (ext *Extender) Run() {
//check connections to node
_, err := ext.nodeApi.Status()
if err == nil {
err = ext.blockRepository.DeleteLastBlockData()
}
if err != nil {
ext.log.Fatal(err)
}
var height uint64
// ----- Workers -----
ext.runWorkers()
lastExplorerBlock, err := ext.blockRepository.GetLastFromDB()
if err != nil && err != pg.ErrNoRows {
ext.log.Fatal(err)
}
if lastExplorerBlock != nil {
height = lastExplorerBlock.ID + 1
ext.blockService.SetBlockCache(lastExplorerBlock)
} else {
height = ext.startBlockHeight
}
for {
eet := ExtenderElapsedTime{
Height: height,
GettingBlock: 0,
GettingEvents: 0,
HandleCoinsFromTransactions: 0,
HandleAddressesFromResponses: 0,
HandleBlockResponse: 0,
Total: 0,
}
start := time.Now()
//ext.findOutChasingMode(height)
//Pulling block data
countStart := time.Now()
blockResponse, err := ext.nodeApi.BlockExtended(height, true, true)
if err != nil {
grpcErr, ok := status.FromError(err)
if !ok {
ext.log.Error(err)
time.Sleep(2 * time.Second)
continue
}
if grpcErr.Message() == "Block not found" || grpcErr.Message() == "Block results not found" {
time.Sleep(2 * time.Second)
continue
}
ext.log.Fatal(err)
}
eet.GettingBlock = time.Since(countStart)
countStart = time.Now()
ext.handleCoinsFromTransactions(blockResponse)
eet.HandleCoinsFromTransactions = time.Since(countStart)
countStart = time.Now()
ext.handleAddressesFromResponses(blockResponse)
eet.HandleAddressesFromResponses = time.Since(countStart)
countStart = time.Now()
ext.handleBlockResponse(blockResponse)
eet.HandleBlockResponse = time.Since(countStart)
ext.balanceService.UpdateChannel() <- blockResponse
go ext.handleEventResponse(height, blockResponse)
if len(blockResponse.Transactions) > 0 {
ext.orderBookChannel <- blockResponse
}
//ext.validatorService.GetUpdateStakesJobChannel() <- height
ext.validatorService.GetUpdateValidatorsJobChannel() <- height
ext.validatorService.GetClearJobChannel() <- height
eet.Total = time.Since(start)
ext.printSpentTimeLog(eet)
height++
}
}
func (ext *Extender) runWorkers() {
// Addresses
for w := 1; w <= ext.env.WrkSaveAddressesCount; w++ {
go ext.addressService.SaveAddressesWorker(ext.addressService.GetSaveAddressesJobChannel())
}
// Transactions
for w := 1; w <= ext.env.WrkSaveTxsCount; w++ {
go ext.transactionService.SaveTransactionsWorker(ext.transactionService.GetSaveTxJobChannel())
}
for w := 1; w <= ext.env.WrkSaveTxsOutputCount; w++ {
go ext.transactionService.SaveTransactionsOutputWorker(ext.transactionService.GetSaveTxsOutputJobChannel())
}
for w := 1; w <= ext.env.WrkSaveInvTxsCount; w++ {
go ext.transactionService.SaveInvalidTransactionsWorker(ext.transactionService.GetSaveInvalidTxsJobChannel())
}
go ext.transactionService.UpdateTxsIndexWorker()
// Validators
for w := 1; w <= ext.env.WrkSaveValidatorTxsCount; w++ {
go ext.transactionService.SaveTxValidatorWorker(ext.transactionService.GetSaveTxValidatorJobChannel())
}
go ext.validatorService.UpdateValidatorsWorker(ext.validatorService.GetUpdateValidatorsJobChannel())
//go ext.validatorService.UpdateStakesWorker(ext.validatorService.GetUpdateStakesJobChannel())
// Events
for w := 1; w <= ext.env.WrkSaveRewardsCount; w++ {
go ext.eventService.SaveRewardsWorker(ext.eventService.GetSaveRewardsJobChannel())
}
for w := 1; w <= ext.env.WrkSaveSlashesCount; w++ {
go ext.eventService.SaveSlashesWorker(ext.eventService.GetSaveSlashesJobChannel())
}
// Balances
go ext.balanceService.BalanceManager()
//Coins
go ext.coinService.UpdateCoinsInfoFromTxsWorker(ext.coinService.GetUpdateCoinsFromTxsJobChannel())
go ext.coinService.UpdateCoinsInfoFromCoinsMap(ext.coinService.GetUpdateCoinsFromCoinsMapJobChannel())
go ext.coinService.UpdateHubInfoWorker()
//Unbonds
go ext.validatorService.UnbondSaverWorker(ext.validatorService.GetUnbondSaverJobChannel())
//Move Stake
go ext.validatorService.MoveStakeWorker(ext.validatorService.GetMoveStakeJobChannel())
go ext.validatorService.ClearMoveStakeAndUnbondWorker(ext.validatorService.GetClearJobChannel())
//OrderBook
go ext.orderBookService.OrderBookWorker(ext.orderBookChannel)
go ext.orderBookService.UpdateOrderBookWorker(ext.orderBookService.UpdateOrderChannel())
//Broadcast
go ext.broadcastService.Manager()
}
func (ext *Extender) handleAddressesFromResponses(blockResponse *api_pb.BlockResponse) {
err := ext.addressService.SaveAddressesFromResponses(blockResponse)
if err != nil {
ext.log.Panic(err)
}
}
func (ext *Extender) handleBlockResponse(response *api_pb.BlockResponse) {
// Save validators if not exist
err := ext.validatorService.HandleBlockResponse(response)
if err != nil {
ext.log.Panic(err)
}
// Save block
err = ext.blockService.HandleBlockResponse(response)
if err != nil {
ext.log.Panic(err)
}
ext.linkBlockValidator(response)
//first block doesn't have validators
if response.TransactionCount > 0 {
ext.handleTransactions(response)
}
}
func (ext *Extender) handleCoinsFromTransactions(block *api_pb.BlockResponse) {
if len(block.Transactions) == 0 {
return
}
err := ext.coinService.HandleCoinsFromBlock(block)
if err != nil {
ext.log.Fatal(err)
}
}
func (ext *Extender) handleTransactions(response *api_pb.BlockResponse) {
chunksCount := int(math.Ceil(float64(len(response.Transactions)) / float64(ext.env.TxChunkSize)))
for i := 0; i < chunksCount; i++ {
start := ext.env.TxChunkSize * i
end := start + ext.env.TxChunkSize
if end > len(response.Transactions) {
end = len(response.Transactions)
}
layout := "2006-01-02T15:04:05Z"
blockTime, err := time.Parse(layout, response.Time)
if err != nil {
ext.log.Panic(err)
}
ext.saveTransactions(response.Height, blockTime, response.Transactions[start:end])
}
}
func (ext *Extender) handleEventResponse(blockHeight uint64, response *api_pb.BlockResponse) {
if len(response.Events) > 0 {
//Save events
err := ext.eventService.HandleEventResponse(blockHeight, response)
if err != nil {
ext.log.Fatal(err)
}
}
}
func (ext *Extender) linkBlockValidator(response *api_pb.BlockResponse) | {
if response.Height == 1 {
return
}
var links []*models.BlockValidator
for _, v := range response.Validators {
vId, err := ext.validatorRepository.FindIdByPk(helpers.RemovePrefix(v.PublicKey))
if err != nil {
ext.log.Error(err)
}
helpers.HandleError(err)
link := models.BlockValidator{
ValidatorID: uint64(vId),
BlockID: response.Height,
Signed: v.Signed,
}
links = append(links, &link)
}
err := ext.blockRepository.LinkWithValidators(links)
if err != nil { | identifier_body | |
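handleTransactions in the row above processes a block's transactions in TxChunkSize-sized slices: the number of chunks is a ceiling division, and the end index of the last slice is clamped to the number of transactions. A small sketch of the same bounds arithmetic with made-up sizes:

```python
import math

def chunk_bounds(total, chunk_size):
    """Yield (start, end) slice bounds, clamping the last chunk as the Go code does."""
    chunks = math.ceil(total / chunk_size)
    for i in range(chunks):
        start = chunk_size * i
        end = min(start + chunk_size, total)
        yield start, end

print(list(chunk_bounds(total=10, chunk_size=4)))   # [(0, 4), (4, 8), (8, 10)]
```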
main.go | .Now()
return ctx, nil
}
func (eh eventHook) AfterQuery(ctx context.Context, event *pg.QueryEvent) error {
critical := time.Millisecond * 500
result := time.Duration(0)
if event.Stash != nil {
if v, ok := event.Stash["query_time"]; ok {
result = time.Now().Sub(v.(time.Time))
}
} | if result > critical {
bigQueryLog, err := os.OpenFile("big_query.log", os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
eh.log.Error("error opening file: %v", err)
}
// don't forget to close it
defer bigQueryLog.Close()
eh.log.SetReportCaller(false)
eh.log.SetFormatter(&logrus.JSONFormatter{})
eh.log.SetOutput(bigQueryLog)
q, err := event.UnformattedQuery()
if err != nil {
eh.log.Error(err)
}
r := regexp.MustCompile("\\s+")
replace := r.ReplaceAllString(fmt.Sprintf("%v", string(q)), " ")
eh.log.WithFields(logrus.Fields{
"query": strings.TrimSpace(replace),
"time": fmt.Sprintf("%s", result),
}).Error("DB query time is too high")
}
return nil
}
func NewExtender(env *env.ExtenderEnvironment) *Extender {
//Init Logger
logger := logrus.New()
logger.SetFormatter(&logrus.JSONFormatter{})
logger.SetOutput(os.Stdout)
logger.SetReportCaller(true)
if env.Debug {
logger.SetFormatter(&logrus.TextFormatter{
DisableColors: false,
FullTimestamp: true,
})
} else {
logger.SetFormatter(&logrus.JSONFormatter{})
logger.SetLevel(logrus.WarnLevel)
}
contextLogger := logger.WithFields(logrus.Fields{
"version": Version,
"app": "Minter Explorer Extender",
})
//Init DB
pgOptions := &pg.Options{
Addr: fmt.Sprintf("%s:%s", env.DbHost, env.DbPort),
User: env.DbUser,
Password: env.DbPassword,
Database: env.DbName,
}
if os.Getenv("POSTGRES_SSL_ENABLED") == "true" {
pgOptions.TLSConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
//hookImpl := eventHook{
// log: logrus.New(),
// beforeTime: time.Now(),
//}
db := pg.Connect(pgOptions)
//db.AddQueryHook(hookImpl)
uploader := genesisUploader.New(genesisEnv.Config{
Debug: false,
PostgresHost: env.DbHost,
PostgresPort: env.DbPort,
PostgresDB: env.DbName,
PostgresUser: env.DbUser,
PostgresPassword: env.DbPassword,
PostgresSSLEnabled: os.Getenv("POSTGRES_SSL_ENABLED") == "true",
MinterBaseCoin: env.BaseCoin,
NodeGrpc: env.NodeApi,
AddressChunkSize: uint64(env.AddrChunkSize),
CoinsChunkSize: 1000,
BalanceChunkSize: 10000,
StakeChunkSize: uint64(env.StakeChunkSize),
ValidatorChunkSize: uint64(env.StakeChunkSize),
})
err := uploader.Do()
if err != nil {
logger.Warn(err)
}
//api
nodeApi, err := grpc_client.New(env.NodeApi)
if err != nil {
panic(err)
}
nodeStatus, err := nodeApi.Status()
if err != nil {
panic(err)
}
// Repositories
blockRepository := block.NewRepository(db)
validatorRepository := validator.NewRepository(db, contextLogger)
transactionRepository := transaction.NewRepository(db)
addressRepository := address.NewRepository(db)
coinRepository := coin.NewRepository(db)
eventsRepository := events.NewRepository(db)
balanceRepository := balance.NewRepository(db)
liquidityPoolRepository := liquidity_pool.NewRepository(db)
orderbookRepository := orderbook.NewRepository(db)
coins.GlobalRepository = coins.NewRepository(db) //temporary solution
// Services
addressService := address.NewService(env, addressRepository, contextLogger)
broadcastService := broadcast.NewService(env, addressRepository, coinRepository, nodeApi, contextLogger)
balanceService := balance.NewService(env, balanceRepository, nodeApi, addressService, coinRepository, broadcastService, contextLogger)
coinService := coin.NewService(env, nodeApi, coinRepository, addressRepository, contextLogger)
validatorService := validator.NewService(env, nodeApi, validatorRepository, addressRepository, coinRepository, contextLogger)
eventService := events.NewService(env, eventsRepository, validatorRepository, addressRepository, coinRepository, coinService, blockRepository, orderbookRepository, balanceRepository, broadcastService, contextLogger, nodeStatus.InitialHeight+1)
orderBookService := orderbook.NewService(db, addressRepository, liquidityPoolRepository, contextLogger)
return &Extender{
Metrics: metrics.New(),
env: env,
nodeApi: nodeApi,
blockService: block.NewBlockService(blockRepository, validatorRepository, broadcastService),
eventService: eventService,
blockRepository: blockRepository,
validatorService: validatorService,
transactionService: transaction.NewService(env, transactionRepository, addressRepository, validatorRepository, coinRepository, coinService, broadcastService, contextLogger, validatorService.GetUnbondSaverJobChannel(), liquidityPoolRepository, validatorService.GetMoveStakeJobChannel()),
addressService: addressService,
validatorRepository: validatorRepository,
balanceService: balanceService,
coinService: coinService,
broadcastService: broadcastService,
orderBookService: orderBookService,
chasingMode: false,
currentNodeHeight: 0,
startBlockHeight: nodeStatus.InitialHeight + 1,
log: contextLogger,
lpSnapshotChannel: make(chan *api_pb.BlockResponse),
lpWorkerChannel: make(chan *api_pb.BlockResponse),
orderBookChannel: make(chan *api_pb.BlockResponse),
}
}
func (ext *Extender) GetInfo() {
fmt.Printf("%s v%s\n", "Minter Explorer Extender", Version)
}
func (ext *Extender) Run() {
//check connections to node
_, err := ext.nodeApi.Status()
if err == nil {
err = ext.blockRepository.DeleteLastBlockData()
}
if err != nil {
ext.log.Fatal(err)
}
var height uint64
// ----- Workers -----
ext.runWorkers()
lastExplorerBlock, err := ext.blockRepository.GetLastFromDB()
if err != nil && err != pg.ErrNoRows {
ext.log.Fatal(err)
}
if lastExplorerBlock != nil {
height = lastExplorerBlock.ID + 1
ext.blockService.SetBlockCache(lastExplorerBlock)
} else {
height = ext.startBlockHeight
}
for {
eet := ExtenderElapsedTime{
Height: height,
GettingBlock: 0,
GettingEvents: 0,
HandleCoinsFromTransactions: 0,
HandleAddressesFromResponses: 0,
HandleBlockResponse: 0,
Total: 0,
}
start := time.Now()
//ext.findOutChasingMode(height)
//Pulling block data
countStart := time.Now()
blockResponse, err := ext.nodeApi.BlockExtended(height, true, true)
if err != nil {
grpcErr, ok := status.FromError(err)
if !ok {
ext.log.Error(err)
time.Sleep(2 * time.Second)
continue
}
if grpcErr.Message() == "Block not found" || grpcErr.Message() == "Block results not found" {
time.Sleep(2 * time.Second)
continue
}
ext.log.Fatal(err)
}
eet.GettingBlock = time.Since(countStart)
countStart = time.Now()
ext.handleCoinsFromTransactions(blockResponse)
eet.HandleCoinsFromTransactions = time.Since(countStart)
countStart = time.Now()
ext.handleAddressesFromResponses(blockResponse)
eet.HandleAddressesFromResponses = time.Since(countStart)
countStart = time.Now()
ext.handleBlockResponse(blockResponse)
eet.HandleBlockResponse = time.Since(countStart)
ext.balanceService.UpdateChannel() <- blockResponse
go ext.handleEventResponse(height, blockResponse)
if len(blockResponse.Transactions) > 0 {
ext.orderBookChannel <- blockResponse
}
//ext.validatorService.GetUpdateStakesJobChannel() <- height
ext.validatorService.GetUpdateValidatorsJobChannel() <- height
ext.validatorService.GetClearJobChannel() <- height
eet.Total = time.Since(start)
ext.printSpentTimeLog(eet)
height++
}
}
func (ext *Extender) runWorkers() {
// Addresses
for w := 1; w <= ext.env.WrkSaveAddressesCount; w++ {
go ext.addressService.SaveAddressesWorker(ext.addressService.GetSaveAddressesJobChannel())
}
// Transactions
for w := 1; w <= ext.env.WrkSaveT | random_line_split | |
main.go | //hookImpl := eventHook{
// log: logrus.New(),
// beforeTime: time.Now(),
//}
db := pg.Connect(pgOptions)
//db.AddQueryHook(hookImpl)
uploader := genesisUploader.New(genesisEnv.Config{
Debug: false,
PostgresHost: env.DbHost,
PostgresPort: env.DbPort,
PostgresDB: env.DbName,
PostgresUser: env.DbUser,
PostgresPassword: env.DbPassword,
PostgresSSLEnabled: os.Getenv("POSTGRES_SSL_ENABLED") == "true",
MinterBaseCoin: env.BaseCoin,
NodeGrpc: env.NodeApi,
AddressChunkSize: uint64(env.AddrChunkSize),
CoinsChunkSize: 1000,
BalanceChunkSize: 10000,
StakeChunkSize: uint64(env.StakeChunkSize),
ValidatorChunkSize: uint64(env.StakeChunkSize),
})
err := uploader.Do()
if err != nil {
logger.Warn(err)
}
//api
nodeApi, err := grpc_client.New(env.NodeApi)
if err != nil {
panic(err)
}
nodeStatus, err := nodeApi.Status()
if err != nil {
panic(err)
}
// Repositories
blockRepository := block.NewRepository(db)
validatorRepository := validator.NewRepository(db, contextLogger)
transactionRepository := transaction.NewRepository(db)
addressRepository := address.NewRepository(db)
coinRepository := coin.NewRepository(db)
eventsRepository := events.NewRepository(db)
balanceRepository := balance.NewRepository(db)
liquidityPoolRepository := liquidity_pool.NewRepository(db)
orderbookRepository := orderbook.NewRepository(db)
coins.GlobalRepository = coins.NewRepository(db) //temporary solution
// Services
addressService := address.NewService(env, addressRepository, contextLogger)
broadcastService := broadcast.NewService(env, addressRepository, coinRepository, nodeApi, contextLogger)
balanceService := balance.NewService(env, balanceRepository, nodeApi, addressService, coinRepository, broadcastService, contextLogger)
coinService := coin.NewService(env, nodeApi, coinRepository, addressRepository, contextLogger)
validatorService := validator.NewService(env, nodeApi, validatorRepository, addressRepository, coinRepository, contextLogger)
eventService := events.NewService(env, eventsRepository, validatorRepository, addressRepository, coinRepository, coinService, blockRepository, orderbookRepository, balanceRepository, broadcastService, contextLogger, nodeStatus.InitialHeight+1)
orderBookService := orderbook.NewService(db, addressRepository, liquidityPoolRepository, contextLogger)
return &Extender{
Metrics: metrics.New(),
env: env,
nodeApi: nodeApi,
blockService: block.NewBlockService(blockRepository, validatorRepository, broadcastService),
eventService: eventService,
blockRepository: blockRepository,
validatorService: validatorService,
transactionService: transaction.NewService(env, transactionRepository, addressRepository, validatorRepository, coinRepository, coinService, broadcastService, contextLogger, validatorService.GetUnbondSaverJobChannel(), liquidityPoolRepository, validatorService.GetMoveStakeJobChannel()),
addressService: addressService,
validatorRepository: validatorRepository,
balanceService: balanceService,
coinService: coinService,
broadcastService: broadcastService,
orderBookService: orderBookService,
chasingMode: false,
currentNodeHeight: 0,
startBlockHeight: nodeStatus.InitialHeight + 1,
log: contextLogger,
lpSnapshotChannel: make(chan *api_pb.BlockResponse),
lpWorkerChannel: make(chan *api_pb.BlockResponse),
orderBookChannel: make(chan *api_pb.BlockResponse),
}
}
func (ext *Extender) GetInfo() {
fmt.Printf("%s v%s\n", "Minter Explorer Extender", Version)
}
func (ext *Extender) Run() {
//check connections to node
_, err := ext.nodeApi.Status()
if err == nil {
err = ext.blockRepository.DeleteLastBlockData()
}
if err != nil {
ext.log.Fatal(err)
}
var height uint64
// ----- Workers -----
ext.runWorkers()
lastExplorerBlock, err := ext.blockRepository.GetLastFromDB()
if err != nil && err != pg.ErrNoRows {
ext.log.Fatal(err)
}
if lastExplorerBlock != nil {
height = lastExplorerBlock.ID + 1
ext.blockService.SetBlockCache(lastExplorerBlock)
} else {
height = ext.startBlockHeight
}
for {
eet := ExtenderElapsedTime{
Height: height,
GettingBlock: 0,
GettingEvents: 0,
HandleCoinsFromTransactions: 0,
HandleAddressesFromResponses: 0,
HandleBlockResponse: 0,
Total: 0,
}
start := time.Now()
//ext.findOutChasingMode(height)
//Pulling block data
countStart := time.Now()
blockResponse, err := ext.nodeApi.BlockExtended(height, true, true)
if err != nil {
grpcErr, ok := status.FromError(err)
if !ok {
ext.log.Error(err)
time.Sleep(2 * time.Second)
continue
}
if grpcErr.Message() == "Block not found" || grpcErr.Message() == "Block results not found" {
time.Sleep(2 * time.Second)
continue
}
ext.log.Fatal(err)
}
eet.GettingBlock = time.Since(countStart)
countStart = time.Now()
ext.handleCoinsFromTransactions(blockResponse)
eet.HandleCoinsFromTransactions = time.Since(countStart)
countStart = time.Now()
ext.handleAddressesFromResponses(blockResponse)
eet.HandleAddressesFromResponses = time.Since(countStart)
countStart = time.Now()
ext.handleBlockResponse(blockResponse)
eet.HandleBlockResponse = time.Since(countStart)
ext.balanceService.UpdateChannel() <- blockResponse
go ext.handleEventResponse(height, blockResponse)
if len(blockResponse.Transactions) > 0 {
ext.orderBookChannel <- blockResponse
}
//ext.validatorService.GetUpdateStakesJobChannel() <- height
ext.validatorService.GetUpdateValidatorsJobChannel() <- height
ext.validatorService.GetClearJobChannel() <- height
eet.Total = time.Since(start)
ext.printSpentTimeLog(eet)
height++
}
}
func (ext *Extender) runWorkers() {
// Addresses
for w := 1; w <= ext.env.WrkSaveAddressesCount; w++ {
go ext.addressService.SaveAddressesWorker(ext.addressService.GetSaveAddressesJobChannel())
}
// Transactions
for w := 1; w <= ext.env.WrkSaveTxsCount; w++ {
go ext.transactionService.SaveTransactionsWorker(ext.transactionService.GetSaveTxJobChannel())
}
for w := 1; w <= ext.env.WrkSaveTxsOutputCount; w++ {
go ext.transactionService.SaveTransactionsOutputWorker(ext.transactionService.GetSaveTxsOutputJobChannel())
}
for w := 1; w <= ext.env.WrkSaveInvTxsCount; w++ {
go ext.transactionService.SaveInvalidTransactionsWorker(ext.transactionService.GetSaveInvalidTxsJobChannel())
}
go ext.transactionService.UpdateTxsIndexWorker()
// Validators
for w := 1; w <= ext.env.WrkSaveValidatorTxsCount; w++ {
go ext.transactionService.SaveTxValidatorWorker(ext.transactionService.GetSaveTxValidatorJobChannel())
}
go ext.validatorService.UpdateValidatorsWorker(ext.validatorService.GetUpdateValidatorsJobChannel())
//go ext.validatorService.UpdateStakesWorker(ext.validatorService.GetUpdateStakesJobChannel())
// Events
for w := 1; w <= ext.env.WrkSaveRewardsCount; w++ {
go ext.eventService.SaveRewardsWorker(ext.eventService.GetSaveRewardsJobChannel())
}
for w := 1; w <= ext.env.WrkSaveSlashesCount; w++ {
go ext.eventService.SaveSlashesWorker(ext.eventService.GetSaveSlashesJobChannel())
}
// Balances
go ext.balanceService.BalanceManager()
//Coins
go ext.coinService.UpdateCoinsInfoFromTxsWorker(ext.coinService.GetUpdateCoinsFromTxsJobChannel())
go ext.coinService.UpdateCoinsInfoFromCoinsMap(ext.coinService.GetUpdateCoinsFromCoinsMapJobChannel())
go ext.coinService.UpdateHubInfoWorker()
//Unbonds
go ext.validatorService.UnbondSaverWorker(ext.validatorService.GetUnbondSaverJobChannel())
//Move Stake
go ext.validatorService.MoveStakeWorker(ext.validatorService.GetMoveStakeJobChannel())
go ext.validatorService.ClearMoveStakeAndUnbondWorker(ext.validatorService.GetClearJobChannel())
//OrderBook
go ext.orderBookService.OrderBookWorker(ext.orderBookChannel)
go ext.orderBookService.UpdateOrderBookWorker(ext.orderBookService.UpdateOrderChannel())
//Broadcast
go ext.broadcastService.Manager()
}
func (ext *Extender) handleAddressesFromResponses(blockResponse *api_pb.BlockResponse) {
err := ext.addressService.SaveAddressesFromResponses(blockResponse)
if err != nil {
ext.log.Panic(err)
}
}
func (ext *Extender) | handleBlockResponse | identifier_name | |
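The eventHook shown in the rows above stashes a timestamp before each query and, after the query, logs a whitespace-collapsed version of the statement whenever the elapsed time exceeds a 500 ms threshold. A rough Python sketch of that slow-query-hook pattern; the function names and the print call are placeholders for the pg hook interface and logrus logging used in the original:

```python
import re
import time

CRITICAL = 0.5   # seconds, mirroring the 500 ms threshold above

def before_query(stash):
    stash["query_time"] = time.monotonic()

def after_query(stash, query):
    elapsed = time.monotonic() - stash.get("query_time", time.monotonic())
    if elapsed > CRITICAL:
        normalized = re.sub(r"\s+", " ", query).strip()   # collapse whitespace as above
        print(f"DB query time is too high: {elapsed:.3f}s for: {normalized}")

stash = {}
before_query(stash)
time.sleep(0.01)                      # fast query: below the threshold, nothing logged
after_query(stash, "SELECT 1")

stash["query_time"] -= 2.0            # pretend the next query took two extra seconds
after_query(stash, "SELECT *\n  FROM blocks\n WHERE id = 1")
```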
main.go | := address.NewRepository(db)
coinRepository := coin.NewRepository(db)
eventsRepository := events.NewRepository(db)
balanceRepository := balance.NewRepository(db)
liquidityPoolRepository := liquidity_pool.NewRepository(db)
orderbookRepository := orderbook.NewRepository(db)
coins.GlobalRepository = coins.NewRepository(db) //temporary solution
// Services
addressService := address.NewService(env, addressRepository, contextLogger)
broadcastService := broadcast.NewService(env, addressRepository, coinRepository, nodeApi, contextLogger)
balanceService := balance.NewService(env, balanceRepository, nodeApi, addressService, coinRepository, broadcastService, contextLogger)
coinService := coin.NewService(env, nodeApi, coinRepository, addressRepository, contextLogger)
validatorService := validator.NewService(env, nodeApi, validatorRepository, addressRepository, coinRepository, contextLogger)
eventService := events.NewService(env, eventsRepository, validatorRepository, addressRepository, coinRepository, coinService, blockRepository, orderbookRepository, balanceRepository, broadcastService, contextLogger, nodeStatus.InitialHeight+1)
orderBookService := orderbook.NewService(db, addressRepository, liquidityPoolRepository, contextLogger)
return &Extender{
Metrics: metrics.New(),
env: env,
nodeApi: nodeApi,
blockService: block.NewBlockService(blockRepository, validatorRepository, broadcastService),
eventService: eventService,
blockRepository: blockRepository,
validatorService: validatorService,
transactionService: transaction.NewService(env, transactionRepository, addressRepository, validatorRepository, coinRepository, coinService, broadcastService, contextLogger, validatorService.GetUnbondSaverJobChannel(), liquidityPoolRepository, validatorService.GetMoveStakeJobChannel()),
addressService: addressService,
validatorRepository: validatorRepository,
balanceService: balanceService,
coinService: coinService,
broadcastService: broadcastService,
orderBookService: orderBookService,
chasingMode: false,
currentNodeHeight: 0,
startBlockHeight: nodeStatus.InitialHeight + 1,
log: contextLogger,
lpSnapshotChannel: make(chan *api_pb.BlockResponse),
lpWorkerChannel: make(chan *api_pb.BlockResponse),
orderBookChannel: make(chan *api_pb.BlockResponse),
}
}
func (ext *Extender) GetInfo() {
fmt.Printf("%s v%s\n", "Minter Explorer Extender", Version)
}
func (ext *Extender) Run() {
//check connections to node
_, err := ext.nodeApi.Status()
if err == nil {
err = ext.blockRepository.DeleteLastBlockData()
}
if err != nil {
ext.log.Fatal(err)
}
var height uint64
// ----- Workers -----
ext.runWorkers()
lastExplorerBlock, err := ext.blockRepository.GetLastFromDB()
if err != nil && err != pg.ErrNoRows {
ext.log.Fatal(err)
}
if lastExplorerBlock != nil {
height = lastExplorerBlock.ID + 1
ext.blockService.SetBlockCache(lastExplorerBlock)
} else {
height = ext.startBlockHeight
}
for {
eet := ExtenderElapsedTime{
Height: height,
GettingBlock: 0,
GettingEvents: 0,
HandleCoinsFromTransactions: 0,
HandleAddressesFromResponses: 0,
HandleBlockResponse: 0,
Total: 0,
}
start := time.Now()
//ext.findOutChasingMode(height)
//Pulling block data
countStart := time.Now()
blockResponse, err := ext.nodeApi.BlockExtended(height, true, true)
if err != nil {
grpcErr, ok := status.FromError(err)
if !ok {
ext.log.Error(err)
time.Sleep(2 * time.Second)
continue
}
if grpcErr.Message() == "Block not found" || grpcErr.Message() == "Block results not found" {
time.Sleep(2 * time.Second)
continue
}
ext.log.Fatal(err)
}
eet.GettingBlock = time.Since(countStart)
countStart = time.Now()
ext.handleCoinsFromTransactions(blockResponse)
eet.HandleCoinsFromTransactions = time.Since(countStart)
countStart = time.Now()
ext.handleAddressesFromResponses(blockResponse)
eet.HandleAddressesFromResponses = time.Since(countStart)
countStart = time.Now()
ext.handleBlockResponse(blockResponse)
eet.HandleBlockResponse = time.Since(countStart)
ext.balanceService.UpdateChannel() <- blockResponse
go ext.handleEventResponse(height, blockResponse)
if len(blockResponse.Transactions) > 0 {
ext.orderBookChannel <- blockResponse
}
//ext.validatorService.GetUpdateStakesJobChannel() <- height
ext.validatorService.GetUpdateValidatorsJobChannel() <- height
ext.validatorService.GetClearJobChannel() <- height
eet.Total = time.Since(start)
ext.printSpentTimeLog(eet)
height++
}
}
func (ext *Extender) runWorkers() {
// Addresses
for w := 1; w <= ext.env.WrkSaveAddressesCount; w++ {
go ext.addressService.SaveAddressesWorker(ext.addressService.GetSaveAddressesJobChannel())
}
// Transactions
for w := 1; w <= ext.env.WrkSaveTxsCount; w++ {
go ext.transactionService.SaveTransactionsWorker(ext.transactionService.GetSaveTxJobChannel())
}
for w := 1; w <= ext.env.WrkSaveTxsOutputCount; w++ {
go ext.transactionService.SaveTransactionsOutputWorker(ext.transactionService.GetSaveTxsOutputJobChannel())
}
for w := 1; w <= ext.env.WrkSaveInvTxsCount; w++ {
go ext.transactionService.SaveInvalidTransactionsWorker(ext.transactionService.GetSaveInvalidTxsJobChannel())
}
go ext.transactionService.UpdateTxsIndexWorker()
// Validators
for w := 1; w <= ext.env.WrkSaveValidatorTxsCount; w++ {
go ext.transactionService.SaveTxValidatorWorker(ext.transactionService.GetSaveTxValidatorJobChannel())
}
go ext.validatorService.UpdateValidatorsWorker(ext.validatorService.GetUpdateValidatorsJobChannel())
//go ext.validatorService.UpdateStakesWorker(ext.validatorService.GetUpdateStakesJobChannel())
// Events
for w := 1; w <= ext.env.WrkSaveRewardsCount; w++ {
go ext.eventService.SaveRewardsWorker(ext.eventService.GetSaveRewardsJobChannel())
}
for w := 1; w <= ext.env.WrkSaveSlashesCount; w++ {
go ext.eventService.SaveSlashesWorker(ext.eventService.GetSaveSlashesJobChannel())
}
// Balances
go ext.balanceService.BalanceManager()
//Coins
go ext.coinService.UpdateCoinsInfoFromTxsWorker(ext.coinService.GetUpdateCoinsFromTxsJobChannel())
go ext.coinService.UpdateCoinsInfoFromCoinsMap(ext.coinService.GetUpdateCoinsFromCoinsMapJobChannel())
go ext.coinService.UpdateHubInfoWorker()
//Unbonds
go ext.validatorService.UnbondSaverWorker(ext.validatorService.GetUnbondSaverJobChannel())
//Move Stake
go ext.validatorService.MoveStakeWorker(ext.validatorService.GetMoveStakeJobChannel())
go ext.validatorService.ClearMoveStakeAndUnbondWorker(ext.validatorService.GetClearJobChannel())
//OrderBook
go ext.orderBookService.OrderBookWorker(ext.orderBookChannel)
go ext.orderBookService.UpdateOrderBookWorker(ext.orderBookService.UpdateOrderChannel())
//Broadcast
go ext.broadcastService.Manager()
}
func (ext *Extender) handleAddressesFromResponses(blockResponse *api_pb.BlockResponse) {
err := ext.addressService.SaveAddressesFromResponses(blockResponse)
if err != nil {
ext.log.Panic(err)
}
}
func (ext *Extender) handleBlockResponse(response *api_pb.BlockResponse) {
// Save validators if not exist
err := ext.validatorService.HandleBlockResponse(response)
if err != nil {
ext.log.Panic(err)
}
// Save block
err = ext.blockService.HandleBlockResponse(response)
if err != nil {
ext.log.Panic(err)
}
ext.linkBlockValidator(response)
//first block doesn't have validators
if response.TransactionCount > 0 {
ext.handleTransactions(response)
}
}
func (ext *Extender) handleCoinsFromTransactions(block *api_pb.BlockResponse) {
if len(block.Transactions) == 0 {
return
}
err := ext.coinService.HandleCoinsFromBlock(block)
if err != nil {
ext.log.Fatal(err)
}
}
func (ext *Extender) handleTransactions(response *api_pb.BlockResponse) {
chunksCount := int(math.Ceil(float64(len(response.Transactions)) / float64(ext.env.TxChunkSize)))
for i := 0; i < chunksCount; i++ | {
start := ext.env.TxChunkSize * i
end := start + ext.env.TxChunkSize
if end > len(response.Transactions) {
end = len(response.Transactions)
}
layout := "2006-01-02T15:04:05Z"
blockTime, err := time.Parse(layout, response.Time)
if err != nil {
ext.log.Panic(err)
}
ext.saveTransactions(response.Height, blockTime, response.Transactions[start:end])
} | conditional_block | |
utils.py | binary_img = gray_img.point(lambda x: 255 if x < otsu else 0, '1')
return binary_img
class Binary(object):
def __call__(self, img):
return binary(img)
def squeeze_weights(m):
m.weight.data = m.weight.data.sum(dim=1)[:, None]
m.in_channels = 1
def change_out_features(m, classes):
m.out_features = classes
return m
def init_weights(m):
if type(m) == nn.Linear:
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
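# --- Illustrative sketch, not part of the original utils.py ---------------
# One way the helpers above could be combined to adapt a pretrained
# torchvision model to single-channel (binarized) pages; resnet18 and
# pretrained=True are hypothetical choices, not taken from this repo.
def _demo_adapt_to_grayscale():
    model = torchvision.models.resnet18(pretrained=True)
    squeeze_weights(model.conv1)   # collapse the RGB filters into one input channel
    model.apply(init_weights)      # re-initialise every nn.Linear as defined above
    return model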
def dataset_mean_and_std(train_path, test_path):
# Dataset should be a folder which follows
# ImageFolder format with pages in each label folder
transform = transforms.Compose([
transforms.ToTensor(),
])
train_data = ImageFolder(train_path,
transform=transform)
test_data = ImageFolder(test_path,
transform=transform)
data = ConcatDataset([train_data, test_data])
loader = DataLoader(data, batch_size=1)
n = 0
m = 0.0
var = 0.0
with tqdm(total=len(loader)) as pbar:
for data in loader:
batch = data[0]
# Rearrange batch to be the shape of [B, C, W * H]
batch = batch.view(batch.size(0), batch.size(1), -1)
# Update total number of images
n += batch.size(0)
# Compute mean and std here
m += batch.mean(2).sum(0)
var += batch.var(2).sum(0)
pbar.update(1)
m /= n
var /= n
s = sqrt(var)
print(m)
print(s)
return m, s
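# --- Illustrative sketch, not part of the original utils.py ---------------
# The per-channel mean/std computed above are normally passed straight to
# transforms.Normalize; the folder paths are hypothetical placeholders.
def _demo_normalization_pipeline(train_path="data/train", test_path="data/test"):
    mean, std = dataset_mean_and_std(train_path, test_path)
    return transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])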
def _extract_patches(arr, patch_shape=8, extraction_step=1):
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
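# --- Illustrative sketch, not part of the original utils.py ---------------
# What the strided view returned by _extract_patches looks like for a tiny
# array; the 4x4 input and 2x2 patch size are hypothetical values.
def _demo_extract_patches():
    arr = np.arange(16).reshape(4, 4)
    patches = _extract_patches(arr, patch_shape=2, extraction_step=1)
    # (4 - 2) // 1 + 1 = 3 positions per axis, each holding a 2x2 view of arr
    assert patches.shape == (3, 3, 2, 2)
    return patches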
def extract_patches_2d(image, patch_size, max_patches=None,
random_state=None, stride=1, th=2000):
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
if isinstance(stride, numbers.Number):
|
else:
s_h, s_w = stride
step = (s_h, s_w, n_colors)
extracted_patches = _extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=step)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint((i_h - p_h + 1) // s_h, size=n_patches)
j_s = rng.randint((i_w - p_w + 1) // s_w, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
patches = patches.reshape((n_patches, p_h, p_w))
# return clean_patches(patches, th)
return patches
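# --- Illustrative sketch, not part of the original utils.py ---------------
# Expected output shape of extract_patches_2d for a hypothetical 32x32
# grayscale crop with 8x8 patches, stride 1 and no max_patches sampling.
def _demo_extract_patches_2d():
    page = np.random.rand(32, 32)
    patches = extract_patches_2d(page, (8, 8), stride=1)
    # (32 - 8) // 1 + 1 = 25 positions per axis -> 625 patches of 8x8
    assert patches.shape == (625, 8, 8)
    return patches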
def _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches=None):
if isinstance(stride, numbers.Number):
s_h = stride
s_w = stride
else:
s_h, s_w = stride
n_h = (i_h - p_h) // s_h + 1
n_w = (i_w - p_w) // s_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, numbers.Integral)
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, numbers.Integral)
and max_patches >= all_patches):
return all_patches
elif (isinstance(max_patches, numbers.Real)
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def clean_patches(patches, th=2000):
indices = []
for i, patch in enumerate(patches):
if patch.shape[-1] == 3:
patch = patch / 255
num_features = feature.canny(patch.mean(axis=2), sigma=2).sum()
else:
num_features = feature.canny(patch, sigma=2).sum()
if num_features > th:
indices.append(i)
return patches[indices]
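# --- Illustrative sketch, not part of the original utils.py ---------------
# clean_patches keeps only patches whose Canny edge count exceeds th, which
# presumably drops mostly blank page regions; the 10 random 256x256 patches
# below are hypothetical input.
def _demo_clean_patches():
    patches = np.random.rand(10, 256, 256)
    kept = clean_patches(patches, th=2000)
    return kept.shape   # (k, 256, 256) with k <= 10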
def get_labels_and_class_counts(labels_list):
'''
Calculates the counts of all unique classes.
'''
labels = np.array(labels_list)
_, class_counts = np.unique(labels, return_counts=True)
return labels, class_counts
def plot_class_distributions(class_names, train_class_counts,
test_class_counts, validation_class_counts):
'''
Plots the class distributions for the training, test and validation sets as a barplot.
'''
f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharey=True, figsize=(15, 6))
ax1.bar(class_names, train_class_counts)
ax1.set_title('Training dataset distribution')
ax1.set_xlabel('Classes')
ax1.set_ylabel('Class counts')
ax2.bar(class_names, test_class_counts)
ax2.set_title('Test dataset distribution')
ax2.set_xlabel('Classes')
ax2.set_ylabel('Class counts')
ax3.bar(class_names, validation_class_counts)
ax3.set_title('Validation dataset distribution')
ax3.set_xlabel('Classes')
ax3.set_ylabel('Class counts')
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
"""Samples elements randomly from a given list of indices for imbalanced dataset
Arguments:
indices (list, optional): a list of indices
num_samples (int, optional): number of samples to draw
callback_get_label func: a callback-like function which takes two arguments - dataset and index
"""
def __init__(self, dataset, indices=None, num_samples=None, callback_get_label=None):
# if indices is not provided,
# all elements in the dataset will be considered
self.indices = list(range(len(dataset))) \
if indices is None else indices
# define custom callback
self.callback_get_label = callback_get_label
# if num_samples is not provided,
# draw `len(indices)` samples in each iteration
self.num_samples = len(self.indices) \
if num_samples is None else num_samples
# distribution of classes in the dataset
label_to_count = {}
for idx in self.indices:
label = self._get_label(dataset, idx)
if label in label_to_count:
label_to_count[label] += 1
else:
label_to_count[label] = 1
# weight for each sample
weights = [1.0 / label_to_count[self._get_label(dataset, idx)]
for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def _get_label(self, dataset, idx):
if isinstance(dataset, torchvision.datasets.MNIST):
return dataset.train_labels[idx].item()
elif isinstance(dataset, torchvision.datasets.ImageFolder):
return dataset.imgs[idx][1]
elif isinstance(dataset, torch.utils.data.Subset):
return dataset.dataset.imgs[idx][1]
elif self.callback_get_label:
return self.callback_get_label(dataset, idx)
elif isinstance(dataset, BinColorDataset):
return dataset.dataset.imgs[idx][1]
else:
raise NotImplementedError
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(
self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
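# --- Illustrative sketch, not part of the original utils.py ---------------
# Typical wiring of ImbalancedDatasetSampler into a DataLoader; the
# ImageFolder path and batch size are hypothetical placeholders.
def _demo_imbalanced_loader(train_path="data/train"):
    dataset = ImageFolder(train_path, transform=transforms.ToTensor())
    sampler = ImbalancedDatasetSampler(dataset)
    # the sampler replaces shuffling, so shuffle is left at its default False
    return DataLoader(dataset, batch_size=32, sampler=sampler)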
class BinColorDataset(Dataset):
def __init__(self, dataset, col_transform=None, bin_transform=None):
self.dataset = dataset
self.col_transform = col_transform
self.bin_transform | step = stride
s_h = stride
s_w = stride | conditional_block |
utils.py | binary_img = gray_img.point(lambda x: 255 if x < otsu else 0, '1')
return binary_img
class Binary(object):
def __call__(self, img):
return binary(img)
def squeeze_weights(m):
m.weight.data = m.weight.data.sum(dim=1)[:, None]
m.in_channels = 1
def change_out_features(m, classes):
m.out_features = classes
return m
def init_weights(m):
if type(m) == nn.Linear:
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def dataset_mean_and_std(train_path, test_path):
# Dataset should be a folder which follows
# ImageFolder format with pages in each label folder
transform = transforms.Compose([
transforms.ToTensor(),
])
train_data = ImageFolder(train_path,
transform=transform)
test_data = ImageFolder(test_path,
transform=transform)
data = ConcatDataset([train_data, test_data])
loader = DataLoader(data, batch_size=1)
n = 0
m = 0.0
var = 0.0
with tqdm(total=len(loader)) as pbar:
for data in loader:
batch = data[0]
# Rearrange batch to be the shape of [B, C, W * H]
batch = batch.view(batch.size(0), batch.size(1), -1)
# Update total number of images
n += batch.size(0)
# Compute mean and std here
m += batch.mean(2).sum(0)
var += batch.var(2).sum(0)
pbar.update(1)
m /= n
var /= n
s = sqrt(var)
print(m)
print(s)
return m, s
def _extract_patches(arr, patch_shape=8, extraction_step=1):
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
def extract_patches_2d(image, patch_size, max_patches=None,
random_state=None, stride=1, th=2000):
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
if isinstance(stride, numbers.Number):
step = stride
s_h = stride
s_w = stride
else:
s_h, s_w = stride
step = (s_h, s_w, n_colors)
extracted_patches = _extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=step)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint((i_h - p_h + 1) // s_h, size=n_patches)
j_s = rng.randint((i_w - p_w + 1) // s_w, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
patches = patches.reshape((n_patches, p_h, p_w))
# return clean_patches(patches, th)
return patches
def _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches=None):
| else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def clean_patches(patches, th=2000):
indices = []
for i, patch in enumerate(patches):
if patch.shape[-1] == 3:
patch = patch / 255
num_features = feature.canny(patch.mean(axis=2), sigma=2).sum()
else:
num_features = feature.canny(patch, sigma=2).sum()
if num_features > th:
indices.append(i)
return patches[indices]
def get_labels_and_class_counts(labels_list):
'''
Calculates the counts of all unique classes.
'''
labels = np.array(labels_list)
_, class_counts = np.unique(labels, return_counts=True)
return labels, class_counts
def plot_class_distributions(class_names, train_class_counts,
test_class_counts, validation_class_counts):
'''
Plots the class distributions for the training, test and validation sets as a barplot.
'''
f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharey=True, figsize=(15, 6))
ax1.bar(class_names, train_class_counts)
ax1.set_title('Training dataset distribution')
ax1.set_xlabel('Classes')
ax1.set_ylabel('Class counts')
ax2.bar(class_names, test_class_counts)
ax2.set_title('Test dataset distribution')
ax2.set_xlabel('Classes')
ax2.set_ylabel('Class counts')
ax3.bar(class_names, validation_class_counts)
ax3.set_title('Validation dataset distribution')
ax3.set_xlabel('Classes')
ax3.set_ylabel('Class counts')
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
"""Samples elements randomly from a given list of indices for imbalanced dataset
Arguments:
indices (list, optional): a list of indices
num_samples (int, optional): number of samples to draw
callback_get_label func: a callback-like function which takes two arguments - dataset and index
"""
def __init__(self, dataset, indices=None, num_samples=None, callback_get_label=None):
# if indices is not provided,
# all elements in the dataset will be considered
self.indices = list(range(len(dataset))) \
if indices is None else indices
# define custom callback
self.callback_get_label = callback_get_label
# if num_samples is not provided,
# draw `len(indices)` samples in each iteration
self.num_samples = len(self.indices) \
if num_samples is None else num_samples
# distribution of classes in the dataset
label_to_count = {}
for idx in self.indices:
label = self._get_label(dataset, idx)
if label in label_to_count:
label_to_count[label] += 1
else:
label_to_count[label] = 1
# weight for each sample
weights = [1.0 / label_to_count[self._get_label(dataset, idx)]
for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def _get_label(self, dataset, idx):
if isinstance(dataset, torchvision.datasets.MNIST):
return dataset.train_labels[idx].item()
elif isinstance(dataset, torchvision.datasets.ImageFolder):
return dataset.imgs[idx][1]
elif isinstance(dataset, torch.utils.data.Subset):
return dataset.dataset.imgs[idx][1]
elif self.callback_get_label:
return self.callback_get_label(dataset, idx)
elif isinstance(dataset, BinColorDataset):
return dataset.dataset.imgs[idx][1]
else:
raise NotImplementedError
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(
self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
class BinColorDataset(Dataset):
def __init__(self, dataset, col_transform=None, bin_transform=None):
self.dataset = dataset
self.col_transform = col_transform
self.bin_transform | if isinstance(stride, numbers.Number):
s_h = stride
s_w = stride
else:
s_h, s_w = stride
n_h = (i_h - p_h) // s_h + 1
n_w = (i_w - p_w) // s_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, numbers.Integral)
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, numbers.Integral)
and max_patches >= all_patches):
return all_patches
elif (isinstance(max_patches, numbers.Real)
and 0 < max_patches < 1):
return int(max_patches * all_patches) | identifier_body |
utils.py | binary_img = gray_img.point(lambda x: 255 if x < otsu else 0, '1')
return binary_img
class Binary(object):
def __call__(self, img):
return binary(img)
def squeeze_weights(m):
m.weight.data = m.weight.data.sum(dim=1)[:, None]
m.in_channels = 1
def change_out_features(m, classes):
m.out_features = classes
return m
def init_weights(m):
if type(m) == nn.Linear:
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def dataset_mean_and_std(train_path, test_path):
# Dataset should be a folder which follows
# ImageFolder format with pages in each label folder
transform = transforms.Compose([
transforms.ToTensor(),
])
train_data = ImageFolder(train_path,
transform=transform)
test_data = ImageFolder(test_path,
transform=transform)
data = ConcatDataset([train_data, test_data])
loader = DataLoader(data, batch_size=1)
n = 0
m = 0.0
var = 0.0
with tqdm(total=len(loader)) as pbar:
for data in loader:
batch = data[0]
# Rearrange batch to be the shape of [B, C, W * H]
batch = batch.view(batch.size(0), batch.size(1), -1)
# Update total number of images
n += batch.size(0)
# Compute mean and std here
m += batch.mean(2).sum(0)
var += batch.var(2).sum(0)
pbar.update(1)
m /= n
var /= n
s = sqrt(var)
print(m)
print(s)
return m, s
def _extract_patches(arr, patch_shape=8, extraction_step=1):
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
def extract_patches_2d(image, patch_size, max_patches=None,
random_state=None, stride=1, th=2000):
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
if isinstance(stride, numbers.Number):
step = stride
s_h = stride
s_w = stride
else:
s_h, s_w = stride
step = (s_h, s_w, n_colors)
extracted_patches = _extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=step)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint((i_h - p_h + 1) // s_h, size=n_patches)
j_s = rng.randint((i_w - p_w + 1) // s_w, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else: | patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
patches = patches.reshape((n_patches, p_h, p_w))
# return clean_patches(patches, th)
return patches
def _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches=None):
if isinstance(stride, numbers.Number):
s_h = stride
s_w = stride
else:
s_h, s_w = stride
n_h = (i_h - p_h) // s_h + 1
n_w = (i_w - p_w) // s_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, numbers.Integral)
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, numbers.Integral)
and max_patches >= all_patches):
return all_patches
elif (isinstance(max_patches, numbers.Real)
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def clean_patches(patches, th=2000):
indices = []
for i, patch in enumerate(patches):
if patch.shape[-1] == 3:
patch = patch / 255
num_features = feature.canny(patch.mean(axis=2), sigma=2).sum()
else:
num_features = feature.canny(patch, sigma=2).sum()
if num_features > th:
indices.append(i)
return patches[indices]
def get_labels_and_class_counts(labels_list):
'''
Calculates the counts of all unique classes.
'''
labels = np.array(labels_list)
_, class_counts = np.unique(labels, return_counts=True)
return labels, class_counts
def plot_class_distributions(class_names, train_class_counts,
test_class_counts, validation_class_counts):
'''
Plots the class distributions for the training, test and validation sets as a barplot.
'''
f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharey=True, figsize=(15, 6))
ax1.bar(class_names, train_class_counts)
ax1.set_title('Training dataset distribution')
ax1.set_xlabel('Classes')
ax1.set_ylabel('Class counts')
ax2.bar(class_names, test_class_counts)
ax2.set_title('Test dataset distribution')
ax2.set_xlabel('Classes')
ax2.set_ylabel('Class counts')
ax3.bar(class_names, validation_class_counts)
ax3.set_title('Validation dataset distribution')
ax3.set_xlabel('Classes')
ax3.set_ylabel('Class counts')
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
"""Samples elements randomly from a given list of indices for imbalanced dataset
Arguments:
indices (list, optional): a list of indices
num_samples (int, optional): number of samples to draw
callback_get_label func: a callback-like function which takes two arguments - dataset and index
"""
def __init__(self, dataset, indices=None, num_samples=None, callback_get_label=None):
# if indices is not provided,
# all elements in the dataset will be considered
self.indices = list(range(len(dataset))) \
if indices is None else indices
# define custom callback
self.callback_get_label = callback_get_label
# if num_samples is not provided,
# draw `len(indices)` samples in each iteration
self.num_samples = len(self.indices) \
if num_samples is None else num_samples
# distribution of classes in the dataset
label_to_count = {}
for idx in self.indices:
label = self._get_label(dataset, idx)
if label in label_to_count:
label_to_count[label] += 1
else:
label_to_count[label] = 1
# weight for each sample
weights = [1.0 / label_to_count[self._get_label(dataset, idx)]
for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def _get_label(self, dataset, idx):
if isinstance(dataset, torchvision.datasets.MNIST):
return dataset.train_labels[idx].item()
elif isinstance(dataset, torchvision.datasets.ImageFolder):
return dataset.imgs[idx][1]
elif isinstance(dataset, torch.utils.data.Subset):
return dataset.dataset.imgs[idx][1]
elif self.callback_get_label:
return self.callback_get_label(dataset, idx)
elif isinstance(dataset, BinColorDataset):
return dataset.dataset.imgs[idx][1]
else:
raise NotImplementedError
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(
self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
class BinColorDataset(Dataset):
def __init__(self, dataset, col_transform=None, bin_transform=None):
self.dataset = dataset
self.col_transform = col_transform
self.bin_transform | random_line_split | |
utils.py | binary_img = gray_img.point(lambda x: 255 if x < otsu else 0, '1')
return binary_img
class Binary(object):
def __call__(self, img):
return binary(img)
def squeeze_weights(m):
m.weight.data = m.weight.data.sum(dim=1)[:, None]
m.in_channels = 1
def change_out_features(m, classes):
m.out_features = classes
return m
def init_weights(m):
if type(m) == nn.Linear:
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def dataset_mean_and_std(train_path, test_path):
# Dataset should be a folder which follows
# ImageFolder format with pages in each label folder
transform = transforms.Compose([
transforms.ToTensor(),
])
train_data = ImageFolder(train_path,
transform=transform)
test_data = ImageFolder(test_path,
transform=transform)
data = ConcatDataset([train_data, test_data])
loader = DataLoader(data, batch_size=1)
n = 0
m = 0.0
var = 0.0
with tqdm(total=len(loader)) as pbar:
for data in loader:
batch = data[0]
# Rearrange batch to be the shape of [B, C, W * H]
batch = batch.view(batch.size(0), batch.size(1), -1)
# Update total number of images
n += batch.size(0)
# Compute mean and std here
m += batch.mean(2).sum(0)
var += batch.var(2).sum(0)
pbar.update(1)
m /= n
var /= n
s = sqrt(var)
print(m)
print(s)
return m, s
def _extract_patches(arr, patch_shape=8, extraction_step=1):
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
def extract_patches_2d(image, patch_size, max_patches=None,
random_state=None, stride=1, th=2000):
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
if isinstance(stride, numbers.Number):
step = stride
s_h = stride
s_w = stride
else:
s_h, s_w = stride
step = (s_h, s_w, n_colors)
extracted_patches = _extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=step)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint((i_h - p_h + 1) // s_h, size=n_patches)
j_s = rng.randint((i_w - p_w + 1) // s_w, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
patches = patches.reshape((n_patches, p_h, p_w))
# return clean_patches(patches, th)
return patches
def _compute_n_patches(i_h, i_w, p_h, p_w, stride, max_patches=None):
if isinstance(stride, numbers.Number):
s_h = stride
s_w = stride
else:
s_h, s_w = stride
n_h = (i_h - p_h) // s_h + 1
n_w = (i_w - p_w) // s_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, numbers.Integral)
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, numbers.Integral)
and max_patches >= all_patches):
return all_patches
elif (isinstance(max_patches, numbers.Real)
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def clean_patches(patches, th=2000):
indices = []
for i, patch in enumerate(patches):
if patch.shape[-1] == 3:
patch = patch / 255
num_features = feature.canny(patch.mean(axis=2), sigma=2).sum()
else:
num_features = feature.canny(patch, sigma=2).sum()
if num_features > th:
indices.append(i)
return patches[indices]
def get_labels_and_class_counts(labels_list):
'''
Calculates the counts of all unique classes.
'''
labels = np.array(labels_list)
_, class_counts = np.unique(labels, return_counts=True)
return labels, class_counts
def plot_class_distributions(class_names, train_class_counts,
test_class_counts, validation_class_counts):
'''
Plots the class distributions for the training, test and validation sets as a barplot.
'''
f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharey=True, figsize=(15, 6))
ax1.bar(class_names, train_class_counts)
ax1.set_title('Training dataset distribution')
ax1.set_xlabel('Classes')
ax1.set_ylabel('Class counts')
ax2.bar(class_names, test_class_counts)
ax2.set_title('Test dataset distribution')
ax2.set_xlabel('Classes')
ax2.set_ylabel('Class counts')
ax3.bar(class_names, validation_class_counts)
ax3.set_title('Validation dataset distribution')
ax3.set_xlabel('Classes')
ax3.set_ylabel('Class counts')
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
"""Samples elements randomly from a given list of indices for imbalanced dataset
Arguments:
indices (list, optional): a list of indices
num_samples (int, optional): number of samples to draw
callback_get_label func: a callback-like function which takes two arguments - dataset and index
"""
def __init__(self, dataset, indices=None, num_samples=None, callback_get_label=None):
# if indices is not provided,
# all elements in the dataset will be considered
self.indices = list(range(len(dataset))) \
if indices is None else indices
# define custom callback
self.callback_get_label = callback_get_label
# if num_samples is not provided,
# draw `len(indices)` samples in each iteration
self.num_samples = len(self.indices) \
if num_samples is None else num_samples
# distribution of classes in the dataset
label_to_count = {}
for idx in self.indices:
label = self._get_label(dataset, idx)
if label in label_to_count:
label_to_count[label] += 1
else:
label_to_count[label] = 1
# weight for each sample
weights = [1.0 / label_to_count[self._get_label(dataset, idx)]
for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def _get_label(self, dataset, idx):
if isinstance(dataset, torchvision.datasets.MNIST):
return dataset.train_labels[idx].item()
elif isinstance(dataset, torchvision.datasets.ImageFolder):
return dataset.imgs[idx][1]
elif isinstance(dataset, torch.utils.data.Subset):
return dataset.dataset.imgs[idx][1]
elif self.callback_get_label:
return self.callback_get_label(dataset, idx)
elif isinstance(dataset, BinColorDataset):
return dataset.dataset.imgs[idx][1]
else:
raise NotImplementedError
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(
self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
class | (Dataset):
def __init__(self, dataset, col_transform=None, bin_transform=None):
self.dataset = dataset
self.col_transform = col_transform
self.bin | BinColorDataset | identifier_name |
reaper-rush.rs | Greater => {
let local_minerals = mineral_fields
.iter()
.closer(11.0, base)
.map(|m| m.tag())
.collect::<Vec<u64>>();
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
local_minerals.contains(&target_tag)
|| (u.is_carrying_minerals() && target_tag == base.tag())
})
})
.iter()
.take(
(base.assigned_harvesters().unwrap() - base.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
}
}
// Distributing gas workers
self.units
.my
.gas_buildings
.iter()
.ready()
.filter(|g| g.vespene_contents().map_or(false, |vespene| vespene > 0))
.for_each(
|gas| match gas.assigned_harvesters().cmp(&gas.ideal_harvesters()) {
Ordering::Less => (0..(gas.ideal_harvesters().unwrap()
- gas.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_geysers.push(gas.clone());
}),
Ordering::Greater => {
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
target_tag == gas.tag()
|| (u.is_carrying_vespene()
&& target_tag == bases.closest(gas).unwrap().tag())
})
})
.iter()
.take(
(gas.assigned_harvesters().unwrap() - gas.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
},
);
// Distributing idle workers
let minerals_near_base = if idle_workers.len() > deficit_minings.len() + deficit_geysers.len() {
let minerals = mineral_fields.filter(|m| bases.iter().any(|base| base.is_closer(11.0, *m)));
if minerals.is_empty() {
None
} else {
Some(minerals)
}
} else {
None
};
for u in &idle_workers {
if let Some(closest) = deficit_geysers.closest(u) {
let tag = closest.tag();
deficit_geysers.remove(tag);
u.gather(tag, false);
} else if let Some(closest) = deficit_minings.closest(u) {
u.gather(
mineral_fields
.closer(11.0, closest)
.max(|m| m.mineral_contents().unwrap_or(0))
.unwrap()
.tag(),
false,
);
let tag = closest.tag();
deficit_minings.remove(tag);
} else if u.is_idle() {
if let Some(mineral) = minerals_near_base.as_ref().and_then(|ms| ms.closest(u)) {
u.gather(mineral.tag(), false);
}
}
}
}
fn get_builder(&self, pos: Point2, mineral_tags: &[u64]) -> Option<&Unit> {
self.units
.my
.workers
.iter()
.filter(|u| {
!(u.is_constructing()
|| u.is_returning() || u.is_carrying_resource()
|| (u.is_gathering() && u.target_tag().map_or(true, |tag| !mineral_tags.contains(&tag))))
})
.closest(pos)
}
fn build(&mut self) {
if self.minerals < 75 {
return;
}
let mineral_tags = self
.units
.mineral_fields
.iter()
.map(|u| u.tag())
.collect::<Vec<u64>>();
let main_base = self.start_location.towards(self.game_info.map_center, 8.0);
if self.counter().count(UnitTypeId::Refinery) < 2
&& self.counter().ordered().count(UnitTypeId::Refinery) == 0
&& self.can_afford(UnitTypeId::Refinery, false)
{
let start_location = self.start_location;
if let Some(geyser) = self.find_gas_placement(start_location) {
if let Some(builder) = self.get_builder(geyser.position(), &mineral_tags) {
builder.build_gas(geyser.tag(), false);
self.subtract_resources(UnitTypeId::Refinery, false);
}
}
}
if self.supply_left < 3
&& self.supply_cap < 200
&& self.counter().ordered().count(UnitTypeId::SupplyDepot) == 0
&& self.can_afford(UnitTypeId::SupplyDepot, false)
{
if let Some(location) =
self.find_placement(UnitTypeId::SupplyDepot, main_base, Default::default())
{
if let Some(builder) = self.get_builder(location, &mineral_tags) { | }
}
if self.counter().all().count(UnitTypeId::Barracks) < 4
&& self.can_afford(UnitTypeId::Barracks, false)
{
if let Some(location) = self.find_placement(
UnitTypeId::Barracks,
main_base,
PlacementOptions {
step: 4,
..Default::default()
},
) {
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::Barracks, location, false);
self.subtract_resources(UnitTypeId::Barracks, false);
}
}
}
}
fn train(&mut self) {
if self.minerals < 50 || self.supply_left == 0 {
return;
}
if self.supply_workers < 22 && self.can_afford(UnitTypeId::SCV, true) {
if let Some(cc) = self
.units
.my
.townhalls
.iter()
.find(|u| u.is_ready() && u.is_almost_idle())
{
cc.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
}
if self.can_afford(UnitTypeId::Reaper, true) {
if let Some(barracks) = self
.units
.my
.structures
.iter()
.find(|u| u.type_id() == UnitTypeId::Barracks && u.is_ready() && u.is_almost_idle())
{
barracks.train(UnitTypeId::Reaper, false);
self.subtract_resources(UnitTypeId::Reaper, true);
}
}
}
fn throw_mine(&self, reaper: &Unit, target: &Unit) -> bool {
if reaper.has_ability(AbilityId::KD8ChargeKD8Charge)
&& reaper.in_ability_cast_range(AbilityId::KD8ChargeKD8Charge, target, 0.0)
{
reaper.command(
AbilityId::KD8ChargeKD8Charge,
Target::Pos(target.position()),
false,
);
true
} else {
false
}
}
fn execute_micro(&mut self) {
// Lower ready depots
self.units
.my
.structures
.iter()
.of_type(UnitTypeId::SupplyDepot)
.ready()
.for_each(|s| s.use_ability(AbilityId::MorphSupplyDepotLower, false));
// Reapers micro
let reapers = self.units.my.units.of_type(UnitTypeId::Reaper);
if reapers.is_empty() {
return;
}
let targets = {
let ground_targets = self.units.enemy.all.ground();
let ground_attackers = ground_targets.filter(|e| e.can_attack_ground());
if ground_attackers.is_empty() {
ground_targets
} else {
ground_attackers
}
};
for u in &reapers {
let is_retreating = self.reapers_retreat.contains(&u.tag());
if is_retreating {
if u.health_percentage().unwrap() > 0.75 {
self.reapers_retreat.remove(&u.tag());
}
} else if u.health_percentage().unwrap() < 0.5 {
self.reapers_retreat.insert(u.tag());
}
match targets.closest(u) {
Some(closest) => {
if self.throw_mine(u, closest) {
return;
}
if is_retreating || u.on_cooldown() {
| builder.build(UnitTypeId::SupplyDepot, location, false);
self.subtract_resources(UnitTypeId::SupplyDepot, false);
return;
} | random_line_split |
reaper-rush.rs | => {
let local_minerals = mineral_fields
.iter()
.closer(11.0, base)
.map(|m| m.tag())
.collect::<Vec<u64>>();
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
local_minerals.contains(&target_tag)
|| (u.is_carrying_minerals() && target_tag == base.tag())
})
})
.iter()
.take(
(base.assigned_harvesters().unwrap() - base.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
}
}
// Distributing gas workers
self.units
.my
.gas_buildings
.iter()
.ready()
.filter(|g| g.vespene_contents().map_or(false, |vespene| vespene > 0))
.for_each(
|gas| match gas.assigned_harvesters().cmp(&gas.ideal_harvesters()) {
Ordering::Less => (0..(gas.ideal_harvesters().unwrap()
- gas.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_geysers.push(gas.clone());
}),
Ordering::Greater => {
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
target_tag == gas.tag()
|| (u.is_carrying_vespene()
&& target_tag == bases.closest(gas).unwrap().tag())
})
})
.iter()
.take(
(gas.assigned_harvesters().unwrap() - gas.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
},
);
// Distributing idle workers
let minerals_near_base = if idle_workers.len() > deficit_minings.len() + deficit_geysers.len() {
let minerals = mineral_fields.filter(|m| bases.iter().any(|base| base.is_closer(11.0, *m)));
if minerals.is_empty() {
None
} else {
Some(minerals)
}
} else {
None
};
for u in &idle_workers {
if let Some(closest) = deficit_geysers.closest(u) {
let tag = closest.tag();
deficit_geysers.remove(tag);
u.gather(tag, false);
} else if let Some(closest) = deficit_minings.closest(u) {
u.gather(
mineral_fields
.closer(11.0, closest)
.max(|m| m.mineral_contents().unwrap_or(0))
.unwrap()
.tag(),
false,
);
let tag = closest.tag();
deficit_minings.remove(tag);
} else if u.is_idle() {
if let Some(mineral) = minerals_near_base.as_ref().and_then(|ms| ms.closest(u)) {
u.gather(mineral.tag(), false);
}
}
}
}
fn get_builder(&self, pos: Point2, mineral_tags: &[u64]) -> Option<&Unit> {
self.units
.my
.workers
.iter()
.filter(|u| {
!(u.is_constructing()
|| u.is_returning() || u.is_carrying_resource()
|| (u.is_gathering() && u.target_tag().map_or(true, |tag| !mineral_tags.contains(&tag))))
})
.closest(pos)
}
fn build(&mut self) {
if self.minerals < 75 {
return;
}
let mineral_tags = self
.units
.mineral_fields
.iter()
.map(|u| u.tag())
.collect::<Vec<u64>>();
let main_base = self.start_location.towards(self.game_info.map_center, 8.0);
if self.counter().count(UnitTypeId::Refinery) < 2
&& self.counter().ordered().count(UnitTypeId::Refinery) == 0
&& self.can_afford(UnitTypeId::Refinery, false)
{
let start_location = self.start_location;
if let Some(geyser) = self.find_gas_placement(start_location) {
if let Some(builder) = self.get_builder(geyser.position(), &mineral_tags) {
builder.build_gas(geyser.tag(), false);
self.subtract_resources(UnitTypeId::Refinery, false);
}
}
}
if self.supply_left < 3
&& self.supply_cap < 200
&& self.counter().ordered().count(UnitTypeId::SupplyDepot) == 0
&& self.can_afford(UnitTypeId::SupplyDepot, false)
{
if let Some(location) =
self.find_placement(UnitTypeId::SupplyDepot, main_base, Default::default())
{
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::SupplyDepot, location, false);
self.subtract_resources(UnitTypeId::SupplyDepot, false);
return;
}
}
}
if self.counter().all().count(UnitTypeId::Barracks) < 4
&& self.can_afford(UnitTypeId::Barracks, false)
{
if let Some(location) = self.find_placement(
UnitTypeId::Barracks,
main_base,
PlacementOptions {
step: 4,
..Default::default()
},
) {
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::Barracks, location, false);
self.subtract_resources(UnitTypeId::Barracks, false);
}
}
}
}
fn | (&mut self) {
if self.minerals < 50 || self.supply_left == 0 {
return;
}
if self.supply_workers < 22 && self.can_afford(UnitTypeId::SCV, true) {
if let Some(cc) = self
.units
.my
.townhalls
.iter()
.find(|u| u.is_ready() && u.is_almost_idle())
{
cc.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
}
if self.can_afford(UnitTypeId::Reaper, true) {
if let Some(barracks) = self
.units
.my
.structures
.iter()
.find(|u| u.type_id() == UnitTypeId::Barracks && u.is_ready() && u.is_almost_idle())
{
barracks.train(UnitTypeId::Reaper, false);
self.subtract_resources(UnitTypeId::Reaper, true);
}
}
}
fn throw_mine(&self, reaper: &Unit, target: &Unit) -> bool {
if reaper.has_ability(AbilityId::KD8ChargeKD8Charge)
&& reaper.in_ability_cast_range(AbilityId::KD8ChargeKD8Charge, target, 0.0)
{
reaper.command(
AbilityId::KD8ChargeKD8Charge,
Target::Pos(target.position()),
false,
);
true
} else {
false
}
}
fn execute_micro(&mut self) {
// Lower ready depots
self.units
.my
.structures
.iter()
.of_type(UnitTypeId::SupplyDepot)
.ready()
.for_each(|s| s.use_ability(AbilityId::MorphSupplyDepotLower, false));
// Reapers micro
let reapers = self.units.my.units.of_type(UnitTypeId::Reaper);
if reapers.is_empty() {
return;
}
let targets = {
let ground_targets = self.units.enemy.all.ground();
let ground_attackers = ground_targets.filter(|e| e.can_attack_ground());
if ground_attackers.is_empty() {
ground_targets
} else {
ground_attackers
}
};
for u in &reapers {
let is_retreating = self.reapers_retreat.contains(&u.tag());
if is_retreating {
if u.health_percentage().unwrap() > 0.75 {
self.reapers_retreat.remove(&u.tag());
}
} else if u.health_percentage().unwrap() < 0.5 {
self.reapers_retreat.insert(u.tag());
}
match targets.closest(u) {
Some(closest) => {
if self.throw_mine(u, closest) {
return;
}
if is_retreating || u.on_cooldown() {
| train | identifier_name |
reaper-rush.rs | => {
let local_minerals = mineral_fields
.iter()
.closer(11.0, base)
.map(|m| m.tag())
.collect::<Vec<u64>>();
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
local_minerals.contains(&target_tag)
|| (u.is_carrying_minerals() && target_tag == base.tag())
})
})
.iter()
.take(
(base.assigned_harvesters().unwrap() - base.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
}
}
// Distributing gas workers
self.units
.my
.gas_buildings
.iter()
.ready()
.filter(|g| g.vespene_contents().map_or(false, |vespene| vespene > 0))
.for_each(
|gas| match gas.assigned_harvesters().cmp(&gas.ideal_harvesters()) {
Ordering::Less => (0..(gas.ideal_harvesters().unwrap()
- gas.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_geysers.push(gas.clone());
}),
Ordering::Greater => {
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
target_tag == gas.tag()
|| (u.is_carrying_vespene()
&& target_tag == bases.closest(gas).unwrap().tag())
})
})
.iter()
.take(
(gas.assigned_harvesters().unwrap() - gas.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
},
);
// Distributing idle workers
let minerals_near_base = if idle_workers.len() > deficit_minings.len() + deficit_geysers.len() | else {
None
};
for u in &idle_workers {
if let Some(closest) = deficit_geysers.closest(u) {
let tag = closest.tag();
deficit_geysers.remove(tag);
u.gather(tag, false);
} else if let Some(closest) = deficit_minings.closest(u) {
u.gather(
mineral_fields
.closer(11.0, closest)
.max(|m| m.mineral_contents().unwrap_or(0))
.unwrap()
.tag(),
false,
);
let tag = closest.tag();
deficit_minings.remove(tag);
} else if u.is_idle() {
if let Some(mineral) = minerals_near_base.as_ref().and_then(|ms| ms.closest(u)) {
u.gather(mineral.tag(), false);
}
}
}
}
fn get_builder(&self, pos: Point2, mineral_tags: &[u64]) -> Option<&Unit> {
self.units
.my
.workers
.iter()
.filter(|u| {
!(u.is_constructing()
|| u.is_returning() || u.is_carrying_resource()
|| (u.is_gathering() && u.target_tag().map_or(true, |tag| !mineral_tags.contains(&tag))))
})
.closest(pos)
}
fn build(&mut self) {
if self.minerals < 75 {
return;
}
let mineral_tags = self
.units
.mineral_fields
.iter()
.map(|u| u.tag())
.collect::<Vec<u64>>();
let main_base = self.start_location.towards(self.game_info.map_center, 8.0);
if self.counter().count(UnitTypeId::Refinery) < 2
&& self.counter().ordered().count(UnitTypeId::Refinery) == 0
&& self.can_afford(UnitTypeId::Refinery, false)
{
let start_location = self.start_location;
if let Some(geyser) = self.find_gas_placement(start_location) {
if let Some(builder) = self.get_builder(geyser.position(), &mineral_tags) {
builder.build_gas(geyser.tag(), false);
self.subtract_resources(UnitTypeId::Refinery, false);
}
}
}
if self.supply_left < 3
&& self.supply_cap < 200
&& self.counter().ordered().count(UnitTypeId::SupplyDepot) == 0
&& self.can_afford(UnitTypeId::SupplyDepot, false)
{
if let Some(location) =
self.find_placement(UnitTypeId::SupplyDepot, main_base, Default::default())
{
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::SupplyDepot, location, false);
self.subtract_resources(UnitTypeId::SupplyDepot, false);
return;
}
}
}
if self.counter().all().count(UnitTypeId::Barracks) < 4
&& self.can_afford(UnitTypeId::Barracks, false)
{
if let Some(location) = self.find_placement(
UnitTypeId::Barracks,
main_base,
PlacementOptions {
step: 4,
..Default::default()
},
) {
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::Barracks, location, false);
self.subtract_resources(UnitTypeId::Barracks, false);
}
}
}
}
fn train(&mut self) {
if self.minerals < 50 || self.supply_left == 0 {
return;
}
if self.supply_workers < 22 && self.can_afford(UnitTypeId::SCV, true) {
if let Some(cc) = self
.units
.my
.townhalls
.iter()
.find(|u| u.is_ready() && u.is_almost_idle())
{
cc.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
}
if self.can_afford(UnitTypeId::Reaper, true) {
if let Some(barracks) = self
.units
.my
.structures
.iter()
.find(|u| u.type_id() == UnitTypeId::Barracks && u.is_ready() && u.is_almost_idle())
{
barracks.train(UnitTypeId::Reaper, false);
self.subtract_resources(UnitTypeId::Reaper, true);
}
}
}
fn throw_mine(&self, reaper: &Unit, target: &Unit) -> bool {
if reaper.has_ability(AbilityId::KD8ChargeKD8Charge)
&& reaper.in_ability_cast_range(AbilityId::KD8ChargeKD8Charge, target, 0.0)
{
reaper.command(
AbilityId::KD8ChargeKD8Charge,
Target::Pos(target.position()),
false,
);
true
} else {
false
}
}
fn execute_micro(&mut self) {
// Lower ready depots
self.units
.my
.structures
.iter()
.of_type(UnitTypeId::SupplyDepot)
.ready()
.for_each(|s| s.use_ability(AbilityId::MorphSupplyDepotLower, false));
// Reapers micro
let reapers = self.units.my.units.of_type(UnitTypeId::Reaper);
if reapers.is_empty() {
return;
}
let targets = {
let ground_targets = self.units.enemy.all.ground();
let ground_attackers = ground_targets.filter(|e| e.can_attack_ground());
if ground_attackers.is_empty() {
ground_targets
} else {
ground_attackers
}
};
for u in &reapers {
let is_retreating = self.reapers_retreat.contains(&u.tag());
if is_retreating {
if u.health_percentage().unwrap() > 0.75 {
self.reapers_retreat.remove(&u.tag());
}
} else if u.health_percentage().unwrap() < 0.5 {
self.reapers_retreat.insert(u.tag());
}
match targets.closest(u) {
Some(closest) => {
if self.throw_mine(u, closest) {
return;
}
if is_retreating || u.on_cooldown() {
| {
let minerals = mineral_fields.filter(|m| bases.iter().any(|base| base.is_closer(11.0, *m)));
if minerals.is_empty() {
None
} else {
Some(minerals)
}
} | conditional_block |
reaper-rush.rs | Greater => {
let local_minerals = mineral_fields
.iter()
.closer(11.0, base)
.map(|m| m.tag())
.collect::<Vec<u64>>();
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
local_minerals.contains(&target_tag)
|| (u.is_carrying_minerals() && target_tag == base.tag())
})
})
.iter()
.take(
(base.assigned_harvesters().unwrap() - base.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
}
}
// Distributing gas workers
self.units
.my
.gas_buildings
.iter()
.ready()
.filter(|g| g.vespene_contents().map_or(false, |vespene| vespene > 0))
.for_each(
|gas| match gas.assigned_harvesters().cmp(&gas.ideal_harvesters()) {
Ordering::Less => (0..(gas.ideal_harvesters().unwrap()
- gas.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_geysers.push(gas.clone());
}),
Ordering::Greater => {
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
target_tag == gas.tag()
|| (u.is_carrying_vespene()
&& target_tag == bases.closest(gas).unwrap().tag())
})
})
.iter()
.take(
(gas.assigned_harvesters().unwrap() - gas.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
},
);
// Distributing idle workers
let minerals_near_base = if idle_workers.len() > deficit_minings.len() + deficit_geysers.len() {
let minerals = mineral_fields.filter(|m| bases.iter().any(|base| base.is_closer(11.0, *m)));
if minerals.is_empty() {
None
} else {
Some(minerals)
}
} else {
None
};
for u in &idle_workers {
if let Some(closest) = deficit_geysers.closest(u) {
let tag = closest.tag();
deficit_geysers.remove(tag);
u.gather(tag, false);
} else if let Some(closest) = deficit_minings.closest(u) {
u.gather(
mineral_fields
.closer(11.0, closest)
.max(|m| m.mineral_contents().unwrap_or(0))
.unwrap()
.tag(),
false,
);
let tag = closest.tag();
deficit_minings.remove(tag);
} else if u.is_idle() {
if let Some(mineral) = minerals_near_base.as_ref().and_then(|ms| ms.closest(u)) {
u.gather(mineral.tag(), false);
}
}
}
}
fn get_builder(&self, pos: Point2, mineral_tags: &[u64]) -> Option<&Unit> {
self.units
.my
.workers
.iter()
.filter(|u| {
!(u.is_constructing()
|| u.is_returning() || u.is_carrying_resource()
|| (u.is_gathering() && u.target_tag().map_or(true, |tag| !mineral_tags.contains(&tag))))
})
.closest(pos)
}
fn build(&mut self) {
if self.minerals < 75 {
return;
}
let mineral_tags = self
.units
.mineral_fields
.iter()
.map(|u| u.tag())
.collect::<Vec<u64>>();
let main_base = self.start_location.towards(self.game_info.map_center, 8.0);
if self.counter().count(UnitTypeId::Refinery) < 2
&& self.counter().ordered().count(UnitTypeId::Refinery) == 0
&& self.can_afford(UnitTypeId::Refinery, false)
{
let start_location = self.start_location;
if let Some(geyser) = self.find_gas_placement(start_location) {
if let Some(builder) = self.get_builder(geyser.position(), &mineral_tags) {
builder.build_gas(geyser.tag(), false);
self.subtract_resources(UnitTypeId::Refinery, false);
}
}
}
if self.supply_left < 3
&& self.supply_cap < 200
&& self.counter().ordered().count(UnitTypeId::SupplyDepot) == 0
&& self.can_afford(UnitTypeId::SupplyDepot, false)
{
if let Some(location) =
self.find_placement(UnitTypeId::SupplyDepot, main_base, Default::default())
{
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::SupplyDepot, location, false);
self.subtract_resources(UnitTypeId::SupplyDepot, false);
return;
}
}
}
if self.counter().all().count(UnitTypeId::Barracks) < 4
&& self.can_afford(UnitTypeId::Barracks, false)
{
if let Some(location) = self.find_placement(
UnitTypeId::Barracks,
main_base,
PlacementOptions {
step: 4,
..Default::default()
},
) {
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::Barracks, location, false);
self.subtract_resources(UnitTypeId::Barracks, false);
}
}
}
}
fn train(&mut self) | .units
.my
.structures
.iter()
.find(|u| u.type_id() == UnitTypeId::Barracks && u.is_ready() && u.is_almost_idle())
{
barracks.train(UnitTypeId::Reaper, false);
self.subtract_resources(UnitTypeId::Reaper, true);
}
}
}
fn throw_mine(&self, reaper: &Unit, target: &Unit) -> bool {
if reaper.has_ability(AbilityId::KD8ChargeKD8Charge)
&& reaper.in_ability_cast_range(AbilityId::KD8ChargeKD8Charge, target, 0.0)
{
reaper.command(
AbilityId::KD8ChargeKD8Charge,
Target::Pos(target.position()),
false,
);
true
} else {
false
}
}
fn execute_micro(&mut self) {
// Lower ready depots
self.units
.my
.structures
.iter()
.of_type(UnitTypeId::SupplyDepot)
.ready()
.for_each(|s| s.use_ability(AbilityId::MorphSupplyDepotLower, false));
// Reapers micro
let reapers = self.units.my.units.of_type(UnitTypeId::Reaper);
if reapers.is_empty() {
return;
}
let targets = {
let ground_targets = self.units.enemy.all.ground();
let ground_attackers = ground_targets.filter(|e| e.can_attack_ground());
if ground_attackers.is_empty() {
ground_targets
} else {
ground_attackers
}
};
for u in &reapers {
let is_retreating = self.reapers_retreat.contains(&u.tag());
if is_retreating {
if u.health_percentage().unwrap() > 0.75 {
self.reapers_retreat.remove(&u.tag());
}
} else if u.health_percentage().unwrap() < 0.5 {
self.reapers_retreat.insert(u.tag());
}
match targets.closest(u) {
Some(closest) => {
if self.throw_mine(u, closest) {
return;
}
if is_retreating || u.on_cooldown() {
| {
if self.minerals < 50 || self.supply_left == 0 {
return;
}
if self.supply_workers < 22 && self.can_afford(UnitTypeId::SCV, true) {
if let Some(cc) = self
.units
.my
.townhalls
.iter()
.find(|u| u.is_ready() && u.is_almost_idle())
{
cc.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
}
if self.can_afford(UnitTypeId::Reaper, true) {
if let Some(barracks) = self | identifier_body |
p_test.go | 1"},
{test.Variable("function"), "function"},
}
for _, v := range variables {
if name := parser.identifierName(v.source); name != v.expected {
t.Errorf("'%s' expected, '%s' found.\n", v.expected, name)
}
}
nop := test.Nop()
if l := nodeList(nop); l[0] != nop {
t.Error("Nothing should happen to passed node.")
}
if l := nodeList(nil); len(l) != 0 {
t.Error("Nil cannot create non-empty statement list.")
}
list := test.List([]node.Node{nop})
if l := nodeList(list); l[0] != nop {
t.Error("Nothing should happen to the nodes passed in the node list.")
}
}
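// functionDef checks that funcDef translates PHP function names (renaming reserved words such as "func") and maps declared return types to the expected Go types.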
func functionDef(t *testing.T) {
t.Helper()
parser := fileParser{
parser: &parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
},
}
// This tests which name and return type will
// be used. lang.NewFunc(string) is tested
// elsewhere.
f, _ := parser.funcDef(nil)
if f != nil {
t.Error("From nil nothing can be created.")
}
funcDefs := []struct {
f *stmt.Function
name string
ret string
}{
{test.Func("f"), "f", lang.Void},
{test.Func("function"), "function", lang.Void},
{test.Func("func"), "func1", lang.Void},
}
for _, f := range funcDefs {
def, _ := parser.funcDef(f.f)
if def.Name != f.name {
t.Errorf("'%s' expected, '%s' found.\n", f.name, def.Name)
}
if !def.Return.Equal(f.ret) {
t.Errorf("'%s' expected, '%s' found.\n", f.name, def.Return)
}
}
// f = mainDef(parser.file, false)
// if f.Name != "main" {
// t.Errorf("'%s' expected, '%s' found.\n", "main", f.Name)
// }
// if !f.Return.Equal(lang.Void) {
// t.Errorf("'%s' expected, '%s' found.\n", lang.Void, f.Return)
// }
// It used to be empty string, but because
// funcDef translates the function name,
// it had to be changed to something
// meaningful.
placeholderFunction := test.Func("placeholderFunction")
returnTypes := []struct {
typ *name.Name
expected string
}{
{test.Name("void"), lang.Void},
{test.Name("int"), lang.Int},
{test.Name("string"), lang.String},
}
for _, rt := range returnTypes {
placeholderFunction.ReturnType = rt.typ
f, _ := parser.funcDef(placeholderFunction)
if !f.Return.Equal(rt.expected) {
t.Errorf("'%s' expected, '%s' found.\n", rt.expected, f.Return)
}
}
}
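// testBinaryOp checks that arithmetic operators produce int results while comparison operators produce bool.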
func testBinaryOp(t *testing.T) {
t.Helper()
left := test.Int("1")
right := test.Int("2")
cases := []struct {
op string
ret string
}{
{"+", lang.Int},
{"-", lang.Int},
{"*", lang.Int},
{"<", lang.Bool},
{"<=", lang.Bool},
{">=", lang.Bool},
{">", lang.Bool},
{"==", lang.Bool},
}
parser := fileParser{parser: &parser{}}
for _, c := range cases {
expr := parser.expression(nil, test.BinaryOp(left, c.op, right))
op, ok := expr.(*lang.BinaryOp)
if !ok {
t.Fatal("Expected binary operation, something else found.")
}
if op.Operation != c.op {
t.Errorf("'%s' expected, '%s' found.", c.op, op.Operation)
}
if !op.Type().Equal(c.ret) {
t.Errorf("'%s' expected, '%s' found.", c.ret, op.Type())
}
}
}
func unaryOp(t *testing.T) {
t.Helper()
parent := lang.NewCode(nil)
parser := fileParser{parser: &parser{}}
for _, n := range []node.Node{
test.Plus(test.String(`"test"`)),
test.Plus(test.String(`""`)),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Str); !ok {
t.Error("lang.Str expected.")
}
if typ := e.Type(); !typ.Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Int("0")),
test.Plus(test.Int("2")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Number); !ok {
t.Error("lang.Number expected.")
}
if typ := e.Type(); !typ.Equal(lang.Int) {
t.Errorf("'int' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Float("0")),
test.Plus(test.Float("1.0")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Float); !ok {
t.Error("lang.Float expected.")
}
if typ := e.Type(); !typ.Equal(lang.Float64) {
t.Errorf("'float' expected, '%s' found.", typ)
}
}
for _, c := range []struct {
n node.Node
t string
}{
{test.Minus(test.String(`"test"`)), lang.String},
{test.Minus(test.String(`""`)), lang.String},
{test.Minus(test.Int("0")), lang.Int},
{test.Minus(test.Int("2")), lang.Int},
{test.Minus(test.Float("0")), lang.Float64},
{test.Minus(test.Float("1.0")), lang.Float64},
} {
e := parser.expression(parent, c.n)
u, ok := e.(*lang.UnaryMinus)
if !ok {
t.Fatal("lang.UnaryMinus expected.")
}
if u.Parent() != parent {
t.Error("Parent not set.")
}
if u.Expr.Parent() != u {
t.Error("Parent not set.")
}
if typ := u.Type(); !typ.Equal(c.t) {
t.Errorf("'%s' expected, '%s' found.", c.t, typ)
}
}
}
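// testStatements verifies that inline HTML becomes a fmt.Print call with a single raw-string argument.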
func testStatements(t *testing.T) {
t.Helper()
gc := lang.NewGlobalContext()
funcs := NewFunc(gc)
parser := fileParser{
parser: &parser{
gc: gc,
funcs: funcs,
},
}
parser.file = lang.NewFile(gc, "dummy", false, true)
parser.funcs = &FileFunc{funcs, parser.file}
b := lang.NewCode(nil)
html := test.HTML("<html></html>")
parser.createFunction(b, []node.Node{html})
if len(b.Statements) != 1 {
t.Fatal("Wrong amount of statements in the block.")
}
h, ok := b.Statements[0].(*lang.FunctionCall)
if !ok {
t.Fatal("That one statement should be function call.")
}
if h.Parent() != b {
t.Error("Parent not set.")
}
if !h.Return.Equal(lang.Void) {
t.Errorf("'void' expected, '%s' found.", h.Return)
}
if h.Name != "fmt.Print" {
t.Errorf("'fmt.Print' expected, '%s' found.", h.Name)
}
if len(h.Args) != 1 {
t.Fatal("'fmt.Print' should have only one argument.")
}
a, ok := h.Args[0].(*lang.Str)
if !ok {
t.Fatal("That one argument should be string.")
}
if a.Parent() != h {
t.Error("Parent not set.")
}
if a.Value != "`<html></html>`" {
t.Errorf("'`<html></html>`' expected, '%s' found", a.Value)
}
if !a.Type().Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", a.Type())
}
}
func testMain(tt *testing.T) { | tests := []struct {
source []byte
expected string
}{
// Sandbox
{
source: []byte(`<?php
function fc() {
$a = 1 + 2;
}
`),
expected: `func fc() {
a := 1 + 2
}`,
},
// examples/04.php
{
source: []byte(`<?php
function fc() {
$a = 2 + 3 + 4 * 2;
echo $a * $a | tt.Helper()
| random_line_split |
p_test.go | "},
{test.Variable("function"), "function"},
}
for _, v := range variables {
if name := parser.identifierName(v.source); name != v.expected {
t.Errorf("'%s' expected, '%s' found.\n", v.expected, name)
}
}
nop := test.Nop()
if l := nodeList(nop); l[0] != nop {
t.Error("Nothing should happen to passed node.")
}
if l := nodeList(nil); len(l) != 0 {
t.Error("Nil cannot create non-empty statement list.")
}
list := test.List([]node.Node{nop})
if l := nodeList(list); l[0] != nop {
t.Error("Nothing should happen to the nodes passed in the node list.")
}
}
func functionDef(t *testing.T) {
t.Helper()
parser := fileParser{
parser: &parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
},
}
// This tests which name and return type will
// be used. lang.NewFunc(string) is tested
// elsewhere.
f, _ := parser.funcDef(nil)
if f != nil {
t.Error("From nil nothing can be created.")
}
funcDefs := []struct {
f *stmt.Function
name string
ret string
}{
{test.Func("f"), "f", lang.Void},
{test.Func("function"), "function", lang.Void},
{test.Func("func"), "func1", lang.Void},
}
for _, f := range funcDefs {
def, _ := parser.funcDef(f.f)
if def.Name != f.name {
t.Errorf("'%s' expected, '%s' found.\n", f.name, def.Name)
}
if !def.Return.Equal(f.ret) {
t.Errorf("'%s' expected, '%s' found.\n", f.name, def.Return)
}
}
// f = mainDef(parser.file, false)
// if f.Name != "main" {
// t.Errorf("'%s' expected, '%s' found.\n", "main", f.Name)
// }
// if !f.Return.Equal(lang.Void) {
// t.Errorf("'%s' expected, '%s' found.\n", lang.Void, f.Return)
// }
// It used to be empty string, but because
// funcDef translates the function name,
// it had to be changed to something
// meaningful.
placeholderFunction := test.Func("placeholderFunction")
returnTypes := []struct {
typ *name.Name
expected string
}{
{test.Name("void"), lang.Void},
{test.Name("int"), lang.Int},
{test.Name("string"), lang.String},
}
for _, rt := range returnTypes {
placeholderFunction.ReturnType = rt.typ
f, _ := parser.funcDef(placeholderFunction)
if !f.Return.Equal(rt.expected) {
t.Errorf("'%s' expected, '%s' found.\n", rt.expected, f.Return)
}
}
}
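// testBinaryOp checks that arithmetic operators produce int results while comparison operators produce bool.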
func testBinaryOp(t *testing.T) {
t.Helper()
left := test.Int("1")
right := test.Int("2")
cases := []struct {
op string
ret string
}{
{"+", lang.Int},
{"-", lang.Int},
{"*", lang.Int},
{"<", lang.Bool},
{"<=", lang.Bool},
{">=", lang.Bool},
{">", lang.Bool},
{"==", lang.Bool},
}
parser := fileParser{parser: &parser{}}
for _, c := range cases |
}
func unaryOp(t *testing.T) {
t.Helper()
parent := lang.NewCode(nil)
parser := fileParser{parser: &parser{}}
for _, n := range []node.Node{
test.Plus(test.String(`"test"`)),
test.Plus(test.String(`""`)),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Str); !ok {
t.Error("lang.Str expected.")
}
if typ := e.Type(); !typ.Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Int("0")),
test.Plus(test.Int("2")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Number); !ok {
t.Error("lang.Number expected.")
}
if typ := e.Type(); !typ.Equal(lang.Int) {
t.Errorf("'int' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Float("0")),
test.Plus(test.Float("1.0")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Float); !ok {
t.Error("lang.Float expected.")
}
if typ := e.Type(); !typ.Equal(lang.Float64) {
t.Errorf("'float' expected, '%s' found.", typ)
}
}
for _, c := range []struct {
n node.Node
t string
}{
{test.Minus(test.String(`"test"`)), lang.String},
{test.Minus(test.String(`""`)), lang.String},
{test.Minus(test.Int("0")), lang.Int},
{test.Minus(test.Int("2")), lang.Int},
{test.Minus(test.Float("0")), lang.Float64},
{test.Minus(test.Float("1.0")), lang.Float64},
} {
e := parser.expression(parent, c.n)
u, ok := e.(*lang.UnaryMinus)
if !ok {
t.Fatal("lang.UnaryMinus expected.")
}
if u.Parent() != parent {
t.Error("Parent not set.")
}
if u.Expr.Parent() != u {
t.Error("Parent not set.")
}
if typ := u.Type(); !typ.Equal(c.t) {
t.Errorf("'%s' expected, '%s' found.", c.t, typ)
}
}
}
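// testStatements verifies that inline HTML becomes a fmt.Print call with a single raw-string argument.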
func testStatements(t *testing.T) {
t.Helper()
gc := lang.NewGlobalContext()
funcs := NewFunc(gc)
parser := fileParser{
parser: &parser{
gc: gc,
funcs: funcs,
},
}
parser.file = lang.NewFile(gc, "dummy", false, true)
parser.funcs = &FileFunc{funcs, parser.file}
b := lang.NewCode(nil)
html := test.HTML("<html></html>")
parser.createFunction(b, []node.Node{html})
if len(b.Statements) != 1 {
t.Fatal("Wrong amount of statements in the block.")
}
h, ok := b.Statements[0].(*lang.FunctionCall)
if !ok {
t.Fatal("That one statement should be function call.")
}
if h.Parent() != b {
t.Error("Parent not set.")
}
if !h.Return.Equal(lang.Void) {
t.Errorf("'void' expected, '%s' found.", h.Return)
}
if h.Name != "fmt.Print" {
t.Errorf("'fmt.Print' expected, '%s' found.", h.Name)
}
if len(h.Args) != 1 {
t.Fatal("'fmt.Print' should have only one argument.")
}
a, ok := h.Args[0].(*lang.Str)
if !ok {
t.Fatal("That one argument should be string.")
}
if a.Parent() != h {
t.Error("Parent not set.")
}
if a.Value != "`<html></html>`" {
t.Errorf("'`<html></html>`' expected, '%s' found", a.Value)
}
if !a.Type().Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", a.Type())
}
}
func testMain(tt *testing.T) {
tt.Helper()
tests := []struct {
source []byte
expected string
}{
// Sandbox
{
source: []byte(`<?php
function fc() {
$a = 1 + 2;
}
`),
expected: `func fc() {
a := 1 + 2
}`,
},
// examples/04.php
{
source: []byte(`<?php
function fc() {
$a = 2 + 3 + 4 * 2;
echo $a * $ | {
expr := parser.expression(nil, test.BinaryOp(left, c.op, right))
op, ok := expr.(*lang.BinaryOp)
if !ok {
t.Fatal("Expected binary operation, something else found.")
}
if op.Operation != c.op {
t.Errorf("'%s' expected, '%s' found.", c.op, op.Operation)
}
if !op.Type().Equal(c.ret) {
t.Errorf("'%s' expected, '%s' found.", c.ret, op.Type())
}
} | conditional_block |
p_test.go | 1"},
{test.Variable("function"), "function"},
}
for _, v := range variables {
if name := parser.identifierName(v.source); name != v.expected {
t.Errorf("'%s' expected, '%s' found.\n", v.expected, name)
}
}
nop := test.Nop()
if l := nodeList(nop); l[0] != nop {
t.Error("Nothing should happen to passed node.")
}
if l := nodeList(nil); len(l) != 0 {
t.Error("Nil cannot create non-empty statement list.")
}
list := test.List([]node.Node{nop})
if l := nodeList(list); l[0] != nop {
t.Error("Nothing should happen to the nodes passed in the node list.")
}
}
func functionDef(t *testing.T) {
t.Helper()
parser := fileParser{
parser: &parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
},
}
// This tests which name and return type will
// be used. lang.NewFunc(string) is tested
// elsewhere.
f, _ := parser.funcDef(nil)
if f != nil {
t.Error("From nil nothing can be created.")
}
funcDefs := []struct {
f *stmt.Function
name string
ret string
}{
{test.Func("f"), "f", lang.Void},
{test.Func("function"), "function", lang.Void},
{test.Func("func"), "func1", lang.Void},
}
for _, f := range funcDefs {
def, _ := parser.funcDef(f.f)
if def.Name != f.name {
t.Errorf("'%s' expected, '%s' found.\n", f.name, def.Name)
}
if !def.Return.Equal(f.ret) {
t.Errorf("'%s' expected, '%s' found.\n", f.name, def.Return)
}
}
// f = mainDef(parser.file, false)
// if f.Name != "main" {
// t.Errorf("'%s' expected, '%s' found.\n", "main", f.Name)
// }
// if !f.Return.Equal(lang.Void) {
// t.Errorf("'%s' expected, '%s' found.\n", lang.Void, f.Return)
// }
// It used to be empty string, but because
// funcDef translates the function name,
// it had to be changed to something
// meaningful.
placeholderFunction := test.Func("placeholderFunction")
returnTypes := []struct {
typ *name.Name
expected string
}{
{test.Name("void"), lang.Void},
{test.Name("int"), lang.Int},
{test.Name("string"), lang.String},
}
for _, rt := range returnTypes {
placeholderFunction.ReturnType = rt.typ
f, _ := parser.funcDef(placeholderFunction)
if !f.Return.Equal(rt.expected) {
t.Errorf("'%s' expected, '%s' found.\n", rt.expected, f.Return)
}
}
}
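// testBinaryOp checks that arithmetic operators produce int results while comparison operators produce bool.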
func testBinaryOp(t *testing.T) {
t.Helper()
left := test.Int("1")
right := test.Int("2")
cases := []struct {
op string
ret string
}{
{"+", lang.Int},
{"-", lang.Int},
{"*", lang.Int},
{"<", lang.Bool},
{"<=", lang.Bool},
{">=", lang.Bool},
{">", lang.Bool},
{"==", lang.Bool},
}
parser := fileParser{parser: &parser{}}
for _, c := range cases {
expr := parser.expression(nil, test.BinaryOp(left, c.op, right))
op, ok := expr.(*lang.BinaryOp)
if !ok {
t.Fatal("Expected binary operation, something else found.")
}
if op.Operation != c.op {
t.Errorf("'%s' expected, '%s' found.", c.op, op.Operation)
}
if !op.Type().Equal(c.ret) {
t.Errorf("'%s' expected, '%s' found.", c.ret, op.Type())
}
}
}
func unaryOp(t *testing.T) {
t.Helper()
parent := lang.NewCode(nil)
parser := fileParser{parser: &parser{}}
for _, n := range []node.Node{
test.Plus(test.String(`"test"`)),
test.Plus(test.String(`""`)),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Str); !ok {
t.Error("lang.Str expected.")
}
if typ := e.Type(); !typ.Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Int("0")),
test.Plus(test.Int("2")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Number); !ok {
t.Error("lang.Number expected.")
}
if typ := e.Type(); !typ.Equal(lang.Int) {
t.Errorf("'int' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Float("0")),
test.Plus(test.Float("1.0")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Float); !ok {
t.Error("lang.Float expected.")
}
if typ := e.Type(); !typ.Equal(lang.Float64) {
t.Errorf("'float' expected, '%s' found.", typ)
}
}
for _, c := range []struct {
n node.Node
t string
}{
{test.Minus(test.String(`"test"`)), lang.String},
{test.Minus(test.String(`""`)), lang.String},
{test.Minus(test.Int("0")), lang.Int},
{test.Minus(test.Int("2")), lang.Int},
{test.Minus(test.Float("0")), lang.Float64},
{test.Minus(test.Float("1.0")), lang.Float64},
} {
e := parser.expression(parent, c.n)
u, ok := e.(*lang.UnaryMinus)
if !ok {
t.Fatal("lang.UnaryMinus expected.")
}
if u.Parent() != parent {
t.Error("Parent not set.")
}
if u.Expr.Parent() != u {
t.Error("Parent not set.")
}
if typ := u.Type(); !typ.Equal(c.t) {
t.Errorf("'%s' expected, '%s' found.", c.t, typ)
}
}
}
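// testStatements verifies that inline HTML becomes a fmt.Print call with a single raw-string argument.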
func testStatements(t *testing.T) {
t.Helper()
gc := lang.NewGlobalContext()
funcs := NewFunc(gc)
parser := fileParser{
parser: &parser{
gc: gc,
funcs: funcs,
},
}
parser.file = lang.NewFile(gc, "dummy", false, true)
parser.funcs = &FileFunc{funcs, parser.file}
b := lang.NewCode(nil)
html := test.HTML("<html></html>")
parser.createFunction(b, []node.Node{html})
if len(b.Statements) != 1 {
t.Fatal("Wrong amount of statements in the block.")
}
h, ok := b.Statements[0].(*lang.FunctionCall)
if !ok {
t.Fatal("That one statement should be function call.")
}
if h.Parent() != b {
t.Error("Parent not set.")
}
if !h.Return.Equal(lang.Void) {
t.Errorf("'void' expected, '%s' found.", h.Return)
}
if h.Name != "fmt.Print" {
t.Errorf("'fmt.Print' expected, '%s' found.", h.Name)
}
if len(h.Args) != 1 {
t.Fatal("'fmt.Print' should have only one argument.")
}
a, ok := h.Args[0].(*lang.Str)
if !ok {
t.Fatal("That one argument should be string.")
}
if a.Parent() != h {
t.Error("Parent not set.")
}
if a.Value != "`<html></html>`" {
t.Errorf("'`<html></html>`' expected, '%s' found", a.Value)
}
if !a.Type().Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", a.Type())
}
}
func | (tt *testing.T) {
tt.Helper()
tests := []struct {
source []byte
expected string
}{
// Sandbox
{
source: []byte(`<?php
function fc() {
$a = 1 + 2;
}
`),
expected: `func fc() {
a := 1 + 2
}`,
},
// examples/04.php
{
source: []byte(`<?php
function fc() {
$a = 2 + 3 + 4 * 2;
echo $a * $ | testMain | identifier_name |
p_test.go | func testBinaryOp(t *testing.T) {
t.Helper()
left := test.Int("1")
right := test.Int("2")
cases := []struct {
op string
ret string
}{
{"+", lang.Int},
{"-", lang.Int},
{"*", lang.Int},
{"<", lang.Bool},
{"<=", lang.Bool},
{">=", lang.Bool},
{">", lang.Bool},
{"==", lang.Bool},
}
parser := fileParser{parser: &parser{}}
for _, c := range cases {
expr := parser.expression(nil, test.BinaryOp(left, c.op, right))
op, ok := expr.(*lang.BinaryOp)
if !ok {
t.Fatal("Expected binary operation, something else found.")
}
if op.Operation != c.op {
t.Errorf("'%s' expected, '%s' found.", c.op, op.Operation)
}
if !op.Type().Equal(c.ret) {
t.Errorf("'%s' expected, '%s' found.", c.ret, op.Type())
}
}
}
func unaryOp(t *testing.T) {
t.Helper()
parent := lang.NewCode(nil)
parser := fileParser{parser: &parser{}}
for _, n := range []node.Node{
test.Plus(test.String(`"test"`)),
test.Plus(test.String(`""`)),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Str); !ok {
t.Error("lang.Str expected.")
}
if typ := e.Type(); !typ.Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Int("0")),
test.Plus(test.Int("2")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Number); !ok {
t.Error("lang.Number expected.")
}
if typ := e.Type(); !typ.Equal(lang.Int) {
t.Errorf("'int' expected, '%s' found.", typ)
}
}
for _, n := range []node.Node{
test.Plus(test.Float("0")),
test.Plus(test.Float("1.0")),
} {
e := parser.expression(parent, n)
if e.Parent() != parent {
t.Error("Parent not set.")
}
if _, ok := e.(*lang.Float); !ok {
t.Error("lang.Float expected.")
}
if typ := e.Type(); !typ.Equal(lang.Float64) {
t.Errorf("'float' expected, '%s' found.", typ)
}
}
for _, c := range []struct {
n node.Node
t string
}{
{test.Minus(test.String(`"test"`)), lang.String},
{test.Minus(test.String(`""`)), lang.String},
{test.Minus(test.Int("0")), lang.Int},
{test.Minus(test.Int("2")), lang.Int},
{test.Minus(test.Float("0")), lang.Float64},
{test.Minus(test.Float("1.0")), lang.Float64},
} {
e := parser.expression(parent, c.n)
u, ok := e.(*lang.UnaryMinus)
if !ok {
t.Fatal("lang.UnaryMinus expected.")
}
if u.Parent() != parent {
t.Error("Parent not set.")
}
if u.Expr.Parent() != u {
t.Error("Parent not set.")
}
if typ := u.Type(); !typ.Equal(c.t) {
t.Errorf("'%s' expected, '%s' found.", c.t, typ)
}
}
}
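// testStatements verifies that inline HTML becomes a fmt.Print call with a single raw-string argument.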
func testStatements(t *testing.T) {
t.Helper()
gc := lang.NewGlobalContext()
funcs := NewFunc(gc)
parser := fileParser{
parser: &parser{
gc: gc,
funcs: funcs,
},
}
parser.file = lang.NewFile(gc, "dummy", false, true)
parser.funcs = &FileFunc{funcs, parser.file}
b := lang.NewCode(nil)
html := test.HTML("<html></html>")
parser.createFunction(b, []node.Node{html})
if len(b.Statements) != 1 {
t.Fatal("Wrong amount of statements in the block.")
}
h, ok := b.Statements[0].(*lang.FunctionCall)
if !ok {
t.Fatal("That one statement should be function call.")
}
if h.Parent() != b {
t.Error("Parent not set.")
}
if !h.Return.Equal(lang.Void) {
t.Errorf("'void' expected, '%s' found.", h.Return)
}
if h.Name != "fmt.Print" {
t.Errorf("'fmt.Print' expected, '%s' found.", h.Name)
}
if len(h.Args) != 1 {
t.Fatal("'fmt.Print' should have only one argument.")
}
a, ok := h.Args[0].(*lang.Str)
if !ok {
t.Fatal("That one argument should be string.")
}
if a.Parent() != h {
t.Error("Parent not set.")
}
if a.Value != "`<html></html>`" {
t.Errorf("'`<html></html>`' expected, '%s' found", a.Value)
}
if !a.Type().Equal(lang.String) {
t.Errorf("'string' expected, '%s' found.", a.Type())
}
}
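// testMain runs small PHP snippets through the whole parser and compares the generated fc() function against the expected Go source.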
func testMain(tt *testing.T) {
tt.Helper()
tests := []struct {
source []byte
expected string
}{
// Sandbox
{
source: []byte(`<?php
function fc() {
$a = 1 + 2;
}
`),
expected: `func fc() {
a := 1 + 2
}`,
},
// examples/04.php
{
source: []byte(`<?php
function fc() {
$a = 2 + 3 + 4 * 2;
echo $a * $a;
}
`),
expected: `func fc() {
a := 2 + 3 + 4 * 2
fmt.Print(a * a)
}`,
},
// examples/05.php
{
source: []byte(`<?php
function fc() {
{
{
$a = "0";
// Added to compile it in Go. This var is not used.
echo $a;
}
$a = 1;
echo $a;
}
}
`),
expected: `func fc() {
{
{
a := "0"
fmt.Print(a)
}
a := 1
fmt.Print(a)
}
}`,
},
// examples/06.php
{
source: []byte(`<?php
function fc() {
{
$a = 0;
}
$a++;
echo $a;
}
`),
expected: `func fc() {
var a int
{
a = 0
}
a++
fmt.Print(a)
}`,
},
// examples/07.php
{
source: []byte(`<?php
function fc() {
$a = 0;
{
$a = "1";
echo $a;
}
echo $a;
$a = 2;
echo $a;
}
`),
expected: `func fc() {
var a interface{}
a = 0
{
a = "1"
fmt.Print(a.(string))
}
fmt.Print(a.(string))
a = 2
fmt.Print(a.(int))
}`,
},
}
for _, t := range tests {
parser := parser{
translator: NewNameTranslator(),
functionTranslator: NewFunctionTranslator(),
}
out := parser.Run(parsePHP(t.source), "dummy", false)
for _, f := range out.Files {
if f.Name == "fc" {
main := f.String()
compare(tt, t.expected, main)
}
}
}
}
func parsePHP(source []byte) *node.Root {
parser := php7.NewParser(source, "")
parser.Parse()
return parser.GetRootNode().(*node.Root)
}
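// compare checks the expected and generated Go code line by line, ignoring leading tabs and blank lines.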
func compare(t *testing.T, ref, out string) | {
r := strings.Split(ref, "\n")
o := strings.Split(out, "\n")
i, j := 0, 0
for i < len(r) && j < len(o) {
c := true
s1 := strings.TrimLeft(r[i], "\t")
if s1 == "" {
i++
c = false
}
s2 := strings.TrimLeft(o[j], "\t")
if s2 == "" {
j++
c = false
}
if !c {
continue
}
if s1 != s2 { | identifier_body | |
repo_polling.go | )
continue
}
for _, p := range proj {
if RunningPollers.Workers[p.Key] == nil {
w := NewWorker(p.Key)
RunningPollers.mutex.Lock()
RunningPollers.Workers[p.Key] = w
RunningPollers.mutex.Unlock()
var pollerhasStop = func() {
RunningPollers.mutex.Lock()
delete(RunningPollers.Workers, w.ProjectKey)
RunningPollers.mutex.Unlock()
}
ok, quit, err := w.Poll()
if err != nil {
log.Warning("Polling> Unable to lauch worker %s: %s", p.Key, err)
continue
}
if !ok {
pollerhasStop()
}
go func() {
<-quit
pollerhasStop()
}()
}
}
time.Sleep(1 * time.Minute)
}
}
//Poll initiate a poller
func (w *Worker) Poll() (bool, chan bool, error) {
//Check database connection
db := database.DB()
if db == nil {
return false, nil, errors.New("Database is unavailable")
}
pollers, err := poller.LoadEnabledPollers(db) |
var quit chan bool
var atLeastOne bool
for i := range pollers {
p := &pollers[i]
b, _ := repositoriesmanager.CheckApplicationIsAttached(db, p.Name, w.ProjectKey, p.Application.Name)
if !b || p.Application.RepositoriesManager == nil || p.Application.RepositoryFullname == "" {
continue
}
if !p.Application.RepositoriesManager.PollingSupported {
log.Info("Polling is not supported by %s\n", p.Name)
continue
}
log.Info("Starting poller on %s %s %s", p.Name, p.Application.Name, p.Pipeline.Name)
atLeastOne = true
quit = make(chan bool)
go w.poll(p.Application.RepositoriesManager, p.Application.ID, p.Pipeline.ID, quit)
time.Sleep(2 * time.Minute)
}
if !atLeastOne {
return false, nil, nil
}
return true, quit, nil
}
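// poll watches a single application/pipeline pair; a shared cache key acts as a lock so only one instance polls the repository at a time.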
func (w *Worker) poll(rm *sdk.RepositoriesManager, appID, pipID int64, quit chan bool) {
delay := time.Duration(60.0)
r := rand.New(rand.NewSource(time.Now().UnixNano()))
var mayIWork string
log.Debug("Polling> Start on appID=%d, pipID=%d\n", appID, pipID)
for RunningPollers.Workers[w.ProjectKey] != nil {
//Check database connection
db := database.DB()
if db == nil {
time.Sleep(60 * time.Second)
continue
}
//Loading poller from database
p, err := poller.LoadPollerByApplicationAndPipeline(db, appID, pipID)
if err != nil {
log.Warning("Polling> Unable to load poller appID=%d pipID=%d: %s", appID, pipID, err)
break
}
//Check if poller is still enabled
if !p.Enabled {
log.Warning("Polling> Poller %s is disabled %s", p.Application.RepositoryFullname, err)
break
}
k := cache.Key("reposmanager", "polling", w.ProjectKey, p.Application.Name, p.Pipeline.Name, p.Name)
//Get from cache to know if someone is polling the repo
cache.Get(k, &mayIWork)
//If nobody is polling it
if mayIWork == "" {
log.Info("Polling> Polling repository %s for %s/%s\n", p.Application.RepositoryFullname, w.ProjectKey, p.Application.Name)
cache.SetWithTTL(k, "true", 300)
e := &WorkerExecution{
Status: "Running",
Execution: time.Now(),
}
if err := insertExecution(db, &p.Application, &p.Pipeline, e); err != nil {
log.Warning("Polling> Unable to save execution : %s", err)
}
//get the client for the repositories manager
client, err := repositoriesmanager.AuthorizedClient(db, w.ProjectKey, rm.Name)
if err != nil {
log.Warning("Polling> Unable to get client for %s %s : %s\n", w.ProjectKey, rm.Name, err)
break
}
var events []sdk.VCSPushEvent
events, delay, err = client.PushEvents(p.Application.RepositoryFullname, p.DateCreation)
s, err := triggerPipelines(db, w.ProjectKey, rm, p, events)
if err != nil {
log.Warning("Polling> Unable to trigger pipeline %s for repository %s\n", p.Pipeline.Name, p.Application.RepositoryFullname)
break
}
e.Status = s
e.Events = events
if err := updateExecution(db, e); err != nil {
log.Warning("Polling> Unable to update execution : %s", err)
}
//Wait for the delay
time.Sleep(delay * time.Second)
cache.Delete(k)
}
//Wait for sometime between 0 and 10 seconds
time.Sleep(time.Duration(r.Float64()*10) * time.Second)
}
log.Debug("Polling> End\n")
quit <- true
}
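// triggerPipelines starts one pipeline build per push event, each in its own transaction, and returns a human-readable status summary.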
func triggerPipelines(db *sql.DB, projectKey string, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, events []sdk.VCSPushEvent) (string, error) {
status := ""
for _, event := range events {
projectData, err := project.LoadProjectByPipelineID(db, poller.Pipeline.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project for pipeline %s: %s\n", poller.Pipeline.Name, err)
return "Error", err
}
projectsVar, err := project.GetAllVariableInProject(db, projectData.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project variable: %s\n", err)
return "Error", err
}
projectData.Variable = projectsVar
//begin a tx
tx, err := db.Begin()
if err != nil {
return "Error", err
}
ok, err := TriggerPipeline(tx, rm, poller, event, projectData)
if err != nil {
log.Warning("Polling.triggerPipelines> cannot trigger pipeline %d: %s\n", poller.Pipeline.ID, err)
tx.Rollback()
return "Error", err
}
// commit the tx
if err := tx.Commit(); err != nil {
log.Critical("Polling.triggerPipelines> Cannot commit tx; %s\n", err)
return "Error", err
}
if ok {
log.Debug("Polling.triggerPipelines> Triggered %s/%s/%s", projectKey, poller.Application.RepositoryFullname, event.Branch)
status = fmt.Sprintf("%s Pipeline %s triggered on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
} else {
log.Info("Polling.triggerPipelines> Did not trigger %s/%s/%s\n", projectKey, poller.Application.RepositoryFullname, event.Branch.ID)
status = fmt.Sprintf("%s Pipeline %s skipped on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
}
}
return status, nil
}
// TriggerPipeline linked to received hook
func TriggerPipeline(tx *sql.Tx, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, e sdk.VCSPushEvent, projectData *sdk.Project) (bool, error) {
client, err := repositoriesmanager.AuthorizedClient(tx, projectData.Key, rm.Name)
if err != nil {
return false, err
}
// Create pipeline args
var args []sdk.Parameter
args = append(args, sdk.Parameter{
Name: "git.branch",
Value: e.Branch.ID,
})
args = append(args, sdk.Parameter{
Name: "git.hash",
Value: e.Commit.Hash,
})
args = append(args, sdk.Parameter{
Name: "git.author",
Value: e.Commit.Author.Name,
})
args = append(args, sdk.Parameter{
Name: "git.repository",
Value: poller.Application.RepositoryFullname,
})
args = append(args, sdk.Parameter{
Name: "git.project",
Value: strings.Split(poller.Application.RepositoryFullname, "/")[0],
})
repo, _ := client.RepoByFullname(poller.Application.RepositoryFullname)
if repo.SSHCloneURL != "" {
args = append(args, sdk.Parameter{
Name: "git.url",
Value: repo.SSHCloneURL,
})
}
// Load pipeline Argument
parameters, err := pipeline.GetAllParametersInPipeline(tx, poller.Pipeline.ID)
if err != nil {
return false, err
}
poller.Pipeline.Parameter = parameters
applicationPipelineArgs, err := application.GetAll | if err != nil {
return false, nil, err
} | random_line_split |
repo_polling.go | .Name, p.Pipeline.Name, p.Name)
//Get from cache to know if someone is polling the repo
cache.Get(k, &mayIWork)
//If nobody is polling it
if mayIWork == "" {
log.Info("Polling> Polling repository %s for %s/%s\n", p.Application.RepositoryFullname, w.ProjectKey, p.Application.Name)
cache.SetWithTTL(k, "true", 300)
e := &WorkerExecution{
Status: "Running",
Execution: time.Now(),
}
if err := insertExecution(db, &p.Application, &p.Pipeline, e); err != nil {
log.Warning("Polling> Unable to save execution : %s", err)
}
//get the client for the repositories manager
client, err := repositoriesmanager.AuthorizedClient(db, w.ProjectKey, rm.Name)
if err != nil {
log.Warning("Polling> Unable to get client for %s %s : %s\n", w.ProjectKey, rm.Name, err)
break
}
var events []sdk.VCSPushEvent
events, delay, err = client.PushEvents(p.Application.RepositoryFullname, p.DateCreation)
s, err := triggerPipelines(db, w.ProjectKey, rm, p, events)
if err != nil {
log.Warning("Polling> Unable to trigger pipeline %s for repository %s\n", p.Pipeline.Name, p.Application.RepositoryFullname)
break
}
e.Status = s
e.Events = events
if err := updateExecution(db, e); err != nil {
log.Warning("Polling> Unable to update execution : %s", err)
}
//Wait for the delay
time.Sleep(delay * time.Second)
cache.Delete(k)
}
//Wait for sometime between 0 and 10 seconds
time.Sleep(time.Duration(r.Float64()*10) * time.Second)
}
log.Debug("Polling> End\n")
quit <- true
}
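// triggerPipelines starts one pipeline build per push event, each in its own transaction, and returns a human-readable status summary.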
func triggerPipelines(db *sql.DB, projectKey string, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, events []sdk.VCSPushEvent) (string, error) {
status := ""
for _, event := range events {
projectData, err := project.LoadProjectByPipelineID(db, poller.Pipeline.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project for pipeline %s: %s\n", poller.Pipeline.Name, err)
return "Error", err
}
projectsVar, err := project.GetAllVariableInProject(db, projectData.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project variable: %s\n", err)
return "Error", err
}
projectData.Variable = projectsVar
//begin a tx
tx, err := db.Begin()
if err != nil {
return "Error", err
}
ok, err := TriggerPipeline(tx, rm, poller, event, projectData)
if err != nil {
log.Warning("Polling.triggerPipelines> cannot trigger pipeline %d: %s\n", poller.Pipeline.ID, err)
tx.Rollback()
return "Error", err
}
// commit the tx
if err := tx.Commit(); err != nil {
log.Critical("Polling.triggerPipelines> Cannot commit tx; %s\n", err)
return "Error", err
}
if ok {
log.Debug("Polling.triggerPipelines> Triggered %s/%s/%s", projectKey, poller.Application.RepositoryFullname, event.Branch)
status = fmt.Sprintf("%s Pipeline %s triggered on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
} else {
log.Info("Polling.triggerPipelines> Did not trigger %s/%s/%s\n", projectKey, poller.Application.RepositoryFullname, event.Branch.ID)
status = fmt.Sprintf("%s Pipeline %s skipped on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
}
}
return status, nil
}
// TriggerPipeline linked to received hook
func TriggerPipeline(tx *sql.Tx, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, e sdk.VCSPushEvent, projectData *sdk.Project) (bool, error) {
client, err := repositoriesmanager.AuthorizedClient(tx, projectData.Key, rm.Name)
if err != nil {
return false, err
}
// Create pipeline args
var args []sdk.Parameter
args = append(args, sdk.Parameter{
Name: "git.branch",
Value: e.Branch.ID,
})
args = append(args, sdk.Parameter{
Name: "git.hash",
Value: e.Commit.Hash,
})
args = append(args, sdk.Parameter{
Name: "git.author",
Value: e.Commit.Author.Name,
})
args = append(args, sdk.Parameter{
Name: "git.repository",
Value: poller.Application.RepositoryFullname,
})
args = append(args, sdk.Parameter{
Name: "git.project",
Value: strings.Split(poller.Application.RepositoryFullname, "/")[0],
})
repo, _ := client.RepoByFullname(poller.Application.RepositoryFullname)
if repo.SSHCloneURL != "" {
args = append(args, sdk.Parameter{
Name: "git.url",
Value: repo.SSHCloneURL,
})
}
// Load pipeline Argument
parameters, err := pipeline.GetAllParametersInPipeline(tx, poller.Pipeline.ID)
if err != nil {
return false, err
}
poller.Pipeline.Parameter = parameters
applicationPipelineArgs, err := application.GetAllPipelineParam(tx, poller.Application.ID, poller.Pipeline.ID)
if err != nil {
return false, err
}
trigger := sdk.PipelineBuildTrigger{
ManualTrigger: false,
VCSChangesBranch: e.Branch.ID,
VCSChangesHash: e.Commit.Hash,
VCSChangesAuthor: e.Commit.Author.DisplayName,
}
// Get commit message to check if we have to skip the build
match, err := regexp.Match(".*\\[ci skip\\].*|.*\\[cd skip\\].*", []byte(e.Commit.Message))
if err != nil {
log.Warning("polling> Cannot check %s/%s for commit %s by %s : %s (%s)\n", projectData.Key, poller.Application.Name, trigger.VCSChangesHash, trigger.VCSChangesAuthor, e.Commit.Message, err)
}
if match {
log.Debug("polling> Skipping build of %s/%s for commit %s by %s\n", projectData.Key, poller.Application.Name, trigger.VCSChangesHash, trigger.VCSChangesAuthor)
return false, nil
}
if b, err := pipeline.BuildExists(tx, poller.Application.ID, poller.Pipeline.ID, sdk.DefaultEnv.ID, &trigger); err != nil || b {
if err != nil {
log.Warning("Polling> Error checking existing build : %s", err)
}
return false, nil
}
_, err = pipeline.InsertPipelineBuild(tx, projectData, &poller.Pipeline, &poller.Application, applicationPipelineArgs, args, &sdk.DefaultEnv, 0, trigger)
if err != nil {
return false, err
}
return true, nil
}
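// insertExecution stores a new poller execution row, serializing its events as JSON, and fills in the generated ID.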
func insertExecution(db database.QueryExecuter, app *sdk.Application, pip *sdk.Pipeline, e *WorkerExecution) error {
query := `
insert into poller_execution (application_id, pipeline_id, execution_date, status, data)
values($1, $2, $3, $4, $5)
returning id
`
data, _ := json.Marshal(e.Events)
if err := db.QueryRow(query, app.ID, pip.ID, e.Execution, e.Status, data).Scan(&e.ID); err != nil {
return err
}
return nil
}
func updateExecution(db database.QueryExecuter, e *WorkerExecution) error {
query := `
update poller_execution set status = $2, data = $3 where id = $1
`
data, _ := json.Marshal(e.Events)
if _, err := db.Exec(query, e.ID, e.Status, data); err != nil {
return err
}
return nil
}
func deleteExecution(db database.QueryExecuter, e *WorkerExecution) error {
query := `
delete from poller_execution where id = $1
`
if _, err := db.Exec(query, e.ID); err != nil {
return err
}
return nil
}
//ExecutionCleaner is globale goroutine to remove all old polling traces
func ExecutionCleaner() {
for | {
db := database.DB()
if db == nil {
time.Sleep(30 * time.Minute)
continue
}
execs, _ := LoadExecutions(db)
for i := range execs {
tenDaysAGo := time.Now().Add(-10 * 24 * time.Hour)
if execs[i].Execution.Before(tenDaysAGo) {
deleteExecution(db, &execs[i])
}
}
time.Sleep(1 * time.Hour)
} | conditional_block | |
repo_polling.go | ID, pipID)
for RunningPollers.Workers[w.ProjectKey] != nil {
//Check database connection
db := database.DB()
if db == nil {
time.Sleep(60 * time.Second)
continue
}
//Loading poller from database
p, err := poller.LoadPollerByApplicationAndPipeline(db, appID, pipID)
if err != nil {
log.Warning("Polling> Unable to load poller appID=%d pipID=%d: %s", appID, pipID, err)
break
}
//Check if poller is still enabled
if !p.Enabled {
log.Warning("Polling> Poller %s is disabled %s", p.Application.RepositoryFullname, err)
break
}
k := cache.Key("reposmanager", "polling", w.ProjectKey, p.Application.Name, p.Pipeline.Name, p.Name)
//Get from cache to know if someone is polling the repo
cache.Get(k, &mayIWork)
//If nobody is polling it
if mayIWork == "" {
log.Info("Polling> Polling repository %s for %s/%s\n", p.Application.RepositoryFullname, w.ProjectKey, p.Application.Name)
cache.SetWithTTL(k, "true", 300)
e := &WorkerExecution{
Status: "Running",
Execution: time.Now(),
}
if err := insertExecution(db, &p.Application, &p.Pipeline, e); err != nil {
log.Warning("Polling> Unable to save execution : %s", err)
}
//get the client for the repositories manager
client, err := repositoriesmanager.AuthorizedClient(db, w.ProjectKey, rm.Name)
if err != nil {
log.Warning("Polling> Unable to get client for %s %s : %s\n", w.ProjectKey, rm.Name, err)
break
}
var events []sdk.VCSPushEvent
events, delay, err = client.PushEvents(p.Application.RepositoryFullname, p.DateCreation)
s, err := triggerPipelines(db, w.ProjectKey, rm, p, events)
if err != nil {
log.Warning("Polling> Unable to trigger pipeline %s for repository %s\n", p.Pipeline.Name, p.Application.RepositoryFullname)
break
}
e.Status = s
e.Events = events
if err := updateExecution(db, e); err != nil {
log.Warning("Polling> Unable to update execution : %s", err)
}
//Wait for the delay
time.Sleep(delay * time.Second)
cache.Delete(k)
}
//Wait for sometime between 0 and 10 seconds
time.Sleep(time.Duration(r.Float64()*10) * time.Second)
}
log.Debug("Polling> End\n")
quit <- true
}
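// triggerPipelines starts one pipeline build per push event, each in its own transaction, and returns a human-readable status summary.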
func triggerPipelines(db *sql.DB, projectKey string, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, events []sdk.VCSPushEvent) (string, error) {
status := ""
for _, event := range events {
projectData, err := project.LoadProjectByPipelineID(db, poller.Pipeline.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project for pipeline %s: %s\n", poller.Pipeline.Name, err)
return "Error", err
}
projectsVar, err := project.GetAllVariableInProject(db, projectData.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project variable: %s\n", err)
return "Error", err
}
projectData.Variable = projectsVar
//begin a tx
tx, err := db.Begin()
if err != nil {
return "Error", err
}
ok, err := TriggerPipeline(tx, rm, poller, event, projectData)
if err != nil {
log.Warning("Polling.triggerPipelines> cannot trigger pipeline %d: %s\n", poller.Pipeline.ID, err)
tx.Rollback()
return "Error", err
}
// commit the tx
if err := tx.Commit(); err != nil {
log.Critical("Polling.triggerPipelines> Cannot commit tx; %s\n", err)
return "Error", err
}
if ok {
log.Debug("Polling.triggerPipelines> Triggered %s/%s/%s", projectKey, poller.Application.RepositoryFullname, event.Branch)
status = fmt.Sprintf("%s Pipeline %s triggered on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
} else {
log.Info("Polling.triggerPipelines> Did not trigger %s/%s/%s\n", projectKey, poller.Application.RepositoryFullname, event.Branch.ID)
status = fmt.Sprintf("%s Pipeline %s skipped on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
}
}
return status, nil
}
// TriggerPipeline linked to received hook
func TriggerPipeline(tx *sql.Tx, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, e sdk.VCSPushEvent, projectData *sdk.Project) (bool, error) {
client, err := repositoriesmanager.AuthorizedClient(tx, projectData.Key, rm.Name)
if err != nil {
return false, err
}
// Create pipeline args
var args []sdk.Parameter
args = append(args, sdk.Parameter{
Name: "git.branch",
Value: e.Branch.ID,
})
args = append(args, sdk.Parameter{
Name: "git.hash",
Value: e.Commit.Hash,
})
args = append(args, sdk.Parameter{
Name: "git.author",
Value: e.Commit.Author.Name,
})
args = append(args, sdk.Parameter{
Name: "git.repository",
Value: poller.Application.RepositoryFullname,
})
args = append(args, sdk.Parameter{
Name: "git.project",
Value: strings.Split(poller.Application.RepositoryFullname, "/")[0],
})
repo, _ := client.RepoByFullname(poller.Application.RepositoryFullname)
if repo.SSHCloneURL != "" {
args = append(args, sdk.Parameter{
Name: "git.url",
Value: repo.SSHCloneURL,
})
}
// Load pipeline Argument
parameters, err := pipeline.GetAllParametersInPipeline(tx, poller.Pipeline.ID)
if err != nil {
return false, err
}
poller.Pipeline.Parameter = parameters
applicationPipelineArgs, err := application.GetAllPipelineParam(tx, poller.Application.ID, poller.Pipeline.ID)
if err != nil {
return false, err
}
trigger := sdk.PipelineBuildTrigger{
ManualTrigger: false,
VCSChangesBranch: e.Branch.ID,
VCSChangesHash: e.Commit.Hash,
VCSChangesAuthor: e.Commit.Author.DisplayName,
}
// Get commit message to check if we have to skip the build
match, err := regexp.Match(".*\\[ci skip\\].*|.*\\[cd skip\\].*", []byte(e.Commit.Message))
if err != nil {
log.Warning("polling> Cannot check %s/%s for commit %s by %s : %s (%s)\n", projectData.Key, poller.Application.Name, trigger.VCSChangesHash, trigger.VCSChangesAuthor, e.Commit.Message, err)
}
if match {
log.Debug("polling> Skipping build of %s/%s for commit %s by %s\n", projectData.Key, poller.Application.Name, trigger.VCSChangesHash, trigger.VCSChangesAuthor)
return false, nil
}
if b, err := pipeline.BuildExists(tx, poller.Application.ID, poller.Pipeline.ID, sdk.DefaultEnv.ID, &trigger); err != nil || b {
if err != nil {
log.Warning("Polling> Error checking existing build : %s", err)
}
return false, nil
}
_, err = pipeline.InsertPipelineBuild(tx, projectData, &poller.Pipeline, &poller.Application, applicationPipelineArgs, args, &sdk.DefaultEnv, 0, trigger)
if err != nil {
return false, err
}
return true, nil
}
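// insertExecution stores a new poller execution row, serializing its events as JSON, and fills in the generated ID.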
func insertExecution(db database.QueryExecuter, app *sdk.Application, pip *sdk.Pipeline, e *WorkerExecution) error {
query := `
insert into poller_execution (application_id, pipeline_id, execution_date, status, data)
values($1, $2, $3, $4, $5)
returning id
`
data, _ := json.Marshal(e.Events)
if err := db.QueryRow(query, app.ID, pip.ID, e.Execution, e.Status, data).Scan(&e.ID); err != nil {
return err
}
return nil
}
func updateExecution(db database.QueryExecuter, e *WorkerExecution) error | {
query := `
update poller_execution set status = $2, data = $3 where id = $1
`
data, _ := json.Marshal(e.Events)
if _, err := db.Exec(query, e.ID, e.Status, data); err != nil {
return err
}
return nil
} | identifier_body | |
repo_polling.go | )
continue
}
for _, p := range proj {
if RunningPollers.Workers[p.Key] == nil {
w := NewWorker(p.Key)
RunningPollers.mutex.Lock()
RunningPollers.Workers[p.Key] = w
RunningPollers.mutex.Unlock()
var pollerhasStop = func() {
RunningPollers.mutex.Lock()
delete(RunningPollers.Workers, w.ProjectKey)
RunningPollers.mutex.Unlock()
}
ok, quit, err := w.Poll()
if err != nil {
log.Warning("Polling> Unable to lauch worker %s: %s", p.Key, err)
continue
}
if !ok {
pollerhasStop()
}
go func() {
<-quit
pollerhasStop()
}()
}
}
time.Sleep(1 * time.Minute)
}
}
//Poll initiate a poller
func (w *Worker) Poll() (bool, chan bool, error) {
//Check database connection
db := database.DB()
if db == nil {
return false, nil, errors.New("Database is unavailable")
}
pollers, err := poller.LoadEnabledPollers(db)
if err != nil {
return false, nil, err
}
var quit chan bool
var atLeastOne bool
for i := range pollers {
p := &pollers[i]
b, _ := repositoriesmanager.CheckApplicationIsAttached(db, p.Name, w.ProjectKey, p.Application.Name)
if !b || p.Application.RepositoriesManager == nil || p.Application.RepositoryFullname == "" {
continue
}
if !p.Application.RepositoriesManager.PollingSupported {
log.Info("Polling is not supported by %s\n", p.Name)
continue
}
log.Info("Starting poller on %s %s %s", p.Name, p.Application.Name, p.Pipeline.Name)
atLeastOne = true
quit = make(chan bool)
go w.poll(p.Application.RepositoriesManager, p.Application.ID, p.Pipeline.ID, quit)
time.Sleep(2 * time.Minute)
}
if !atLeastOne {
return false, nil, nil
}
return true, quit, nil
}
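// poll watches a single application/pipeline pair; a shared cache key acts as a lock so only one instance polls the repository at a time.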
func (w *Worker) poll(rm *sdk.RepositoriesManager, appID, pipID int64, quit chan bool) {
delay := time.Duration(60.0)
r := rand.New(rand.NewSource(time.Now().UnixNano()))
var mayIWork string
log.Debug("Polling> Start on appID=%d, pipID=%d\n", appID, pipID)
for RunningPollers.Workers[w.ProjectKey] != nil {
//Check database connection
db := database.DB()
if db == nil {
time.Sleep(60 * time.Second)
continue
}
//Loading poller from database
p, err := poller.LoadPollerByApplicationAndPipeline(db, appID, pipID)
if err != nil {
log.Warning("Polling> Unable to load poller appID=%d pipID=%d: %s", appID, pipID, err)
break
}
//Check if poller is still enabled
if !p.Enabled {
log.Warning("Polling> Poller %s is disabled %s", p.Application.RepositoryFullname, err)
break
}
k := cache.Key("reposmanager", "polling", w.ProjectKey, p.Application.Name, p.Pipeline.Name, p.Name)
//Get from cache to know if someone is polling the repo
cache.Get(k, &mayIWork)
//If nobody is polling it
if mayIWork == "" {
log.Info("Polling> Polling repository %s for %s/%s\n", p.Application.RepositoryFullname, w.ProjectKey, p.Application.Name)
cache.SetWithTTL(k, "true", 300)
e := &WorkerExecution{
Status: "Running",
Execution: time.Now(),
}
if err := insertExecution(db, &p.Application, &p.Pipeline, e); err != nil {
log.Warning("Polling> Unable to save execution : %s", err)
}
//get the client for the repositories manager
client, err := repositoriesmanager.AuthorizedClient(db, w.ProjectKey, rm.Name)
if err != nil {
log.Warning("Polling> Unable to get client for %s %s : %s\n", w.ProjectKey, rm.Name, err)
break
}
var events []sdk.VCSPushEvent
events, delay, err = client.PushEvents(p.Application.RepositoryFullname, p.DateCreation)
s, err := triggerPipelines(db, w.ProjectKey, rm, p, events)
if err != nil {
log.Warning("Polling> Unable to trigger pipeline %s for repository %s\n", p.Pipeline.Name, p.Application.RepositoryFullname)
break
}
e.Status = s
e.Events = events
if err := updateExecution(db, e); err != nil {
log.Warning("Polling> Unable to update execution : %s", err)
}
//Wait for the delay
time.Sleep(delay * time.Second)
cache.Delete(k)
}
//Wait for sometime between 0 and 10 seconds
time.Sleep(time.Duration(r.Float64()*10) * time.Second)
}
log.Debug("Polling> End\n")
quit <- true
}
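// triggerPipelines starts one pipeline build per push event, each in its own transaction, and returns a human-readable status summary.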
func triggerPipelines(db *sql.DB, projectKey string, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, events []sdk.VCSPushEvent) (string, error) {
status := ""
for _, event := range events {
projectData, err := project.LoadProjectByPipelineID(db, poller.Pipeline.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project for pipeline %s: %s\n", poller.Pipeline.Name, err)
return "Error", err
}
projectsVar, err := project.GetAllVariableInProject(db, projectData.ID)
if err != nil {
log.Warning("Polling.triggerPipelines> Cannot load project variable: %s\n", err)
return "Error", err
}
projectData.Variable = projectsVar
//begin a tx
tx, err := db.Begin()
if err != nil {
return "Error", err
}
ok, err := TriggerPipeline(tx, rm, poller, event, projectData)
if err != nil {
log.Warning("Polling.triggerPipelines> cannot trigger pipeline %d: %s\n", poller.Pipeline.ID, err)
tx.Rollback()
return "Error", err
}
// commit the tx
if err := tx.Commit(); err != nil {
log.Critical("Polling.triggerPipelines> Cannot commit tx; %s\n", err)
return "Error", err
}
if ok {
log.Debug("Polling.triggerPipelines> Triggered %s/%s/%s", projectKey, poller.Application.RepositoryFullname, event.Branch)
status = fmt.Sprintf("%s Pipeline %s triggered on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
} else {
log.Info("Polling.triggerPipelines> Did not trigger %s/%s/%s\n", projectKey, poller.Application.RepositoryFullname, event.Branch.ID)
status = fmt.Sprintf("%s Pipeline %s skipped on %s (%s)", status, poller.Pipeline.Name, event.Branch.DisplayID, event.Commit.Hash)
}
}
return status, nil
}
// TriggerPipeline linked to received hook
func | (tx *sql.Tx, rm *sdk.RepositoriesManager, poller *sdk.RepositoryPoller, e sdk.VCSPushEvent, projectData *sdk.Project) (bool, error) {
client, err := repositoriesmanager.AuthorizedClient(tx, projectData.Key, rm.Name)
if err != nil {
return false, err
}
// Create pipeline args
var args []sdk.Parameter
args = append(args, sdk.Parameter{
Name: "git.branch",
Value: e.Branch.ID,
})
args = append(args, sdk.Parameter{
Name: "git.hash",
Value: e.Commit.Hash,
})
args = append(args, sdk.Parameter{
Name: "git.author",
Value: e.Commit.Author.Name,
})
args = append(args, sdk.Parameter{
Name: "git.repository",
Value: poller.Application.RepositoryFullname,
})
args = append(args, sdk.Parameter{
Name: "git.project",
Value: strings.Split(poller.Application.RepositoryFullname, "/")[0],
})
repo, _ := client.RepoByFullname(poller.Application.RepositoryFullname)
if repo.SSHCloneURL != "" {
args = append(args, sdk.Parameter{
Name: "git.url",
Value: repo.SSHCloneURL,
})
}
// Load pipeline Argument
parameters, err := pipeline.GetAllParametersInPipeline(tx, poller.Pipeline.ID)
if err != nil {
return false, err
}
poller.Pipeline.Parameter = parameters
applicationPipelineArgs, err := application.GetAll | TriggerPipeline | identifier_name |
keycap.js | this.selectedKey.topView.viewbox = "0 0 " + this.selectedKey.topView.body.width + " " + this.selectedKey.topView.body.height
this.updateSnapPoints()
},
updateText: function(text) {
this.surfaces[this.currentView].text.value = text
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
var newWidth = document.querySelector('.moveableText').style.width
var newHeight = document.querySelector('.moveableText').style.height
interact('.moveableText').options.drag.snap.targets = calcSnapTargets(surface, newWidth, newHeight)
},
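// shadeBlend blends a hex or rgb color toward white (p > 0) or black (p < 0); used to derive the darker key body and lighter stroke shades.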
shadeBlend: function(p,c0,c1) {
var n=p<0?p*-1:p,u=Math.round,w=parseInt;
if(c0.length>7){
var f=c0.split(","),t=(c1?c1:p<0?"rgb(0,0,0)":"rgb(255,255,255)").split(","),R=w(f[0].slice(4)),G=w(f[1]),B=w(f[2]);
return "rgb("+(u((w(t[0].slice(4))-R)*n)+R)+","+(u((w(t[1])-G)*n)+G)+","+(u((w(t[2])-B)*n)+B)+")"
}else{
var f=w(c0.slice(1),16),t=w((c1?c1:p<0?"#000000":"#FFFFFF").slice(1),16),R1=f>>16,G1=f>>8&0x00FF,B1=f&0x0000FF;
return "#"+(0x1000000+(u(((t>>16)-R1)*n)+R1)*0x10000+(u(((t>>8&0x00FF)-G1)*n)+G1)*0x100+(u(((t&0x0000FF)-B1)*n)+B1)).toString(16).slice(1)
}
},
// ugly, must be a better way
generatePreviews: function() {
var svgns = "http://www.w3.org/2000/svg"
var allSides = []
for (var surface in this.surfaces) {
var self = this
var promise = new Promise(function(resolve, reject) {
var newSvg = document.createElementNS(svgns, "svg");
newSvg.setAttribute('width', 260)
newSvg.setAttribute('height', 210)
newSvg.setAttribute('preserveAspectRatio', 'xMidYMid meet')
newSvg.setAttribute('viewBox', self.selectedKey.sides.topView.viewbox)
if (surface == 'topView'){
var body = document.createElementNS(svgns, 'rect');
body.setAttribute('class','keySurface')
body.setAttribute('x', self.selectedKey.sides.topView.body.x)
body.setAttribute('y',self.selectedKey.sides.topView.body.y)
body.setAttribute('width',self.selectedKey.sides.topView.body.width)
body.setAttribute('height',self.selectedKey.sides.topView.body.height)
body.setAttribute('rx',self.selectedKey.sides.topView.body.rx)
body.setAttribute('ry',self.selectedKey.sides.topView.body.ry)
body.setAttribute('fill',self.shadeBlend(-0.2,self.selectedColor.color))
var face = document.createElementNS(svgns, 'rect');
face.setAttribute('class','restrictRect')
face.setAttribute('x', self.selectedKey.sides.topView.face.x)
face.setAttribute('y',self.selectedKey.sides.topView.face.y)
face.setAttribute('width',self.selectedKey.sides.topView.face.width)
face.setAttribute('height',self.selectedKey.sides.topView.face.height)
face.setAttribute('rx',self.selectedKey.sides.topView.face.rx)
face.setAttribute('ry',self.selectedKey.sides.topView.face.ry)
face.setAttribute('fill',self.selectedColor.color)
face.setAttribute('stroke',self.shadeBlend(0.065,self.selectedColor.color))
face.setAttribute('stroke-width','1px')
newSvg.appendChild(body)
newSvg.appendChild(face)
} else {
var path = document.createElementNS(svgns, 'path')
path.setAttribute('stroke-width','0.5px')
path.setAttribute('stroke',self.shadeBlend(-0.2,self.selectedColor.color))
path.setAttribute('fill',self.selectedColor.color)
path.setAttribute('class','keySurface')
path.setAttribute('d',self.selectedKey.sides[surface].path)
newSvg.appendChild(path)
}
if (self.surfaces[surface].img.value != '' || self.surfaces[surface].text.value != ''){
var img = new Image()
img.width = self.surfaces[surface].img.width
img.height = self.surfaces[surface].img.height
img.src = self.surfaces[surface].img.value
var thisSurface = surface
img.onload = function () {
var svg_xml = (new XMLSerializer()).serializeToString(newSvg);
canvg('canvas', svg_xml, {useCORS: true});
var context = canvas.getContext('2d');
console.log(img.width)
console.log(img.height)
context.drawImage(img, self.surfaces[thisSurface].img.x, self.surfaces[thisSurface].img.y, self.surfaces[thisSurface].img.width,self.surfaces[thisSurface].img.height);
self.surfaces[thisSurface].preview = canvas.toDataURL('image/png')
resolve()
}
//console.log(this.surfaces[surface].img)
//var img = document.getElementsByClassName('moveableImg')[0]
} else {
var svg_xml = (new XMLSerializer()).serializeToString(newSvg);
canvg('canvas', svg_xml, {useCORS: true});
self.surfaces[surface].preview = canvas.toDataURL('image/png')
resolve()
}
})
allSides.push(promise)
}
return Promise.all(allSides)
},
previewImg: function() {
var file = document.querySelector('input[type=file]').files[0]; //sames as here
var label = document.querySelector( '.inputfile' ).nextElementSibling
var target = this.surfaces[this.currentView].img
var reader = new FileReader();
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
var width = document.querySelector('.moveableImg').width
reader.onloadend = function () {
target.value = reader.result
Vue.nextTick(function () {
var image = document.querySelector('#keyImage')
image.onload = function() {
target.width = image.width
target.height = image.height
}
})
}
if (file) {
target.filename = file.name
reader.readAsDataURL(file); //reads the data as a URL
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface,width,width)
} else {
target.value = "";
}
}
}
}).$mount('#app')
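// calcSnapTargets returns a 3x3 grid of snap points (corners, edge midpoints and center) inside the key face, offset by half the element size.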
function calcSnapTargets(surface, width, height){
var left = surface.left+(width / 2)
var center = surface.left + (surface.width / 2)
var right = surface.left + surface.width - (width / 2)
var top = surface.top + (height / 2)
var middle = surface.top + (surface.height / 2)
var bottom = surface.top + surface.height - (height / 2)
var snapTargets = [
{x:left, y:top},
{x:center,y:top},
{x:right,y:top},
{x:left, y:middle},
{x:center,y:middle},
{x:right,y:middle},
{x:left, y:bottom},
{x:center,y:bottom},
{x:right,y:bottom},
]
return snapTargets
}
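// dragMoveListener keeps the model for the active image or text in sync with interact.js drag deltas.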
function dragMoveListener (event) {
var target = event.target
if (target.className == "moveableImg"){
var node = customKey.surfaces[customKey.currentView].img
} else if (target.className == "moveableText") {
var node = customKey.surfaces[customKey.currentView].text
}
  node.x = (parseFloat(node.x) || 0) + event.dx;
  node.y = (parseFloat(node.y) || 0) + event.dy;
}
// this is used later in the resizing and gesture demos
window.dragMoveListener = dragMoveListener;
function addInteractive() | {
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
interact('.moveableText')
.draggable({
onmove: window.dragMoveListener,
snap: {
targets: [{}],
range:20,
relativePoints: [
//{ x: 0 , y: 0 } // snap relative to the element's top-left,
{ x: 0.5, y: 0.5 } // to the center
// { x: 1 , y: 1 } // and to the bottom-right
]
},
restrict: {
restriction: {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height | identifier_body | |
keycap.js | .62 12.64 10.51C11.24 10.4 9.84 10.18 8.47 9.85C8.16 9.78 7.85 9.7 7.18 9.54C7 9.5 6.81 9.49 6.62 9.52C6.41 9.55 6.59 9.52 6.51 9.53C5.96 9.61 5.52 10.04 5.42 10.58C4.97 13.11 3.83 19.41 2 29.5L37 29.5Z", "restrict":{"height":17,"width":27,"x":6,"y":12}},
"leftView":{"path":"M2 29.89L37 30L30.75 10L3.77 14.33L2 29.89Z", "restrict":{"height":14,"width":26,"x":6,"y":15}},
"rightView":{"path":"M36.94 29.89L1.94 30L8.18 10L35.16 14.33L36.94 29.89Z", "restrict":{"height":14,"width":26,"x":8,"y":15}},
"backView":{"path":"M37 27.5C35.31 19.68 34.25 14.8 33.83 12.84C33.71 12.27 33.27 11.81 32.71 11.65C32.58 11.62 32.88 11.7 32.69 11.65C32.29 11.54 31.86 11.52 31.45 11.59C30.6 11.74 30.09 11.83 29.6 11.91C29.6 11.91 24.82 12.45 24.82 12.45C23.76 12.51 23.67 12.51 22.79 12.56C22.79 12.56 16.22 12.53 16.22 12.53C15.08 12.46 13.72 12.38 12.27 12.29C12.27 12.29 8.82 11.85 8.82 11.85C8.44 11.77 7.81 11.65 7.13 11.52C6.98 11.49 6.82 11.49 6.67 11.51C6.45 11.53 6.46 11.53 6.35 11.54C5.9 11.6 5.52 11.93 5.42 12.38C4.96 14.4 3.82 19.44 2 27.5L37 27.5Z", "restrict":{"height":14,"width":27,"x":6,"y":13}}
}
},
sides:{},
active:false,
mainStyle:{
pointerEvents:'none',
opacity:0.3
}
},
computed: {
transformText () {
return {transform: 'translate(' + this.surfaces[this.currentView].text.x + 'px,' + this.surfaces[this.currentView].text.y + 'px)',fontSize:'26px'}
},
| () {
return {transform: 'translate(' + this.surfaces[this.currentView].img.x + 'px,' + this.surfaces[this.currentView].img.y + 'px)'}
}
},
mounted: function () {
var self = this;
fetch('https://us-central1-hotsguide-188315.cloudfunctions.net/function-1?board=keyboard-104&sides=true', {
headers: {
"Content-Type": "application/json; charset=utf-8",
}
})
.then(response => response.json())
.then(data => {
self.keys = data.keys;
self.sides = data.sides
})
.catch(error => console.error(error));
// $.ajax({
// url: 'https://us-central1-hotsguide-188315.cloudfunctions.net/function-1', //read comments in search.php for more information usage
// type: 'GET',
// data: {board: 'keyboard-61', sides:true},
// dataType: 'json',
// success: function(json) {
// self.keys = json.keys;
// self.sides = json.sides;
// }
// });
},
methods: {
addToCart: function() {
var self = this
this.generatePreviews().then(function (){
var key = {name:self.selectedKey.name, color:self.selectedColor.color,surfaces:self.surfaces, price:1000}
var cartItems = JSON.parse(sessionStorage.getItem('customKeycaps')) || []
console.log(cartItems)
cartItems.push(key)
sessionStorage.setItem('customKeycaps', JSON.stringify(cartItems))
var cartLink = document.getElementById('cartLink')
var itemsSpan = cartLink.getElementsByTagName('span')[0]
if (itemsSpan) {
itemsSpan.innerHTML = ' ('+cartItems.length+')'
} else {
itemsSpan = document.createElement('span')
itemsSpan.innerHTML = ' ('+cartItems.length+')'
itemsSpan.setAttribute('style','color:blue;')
cartLink.appendChild(itemsSpan)
}
})
},
setColor: function(color) {
document.querySelector('.keyPicker[data-color="'+this.selectedColor.name+'"]').style.removeProperty('border')
document.querySelector('.keyPicker[data-color="'+color.name+'"]').style.border = '2px solid red'
this.selectedColor = color
},
changeView: function(ev) {
console.log(ev)
document.activeElement.blur();
this.currentView = ev.target.selectedOptions[0].value
var fileInput = document.querySelector('input[type=file]')
//var textInput = document.querySelector('input[type=text]')
if (this.surfaces[this.currentView].img.filename == ''){
fileInput.value = ''
if(!/safari/i.test(navigator.userAgent)){
fileInput.type = ''
fileInput.type = 'file'
}
}
//textInput.value = this.surfaces[this.currentView].text.value
this.updateSnapPoints()
},
updateSnapPoints: function () {
Vue.nextTick(function () {
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
var imgWidth = document.querySelector('.moveableImg').width
var textWidth = document.querySelector('.moveableText').style.width
var textHeight = document.querySelector('.moveableText').style.height
interact('.moveableText').options.drag.snap.targets = calcSnapTargets(surface,textWidth,textHeight)
interact('.moveableText').options.drag.restrict.restriction = {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
}
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface,imgWidth,imgWidth)
interact('.moveableImg').options.drag.restrict.restriction = {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
}
})
},
changeKey: function(name) {
var prevKey = this.selectedKey.name
var color = '#ffffff'
var keyType = JSON.parse(JSON.stringify(this.keys[name].type))
if (!this.active) {
this.active = true
this.mainStyle.pointerEvents = 'auto'
this.mainStyle.opacity = 1
Vue.nextTick(function () {
addInteractive()
})
}
if (prevKey){
this.keys[prevKey].body.color = '#0f0f0f'
this.keys[prevKey].face.stroke = '#272727'
this.keys[prevKey].face.color = '#1a1 | transformImg | identifier_name |
keycap.js | -188315.cloudfunctions.net/function-1', //read comments in search.php for more information usage
// type: 'GET',
// data: {board: 'keyboard-61', sides:true},
// dataType: 'json',
// success: function(json) {
// self.keys = json.keys;
// self.sides = json.sides;
// }
// });
},
methods: {
addToCart: function() {
var self = this
this.generatePreviews().then(function (){
var key = {name:self.selectedKey.name, color:self.selectedColor.color,surfaces:self.surfaces, price:1000}
var cartItems = JSON.parse(sessionStorage.getItem('customKeycaps')) || []
console.log(cartItems)
cartItems.push(key)
sessionStorage.setItem('customKeycaps', JSON.stringify(cartItems))
var cartLink = document.getElementById('cartLink')
var itemsSpan = cartLink.getElementsByTagName('span')[0]
if (itemsSpan) {
itemsSpan.innerHTML = ' ('+cartItems.length+')'
} else {
itemsSpan = document.createElement('span')
itemsSpan.innerHTML = ' ('+cartItems.length+')'
itemsSpan.setAttribute('style','color:blue;')
cartLink.appendChild(itemsSpan)
}
})
},
setColor: function(color) {
document.querySelector('.keyPicker[data-color="'+this.selectedColor.name+'"]').style.removeProperty('border')
document.querySelector('.keyPicker[data-color="'+color.name+'"]').style.border = '2px solid red'
this.selectedColor = color
},
changeView: function(ev) {
console.log(ev)
document.activeElement.blur();
this.currentView = ev.target.selectedOptions[0].value
var fileInput = document.querySelector('input[type=file]')
//var textInput = document.querySelector('input[type=text]')
if (this.surfaces[this.currentView].img.filename == ''){
fileInput.value = ''
if(!/safari/i.test(navigator.userAgent)){
fileInput.type = ''
fileInput.type = 'file'
}
}
//textInput.value = this.surfaces[this.currentView].text.value
this.updateSnapPoints()
},
updateSnapPoints: function () {
Vue.nextTick(function () {
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
var imgWidth = document.querySelector('.moveableImg').width
var textWidth = document.querySelector('.moveableText').style.width
var textHeight = document.querySelector('.moveableText').style.height
interact('.moveableText').options.drag.snap.targets = calcSnapTargets(surface,textWidth,textHeight)
interact('.moveableText').options.drag.restrict.restriction = {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
}
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface,imgWidth,imgWidth)
interact('.moveableImg').options.drag.restrict.restriction = {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
}
})
},
changeKey: function(name) {
var prevKey = this.selectedKey.name
var color = '#ffffff'
var keyType = JSON.parse(JSON.stringify(this.keys[name].type))
if (!this.active) {
this.active = true
this.mainStyle.pointerEvents = 'auto'
this.mainStyle.opacity = 1
Vue.nextTick(function () {
addInteractive()
})
}
if (prevKey){
this.keys[prevKey].body.color = '#0f0f0f'
this.keys[prevKey].face.stroke = '#272727'
this.keys[prevKey].face.color = '#1a1a1a'
}
this.selectedKey.sides = this.sides[keyType]
this.selectedKey.type = keyType
this.selectedKey.name = name
this.keys[name].body.color = this.shadeBlend(-0.25, color)
this.keys[name].face.stroke = this.shadeBlend(0.065, color)
this.keys[name].face.color = color
//this.selectedKey.topView.viewbox = "0 0 " + this.selectedKey.topView.body.width + " " + this.selectedKey.topView.body.height
this.updateSnapPoints()
},
updateText: function(text) {
this.surfaces[this.currentView].text.value = text
      var surface = document.querySelector('.restrictRect').getBoundingClientRect()
var newWidth = document.querySelector('.moveableText').style.width
var newHeight = document.querySelector('.moveableText').style.height
interact('.moveableText').options.drag.snap.targets = calcSnapTargets(surface, newWidth, newHeight)
},
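    // Blends color c0 toward white (p > 0), black (p < 0), or an explicit c1 by fraction p.
    // Handles both "rgb(r,g,b)" strings and "#rrggbb" hex values; used to derive the key
    // body and stroke shades from the selected color.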
shadeBlend: function(p,c0,c1) {
var n=p<0?p*-1:p,u=Math.round,w=parseInt;
if(c0.length>7){
var f=c0.split(","),t=(c1?c1:p<0?"rgb(0,0,0)":"rgb(255,255,255)").split(","),R=w(f[0].slice(4)),G=w(f[1]),B=w(f[2]);
return "rgb("+(u((w(t[0].slice(4))-R)*n)+R)+","+(u((w(t[1])-G)*n)+G)+","+(u((w(t[2])-B)*n)+B)+")"
}else{
var f=w(c0.slice(1),16),t=w((c1?c1:p<0?"#000000":"#FFFFFF").slice(1),16),R1=f>>16,G1=f>>8&0x00FF,B1=f&0x0000FF;
return "#"+(0x1000000+(u(((t>>16)-R1)*n)+R1)*0x10000+(u(((t>>8&0x00FF)-G1)*n)+G1)*0x100+(u(((t&0x0000FF)-B1)*n)+B1)).toString(16).slice(1)
}
},
// ugly, must be a better way
generatePreviews: function() {
var svgns = "http://www.w3.org/2000/svg"
var allSides = []
for (var surface in this.surfaces) {
var self = this
var promise = new Promise(function(resolve, reject) {
var newSvg = document.createElementNS(svgns, "svg");
newSvg.setAttribute('width', 260)
newSvg.setAttribute('height', 210)
newSvg.setAttribute('preserveAspectRatio', 'xMidYMid meet')
newSvg.setAttribute('viewBox', self.selectedKey.sides.topView.viewbox)
if (surface == 'topView'){
var body = document.createElementNS(svgns, 'rect');
body.setAttribute('class','keySurface')
body.setAttribute('x', self.selectedKey.sides.topView.body.x)
body.setAttribute('y',self.selectedKey.sides.topView.body.y)
body.setAttribute('width',self.selectedKey.sides.topView.body.width)
body.setAttribute('height',self.selectedKey.sides.topView.body.height)
body.setAttribute('rx',self.selectedKey.sides.topView.body.rx)
body.setAttribute('ry',self.selectedKey.sides.topView.body.ry)
body.setAttribute('fill',self.shadeBlend(-0.2,self.selectedColor.color))
var face = document.createElementNS(svgns, 'rect');
face.setAttribute('class','restrictRect')
face.setAttribute('x', self.selectedKey.sides.topView.face.x)
face.setAttribute('y',self.selectedKey.sides.topView.face.y)
face.setAttribute('width',self.selectedKey.sides.topView.face.width)
face.setAttribute('height',self.selectedKey.sides.topView.face.height)
face.setAttribute('rx',self.selectedKey.sides.topView.face.rx)
face.setAttribute('ry',self.selectedKey.sides.topView.face.ry)
face.setAttribute('fill',self.selectedColor.color)
face.setAttribute('stroke',self.shadeBlend(0.065,self.selectedColor.color))
face.setAttribute('stroke-width','1px')
newSvg.appendChild(body)
newSvg.appendChild(face)
} else {
var path = document.createElementNS(svgns, 'path')
path.setAttribute('stroke-width','0.5px')
path.setAttribute('stroke',self.shadeBlend(-0.2,self.selectedColor.color))
path.setAttribute('fill',self.selectedColor.color)
path.setAttribute('class','keySurface')
path.setAttribute('d',self.selectedKey.sides[surface].path)
newSvg.appendChild(path)
}
if (self.surfaces[surface].img.value != '' || self.surfaces[surface].text.value != ''){
var img = new Image()
img.width = self.surfaces[surface].img.width
img.height = self.surfaces[surface].img.height
img.src = self.surfaces[surface].img.value | var thisSurface = surface
img.onload = function () {
var svg_xml = (new XMLSerializer()).serializeToString(newSvg); | random_line_split | |
keycap.js | .62 12.64 10.51C11.24 10.4 9.84 10.18 8.47 9.85C8.16 9.78 7.85 9.7 7.18 9.54C7 9.5 6.81 9.49 6.62 9.52C6.41 9.55 6.59 9.52 6.51 9.53C5.96 9.61 5.52 10.04 5.42 10.58C4.97 13.11 3.83 19.41 2 29.5L37 29.5Z", "restrict":{"height":17,"width":27,"x":6,"y":12}},
"leftView":{"path":"M2 29.89L37 30L30.75 10L3.77 14.33L2 29.89Z", "restrict":{"height":14,"width":26,"x":6,"y":15}},
"rightView":{"path":"M36.94 29.89L1.94 30L8.18 10L35.16 14.33L36.94 29.89Z", "restrict":{"height":14,"width":26,"x":8,"y":15}},
"backView":{"path":"M37 27.5C35.31 19.68 34.25 14.8 33.83 12.84C33.71 12.27 33.27 11.81 32.71 11.65C32.58 11.62 32.88 11.7 32.69 11.65C32.29 11.54 31.86 11.52 31.45 11.59C30.6 11.74 30.09 11.83 29.6 11.91C29.6 11.91 24.82 12.45 24.82 12.45C23.76 12.51 23.67 12.51 22.79 12.56C22.79 12.56 16.22 12.53 16.22 12.53C15.08 12.46 13.72 12.38 12.27 12.29C12.27 12.29 8.82 11.85 8.82 11.85C8.44 11.77 7.81 11.65 7.13 11.52C6.98 11.49 6.82 11.49 6.67 11.51C6.45 11.53 6.46 11.53 6.35 11.54C5.9 11.6 5.52 11.93 5.42 12.38C4.96 14.4 3.82 19.44 2 27.5L37 27.5Z", "restrict":{"height":14,"width":27,"x":6,"y":13}}
}
},
sides:{},
active:false,
mainStyle:{
pointerEvents:'none',
opacity:0.3
}
},
computed: {
transformText () {
return {transform: 'translate(' + this.surfaces[this.currentView].text.x + 'px,' + this.surfaces[this.currentView].text.y + 'px)',fontSize:'26px'}
},
transformImg () {
return {transform: 'translate(' + this.surfaces[this.currentView].img.x + 'px,' + this.surfaces[this.currentView].img.y + 'px)'}
}
},
mounted: function () {
var self = this;
fetch('https://us-central1-hotsguide-188315.cloudfunctions.net/function-1?board=keyboard-104&sides=true', {
headers: {
"Content-Type": "application/json; charset=utf-8",
}
})
.then(response => response.json())
.then(data => {
self.keys = data.keys;
self.sides = data.sides
})
.catch(error => console.error(error));
// $.ajax({
// url: 'https://us-central1-hotsguide-188315.cloudfunctions.net/function-1', //read comments in search.php for more information usage
// type: 'GET',
// data: {board: 'keyboard-61', sides:true},
// dataType: 'json',
// success: function(json) {
// self.keys = json.keys;
// self.sides = json.sides;
// }
// });
},
methods: {
addToCart: function() {
var self = this
this.generatePreviews().then(function (){
var key = {name:self.selectedKey.name, color:self.selectedColor.color,surfaces:self.surfaces, price:1000}
var cartItems = JSON.parse(sessionStorage.getItem('customKeycaps')) || []
console.log(cartItems)
cartItems.push(key)
sessionStorage.setItem('customKeycaps', JSON.stringify(cartItems))
var cartLink = document.getElementById('cartLink')
var itemsSpan = cartLink.getElementsByTagName('span')[0]
if (itemsSpan) | else {
itemsSpan = document.createElement('span')
itemsSpan.innerHTML = ' ('+cartItems.length+')'
itemsSpan.setAttribute('style','color:blue;')
cartLink.appendChild(itemsSpan)
}
})
},
setColor: function(color) {
document.querySelector('.keyPicker[data-color="'+this.selectedColor.name+'"]').style.removeProperty('border')
document.querySelector('.keyPicker[data-color="'+color.name+'"]').style.border = '2px solid red'
this.selectedColor = color
},
changeView: function(ev) {
console.log(ev)
document.activeElement.blur();
this.currentView = ev.target.selectedOptions[0].value
var fileInput = document.querySelector('input[type=file]')
//var textInput = document.querySelector('input[type=text]')
if (this.surfaces[this.currentView].img.filename == ''){
fileInput.value = ''
if(!/safari/i.test(navigator.userAgent)){
fileInput.type = ''
fileInput.type = 'file'
}
}
//textInput.value = this.surfaces[this.currentView].text.value
this.updateSnapPoints()
},
updateSnapPoints: function () {
Vue.nextTick(function () {
var surface = document.querySelector('.restrictRect').getBoundingClientRect()
var imgWidth = document.querySelector('.moveableImg').width
var textWidth = document.querySelector('.moveableText').style.width
var textHeight = document.querySelector('.moveableText').style.height
interact('.moveableText').options.drag.snap.targets = calcSnapTargets(surface,textWidth,textHeight)
interact('.moveableText').options.drag.restrict.restriction = {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
}
interact('.moveableImg').options.drag.snap.targets = calcSnapTargets(surface,imgWidth,imgWidth)
interact('.moveableImg').options.drag.restrict.restriction = {
x:surface.x,
y:surface.y,
width:surface.width,
height:surface.height
}
})
},
changeKey: function(name) {
var prevKey = this.selectedKey.name
var color = '#ffffff'
var keyType = JSON.parse(JSON.stringify(this.keys[name].type))
if (!this.active) {
this.active = true
this.mainStyle.pointerEvents = 'auto'
this.mainStyle.opacity = 1
Vue.nextTick(function () {
addInteractive()
})
}
if (prevKey){
this.keys[prevKey].body.color = '#0f0f0f'
this.keys[prevKey].face.stroke = '#272727'
this.keys[prevKey].face.color = '#1a | {
itemsSpan.innerHTML = ' ('+cartItems.length+')'
} | conditional_block |
replicator.go | bind_port", 6000))
r.driveRoot = cnf.GetDefault("app:object-server", "devices", "/srv/node")
r.rpcPort = int(cnf.GetInt("object-replicator", "rpc_port", 60000))
r.concurrency = int(cnf.GetInt("object-replicator", "concurrency", 1))
r.interval = int(cnf.GetInt("object-replicator", "interval", 60*60*24))
}
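// collectDevices builds the ring and the list of local devices for every replication
// policy, honoring the optional policy and device filters, and shuffles the devices so
// work is spread in a random order.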
func (r *Replicator) collectDevices(policyFilter, deviceFilter string) {
pf := map[int]bool{}
for _, p := range strings.Split(policyFilter, ",") {
if p == "" {
continue
}
pi, err := strconv.Atoi(p)
if err != nil {
r.logger.Error("unable to parse policy filter, ignore",
zap.String("policies", policyFilter), zap.Error(err))
continue
}
pf[pi] = true
}
df := map[string]bool{}
for _, d := range strings.Split(deviceFilter, ",") {
if d != "" {
df[d] = true
}
}
r.rings = map[int]ring.Ring{}
for _, p := range conf.LoadPolicies() {
if p.Type != NAME || (len(pf) > 0 && !pf[p.Index]) {
continue
}
var err error
r.rings[p.Index], err = ring.GetRing(
"object", r.hashPrefix, r.hashSuffix, p.Index)
if err != nil {
r.logger.Error("unable to get ring",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
r.devices = map[int][]*ring.Device{}
devs, err := r.rings[p.Index].LocalDevices(r.srvPort)
if err != nil {
r.logger.Error("unable to list local device",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
for _, d := range devs {
if len(df) == 0 || df[d.Device] {
r.devices[p.Index] = append(r.devices[p.Index], d)
}
}
devices := r.devices[p.Index]
rand.Shuffle(len(devices), func(i, j int) {
devices[i], devices[j] = devices[j], devices[i]
})
}
}
func (r *Replicator) listPartitions(policy int, device string) []string {
objPath, _ := PackDevicePaths(device, r.driveRoot, policy)
suffixes, err := fs.ReadDirNames(objPath)
if err != nil {
r.logger.Error("unable to get partition list", zap.Error(err))
return nil
}
var partitions []string
for _, suff := range suffixes {
if (len(r.whitelist) > 0 && !r.whitelist[suff]) || !common.IsDecimal(suff) {
continue
}
partitions = append(partitions, suff)
}
rand.Shuffle(len(partitions), func(i, j int) {
partitions[i], partitions[j] = partitions[j], partitions[i]
})
return partitions
}
func (r *Replicator) getLocalHash(
policy int, device, partition string, rehash []string) (int64, map[string]string) {
	// TODO: do we need to add a timeout?
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
msg := &SuffixHashesMsg{
Device: device,
Policy: uint32(policy),
Partition: partition,
ReclaimAge: ONE_WEEK,
ListDir: rand.Intn(10) == 0,
Recalculate: rehash,
}
reply, err := r.rpc.GetHashes(ctx, msg)
if err != nil {
r.logger.Error("unable to get local hashes",
zap.Int("policy", policy),
zap.String("device", device),
zap.String("partition", partition),
zap.Error(err))
return 0, nil
}
return reply.Hashed, reply.Hashes
}
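// getRemoteHash issues a REPLICATE request to the given node (optionally scoped to a
// set of suffixes) and unpickles the returned suffix -> hash map.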
func (r *Replicator) getRemoteHash(policy int, node *ring.Device,
partition string, suffixes []string) (map[string]string, error) | zap.String("url", url), zap.Error(err))
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusInsufficientStorage {
return nil, ErrRemoteDiskUnmounted
}
if resp.StatusCode != http.StatusOK {
return nil, ErrRemoteHash
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
r.logger.Error("unable to read replicate response body",
zap.String("url", url), zap.Error(err))
return nil, err
}
v, err := pickle.PickleLoads(body)
if err != nil {
r.logger.Error("unable to deserialize pickle data",
zap.String("url", url), zap.Error(err))
return nil, err
}
pickledHashes, ok := v.(map[interface{}]interface{})
if !ok {
return nil, ErrMalformedData
}
hashes := make(map[string]string)
for suff, hash := range pickledHashes {
if hashes[suff.(string)], ok = hash.(string); !ok {
hashes[suff.(string)] = ""
}
}
return hashes, nil
}
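// replicateLocal pushes a partition this node is a primary for to its peer replicas:
// it compares local suffix hashes against each remote replica, recalculates the
// suffixes that differ, and issues a Sync RPC for whatever still doesn't match.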
func (r *Replicator) replicateLocal(
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
attempts := int(r.rings[policy].ReplicaCount()) - 1
for node := nodes.Next(); node != nil && attempts > 0; node = nodes.Next() {
attempts--
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
if err == ErrRemoteDiskUnmounted {
attempts++
}
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition, suffixes)
r.stat.rehashed += rehashed
suffixes = nil
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
msg := &SyncMsg{
LocalDevice: device.Device,
Host: node.Ip,
Port: int32(node.Port),
Device: node.Device,
Policy: uint32(policy),
Partition: partition,
Suffixes: suffixes,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
reply, err := r.rpc.Sync(ctx, msg)
if err != nil {
r.logger.Error("unable to finish sync job",
zap.Any("args", msg), zap.Error(err))
continue
}
r.getRemoteHash(policy, node, partition, suffixes)
if reply.Success {
r.stat.replicated += int64(len(reply.Candidates))
}
}
}
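// replicateHandoff pushes a handoff partition to each of its primary nodes, syncing
// any suffixes whose hashes differ and tracking whether every push succeeded.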
func (r *Replicator) replicateHandoff(
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
success := true
for node := nodes.Next(); node != nil; node = nodes.Next() {
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
r.logger.Error("unable to get remote hash",
zap.Int("policy", policy),
zap.Any("node", node),
zap.Error(err))
success = false
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition | {
url := fmt.Sprintf("http://%s:%d/%s/%s",
node.Ip, node.Port, node.Device, partition)
if len(suffixes) > 0 {
url = fmt.Sprintf("%s/%s", url, strings.Join(suffixes, "-"))
}
req, err := http.NewRequest(common.REPLICATE, url, nil)
if err != nil {
r.logger.Error("unable to create diff request",
zap.String("url", url),
zap.Error(err))
return nil, err
}
req.Header.Set(common.XBackendPolicyIndex, strconv.Itoa(policy))
resp, err := r.http.Do(req)
if err != nil {
r.logger.Error("unable to get remote hash", | identifier_body |
replicator.go | _port", 6000))
r.driveRoot = cnf.GetDefault("app:object-server", "devices", "/srv/node")
r.rpcPort = int(cnf.GetInt("object-replicator", "rpc_port", 60000))
r.concurrency = int(cnf.GetInt("object-replicator", "concurrency", 1))
r.interval = int(cnf.GetInt("object-replicator", "interval", 60*60*24))
}
func (r *Replicator) collectDevices(policyFilter, deviceFilter string) {
pf := map[int]bool{}
for _, p := range strings.Split(policyFilter, ",") {
if p == "" {
continue
}
pi, err := strconv.Atoi(p)
if err != nil {
r.logger.Error("unable to parse policy filter, ignore",
zap.String("policies", policyFilter), zap.Error(err))
continue
}
pf[pi] = true
}
df := map[string]bool{}
for _, d := range strings.Split(deviceFilter, ",") {
if d != "" {
df[d] = true
}
}
r.rings = map[int]ring.Ring{}
for _, p := range conf.LoadPolicies() {
if p.Type != NAME || (len(pf) > 0 && !pf[p.Index]) {
continue
}
var err error
r.rings[p.Index], err = ring.GetRing(
"object", r.hashPrefix, r.hashSuffix, p.Index)
if err != nil {
r.logger.Error("unable to get ring",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
r.devices = map[int][]*ring.Device{}
devs, err := r.rings[p.Index].LocalDevices(r.srvPort)
if err != nil {
r.logger.Error("unable to list local device",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
for _, d := range devs {
if len(df) == 0 || df[d.Device] {
r.devices[p.Index] = append(r.devices[p.Index], d)
}
}
devices := r.devices[p.Index]
rand.Shuffle(len(devices), func(i, j int) {
devices[i], devices[j] = devices[j], devices[i]
})
}
}
func (r *Replicator) listPartitions(policy int, device string) []string {
objPath, _ := PackDevicePaths(device, r.driveRoot, policy)
suffixes, err := fs.ReadDirNames(objPath)
if err != nil {
r.logger.Error("unable to get partition list", zap.Error(err))
return nil
}
var partitions []string
for _, suff := range suffixes {
if (len(r.whitelist) > 0 && !r.whitelist[suff]) || !common.IsDecimal(suff) {
continue
}
partitions = append(partitions, suff)
}
rand.Shuffle(len(partitions), func(i, j int) {
partitions[i], partitions[j] = partitions[j], partitions[i]
})
return partitions
}
func (r *Replicator) getLocalHash(
policy int, device, partition string, rehash []string) (int64, map[string]string) {
	// TODO: do we need to add a timeout?
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
msg := &SuffixHashesMsg{
Device: device,
Policy: uint32(policy),
Partition: partition,
ReclaimAge: ONE_WEEK,
ListDir: rand.Intn(10) == 0,
Recalculate: rehash,
}
reply, err := r.rpc.GetHashes(ctx, msg)
if err != nil {
r.logger.Error("unable to get local hashes",
zap.Int("policy", policy),
zap.String("device", device),
zap.String("partition", partition),
zap.Error(err))
return 0, nil
}
return reply.Hashed, reply.Hashes
}
func (r *Replicator) getRemoteHash(policy int, node *ring.Device,
partition string, suffixes []string) (map[string]string, error) {
url := fmt.Sprintf("http://%s:%d/%s/%s",
node.Ip, node.Port, node.Device, partition)
if len(suffixes) > 0 {
url = fmt.Sprintf("%s/%s", url, strings.Join(suffixes, "-"))
}
req, err := http.NewRequest(common.REPLICATE, url, nil)
if err != nil {
r.logger.Error("unable to create diff request",
zap.String("url", url),
zap.Error(err))
return nil, err
}
req.Header.Set(common.XBackendPolicyIndex, strconv.Itoa(policy))
resp, err := r.http.Do(req)
if err != nil {
r.logger.Error("unable to get remote hash",
zap.String("url", url), zap.Error(err))
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusInsufficientStorage {
return nil, ErrRemoteDiskUnmounted
}
if resp.StatusCode != http.StatusOK {
return nil, ErrRemoteHash
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
r.logger.Error("unable to read replicate response body",
zap.String("url", url), zap.Error(err))
return nil, err
}
v, err := pickle.PickleLoads(body)
if err != nil {
r.logger.Error("unable to deserialize pickle data",
zap.String("url", url), zap.Error(err))
return nil, err
}
pickledHashes, ok := v.(map[interface{}]interface{})
if !ok {
return nil, ErrMalformedData
}
hashes := make(map[string]string)
for suff, hash := range pickledHashes {
if hashes[suff.(string)], ok = hash.(string); !ok {
hashes[suff.(string)] = ""
}
}
return hashes, nil
}
func (r *Replicator) | (
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
attempts := int(r.rings[policy].ReplicaCount()) - 1
for node := nodes.Next(); node != nil && attempts > 0; node = nodes.Next() {
attempts--
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
if err == ErrRemoteDiskUnmounted {
attempts++
}
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition, suffixes)
r.stat.rehashed += rehashed
suffixes = nil
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
msg := &SyncMsg{
LocalDevice: device.Device,
Host: node.Ip,
Port: int32(node.Port),
Device: node.Device,
Policy: uint32(policy),
Partition: partition,
Suffixes: suffixes,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
reply, err := r.rpc.Sync(ctx, msg)
if err != nil {
r.logger.Error("unable to finish sync job",
zap.Any("args", msg), zap.Error(err))
continue
}
r.getRemoteHash(policy, node, partition, suffixes)
if reply.Success {
r.stat.replicated += int64(len(reply.Candidates))
}
}
}
func (r *Replicator) replicateHandoff(
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
success := true
for node := nodes.Next(); node != nil; node = nodes.Next() {
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
r.logger.Error("unable to get remote hash",
zap.Int("policy", policy),
zap.Any("node", node),
zap.Error(err))
success = false
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition | replicateLocal | identifier_name |
replicator.go | ))
r.concurrency = int(cnf.GetInt("object-replicator", "concurrency", 1))
r.interval = int(cnf.GetInt("object-replicator", "interval", 60*60*24))
}
func (r *Replicator) collectDevices(policyFilter, deviceFilter string) {
pf := map[int]bool{}
for _, p := range strings.Split(policyFilter, ",") {
if p == "" {
continue
}
pi, err := strconv.Atoi(p)
if err != nil {
r.logger.Error("unable to parse policy filter, ignore",
zap.String("policies", policyFilter), zap.Error(err))
continue
}
pf[pi] = true
}
df := map[string]bool{}
for _, d := range strings.Split(deviceFilter, ",") {
if d != "" {
df[d] = true
}
}
r.rings = map[int]ring.Ring{}
for _, p := range conf.LoadPolicies() {
if p.Type != NAME || (len(pf) > 0 && !pf[p.Index]) {
continue
}
var err error
r.rings[p.Index], err = ring.GetRing(
"object", r.hashPrefix, r.hashSuffix, p.Index)
if err != nil {
r.logger.Error("unable to get ring",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
r.devices = map[int][]*ring.Device{}
devs, err := r.rings[p.Index].LocalDevices(r.srvPort)
if err != nil {
r.logger.Error("unable to list local device",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
for _, d := range devs {
if len(df) == 0 || df[d.Device] {
r.devices[p.Index] = append(r.devices[p.Index], d)
}
}
devices := r.devices[p.Index]
rand.Shuffle(len(devices), func(i, j int) {
devices[i], devices[j] = devices[j], devices[i]
})
}
}
func (r *Replicator) listPartitions(policy int, device string) []string {
objPath, _ := PackDevicePaths(device, r.driveRoot, policy)
suffixes, err := fs.ReadDirNames(objPath)
if err != nil {
r.logger.Error("unable to get partition list", zap.Error(err))
return nil
}
var partitions []string
for _, suff := range suffixes {
if (len(r.whitelist) > 0 && !r.whitelist[suff]) || !common.IsDecimal(suff) {
continue
}
partitions = append(partitions, suff)
}
rand.Shuffle(len(partitions), func(i, j int) {
partitions[i], partitions[j] = partitions[j], partitions[i]
})
return partitions
}
func (r *Replicator) getLocalHash(
policy int, device, partition string, rehash []string) (int64, map[string]string) {
	// TODO: do we need to add a timeout?
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
msg := &SuffixHashesMsg{
Device: device,
Policy: uint32(policy),
Partition: partition,
ReclaimAge: ONE_WEEK,
ListDir: rand.Intn(10) == 0,
Recalculate: rehash,
}
reply, err := r.rpc.GetHashes(ctx, msg)
if err != nil {
r.logger.Error("unable to get local hashes",
zap.Int("policy", policy),
zap.String("device", device),
zap.String("partition", partition),
zap.Error(err))
return 0, nil
}
return reply.Hashed, reply.Hashes
}
func (r *Replicator) getRemoteHash(policy int, node *ring.Device,
partition string, suffixes []string) (map[string]string, error) {
url := fmt.Sprintf("http://%s:%d/%s/%s",
node.Ip, node.Port, node.Device, partition)
if len(suffixes) > 0 {
url = fmt.Sprintf("%s/%s", url, strings.Join(suffixes, "-"))
}
req, err := http.NewRequest(common.REPLICATE, url, nil)
if err != nil {
r.logger.Error("unable to create diff request",
zap.String("url", url),
zap.Error(err))
return nil, err
}
req.Header.Set(common.XBackendPolicyIndex, strconv.Itoa(policy))
resp, err := r.http.Do(req)
if err != nil {
r.logger.Error("unable to get remote hash",
zap.String("url", url), zap.Error(err))
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusInsufficientStorage {
return nil, ErrRemoteDiskUnmounted
}
if resp.StatusCode != http.StatusOK {
return nil, ErrRemoteHash
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
r.logger.Error("unable to read replicate response body",
zap.String("url", url), zap.Error(err))
return nil, err
}
v, err := pickle.PickleLoads(body)
if err != nil {
r.logger.Error("unable to deserialize pickle data",
zap.String("url", url), zap.Error(err))
return nil, err
}
pickledHashes, ok := v.(map[interface{}]interface{})
if !ok {
return nil, ErrMalformedData
}
hashes := make(map[string]string)
for suff, hash := range pickledHashes {
if hashes[suff.(string)], ok = hash.(string); !ok {
hashes[suff.(string)] = ""
}
}
return hashes, nil
}
func (r *Replicator) replicateLocal(
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
attempts := int(r.rings[policy].ReplicaCount()) - 1
for node := nodes.Next(); node != nil && attempts > 0; node = nodes.Next() {
attempts--
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
if err == ErrRemoteDiskUnmounted {
attempts++
}
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition, suffixes)
r.stat.rehashed += rehashed
suffixes = nil
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
msg := &SyncMsg{
LocalDevice: device.Device,
Host: node.Ip,
Port: int32(node.Port),
Device: node.Device,
Policy: uint32(policy),
Partition: partition,
Suffixes: suffixes,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
reply, err := r.rpc.Sync(ctx, msg)
if err != nil {
r.logger.Error("unable to finish sync job",
zap.Any("args", msg), zap.Error(err))
continue
}
r.getRemoteHash(policy, node, partition, suffixes)
if reply.Success {
r.stat.replicated += int64(len(reply.Candidates))
}
}
}
func (r *Replicator) replicateHandoff(
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
success := true
for node := nodes.Next(); node != nil; node = nodes.Next() {
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
r.logger.Error("unable to get remote hash",
zap.Int("policy", policy),
zap.Any("node", node),
zap.Error(err))
success = false
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition, suffixes)
r.stat.rehashed += rehashed
suffixes = nil
for s, h := range localHash { | if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
| random_line_split | |
replicator.go | _port", 6000))
r.driveRoot = cnf.GetDefault("app:object-server", "devices", "/srv/node")
r.rpcPort = int(cnf.GetInt("object-replicator", "rpc_port", 60000))
r.concurrency = int(cnf.GetInt("object-replicator", "concurrency", 1))
r.interval = int(cnf.GetInt("object-replicator", "interval", 60*60*24))
}
func (r *Replicator) collectDevices(policyFilter, deviceFilter string) {
pf := map[int]bool{}
for _, p := range strings.Split(policyFilter, ",") {
if p == "" {
continue
}
pi, err := strconv.Atoi(p)
if err != nil {
r.logger.Error("unable to parse policy filter, ignore",
zap.String("policies", policyFilter), zap.Error(err))
continue
}
pf[pi] = true
}
df := map[string]bool{}
for _, d := range strings.Split(deviceFilter, ",") {
if d != "" {
df[d] = true
}
}
r.rings = map[int]ring.Ring{}
for _, p := range conf.LoadPolicies() {
if p.Type != NAME || (len(pf) > 0 && !pf[p.Index]) {
continue
}
var err error
r.rings[p.Index], err = ring.GetRing(
"object", r.hashPrefix, r.hashSuffix, p.Index)
if err != nil {
r.logger.Error("unable to get ring",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
r.devices = map[int][]*ring.Device{}
devs, err := r.rings[p.Index].LocalDevices(r.srvPort)
if err != nil {
r.logger.Error("unable to list local device",
zap.Int("policy", p.Index),
zap.Int("port", r.srvPort),
zap.Error(err))
continue
}
for _, d := range devs {
if len(df) == 0 || df[d.Device] {
r.devices[p.Index] = append(r.devices[p.Index], d)
}
}
devices := r.devices[p.Index]
rand.Shuffle(len(devices), func(i, j int) {
devices[i], devices[j] = devices[j], devices[i]
})
}
}
func (r *Replicator) listPartitions(policy int, device string) []string {
objPath, _ := PackDevicePaths(device, r.driveRoot, policy)
suffixes, err := fs.ReadDirNames(objPath)
if err != nil {
r.logger.Error("unable to get partition list", zap.Error(err))
return nil
}
var partitions []string
for _, suff := range suffixes {
if (len(r.whitelist) > 0 && !r.whitelist[suff]) || !common.IsDecimal(suff) {
continue
}
partitions = append(partitions, suff)
}
rand.Shuffle(len(partitions), func(i, j int) {
partitions[i], partitions[j] = partitions[j], partitions[i]
})
return partitions
}
func (r *Replicator) getLocalHash(
policy int, device, partition string, rehash []string) (int64, map[string]string) {
	// TODO: do we need to add a timeout?
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
msg := &SuffixHashesMsg{
Device: device,
Policy: uint32(policy),
Partition: partition,
ReclaimAge: ONE_WEEK,
ListDir: rand.Intn(10) == 0,
Recalculate: rehash,
}
reply, err := r.rpc.GetHashes(ctx, msg)
if err != nil {
r.logger.Error("unable to get local hashes",
zap.Int("policy", policy),
zap.String("device", device),
zap.String("partition", partition),
zap.Error(err))
return 0, nil
}
return reply.Hashed, reply.Hashes
}
func (r *Replicator) getRemoteHash(policy int, node *ring.Device,
partition string, suffixes []string) (map[string]string, error) {
url := fmt.Sprintf("http://%s:%d/%s/%s",
node.Ip, node.Port, node.Device, partition)
if len(suffixes) > 0 {
url = fmt.Sprintf("%s/%s", url, strings.Join(suffixes, "-"))
}
req, err := http.NewRequest(common.REPLICATE, url, nil)
if err != nil {
r.logger.Error("unable to create diff request",
zap.String("url", url),
zap.Error(err))
return nil, err
}
req.Header.Set(common.XBackendPolicyIndex, strconv.Itoa(policy))
resp, err := r.http.Do(req)
if err != nil {
r.logger.Error("unable to get remote hash",
zap.String("url", url), zap.Error(err))
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusInsufficientStorage {
return nil, ErrRemoteDiskUnmounted
}
if resp.StatusCode != http.StatusOK {
return nil, ErrRemoteHash
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
r.logger.Error("unable to read replicate response body",
zap.String("url", url), zap.Error(err))
return nil, err
}
v, err := pickle.PickleLoads(body)
if err != nil {
r.logger.Error("unable to deserialize pickle data",
zap.String("url", url), zap.Error(err))
return nil, err
}
pickledHashes, ok := v.(map[interface{}]interface{})
if !ok {
return nil, ErrMalformedData
}
hashes := make(map[string]string)
for suff, hash := range pickledHashes {
if hashes[suff.(string)], ok = hash.(string); !ok |
}
return hashes, nil
}
func (r *Replicator) replicateLocal(
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
attempts := int(r.rings[policy].ReplicaCount()) - 1
for node := nodes.Next(); node != nil && attempts > 0; node = nodes.Next() {
attempts--
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
if err == ErrRemoteDiskUnmounted {
attempts++
}
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition, suffixes)
r.stat.rehashed += rehashed
suffixes = nil
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
msg := &SyncMsg{
LocalDevice: device.Device,
Host: node.Ip,
Port: int32(node.Port),
Device: node.Device,
Policy: uint32(policy),
Partition: partition,
Suffixes: suffixes,
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
reply, err := r.rpc.Sync(ctx, msg)
if err != nil {
r.logger.Error("unable to finish sync job",
zap.Any("args", msg), zap.Error(err))
continue
}
r.getRemoteHash(policy, node, partition, suffixes)
if reply.Success {
r.stat.replicated += int64(len(reply.Candidates))
}
}
}
func (r *Replicator) replicateHandoff(
policy int, device *ring.Device, partition string, nodes *NodeChain) {
rehashed, localHash := r.getLocalHash(policy, device.Device, partition, nil)
r.stat.rehashed += rehashed
success := true
for node := nodes.Next(); node != nil; node = nodes.Next() {
remoteHash, err := r.getRemoteHash(policy, node, partition, nil)
if err != nil {
r.logger.Error("unable to get remote hash",
zap.Int("policy", policy),
zap.Any("node", node),
zap.Error(err))
success = false
continue
}
var suffixes []string
for s, h := range localHash {
if remoteHash[s] != h {
suffixes = append(suffixes, s)
}
}
if len(suffixes) == 0 {
continue
}
rehashed, localHash := r.getLocalHash(
policy, device.Device, partition | {
hashes[suff.(string)] = ""
} | conditional_block |
fleet.go | return err
}
if err := launchFleetUnitN(
i,
UNIT_NAME_SIDEKICK,
sidekickFleetUnitJson,
); err != nil {
return err
}
}
if err := c.WaitForFleetLaunch(); err != nil {
log.Printf("Error waiting for couchbase cluster launch: %v", err)
return err
}
return nil
}
// Call Fleet API and tell it to stop units. If allUnits is false,
// will only stop couchbase server node + couchbase server sidekick units.
// Otherwise, will stop all fleet units.
func (c CouchbaseFleet) StopUnits(allUnits bool) error {
// set the /couchbase.com/remove-rebalance-disabled flag in etcd since
// otherwise, it will try to remove and rebalance the node, which is not
// what we want when stopping all units.
// set the ttl to be 5 minutes, since there's nothing in place yet to
// block until all the units have stopped
// (TODO: this should get added .. it waits for all units to stop, and then
// it removes the /couchbase.com/remove-rebalance-disabled flag)
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
// call ManipulateUnits with a function that will stop them
unitStopper := func(unit *schema.Unit) error {
// stop the unit by updating desiredState to inactive
// and posting to fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
log.Printf("Stop unit %v via PUT %v", unit.Name, endpointUrl)
return PUT(endpointUrl, `{"desiredState": "inactive"}`)
}
return c.ManipulateUnits(unitStopper, allUnits)
}
// Call Fleet API and tell it to destroy units. If allUnits is false,
// will only stop couchbase server node + couchbase server sidekick units.
// Otherwise, will stop all fleet units.
func (c CouchbaseFleet) | (allUnits bool) error {
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
	// call ManipulateUnits with a function that will destroy them
	unitDestroyer := func(unit *schema.Unit) error {
		// destroy the unit by deleting it via the fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
return DELETE(endpointUrl)
}
return c.ManipulateUnits(unitDestroyer, allUnits)
}
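// UnitManipulator is applied by ManipulateUnits to each selected fleet unit.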
type UnitManipulator func(unit *schema.Unit) error
func (c CouchbaseFleet) ManipulateUnits(unitManipulator UnitManipulator, manipulateAllUnits bool) error {
// find all the units
allUnits, err := c.findAllFleetUnits()
if err != nil {
return err
}
var units []*schema.Unit
if manipulateAllUnits {
units = allUnits
} else {
// filter the ones out that have the name pattern we care about (couchbase_node)
unitNamePatterns := []string{UNIT_NAME_NODE, UNIT_NAME_SIDEKICK}
units = c.filterFleetUnits(allUnits, unitNamePatterns)
}
for _, unit := range units {
if err := unitManipulator(unit); err != nil {
return err
}
}
return nil
}
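// findAllFleetUnits pages through the Fleet API /units endpoint, following
// nextPageToken until every unit has been collected.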
func (c CouchbaseFleet) findAllFleetUnits() (units []*schema.Unit, err error) {
endpointUrl := ""
maxAttempts := 10000
sleepSeconds := 0
nextPageToken := ""
log.Printf("findAllFleetUnits()")
worker := func() (finished bool, err error) {
// append a next page token to url if needed
if len(nextPageToken) > 0 {
endpointUrl = fmt.Sprintf("%v/units?nextPageToken=%v", FLEET_API_ENDPOINT, nextPageToken)
} else {
endpointUrl = fmt.Sprintf("%v/units", FLEET_API_ENDPOINT)
}
log.Printf("Getting units from %v", endpointUrl)
unitPage := schema.UnitPage{}
if err := getJsonData(endpointUrl, &unitPage); err != nil {
return true, err
}
// add all units to return value
for _, unit := range unitPage.Units {
units = append(units, unit)
}
		// advance to the next page token; if there are no more pages, we are finished
		nextPageToken = unitPage.NextPageToken
		areWeFinished := len(unitPage.NextPageToken) == 0
		return areWeFinished, nil
}
sleeper := func(numAttempts int) (bool, int) {
if numAttempts > maxAttempts {
return false, -1
}
return true, sleepSeconds
}
if err := RetryLoop(worker, sleeper); err != nil {
return nil, err
}
return units, nil
}
func (c CouchbaseFleet) filterFleetUnits(units []*schema.Unit, filters []string) (filteredUnits []*schema.Unit) {
stringContainsAny := func(s string, filters []string) bool {
for _, filter := range filters {
if strings.Contains(s, filter) {
return true
}
}
return false
}
for _, unit := range units {
if stringContainsAny(unit.Name, filters) {
filteredUnits = append(filteredUnits, unit)
}
}
return filteredUnits
}
func (c CouchbaseFleet) GenerateUnits(outputDir string) error {
// generate node unit
nodeFleetUnit, err := c.generateNodeFleetUnitFile()
if err != nil {
return err
}
filename := fmt.Sprintf("%v@.service", UNIT_NAME_NODE)
path := filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(nodeFleetUnit), 0644); err != nil {
return err
}
// generate sidekick unit
sidekickFleetUnit, err := c.generateSidekickFleetUnitFile("%i")
if err != nil {
return err
}
filename = fmt.Sprintf("%v@.service", UNIT_NAME_SIDEKICK)
path = filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(sidekickFleetUnit), 0644); err != nil {
return err
}
return nil
}
func (c CouchbaseFleet) WaitForFleetLaunch() error {
// wait until X nodes are up in cluster
log.Printf("Waiting for cluster to be up ..")
WaitUntilNumNodesRunning(c.NumNodes, c.EtcdServers)
// wait until no rebalance running
cb := NewCouchbaseCluster(c.EtcdServers)
if err := cb.LoadAdminCredsFromEtcd(); err != nil {
return err
}
liveNodeIp, err := cb.FindLiveNode()
if err != nil {
return err
}
	// dirty hack to solve a problem: the cluster might have
	// 2 nodes which just finished rebalancing, and a third node
	// that joins and triggers another rebalance. thus, it will briefly
	// go into a "no rebalances happening" state, followed by a rebalance.
	// if we see the "no rebalances happening" state, we'll be tricked into
	// thinking we're done when we're really not.
	// workaround: check twice, and sleep in between the checks
for i := 0; i < c.NumNodes; i++ {
if err := cb.WaitUntilNoRebalanceRunning(liveNodeIp, 30); err != nil {
return err
}
log.Printf("No rebalance running, sleeping 15s. (%v/%v)", i+1, c.NumNodes)
<-time.After(time.Second * 15)
}
log.Println("No rebalance running after several checks")
// let user know its up
log.Printf("Cluster is up!")
return nil
}
func (c *CouchbaseFleet) ExtractDocOptArgs(arguments map[string]interface{}) error {
userpass, err := ExtractUserPass(arguments)
if err != nil {
return err
}
numnodes, err := ExtractNumNodes(arguments)
if err != nil {
return err
}
cbVersion, err := ExtractCbVersion(arguments)
if err != nil {
return err
}
c.UserPass = userpass
c.NumNodes = numnodes
c.CbVersion = cbVersion
c.ContainerTag = ExtractDockerTagOrLatest(arguments)
c.SkipCleanSlateCheck = ExtractSkipCheckCleanState(arguments)
return nil
}
// call fleetctl list-machines and verify that the number of nodes
// the user asked to kick off is LTE number of machines on cluster
func (c CouchbaseFleet) verifyEnoughMachinesAvailable() error {
log.Printf("verifyEnoughMachinesAvailable()")
endpointUrl := fmt.Sprintf("%v/machines", FLEET_API_ENDPOINT)
// {"machines":[{"id":"a91c394439734375aa25 | DestroyUnits | identifier_name |
fleet.go | return err
}
if err := launchFleetUnitN(
i,
UNIT_NAME_SIDEKICK,
sidekickFleetUnitJson,
); err != nil {
return err
}
}
if err := c.WaitForFleetLaunch(); err != nil {
log.Printf("Error waiting for couchbase cluster launch: %v", err)
return err
}
return nil
}
// Call Fleet API and tell it to stop units. If allUnits is false,
// will only stop couchbase server node + couchbase server sidekick units.
// Otherwise, will stop all fleet units.
func (c CouchbaseFleet) StopUnits(allUnits bool) error {
// set the /couchbase.com/remove-rebalance-disabled flag in etcd since
// otherwise, it will try to remove and rebalance the node, which is not
// what we want when stopping all units.
// set the ttl to be 5 minutes, since there's nothing in place yet to
// block until all the units have stopped
// (TODO: this should get added .. it waits for all units to stop, and then
// it removes the /couchbase.com/remove-rebalance-disabled flag)
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
// call ManipulateUnits with a function that will stop them
unitStopper := func(unit *schema.Unit) error {
// stop the unit by updating desiredState to inactive
// and posting to fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
log.Printf("Stop unit %v via PUT %v", unit.Name, endpointUrl)
return PUT(endpointUrl, `{"desiredState": "inactive"}`)
}
return c.ManipulateUnits(unitStopper, allUnits)
}
// Call Fleet API and tell it to destroy units. If allUnits is false,
// will only stop couchbase server node + couchbase server sidekick units.
// Otherwise, will stop all fleet units.
func (c CouchbaseFleet) DestroyUnits(allUnits bool) error {
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
	// call ManipulateUnits with a function that will destroy them
	unitDestroyer := func(unit *schema.Unit) error {
		// destroy the unit by deleting it via the fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
return DELETE(endpointUrl)
}
return c.ManipulateUnits(unitDestroyer, allUnits)
}
type UnitManipulator func(unit *schema.Unit) error
func (c CouchbaseFleet) ManipulateUnits(unitManipulator UnitManipulator, manipulateAllUnits bool) error {
// find all the units
allUnits, err := c.findAllFleetUnits()
if err != nil {
return err
}
var units []*schema.Unit
if manipulateAllUnits {
units = allUnits
} else {
// filter the ones out that have the name pattern we care about (couchbase_node)
unitNamePatterns := []string{UNIT_NAME_NODE, UNIT_NAME_SIDEKICK}
units = c.filterFleetUnits(allUnits, unitNamePatterns)
}
for _, unit := range units {
if err := unitManipulator(unit); err != nil {
return err
}
}
return nil
}
func (c CouchbaseFleet) findAllFleetUnits() (units []*schema.Unit, err error) {
endpointUrl := ""
maxAttempts := 10000
sleepSeconds := 0
nextPageToken := ""
log.Printf("findAllFleetUnits()")
worker := func() (finished bool, err error) {
// append a next page token to url if needed
if len(nextPageToken) > 0 {
endpointUrl = fmt.Sprintf("%v/units?nextPageToken=%v", FLEET_API_ENDPOINT, nextPageToken)
} else {
endpointUrl = fmt.Sprintf("%v/units", FLEET_API_ENDPOINT)
}
log.Printf("Getting units from %v", endpointUrl)
unitPage := schema.UnitPage{}
if err := getJsonData(endpointUrl, &unitPage); err != nil {
return true, err
}
// add all units to return value
for _, unit := range unitPage.Units {
units = append(units, unit)
}
		// advance to the next page token; if there are no more pages, we are finished
		nextPageToken = unitPage.NextPageToken
		areWeFinished := len(unitPage.NextPageToken) == 0
		return areWeFinished, nil
}
sleeper := func(numAttempts int) (bool, int) {
if numAttempts > maxAttempts {
return false, -1
}
return true, sleepSeconds
}
if err := RetryLoop(worker, sleeper); err != nil {
return nil, err
}
return units, nil
}
func (c CouchbaseFleet) filterFleetUnits(units []*schema.Unit, filters []string) (filteredUnits []*schema.Unit) | }
func (c CouchbaseFleet) GenerateUnits(outputDir string) error {
// generate node unit
nodeFleetUnit, err := c.generateNodeFleetUnitFile()
if err != nil {
return err
}
filename := fmt.Sprintf("%v@.service", UNIT_NAME_NODE)
path := filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(nodeFleetUnit), 0644); err != nil {
return err
}
// generate sidekick unit
sidekickFleetUnit, err := c.generateSidekickFleetUnitFile("%i")
if err != nil {
return err
}
filename = fmt.Sprintf("%v@.service", UNIT_NAME_SIDEKICK)
path = filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(sidekickFleetUnit), 0644); err != nil {
return err
}
return nil
}
func (c CouchbaseFleet) WaitForFleetLaunch() error {
// wait until X nodes are up in cluster
log.Printf("Waiting for cluster to be up ..")
WaitUntilNumNodesRunning(c.NumNodes, c.EtcdServers)
// wait until no rebalance running
cb := NewCouchbaseCluster(c.EtcdServers)
if err := cb.LoadAdminCredsFromEtcd(); err != nil {
return err
}
liveNodeIp, err := cb.FindLiveNode()
if err != nil {
return err
}
	// dirty hack to solve a problem: the cluster might have
	// 2 nodes which just finished rebalancing, and a third node
	// that joins and triggers another rebalance. thus, it will briefly
	// go into a "no rebalances happening" state, followed by a rebalance.
	// if we see the "no rebalances happening" state, we'll be tricked into
	// thinking we're done when we're really not.
	// workaround: check twice, and sleep in between the checks
for i := 0; i < c.NumNodes; i++ {
if err := cb.WaitUntilNoRebalanceRunning(liveNodeIp, 30); err != nil {
return err
}
log.Printf("No rebalance running, sleeping 15s. (%v/%v)", i+1, c.NumNodes)
<-time.After(time.Second * 15)
}
log.Println("No rebalance running after several checks")
// let user know its up
log.Printf("Cluster is up!")
return nil
}
func (c *CouchbaseFleet) ExtractDocOptArgs(arguments map[string]interface{}) error {
userpass, err := ExtractUserPass(arguments)
if err != nil {
return err
}
numnodes, err := ExtractNumNodes(arguments)
if err != nil {
return err
}
cbVersion, err := ExtractCbVersion(arguments)
if err != nil {
return err
}
c.UserPass = userpass
c.NumNodes = numnodes
c.CbVersion = cbVersion
c.ContainerTag = ExtractDockerTagOrLatest(arguments)
c.SkipCleanSlateCheck = ExtractSkipCheckCleanState(arguments)
return nil
}
// call fleetctl list-machines and verify that the number of nodes
// the user asked to kick off is LTE number of machines on cluster
func (c CouchbaseFleet) verifyEnoughMachinesAvailable() error {
log.Printf("verifyEnoughMachinesAvailable()")
endpointUrl := fmt.Sprintf("%v/machines", FLEET_API_ENDPOINT)
// {"machines":[{"id":"a91c394439734375aa25 | {
stringContainsAny := func(s string, filters []string) bool {
for _, filter := range filters {
if strings.Contains(s, filter) {
return true
}
}
return false
}
for _, unit := range units {
if stringContainsAny(unit.Name, filters) {
filteredUnits = append(filteredUnits, unit)
}
}
return filteredUnits
| identifier_body |
fleet.go | return err
}
if err := launchFleetUnitN(
i,
UNIT_NAME_SIDEKICK,
sidekickFleetUnitJson,
); err != nil {
return err
}
}
if err := c.WaitForFleetLaunch(); err != nil {
log.Printf("Error waiting for couchbase cluster launch: %v", err)
return err
}
return nil
}
// Call Fleet API and tell it to stop units. If allUnits is false,
// will only stop couchbase server node + couchbase server sidekick units.
// Otherwise, will stop all fleet units.
func (c CouchbaseFleet) StopUnits(allUnits bool) error {
// set the /couchbase.com/remove-rebalance-disabled flag in etcd since
// otherwise, it will try to remove and rebalance the node, which is not
// what we want when stopping all units.
// set the ttl to be 5 minutes, since there's nothing in place yet to
// block until all the units have stopped
// (TODO: this should get added .. it waits for all units to stop, and then
// it removes the /couchbase.com/remove-rebalance-disabled flag)
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
// call ManipulateUnits with a function that will stop them
unitStopper := func(unit *schema.Unit) error {
// stop the unit by updating desiredState to inactive
// via a PUT to the fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
log.Printf("Stop unit %v via PUT %v", unit.Name, endpointUrl)
return PUT(endpointUrl, `{"desiredState": "inactive"}`)
}
return c.ManipulateUnits(unitStopper, allUnits)
}
// Call Fleet API and tell it to destroy units. If allUnits is false,
// will only destroy couchbase server node + couchbase server sidekick units.
// Otherwise, will destroy all fleet units.
func (c CouchbaseFleet) DestroyUnits(allUnits bool) error {
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
// call ManipulateUnits with a function that will stop them
unitDestroyer := func(unit *schema.Unit) error {
// destroy the unit by sending a DELETE request to the fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
return DELETE(endpointUrl)
}
return c.ManipulateUnits(unitDestroyer, allUnits)
}
type UnitManipulator func(unit *schema.Unit) error
func (c CouchbaseFleet) ManipulateUnits(unitManipulator UnitManipulator, manipulateAllUnits bool) error {
// find all the units
allUnits, err := c.findAllFleetUnits()
if err != nil {
return err
}
var units []*schema.Unit
if manipulateAllUnits {
units = allUnits
} else {
// keep only the units whose names match the patterns we care about (node and sidekick units)
unitNamePatterns := []string{UNIT_NAME_NODE, UNIT_NAME_SIDEKICK}
units = c.filterFleetUnits(allUnits, unitNamePatterns)
}
for _, unit := range units {
if err := unitManipulator(unit); err != nil {
return err
}
}
return nil
}
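// ManipulateUnits makes StopUnits and DestroyUnits thin wrappers: each one only supplies a
// different UnitManipulator callback. As an illustration of the pattern, the hypothetical
// "unload" operation below is NOT part of this file's real API; it assumes the same fleet
// /units/<name> route used above and the fleet desiredState value "loaded" (loaded into
// systemd but not started).
func (c CouchbaseFleet) unloadUnitsSketch(allUnits bool) error {
	unitUnloader := func(unit *schema.Unit) error {
		endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
		log.Printf("Unload unit %v via PUT %v", unit.Name, endpointUrl)
		return PUT(endpointUrl, `{"desiredState": "loaded"}`)
	}
	return c.ManipulateUnits(unitUnloader, allUnits)
}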
func (c CouchbaseFleet) findAllFleetUnits() (units []*schema.Unit, err error) {
endpointUrl := ""
maxAttempts := 10000
sleepSeconds := 0
nextPageToken := ""
log.Printf("findAllFleetUnits()")
worker := func() (finished bool, err error) {
// append a next page token to url if needed
if len(nextPageToken) > 0 {
endpointUrl = fmt.Sprintf("%v/units?nextPageToken=%v", FLEET_API_ENDPOINT, nextPageToken)
} else {
endpointUrl = fmt.Sprintf("%v/units", FLEET_API_ENDPOINT)
}
log.Printf("Getting units from %v", endpointUrl)
unitPage := schema.UnitPage{}
if err := getJsonData(endpointUrl, &unitPage); err != nil {
return true, err
}
// add all units to return value
for _, unit := range unitPage.Units {
units = append(units, unit)
}
// if no more pages, we are finished
areWeFinished := len(unitPage.NextPageToken) == 0
return areWeFinished, nil
}
sleeper := func(numAttempts int) (bool, int) {
if numAttempts > maxAttempts {
return false, -1
}
return true, sleepSeconds
}
if err := RetryLoop(worker, sleeper); err != nil {
return nil, err
}
return units, nil
}
func (c CouchbaseFleet) filterFleetUnits(units []*schema.Unit, filters []string) (filteredUnits []*schema.Unit) {
stringContainsAny := func(s string, filters []string) bool {
for _, filter := range filters {
if strings.Contains(s, filter) {
return true
}
}
return false
}
for _, unit := range units {
if stringContainsAny(unit.Name, filters) {
filteredUnits = append(filteredUnits, unit)
}
}
return filteredUnits
}
func (c CouchbaseFleet) GenerateUnits(outputDir string) error {
// generate node unit
nodeFleetUnit, err := c.generateNodeFleetUnitFile()
if err != nil {
return err
}
filename := fmt.Sprintf("%v@.service", UNIT_NAME_NODE)
path := filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(nodeFleetUnit), 0644); err != nil {
return err
}
// generate sidekick unit
sidekickFleetUnit, err := c.generateSidekickFleetUnitFile("%i")
if err != nil {
return err
}
filename = fmt.Sprintf("%v@.service", UNIT_NAME_SIDEKICK)
path = filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(sidekickFleetUnit), 0644); err != nil {
return err
}
return nil
}
func (c CouchbaseFleet) WaitForFleetLaunch() error {
// wait until X nodes are up in cluster
log.Printf("Waiting for cluster to be up ..")
WaitUntilNumNodesRunning(c.NumNodes, c.EtcdServers)
// wait until no rebalance running
cb := NewCouchbaseCluster(c.EtcdServers)
if err := cb.LoadAdminCredsFromEtcd(); err != nil {
return err
}
liveNodeIp, err := cb.FindLiveNode()
if err != nil |
// dirty hack to solve problem: the cluster might have
// 2 nodes which just finished rebalancing, and a third node
// that joins and triggers another rebalance. thus, it will briefly
// go into "no rebalances happening" state, followed by a rebalance.
// if we see the "no rebalances happening state", we'll be tricked and
// think we're done when we're really not.
// workaround: check twice, and sleep in between the check
for i := 0; i < c.NumNodes; i++ {
if err := cb.WaitUntilNoRebalanceRunning(liveNodeIp, 30); err != nil {
return err
}
log.Printf("No rebalance running, sleeping 15s. (%v/%v)", i+1, c.NumNodes)
<-time.After(time.Second * 15)
}
log.Println("No rebalance running after several checks")
// let the user know it's up
log.Printf("Cluster is up!")
return nil
}
func (c *CouchbaseFleet) ExtractDocOptArgs(arguments map[string]interface{}) error {
userpass, err := ExtractUserPass(arguments)
if err != nil {
return err
}
numnodes, err := ExtractNumNodes(arguments)
if err != nil {
return err
}
cbVersion, err := ExtractCbVersion(arguments)
if err != nil {
return err
}
c.UserPass = userpass
c.NumNodes = numnodes
c.CbVersion = cbVersion
c.ContainerTag = ExtractDockerTagOrLatest(arguments)
c.SkipCleanSlateCheck = ExtractSkipCheckCleanState(arguments)
return nil
}
// call fleetctl list-machines and verify that the number of nodes
// the user asked to kick off is LTE number of machines on cluster
func (c CouchbaseFleet) verifyEnoughMachinesAvailable() error {
log.Printf("verifyEnoughMachinesAvailable()")
endpointUrl := fmt.Sprintf("%v/machines", FLEET_API_ENDPOINT)
// {"machines":[{"id":"a91c394439734375aa25 | {
return err
} | conditional_block |
fleet.go | return err
}
if err := launchFleetUnitN(
i,
UNIT_NAME_SIDEKICK,
sidekickFleetUnitJson,
); err != nil {
return err
}
}
if err := c.WaitForFleetLaunch(); err != nil {
log.Printf("Error waiting for couchbase cluster launch: %v", err)
return err
}
return nil
}
// Call Fleet API and tell it to stop units. If allUnits is false,
// will only stop couchbase server node + couchbase server sidekick units.
// Otherwise, will stop all fleet units.
func (c CouchbaseFleet) StopUnits(allUnits bool) error {
// set the /couchbase.com/remove-rebalance-disabled flag in etcd since
// otherwise, it will try to remove and rebalance the node, which is not
// what we want when stopping all units.
// set the ttl to be 5 minutes, since there's nothing in place yet to
// block until all the units have stopped
// (TODO: this should get added .. it waits for all units to stop, and then
// it removes the /couchbase.com/remove-rebalance-disabled flag)
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
// call ManipulateUnits with a function that will stop them
unitStopper := func(unit *schema.Unit) error {
// stop the unit by updating desiredState to inactive
// via a PUT to the fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
log.Printf("Stop unit %v via PUT %v", unit.Name, endpointUrl)
return PUT(endpointUrl, `{"desiredState": "inactive"}`)
}
return c.ManipulateUnits(unitStopper, allUnits)
}
// Call Fleet API and tell it to destroy units. If allUnits is false,
// will only destroy couchbase server node + couchbase server sidekick units.
// Otherwise, will destroy all fleet units.
func (c CouchbaseFleet) DestroyUnits(allUnits bool) error {
ttlSeconds := uint64(300)
_, err := c.etcdClient.Set(KEY_REMOVE_REBALANCE_DISABLED, "true", ttlSeconds)
if err != nil {
return err
}
// call ManipulateUnits with a function that will stop them
unitDestroyer := func(unit *schema.Unit) error {
// destroy the unit by sending a DELETE request to the fleet api
endpointUrl := fmt.Sprintf("%v/units/%v", FLEET_API_ENDPOINT, unit.Name)
return DELETE(endpointUrl)
}
return c.ManipulateUnits(unitDestroyer, allUnits)
}
type UnitManipulator func(unit *schema.Unit) error
func (c CouchbaseFleet) ManipulateUnits(unitManipulator UnitManipulator, manipulateAllUnits bool) error {
// find all the units
allUnits, err := c.findAllFleetUnits()
if err != nil {
return err
}
var units []*schema.Unit
if manipulateAllUnits {
units = allUnits
} else {
// keep only the units whose names match the patterns we care about (node and sidekick units)
unitNamePatterns := []string{UNIT_NAME_NODE, UNIT_NAME_SIDEKICK}
units = c.filterFleetUnits(allUnits, unitNamePatterns)
}
for _, unit := range units {
if err := unitManipulator(unit); err != nil {
return err
}
}
return nil
}
func (c CouchbaseFleet) findAllFleetUnits() (units []*schema.Unit, err error) {
endpointUrl := ""
maxAttempts := 10000
sleepSeconds := 0
nextPageToken := ""
log.Printf("findAllFleetUnits()")
worker := func() (finished bool, err error) {
// append a next page token to url if needed
if len(nextPageToken) > 0 {
endpointUrl = fmt.Sprintf("%v/units?nextPageToken=%v", FLEET_API_ENDPOINT, nextPageToken)
} else {
endpointUrl = fmt.Sprintf("%v/units", FLEET_API_ENDPOINT)
}
log.Printf("Getting units from %v", endpointUrl)
unitPage := schema.UnitPage{}
if err := getJsonData(endpointUrl, &unitPage); err != nil {
return true, err
}
// add all units to return value
for _, unit := range unitPage.Units {
units = append(units, unit)
}
// if no more pages, we are finished
areWeFinished := len(unitPage.NextPageToken) == 0
return areWeFinished, nil
}
sleeper := func(numAttempts int) (bool, int) {
if numAttempts > maxAttempts {
return false, -1
}
return true, sleepSeconds
}
if err := RetryLoop(worker, sleeper); err != nil {
return nil, err
}
return units, nil
}
func (c CouchbaseFleet) filterFleetUnits(units []*schema.Unit, filters []string) (filteredUnits []*schema.Unit) {
stringContainsAny := func(s string, filters []string) bool {
for _, filter := range filters {
if strings.Contains(s, filter) {
return true
}
}
return false
}
for _, unit := range units {
if stringContainsAny(unit.Name, filters) {
filteredUnits = append(filteredUnits, unit)
}
}
return filteredUnits
}
func (c CouchbaseFleet) GenerateUnits(outputDir string) error {
// generate node unit
nodeFleetUnit, err := c.generateNodeFleetUnitFile()
if err != nil {
return err
}
filename := fmt.Sprintf("%v@.service", UNIT_NAME_NODE)
path := filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(nodeFleetUnit), 0644); err != nil {
return err
}
| // generate sidekick unit
sidekickFleetUnit, err := c.generateSidekickFleetUnitFile("%i")
if err != nil {
return err
}
filename = fmt.Sprintf("%v@.service", UNIT_NAME_SIDEKICK)
path = filepath.Join(outputDir, filename)
if err := ioutil.WriteFile(path, []byte(sidekickFleetUnit), 0644); err != nil {
return err
}
return nil
}
func (c CouchbaseFleet) WaitForFleetLaunch() error {
// wait until X nodes are up in cluster
log.Printf("Waiting for cluster to be up ..")
WaitUntilNumNodesRunning(c.NumNodes, c.EtcdServers)
// wait until no rebalance running
cb := NewCouchbaseCluster(c.EtcdServers)
if err := cb.LoadAdminCredsFromEtcd(); err != nil {
return err
}
liveNodeIp, err := cb.FindLiveNode()
if err != nil {
return err
}
// dirty hack to solve problem: the cluster might have
// 2 nodes which just finished rebalancing, and a third node
// that joins and triggers another rebalance. thus, it will briefly
// go into "no rebalances happening" state, followed by a rebalance.
// if we see the "no rebalances happening state", we'll be tricked and
// think we're done when we're really not.
// workaround: check twice, and sleep in between the check
for i := 0; i < c.NumNodes; i++ {
if err := cb.WaitUntilNoRebalanceRunning(liveNodeIp, 30); err != nil {
return err
}
log.Printf("No rebalance running, sleeping 15s. (%v/%v)", i+1, c.NumNodes)
<-time.After(time.Second * 15)
}
log.Println("No rebalance running after several checks")
// let the user know it's up
log.Printf("Cluster is up!")
return nil
}
func (c *CouchbaseFleet) ExtractDocOptArgs(arguments map[string]interface{}) error {
userpass, err := ExtractUserPass(arguments)
if err != nil {
return err
}
numnodes, err := ExtractNumNodes(arguments)
if err != nil {
return err
}
cbVersion, err := ExtractCbVersion(arguments)
if err != nil {
return err
}
c.UserPass = userpass
c.NumNodes = numnodes
c.CbVersion = cbVersion
c.ContainerTag = ExtractDockerTagOrLatest(arguments)
c.SkipCleanSlateCheck = ExtractSkipCheckCleanState(arguments)
return nil
}
// call fleetctl list-machines and verify that the number of nodes
// the user asked to kick off is LTE number of machines on cluster
func (c CouchbaseFleet) verifyEnoughMachinesAvailable() error {
log.Printf("verifyEnoughMachinesAvailable()")
endpointUrl := fmt.Sprintf("%v/machines", FLEET_API_ENDPOINT)
// {"machines":[{"id":"a91c394439734375aa256 | random_line_split | |
aplicacao.py | :
# python -m serial.tools.list_ports
serialName = "/dev/ttyACM0" # Ubuntu (variacao de)
#serialName = "/dev/tty.usbmodem1411" # Mac (variacao de)
#serialName = "COM11" # Windows(variacao de)
class Client():
def __init__(self, serialName, debug=False):
self.com = enlace(serialName)
self.com.enable()
self.debug = debug
self.fileName = None
self.results = []
if debug:
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(self.com.fisica.name))
self.run()
def run(self):
self.shouldStop = False
while not self.shouldStop:
self.configure()
if self.fileName != None:
self.emit()
self.getResults()
self.fileName = None
meanImageSize = 0
meanDeltaTime = 0
meanTransferRate = 0
for result in self.results:
meanImageSize += result[0]
meanDeltaTime += result[1]
meanTransferRate += result[2]
meanImageSize = meanImageSize/len(self.results)
meanDeltaTime = meanDeltaTime/len(self.results)
meanTransferRate = meanTransferRate/len(self.results)
print("[LOG] Tamanho Médio de Imagem.........{:.3f} b".format(meanImageSize))
print("[LOG] Tempo Médio de Transferência....{:.3f} s".format(meanDeltaTime))
print("[LOG] Taxa Média de transferência.....{:.3f} b/s".format(meanTransferRate))
def configure(self):
if self.debug:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
self.fileName = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(self.fileName) is tuple or self.fileName == "":
self.shouldStop = True
self.fileName = None
return None
with open(self.fileName, 'rb') as image:
if self.debug:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
self.imageByteArray = bytearray(imageFile)
self.imageSize = bytes(str(len(self.imageByteArray)), 'UTF-8')
if self.debug:
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(self.imageSize)))
self.textBuffer = self.imageSize + bytearray(b"start") + self.imageByteArray
def emit(self):
if self.debug:
print("[LOG] Tentado transmitir........{} bytes.".format(len(self.textBuffer)))
self.startTime = time.time()
self.com.sendData(self.textBuffer)
# Esperando o fim da transmissão do arquivo.
while(self.com.tx.getIsBussy()):
pass
txSize = self.com.tx.getStatus()
if self.debug:
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
rxBuffer = self.com.getData(len(self.imageSize))[0]
self.endTime = time.time()
if self.debug:
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(self.imageSize):
if self.debug:
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
self.com.disable()
if self.debug:
print("[LOG] Comunicação encerrada.")
self.shouldStop = True
if self.debug:
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
def getResults(self):
deltaTime = self.endTime - self.startTime
transferRate = int(self.imageSize) / deltaTime
if self.debug:
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
self.results.append([int(self.imageSize), deltaTime, transferRate])
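# The serial client above uses a simple framing scheme: the payload size as ASCII digits,
# the keyword b"start", then the raw file bytes. The helper below is an illustrative sketch
# only (it is not used by the real client/server code); it shows how the header and payload
# of such a frame can be split once the bytes have been received.
def parse_framed_message(stream: bytes):
    """Sketch: return (declared_size, payload) from a 'size + b"start" + data' frame."""
    header, _, payload = stream.partition(b"start")
    declared_size = int(header)
    return declared_size, payload[:declared_size]
# Example: parse_framed_message(b"5startABCDE") -> (5, b"ABCDE")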
def client(args):
# Inicializa enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace(serialName) # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão
com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name))
shouldClose = False
while not shouldClose:
# Verifica se o arquivo a ser transferido foi passado como
# argumento ou se deve ser escolhido pelo GUI.
if args.file is None:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
filePath = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(filePath) is tuple or filePath == "":
shouldClose = True
sys.exit("[ERRO] Arquivo não escolhido. Abortando... Usar CTRL+C")
else:
print("\n[LOG] Arquivo fornecido como argumento.")
filePath = args.file
with open(filePath, "rb") as image:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
imageByteArray = bytearray(imageFile)
imageSize = bytes(str(len(imageByteArray)), 'UTF-8')
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(imageSize)))
# Criando o buffer a ser transmitido.
txBuffer = imageSize + bytearray(b"start") + imageByteArray
# Envia dado.
print("[LOG] Tentado transmitir........{} bytes.".format(len(txBuffer)))
startTime = time.time()
com.sendData(txBuffer)
# Esperando o fim da transmissão do arquivo.
while(com.tx.getIsBussy()):
pass
# Atualiza dados da transmissão.
txSize = com.tx.getStatus()
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
rxBuffer, nRx = com.getData(len(imageSize))
endTime = time.time()
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(imageSize):
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
com.disable()
print("[LOG] Comunicação encerrada.")
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
# Calculando o tempo e a taxa de transferência.
deltaTime = endTime - startTime
transferRate = int(imageSize) / deltaTime
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
# Encerra a comunicação.
com.disable()
print("\n[LOG] Comunicação encerrada.")
def server(args):
# Inicializa enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace("/dev/ttyACM1") # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão |
while True:
# Faz a recepção dos dados
print("\n[LOG] Recebendo dados...")
keywordRecognized = False
receiveBuffer = bytearray()
# Espera até receber uma keyword.
while not keywordRecognized:
rxBuffer, nRx = com.getData(1)
receiveBuffer += rxBuffer
if b"start" in receiveBuffer:
keywordRecognized = True
# Cortando a keyword do buffer recebido.
imageSize = receiveBuffer[:-5]
print("[LOG] Começou a receber a arquivo. Tamanho do arquivo a ser recebido: {} bytes.".format(int(imageSize)))
# Agora recebemos a arquivo em si.
rxBuffer, nRx = com.getData(int(imageSize))
# Salvando a arquivo recebida.
with open("receivedImage.png", "wb") as receivedImage:
receivedImage.write(rxBuffer)
# LOG
print("[LOG] Lido....{} bytes ".format(nRx))
# Retornando o tamanho da arquivo para mostrar que ela foi recebida.
print("[LOG] Retornando o tamanho do arquivo para mostrar que ele foi receb | com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name)) | random_line_split |
aplicacao.py | # python -m serial.tools.list_ports
serialName = "/dev/ttyACM0" # Ubuntu (variacao de)
#serialName = "/dev/tty.usbmodem1411" # Mac (variacao de)
#serialName = "COM11" # Windows(variacao de)
class Cli |
def __init__(self, serialName, debug=False):
self.com = enlace(serialName)
self.com.enable()
self.debug = debug
self.fileName = None
self.results = []
if debug:
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(self.com.fisica.name))
self.run()
def run(self):
self.shouldStop = False
while not self.shouldStop:
self.configure()
if self.fileName != None:
self.emit()
self.getResults()
self.fileName = None
meanImageSize = 0
meanDeltaTime = 0
meanTransferRate = 0
for result in self.results:
meanImageSize += result[0]
meanDeltaTime += result[1]
meanTransferRate += result[2]
meanImageSize = meanImageSize/len(self.results)
meanDeltaTime = meanDeltaTime/len(self.results)
meanTransferRate = meanTransferRate/len(self.results)
print("[LOG] Tamanho Médio de Imagem.........{:.3f} b".format(meanImageSize))
print("[LOG] Tempo Médio de Transferência....{:.3f} s".format(meanDeltaTime))
print("[LOG] Taxa Média de transferência.....{:.3f} b/s".format(meanTransferRate))
def configure(self):
if self.debug:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
self.fileName = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(self.fileName) is tuple or self.fileName == "":
self.shouldStop = True
self.fileName = None
return None
with open(self.fileName, 'rb') as image:
if self.debug:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
self.imageByteArray = bytearray(imageFile)
self.imageSize = bytes(str(len(self.imageByteArray)), 'UTF-8')
if self.debug:
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(self.imageSize)))
self.textBuffer = self.imageSize + bytearray(b"start") + self.imageByteArray
def emit(self):
if self.debug:
print("[LOG] Tentado transmitir........{} bytes.".format(len(self.textBuffer)))
self.startTime = time.time()
self.com.sendData(self.textBuffer)
# Esperando o fim da transmissão do arquivo.
while(self.com.tx.getIsBussy()):
pass
txSize = self.com.tx.getStatus()
if self.debug:
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
rxBuffer = self.com.getData(len(self.imageSize))[0]
self.endTime = time.time()
if self.debug:
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(self.imageSize):
if self.debug:
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
self.com.disable()
if self.debug:
print("[LOG] Comunicação encerrada.")
self.shouldStop = True
if self.debug:
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
def getResults(self):
deltaTime = self.endTime - self.startTime
transferRate = int(self.imageSize) / deltaTime
if self.debug:
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
self.results.append([int(self.imageSize), deltaTime, transferRate])
def client(args):
# Inicializa enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace(serialName) # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão
com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name))
shouldClose = False
while not shouldClose:
# Verifica se o arquivo a ser transferido foi passado como
# argumento ou se deve ser escolhido pelo GUI.
if args.file is None:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
filePath = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(filePath) is tuple or filePath == "":
shouldClose = True
sys.exit("[ERRO] Arquivo não escolhido. Abortando... Usar CTRL+C")
else:
print("\n[LOG] Arquivo fornecido como argumento.")
filePath = args.file
with open(filePath, "rb") as image:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
imageByteArray = bytearray(imageFile)
imageSize = bytes(str(len(imageByteArray)), 'UTF-8')
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(imageSize)))
# Criando o buffer a ser transmitido.
txBuffer = imageSize + bytearray(b"start") + imageByteArray
# Envia dado.
print("[LOG] Tentado transmitir........{} bytes.".format(len(txBuffer)))
startTime = time.time()
com.sendData(txBuffer)
# Esperando o fim da transmissão do arquivo.
while(com.tx.getIsBussy()):
pass
# Atualiza dados da transmissão.
txSize = com.tx.getStatus()
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
rxBuffer, nRx = com.getData(len(imageSize))
endTime = time.time()
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(imageSize):
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
com.disable()
print("[LOG] Comunicação encerrada.")
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
# Calculando o tempo e a taxa de transferência.
deltaTime = endTime - startTime
transferRate = int(imageSize) / deltaTime
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
# Encerra a comunicação.
com.disable()
print("\n[LOG] Comunicação encerrada.")
def server(args):
# Inicializa enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace("/dev/ttyACM1") # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão
com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name))
while True:
# Faz a recepção dos dados
print("\n[LOG] Recebendo dados...")
keywordRecognized = False
receiveBuffer = bytearray()
# Espera até receber uma keyword.
while not keywordRecognized:
rxBuffer, nRx = com.getData(1)
receiveBuffer += rxBuffer
if b"start" in receiveBuffer:
keywordRecognized = True
# Cortando a keyword do buffer recebido.
imageSize = receiveBuffer[:-5]
print("[LOG] Começou a receber a arquivo. Tamanho do arquivo a ser recebido: {} bytes.".format(int(imageSize)))
# Agora recebemos a arquivo em si.
rxBuffer, nRx = com.getData(int(imageSize))
# Salvando a arquivo recebida.
with open("receivedImage.png", "wb") as receivedImage:
receivedImage.write(rxBuffer)
# LOG
print("[LOG] Lido....{} bytes ".format(nRx))
# Retornando o tamanho da arquivo para mostrar que ela foi recebida.
print("[LOG] Retornando o tamanho do arquivo para mostrar que ele foi rece | ent(): | identifier_name |
aplicacao.py | # python -m serial.tools.list_ports
serialName = "/dev/ttyACM0" # Ubuntu (variacao de)
#serialName = "/dev/tty.usbmodem1411" # Mac (variacao de)
#serialName = "COM11" # Windows(variacao de)
class Client():
def __init__(self, serialName, debug=False):
self.com = enlace(serialName)
self.com.enable()
self.debug = debug
self.fileName = None
self.results = []
if debug:
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(self.com.fisica.name))
self.run()
def run(self):
self.shouldStop = False
while not self.shouldStop:
self.configure()
if self.fileName != None:
self.emit()
self.getResults()
self.fileName = None
meanImageSize = 0
meanDeltaTime = 0
meanTransferRate = 0
for result in self.results:
meanImageSize += result[0]
meanDeltaTime += result[1]
meanTransferRate += result[2]
meanImageSize = meanImageSize/len(self.results)
meanDeltaTime = meanDeltaTime/len(self.results)
meanTransferRate = meanTransferRate/len(self.results)
print("[LOG] Tamanho Médio de Imagem.........{:.3f} b".format(meanImageSize))
print("[LOG] Tempo Médio de Transferência....{:.3f} s".format(meanDeltaTime))
print("[LOG] Taxa Média de transferência.....{:.3f} b/s".format(meanTransferRate))
def configure(self):
if self.debug:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
self.fileName = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(self.fileName) is tuple or self.fileName == "":
self.shouldStop = True
self.fileName = None
return None
with open(self.fileName, 'rb') as image:
if self.debug:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
self.imageByteArray = bytearray(imageFile)
self.imageSize = bytes(str(len(self.imageByteArray)), 'UTF-8')
if self.debug:
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(self.imageSize)))
self.textBuffer = self.imageSize + bytearray(b"start") + self.imageByteArray
def emit(self):
if self.debug:
print("[LOG] Tentado transmitir........{} bytes.".format(len(self.textBuffer)))
self.startTime = time.time()
self.com.sendData(self.textBuffer)
# Esperando o fim da transmissão do arquivo.
while(self.com.tx.getIsBussy()):
pass
txSize = self.com.tx.getStatus()
if self.debug:
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
rxBuffer = self.com.getData(len(self.imageSize))[0]
self.endTime = time.time()
if self.debug:
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(self.imageSize):
if self.debug:
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
self.com.disable()
if self.debug:
print("[LOG] Comunicação encerrada.")
self.shouldStop = True
if self.debug:
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
def getResults(self):
deltaTime = self.endTime - self.startTime
transferRate = int(self.imageSize) / deltaTime
if self.debug:
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
self.results.append([int(self.imageSize), deltaTime, transferRate])
def client(args):
# Inicializa enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace(serialN |
else:
print("\n[LOG] Arquivo fornecido como argumento.")
filePath = args.file
with open(filePath, "rb") as image:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
imageByteArray = bytearray(imageFile)
imageSize = bytes(str(len(imageByteArray)), 'UTF-8')
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(imageSize)))
# Criando o buffer a ser transmitido.
txBuffer = imageSize + bytearray(b"start") + imageByteArray
# Envia dado.
print("[LOG] Tentado transmitir........{} bytes.".format(len(txBuffer)))
startTime = time.time()
com.sendData(txBuffer)
# Esperando o fim da transmissão do arquivo.
while(com.tx.getIsBussy()):
pass
# Atualiza dados da transmissão.
txSize = com.tx.getStatus()
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
rxBuffer, nRx = com.getData(len(imageSize))
endTime = time.time()
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(imageSize):
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
com.disable()
print("[LOG] Comunicação encerrada.")
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
# Calculando o tempo e a taxa de transferência.
deltaTime = endTime - startTime
transferRate = int(imageSize) / deltaTime
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
# Encerra a comunicação.
com.disable()
print("\n[LOG] Comunicação encerrada.")
def server(args):
# Inicializa enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace("/dev/ttyACM1") # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão
com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name))
while True:
# Faz a recepção dos dados
print("\n[LOG] Recebendo dados...")
keywordRecognized = False
receiveBuffer = bytearray()
# Espera até receber uma keyword.
while not keywordRecognized:
rxBuffer, nRx = com.getData(1)
receiveBuffer += rxBuffer
if b"start" in receiveBuffer:
keywordRecognized = True
# Cortando a keyword do buffer recebido.
imageSize = receiveBuffer[:-5]
print("[LOG] Começou a receber a arquivo. Tamanho do arquivo a ser recebido: {} bytes.".format(int(imageSize)))
# Agora recebemos a arquivo em si.
rxBuffer, nRx = com.getData(int(imageSize))
# Salvando a arquivo recebida.
with open("receivedImage.png", "wb") as receivedImage:
receivedImage.write(rxBuffer)
# LOG
print("[LOG] Lido....{} bytes ".format(nRx))
# Retornando o tamanho da arquivo para mostrar que ela foi recebida.
print("[LOG] Retornando o tamanho do arquivo para mostrar que ele foi | ame) # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão
com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name))
shouldClose = False
while not shouldClose:
# Verifica se o arquivo a ser transferido foi passado como
# argumento ou se deve ser escolhido pelo GUI.
if args.file is None:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
filePath = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(filePath) is tuple or filePath == "":
shouldClose = True
sys.exit("[ERRO] Arquivo não escolhido. Abortando... Usar CTRL+C") | identifier_body |
aplicacao.py | :
# python -m serial.tools.list_ports
serialName = "/dev/ttyACM0" # Ubuntu (variacao de)
#serialName = "/dev/tty.usbmodem1411" # Mac (variacao de)
#serialName = "COM11" # Windows(variacao de)
class Client():
def __init__(self, serialName, debug=False):
self.com = enlace(serialName)
self.com.enable()
self.debug = debug
self.fileName = None
self.results = []
if debug:
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(self.com.fisica.name))
self.run()
def run(self):
self.shouldStop = False
while not self.shouldStop:
self.configure()
if self.fileName != None:
self.emit()
self.getResults()
self.fileName = None
meanImageSize = 0
meanDeltaTime = 0
meanTransferRate = 0
for result in self.results:
meanImageSize += result[0]
meanDeltaTime += result[1]
meanTransferRate += result[2]
meanImageSize = meanImageSize/len(self.results)
meanDeltaTime = meanDeltaTime/len(self.results)
meanTransferRate = meanTransferRate/len(self.results)
print("[LOG] Tamanho Médio de Imagem.........{:.3f} b".format(meanImageSize))
print("[LOG] Tempo Médio de Transferência....{:.3f} s".format(meanDeltaTime))
print("[LOG] Taxa Média de transferência.....{:.3f} b/s".format(meanTransferRate))
def configure(self):
if self.debug:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
self.fileName = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(self.fileName) is tuple or self.fileName == "":
self.shouldStop = True
self.fileName = None
return None
with open(self.fileName, 'rb') as image:
if self.debug:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
self.imageByteArray = bytearray(imageFile)
self.imageSize = bytes(str(len(self.imageByteArray)), 'UTF-8')
if self.debug:
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(self.imageSize)))
self.textBuffer = self.imageSize + bytearray(b"start") + self.imageByteArray
def emit(self):
if self.debug:
print("[LOG] Tentado transmitir........{} bytes.".format(len(self.textBuffer)))
self.startTime = time.time()
self.com.sendData(self.textBuffer)
# Esperando o fim da transmissão do arquivo.
while(self.com.tx.getIsBussy()):
pass
txSize = self.com.tx.getStatus()
if self.debug:
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
rxBuffer = self.com.getData(len(self.imageSize))[0]
self.endTime = time.time()
if self.debug:
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(self.imageSize):
if self.debug:
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
self.com.disable()
if self.debug:
print("[LOG] Comunicação encerrada.")
self.shouldStop = True
if self.debug:
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
def getResults(self):
deltaTime = self.endTime - self.startTime
transferRate = int(self.imageSize) / deltaTime
if self.debug:
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
self.results.append([int(self.imageSize), deltaTime, transferRate])
def client(args):
# Inicializa enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace(serialName) # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão
com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name))
shouldClose = False
while not shouldClose:
# Verifica se o arquivo a ser transferido foi passado como
# argumento ou se deve ser escolhido pelo GUI.
if args.file is None:
print("\n[LOG] Arquivo não fornecido como argumento, usando GUI.")
Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing
filePath = askopenfilename() # Show an "Open" dialog box and return the path to the selected file
if type(filePath) is tuple or filePath == "":
shouldClose = True
sys.exit("[ERRO] Arquivo não escolhido. Abortando... Usar CTRL+C")
else:
print("\n[LOG] Arquivo fornecido como argumento.")
filePath = args.file
with open(filePath, "rb") as image:
print("[LOG] Arquivo encontrado. Lendo e transformando em bytearray.")
imageFile = image.read()
imageByteArray = bytearray(imageFile)
imageSize = bytes(str(len(imageByteArray)), 'UTF-8')
print("[LOG] Tamanho do arquivo........{} bytes.".format(int(imageSize)))
# Criando o buffer a ser transmitido.
txBuffer = imageSize + bytearray(b"start") + imageByteArray
# Envia dado.
print("[LOG] Tentado transmitir........{} bytes.".format(len(txBuffer)))
startTime = time.time()
com.sendData(txBuffer)
# Esperando o fim da transmissão do arquivo.
while(com.tx.getIsBussy()):
pass
| ualiza dados da transmissão.
txSize = com.tx.getStatus()
print("[LOG] Transmitido...............{} bytes.".format(int(txSize)))
# Esperando pela resposta. Sabemos que ela deve ser o tamanho da arquivo.
print("[LOG] Esperando pela resposta do servidor com o tamanho do arquivo.")
rxBuffer, nRx = com.getData(len(imageSize))
endTime = time.time()
print("[LOG] Resposta: {} bytes.".format(int(rxBuffer)))
# Verifica se o tamanho recebido está correto.
if int(rxBuffer) != int(imageSize):
print("[LOG] Tamanho incorreto.")
# Encerra a comunicação.
com.disable()
print("[LOG] Comunicação encerrada.")
print("[LOG] Tamanho correto. Arquivo enviado com sucesso.")
# Calculando o tempo e a taxa de transferência.
deltaTime = endTime - startTime
transferRate = int(imageSize) / deltaTime
print("[LOG] Tempo levado..............{:.3f} s".format(deltaTime))
print("[LOG] Taxa de transferência.....{:.3f} b/s".format(transferRate))
# Encerra a comunicação.
com.disable()
print("\n[LOG] Comunicação encerrada.")
def server(args):
# Inicializa enlace... variável COM possui todos os métodos e propriedades do enlace, que funciona em threading
com = enlace("/dev/ttyACM1") # Repare que o metodo construtor recebe um string (nome)
# Ativa comunicacão
com.enable()
# LOG
print("[LOG] Comunicação inicializada.")
print("[LOG] Porta: {}".format(com.fisica.name))
while True:
# Faz a recepção dos dados
print("\n[LOG] Recebendo dados...")
keywordRecognized = False
receiveBuffer = bytearray()
# Espera até receber uma keyword.
while not keywordRecognized:
rxBuffer, nRx = com.getData(1)
receiveBuffer += rxBuffer
if b"start" in receiveBuffer:
keywordRecognized = True
# Cortando a keyword do buffer recebido.
imageSize = receiveBuffer[:-5]
print("[LOG] Começou a receber a arquivo. Tamanho do arquivo a ser recebido: {} bytes.".format(int(imageSize)))
# Agora recebemos a arquivo em si.
rxBuffer, nRx = com.getData(int(imageSize))
# Salvando a arquivo recebida.
with open("receivedImage.png", "wb") as receivedImage:
receivedImage.write(rxBuffer)
# LOG
print("[LOG] Lido....{} bytes ".format(nRx))
# Retornando o tamanho da arquivo para mostrar que ela foi recebida.
print("[LOG] Retornando o tamanho do arquivo para mostrar que ele foi receb | # At | conditional_block |
lib.rs | 'c>(&'b mut self, slab_page: &'c mut ObjectPage<'a>) {
unsafe {
match slab_page.prev.resolve_mut() {
None => {
self.head = slab_page.next.resolve_mut();
}
Some(prev) => {
prev.next = match slab_page.next.resolve_mut() {
None => Rawlink::none(),
Some(next) => Rawlink::some(next),
};
}
}
match slab_page.next.resolve_mut() {
None => (),
Some(next) => {
next.prev = match slab_page.prev.resolve_mut() {
None => Rawlink::none(),
Some(prev) => Rawlink::some(prev),
};
}
}
}
self.elements -= 1;
}
/// Does the list contain `s`?
fn has_objectpage<'b>(&'b mut self, s: &'a ObjectPage<'a>) -> bool {
for slab_page in self.iter_mut() {
if slab_page as *const ObjectPage == s as *const ObjectPage {
return true;
}
}
false
}
}
/// Iterate over all the pages inside a slab allocator
struct ObjectPageIterMut<'a> {
head: Rawlink<ObjectPage<'a>>,
}
impl<'a> Iterator for ObjectPageIterMut<'a> {
type Item = &'a mut ObjectPage<'a>;
#[inline]
fn next(&mut self) -> Option<&'a mut ObjectPage<'a>> {
unsafe {
self.head.resolve_mut().map(|next| {
self.head = match next.next.resolve_mut() {
None => Rawlink::none(),
Some(ref mut sp) => Rawlink::some(*sp),
};
next
})
}
}
}
/// A slab allocator allocates elements of a fixed size.
///
/// It has a list of ObjectPage stored inside `slabs` from which
/// it allocates memory.
pub struct SCAllocator<'a> {
/// Allocation size.
size: usize,
/// Memory backing store, to request new ObjectPage.
pager: &'a Mutex<PageProvider<'a>>,
/// List of ObjectPage.
slabs: ObjectPageList<'a>,
}
#[test]
pub fn iter_empty_list() {
let mut new_head1: ObjectPage = Default::default();
let mut l = ObjectPageList::new();
l.insert_front(&mut new_head1);
for p in l.iter_mut() {}
}
impl<'a> SCAllocator<'a> {
/// Create a new SCAllocator.
#[cfg(feature = "unstable")]
pub const fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Create a new SCAllocator.
#[cfg(not(feature = "unstable"))]
pub fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Return object size of this allocator.
pub fn size(&self) -> usize {
self.size
}
/// Try to allocate a new ObjectPage and insert it.
///
/// # TODO
/// * Amount is currently ignored.
/// * Panics on OOM (should return error!)
fn refill_slab<'b>(&'b mut self, amount: usize) {
let mut pager = self.pager.lock();
for i in 0..amount {
match pager.allocate_page() {
Some(new_head) => {
self.insert_slab(new_head);
}
None => panic!("OOM"),
}
}
}
/// Add a new ObjectPage.
pub fn insert_slab<'b>(&'b mut self, new_head: &'a mut ObjectPage<'a>) {
self.slabs.insert_front(new_head);
}
/// Tries to allocate a block of memory with respect to the `alignment`.
///
/// Only searches within already allocated slab pages.
fn try_allocate_from_pagelist<'b>(&'b mut self, layout: Layout) -> *mut u8 {
let size = self.size;
for (idx, slab_page) in self.slabs.iter_mut().enumerate() {
let ptr = slab_page.allocate(layout);
if !ptr.is_null() {
return ptr;
} else {
continue;
}
}
ptr::null_mut()
}
/// Allocates a block of memory with respect to `alignment`.
///
/// In case of failure will try to grow the slab allocator by requesting
/// additional pages and re-try the allocation once more before we give up.
pub fn allocate<'b>(&'b mut self, layout: Layout) -> *mut u8 {
debug!(
"SCAllocator({}) is trying to allocate {:?}",
self.size, layout
);
assert!(layout.size() <= self.size);
assert!(self.size <= (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
assert!(new_layout.size() >= layout.size());
let ptr = self.try_allocate_from_pagelist(new_layout);
if ptr.is_null() {
self.refill_slab(1);
return self.try_allocate_from_pagelist(layout);
}
debug!(
"SCAllocator({}) allocated ptr=0x{:x}",
self.size, ptr as usize
);
return ptr;
}
/// Deallocates a previously allocated block.
///
/// # Bug
/// This never releases memory in case the ObjectPage are provided by the zone.
pub fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
debug!(
"SCAllocator({}) is trying to deallocate ptr = 0x{:x} layout={:?}",
self.size, ptr as usize, layout
);
assert!(layout.size() <= self.size);
let page = (ptr as usize) & !(BASE_PAGE_SIZE - 1) as usize;
let slab_page = unsafe { mem::transmute::<VAddr, &'a mut ObjectPage>(page) };
assert!(self.size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
slab_page.deallocate(ptr, new_layout);
// Drop page in case it is empty and not the last
if slab_page.is_empty() && self.slabs.elements > 1 {
self.slabs.remove_from_list(slab_page);
let mut pager = self.pager.lock();
pager.release_page(slab_page);
}
}
}
/// Holds allocated data.
///
/// Objects life within data and meta tracks the objects status.
/// Currently, `bitfield`, `next` and `prev` pointer should fit inside
/// a single cache-line.
#[repr(packed)]
pub struct ObjectPage<'a> {
/// Holds memory objects.
data: [u8; 4096 - 64],
/// Next element in list (used by `ObjectPageList`).
next: Rawlink<ObjectPage<'a>>,
prev: Rawlink<ObjectPage<'a>>,
/// A bit-field to track free/allocated memory within `data`.
///
/// # Notes
/// * With only 48 bytes (6 x u64 = 384 bits) we do waste some space at the end of every
///   page for 8 byte allocations, but 12 bytes onwards is okay.
bitfield: [u64; 6],
}
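/// Illustrative sketch only (not part of the original allocator): `first_fit` below walks
/// the `bitfield` words and uses the "negate, then count trailing zeros" trick to find the
/// first free slot. This free function shows that trick in isolation: a set bit means
/// "allocated", so the first zero bit of `word` is the first free object index.
#[allow(dead_code)]
fn first_free_index_sketch(word: u64) -> Option<usize> {
    if word == u64::max_value() {
        // every slot tracked by this word is already allocated
        None
    } else {
        // !word turns free slots into ones; trailing_zeros finds the lowest one
        Some((!word).trailing_zeros() as usize)
    }
}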
impl<'a> Default for ObjectPage<'a> {
fn default() -> ObjectPage<'a> {
unsafe { mem::zeroed() }
}
}
unsafe impl<'a> Send for ObjectPage<'a> {}
unsafe impl<'a> Sync for ObjectPage<'a> {}
impl<'a> fmt::Debug for ObjectPage<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ObjectPage")
}
}
impl<'a> ObjectPage<'a> {
/// Tries to find a free block of memory that satisfies `alignment` requirement.
///
/// # Notes
/// * We pass size here to be able to calculate the resulting address within `data`.
fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> | {
unsafe {
for (base_idx, b) in self.bitfield.iter().enumerate() {
let bitval = *b;
if bitval == u64::max_value() {
continue;
} else {
let negated = !bitval;
let first_free = negated.trailing_zeros() as usize;
let idx: usize = base_idx * 64 + first_free;
let offset = idx * layout.size();
let offset_inside_data_area =
offset <= (BASE_PAGE_SIZE - CACHE_LINE_SIZE - layout.size());
if !offset_inside_data_area {
return None;
}
let addr: usize = ((self as *const ObjectPage) as usize) + offset;
let alignment_ok = addr % layout.align() == 0; | identifier_body | |
lib.rs | }
}
/// Return maximum size an object of size `current_size` can use.
///
/// Used to optimize `realloc`.
fn | (current_size: usize) -> Option<usize> {
match current_size {
0...8 => Some(8),
9...16 => Some(16),
17...32 => Some(32),
33...64 => Some(64),
65...128 => Some(128),
129...256 => Some(256),
257...512 => Some(512),
513...1024 => Some(1024),
1025...2048 => Some(2048),
2049...4032 => Some(4032),
_ => None,
}
}
/// Figure out index into zone array to get the correct slab allocator for that size.
fn get_slab_idx(requested_size: usize) -> Option<usize> {
match requested_size {
0...8 => Some(0),
9...16 => Some(1),
17...32 => Some(2),
33...64 => Some(3),
65...128 => Some(4),
129...256 => Some(5),
257...512 => Some(6),
513...1024 => Some(7),
1025...2048 => Some(8),
2049...4032 => Some(9),
_ => None,
}
}
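/// Illustrative sketch only (not part of the original code): a quick sanity walk over the
/// size-class tables above. Both helpers use the same buckets, so a request is rounded up
/// to the bucket size returned by `get_max_size` and served by the `SCAllocator` at the
/// index returned by `get_slab_idx`.
#[allow(dead_code)]
fn size_class_demo() {
    // a 20 byte request lands in the 32 byte bucket, handled by slabs[2]
    assert_eq!(ZoneAllocator::get_max_size(20), Some(32));
    assert_eq!(ZoneAllocator::get_slab_idx(20), Some(2));
    // anything above 4032 bytes cannot be served by this zone allocator
    assert_eq!(ZoneAllocator::get_max_size(4100), None);
    assert_eq!(ZoneAllocator::get_slab_idx(4100), None);
}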
/// Tries to locate a slab allocator.
///
/// Returns either a index into the slab array or None in case
/// the requested allocation size can not be satisfied by
/// any of the available slabs.
fn try_acquire_slab(&mut self, size: usize) -> Option<usize> {
ZoneAllocator::get_slab_idx(size).map(|idx| {
if self.slabs[idx].size == 0 {
self.slabs[idx].size = size;
}
idx
})
}
/// Refills the SCAllocator in slabs at `idx` with a ObjectPage.
///
/// # TODO
/// * Panics in case we're OOM (should probably return error).
fn refill_slab_allocator<'b>(&'b mut self, idx: usize) {
match self.pager.lock().allocate_page() {
Some(new_head) => self.slabs[idx].insert_slab(new_head),
None => panic!("OOM"),
};
}
/// Allocate a pointer to a block of memory of size `size` with alignment `align`.
///
/// Can return None in case the zone allocator can not satisfy the allocation
/// of the requested size or if we do not have enough memory.
/// In case we are out of memory we try to refill the slab using our local pager
/// and re-try the allocation request once more before we give up.
pub unsafe fn allocate(&mut self, layout: Layout) -> *mut u8 {
match self.try_acquire_slab(layout.size()) {
Some(idx) => {
let mut p = self.slabs[idx].allocate(layout);
if p.is_null() {
self.refill_slab_allocator(idx);
p = self.slabs[idx].allocate(layout);
}
p
}
None => ptr::null_mut(),
}
}
/// Deallocates a pointer to a block of memory previously allocated by `allocate`.
///
/// # Arguments
/// * `ptr` - Address of the memory location to free.
/// * `old_size` - Size of the block.
/// * `align` - Alignment of the block.
///
pub unsafe fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
match self.try_acquire_slab(layout.size()) {
Some(idx) => self.slabs[idx].deallocate(ptr, layout),
None => panic!(
"Unable to find slab allocator for size ({}) with ptr {:?}.",
layout.size(),
ptr
),
}
}
unsafe fn copy(dest: *mut u8, src: *const u8, n: usize) {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
/*pub unsafe fn reallocate<'b>(&'b mut self, ptr: *mut u8, old_size: usize, size: usize, align: usize) -> Option<*mut u8> {
// Return immediately in case we can still fit the new request in the current buffer
match ZoneAllocator::get_max_size(old_size) {
Some(max_size) => {
if max_size >= size {
return Some(ptr);
}
()
},
None => ()
};
// Otherwise allocate, copy, free:
self.allocate(size, align).map(|new| {
ZoneAllocator::copy(new, ptr, old_size);
self.deallocate(NonNull::new_unchecked(ptr as *mut u8), old_size, align);
new
})
}*/
}
/// A list of ObjectPage.
struct ObjectPageList<'a> {
/// Points to the head of the list.
head: Option<&'a mut ObjectPage<'a>>,
/// Number of elements in the list.
pub elements: usize,
}
impl<'a> ObjectPageList<'a> {
#[cfg(feature = "unstable")]
const fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
#[cfg(not(feature = "unstable"))]
fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
fn iter_mut<'b>(&'b mut self) -> ObjectPageIterMut<'a> {
let m = match self.head {
None => Rawlink::none(),
Some(ref mut m) => Rawlink::some(*m),
};
ObjectPageIterMut { head: m }
}
/// Inserts `new_head` at the front of the list.
fn insert_front<'b>(&'b mut self, mut new_head: &'a mut ObjectPage<'a>) {
match self.head {
None => {
new_head.prev = Rawlink::none();
self.head = Some(new_head);
}
Some(ref mut head) => {
new_head.prev = Rawlink::none();
head.prev = Rawlink::some(new_head);
mem::swap(head, &mut new_head);
head.next = Rawlink::some(new_head);
}
}
self.elements += 1;
}
/// Removes `slab_page` from the list.
fn remove_from_list<'b, 'c>(&'b mut self, slab_page: &'c mut ObjectPage<'a>) {
unsafe {
match slab_page.prev.resolve_mut() {
None => {
self.head = slab_page.next.resolve_mut();
}
Some(prev) => {
prev.next = match slab_page.next.resolve_mut() {
None => Rawlink::none(),
Some(next) => Rawlink::some(next),
};
}
}
match slab_page.next.resolve_mut() {
None => (),
Some(next) => {
next.prev = match slab_page.prev.resolve_mut() {
None => Rawlink::none(),
Some(prev) => Rawlink::some(prev),
};
}
}
}
self.elements -= 1;
}
/// Does the list contain `s`?
fn has_objectpage<'b>(&'b mut self, s: &'a ObjectPage<'a>) -> bool {
for slab_page in self.iter_mut() {
if slab_page as *const ObjectPage == s as *const ObjectPage {
return true;
}
}
false
}
}
/// Iterate over all the pages inside a slab allocator
struct ObjectPageIterMut<'a> {
head: Rawlink<ObjectPage<'a>>,
}
impl<'a> Iterator for ObjectPageIterMut<'a> {
type Item = &'a mut ObjectPage<'a>;
#[inline]
fn next(&mut self) -> Option<&'a mut ObjectPage<'a>> {
unsafe {
self.head.resolve_mut().map(|next| {
self.head = match next.next.resolve_mut() {
None => Rawlink::none(),
Some(ref mut sp) => Rawlink::some(*sp),
};
next
})
}
}
}
/// A slab allocator allocates elements of a fixed size.
///
/// It has a list of ObjectPage stored inside `slabs` from which
/// it allocates memory.
pub struct SCAllocator<'a> {
/// Allocation size.
size: usize,
/// Memory backing store, to request new ObjectPage.
pager: &'a Mutex<PageProvider<'a>>,
/// List of ObjectPage.
slabs: ObjectPageList<'a>,
}
#[test]
pub fn iter_empty_list() {
let mut new_head1: ObjectPage = Default::default();
let mut l = ObjectPageList | get_max_size | identifier_name |
lib.rs | Allocator in slabs at `idx` with a ObjectPage.
///
/// # TODO
/// * Panics in case we're OOM (should probably return error).
fn refill_slab_allocator<'b>(&'b mut self, idx: usize) {
match self.pager.lock().allocate_page() {
Some(new_head) => self.slabs[idx].insert_slab(new_head),
None => panic!("OOM"),
};
}
/// Allocate a pointer to a block of memory of size `size` with alignment `align`.
///
/// Can return None in case the zone allocator can not satisfy the allocation
/// of the requested size or if we do not have enough memory.
/// In case we are out of memory we try to refill the slab using our local pager
/// and re-try the allocation request once more before we give up.
pub unsafe fn allocate(&mut self, layout: Layout) -> *mut u8 {
match self.try_acquire_slab(layout.size()) {
Some(idx) => {
let mut p = self.slabs[idx].allocate(layout);
if p.is_null() {
self.refill_slab_allocator(idx);
p = self.slabs[idx].allocate(layout);
}
p
}
None => ptr::null_mut(),
}
}
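// Usage sketch (editorial addition, not part of the original crate); `zone` is an
// assumed ZoneAllocator instance built with a valid pager:
//     let layout = Layout::from_size_align(24, 8).unwrap();
//     let p = unsafe { zone.allocate(layout) };
//     if !p.is_null() { unsafe { zone.deallocate(p, layout) }; }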
/// Deallocates a pointer to a block of memory previously allocated by `allocate`.
///
/// # Arguments
/// * `ptr` - Address of the memory location to free.
/// * `old_size` - Size of the block.
/// * `align` - Alignment of the block.
///
pub unsafe fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
match self.try_acquire_slab(layout.size()) {
Some(idx) => self.slabs[idx].deallocate(ptr, layout),
None => panic!(
"Unable to find slab allocator for size ({}) with ptr {:?}.",
layout.size(),
ptr
),
}
}
unsafe fn copy(dest: *mut u8, src: *const u8, n: usize) {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
/*pub unsafe fn reallocate<'b>(&'b mut self, ptr: *mut u8, old_size: usize, size: usize, align: usize) -> Option<*mut u8> {
// Return immediately in case we can still fit the new request in the current buffer
match ZoneAllocator::get_max_size(old_size) {
Some(max_size) => {
if max_size >= size {
return Some(ptr);
}
()
},
None => ()
};
// Otherwise allocate, copy, free:
self.allocate(size, align).map(|new| {
ZoneAllocator::copy(new, ptr, old_size);
self.deallocate(NonNull::new_unchecked(ptr as *mut u8), old_size, align);
new
})
}*/
}
/// A list of ObjectPage.
struct ObjectPageList<'a> {
/// Points to the head of the list.
head: Option<&'a mut ObjectPage<'a>>,
/// Number of elements in the list.
pub elements: usize,
}
impl<'a> ObjectPageList<'a> {
#[cfg(feature = "unstable")]
const fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
#[cfg(not(feature = "unstable"))]
fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
fn iter_mut<'b>(&'b mut self) -> ObjectPageIterMut<'a> {
let m = match self.head {
None => Rawlink::none(),
Some(ref mut m) => Rawlink::some(*m),
};
ObjectPageIterMut { head: m }
}
/// Inserts `new_head` at the front of the list.
fn insert_front<'b>(&'b mut self, mut new_head: &'a mut ObjectPage<'a>) {
match self.head {
None => {
new_head.prev = Rawlink::none();
self.head = Some(new_head);
}
Some(ref mut head) => {
new_head.prev = Rawlink::none();
head.prev = Rawlink::some(new_head);
mem::swap(head, &mut new_head);
head.next = Rawlink::some(new_head);
}
}
self.elements += 1;
}
/// Removes `slab_page` from the list.
fn remove_from_list<'b, 'c>(&'b mut self, slab_page: &'c mut ObjectPage<'a>) {
unsafe {
match slab_page.prev.resolve_mut() {
None => {
self.head = slab_page.next.resolve_mut();
}
Some(prev) => {
prev.next = match slab_page.next.resolve_mut() {
None => Rawlink::none(),
Some(next) => Rawlink::some(next),
};
}
}
match slab_page.next.resolve_mut() {
None => (),
Some(next) => {
next.prev = match slab_page.prev.resolve_mut() {
None => Rawlink::none(),
Some(prev) => Rawlink::some(prev),
};
}
}
}
self.elements -= 1;
}
/// Does the list contain `s`?
fn has_objectpage<'b>(&'b mut self, s: &'a ObjectPage<'a>) -> bool {
for slab_page in self.iter_mut() {
if slab_page as *const ObjectPage == s as *const ObjectPage {
return true;
}
}
false
}
}
/// Iterate over all the pages inside a slab allocator
struct ObjectPageIterMut<'a> {
head: Rawlink<ObjectPage<'a>>,
}
impl<'a> Iterator for ObjectPageIterMut<'a> {
type Item = &'a mut ObjectPage<'a>;
#[inline]
fn next(&mut self) -> Option<&'a mut ObjectPage<'a>> {
unsafe {
self.head.resolve_mut().map(|next| {
self.head = match next.next.resolve_mut() {
None => Rawlink::none(),
Some(ref mut sp) => Rawlink::some(*sp),
};
next
})
}
}
}
/// A slab allocator allocates elements of a fixed size.
///
/// It has a list of ObjectPage stored inside `slabs` from which
/// it allocates memory.
pub struct SCAllocator<'a> {
/// Allocation size.
size: usize,
/// Memory backing store, to request new ObjectPage.
pager: &'a Mutex<PageProvider<'a>>,
/// List of ObjectPage.
slabs: ObjectPageList<'a>,
}
#[test]
pub fn iter_empty_list() {
let mut new_head1: ObjectPage = Default::default();
let mut l = ObjectPageList::new();
l.insert_front(&mut new_head1);
for p in l.iter_mut() {}
}
impl<'a> SCAllocator<'a> {
/// Create a new SCAllocator.
#[cfg(feature = "unstable")]
pub const fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Create a new SCAllocator.
#[cfg(not(feature = "unstable"))]
pub fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Return object size of this allocator.
pub fn size(&self) -> usize {
self.size
}
/// Try to allocate a new ObjectPage and insert it.
///
/// # TODO
/// * Amount is currently ignored.
/// * Panics on OOM (should return error!)
fn refill_slab<'b>(&'b mut self, amount: usize) {
let mut pager = self.pager.lock();
for _ in 0..amount {
match pager.allocate_page() {
Some(new_head) => {
self.insert_slab(new_head);
}
None => panic!("OOM"),
}
}
}
/// Add a new ObjectPage.
pub fn insert_slab<'b>(&'b mut self, new_head: &'a mut ObjectPage<'a>) {
self.slabs.insert_front(new_head);
}
/// Tries to allocate a block of memory with respect to the `alignment`.
///
/// Only searches within already allocated slab pages.
fn try_allocate_from_pagelist<'b>(&'b mut self, layout: Layout) -> *mut u8 {
let size = self.size;
for (idx, slab_page) in self.slabs.iter_mut().enumerate() {
let ptr = slab_page.allocate(layout);
if !ptr.is_null() {
return ptr;
} else | {
continue;
} | conditional_block | |
lib.rs | SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
#[cfg(not(feature = "unstable"))]
pub fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> {
ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager),
SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
/// Return maximum size an object of size `current_size` can use.
///
/// Used to optimize `realloc`.
fn get_max_size(current_size: usize) -> Option<usize> {
match current_size {
0...8 => Some(8),
9...16 => Some(16),
17...32 => Some(32),
33...64 => Some(64),
65...128 => Some(128),
129...256 => Some(256),
257...512 => Some(512),
513...1024 => Some(1024),
1025...2048 => Some(2048),
2049...4032 => Some(4032),
_ => None,
}
}
}
/// Figure out index into zone array to get the correct slab allocator for that size.
fn get_slab_idx(requested_size: usize) -> Option<usize> {
match requested_size {
0...8 => Some(0),
9...16 => Some(1),
17...32 => Some(2),
33...64 => Some(3),
65...128 => Some(4),
129...256 => Some(5),
257...512 => Some(6),
513...1024 => Some(7),
1025...2048 => Some(8),
2049...4032 => Some(9),
_ => None,
}
}
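// Size-class note (editorial addition): the two lookup tables above round a request
// up to the next power of two between 8 and 2048 bytes, plus a final 4032-byte class
// that presumably leaves room for per-page metadata inside a 4 KiB page; e.g.
// get_slab_idx(100) == Some(4), and the slab at index 4 hands out 128-byte objects.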
/// Tries to locate a slab allocator.
///
/// Returns either a index into the slab array or None in case
/// the requested allocation size can not be satisfied by
/// any of the available slabs.
fn try_acquire_slab(&mut self, size: usize) -> Option<usize> {
ZoneAllocator::get_slab_idx(size).map(|idx| {
if self.slabs[idx].size == 0 {
self.slabs[idx].size = size;
}
idx
})
}
/// Refills the SCAllocator in slabs at `idx` with an ObjectPage.
///
/// # TODO
/// * Panics in case we're OOM (should probably return error).
fn refill_slab_allocator<'b>(&'b mut self, idx: usize) {
match self.pager.lock().allocate_page() {
Some(new_head) => self.slabs[idx].insert_slab(new_head),
None => panic!("OOM"),
};
}
/// Allocate a pointer to a block of memory of size `size` with alignment `align`.
///
/// Can return None in case the zone allocator can not satisfy the allocation
/// of the requested size or if we do not have enough memory.
/// In case we are out of memory we try to refill the slab using our local pager
/// and re-try the allocation request once more before we give up.
pub unsafe fn allocate(&mut self, layout: Layout) -> *mut u8 {
match self.try_acquire_slab(layout.size()) {
Some(idx) => {
let mut p = self.slabs[idx].allocate(layout);
if p.is_null() {
self.refill_slab_allocator(idx);
p = self.slabs[idx].allocate(layout);
}
p
}
None => ptr::null_mut(),
}
}
/// Deallocates a pointer to a block of memory previously allocated by `allocate`.
///
/// # Arguments
/// * `ptr` - Address of the memory location to free.
/// * `old_size` - Size of the block.
/// * `align` - Alignment of the block.
///
pub unsafe fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
match self.try_acquire_slab(layout.size()) {
Some(idx) => self.slabs[idx].deallocate(ptr, layout),
None => panic!(
"Unable to find slab allocator for size ({}) with ptr {:?}.",
layout.size(),
ptr
),
}
}
unsafe fn copy(dest: *mut u8, src: *const u8, n: usize) {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
/*pub unsafe fn reallocate<'b>(&'b mut self, ptr: *mut u8, old_size: usize, size: usize, align: usize) -> Option<*mut u8> {
// Return immediately in case we can still fit the new request in the current buffer
match ZoneAllocator::get_max_size(old_size) {
Some(max_size) => {
if max_size >= size {
return Some(ptr);
}
()
},
None => ()
};
// Otherwise allocate, copy, free:
self.allocate(size, align).map(|new| {
ZoneAllocator::copy(new, ptr, old_size);
self.deallocate(NonNull::new_unchecked(ptr as *mut u8), old_size, align);
new
})
}*/
}
/// A list of ObjectPage.
struct ObjectPageList<'a> {
/// Points to the head of the list.
head: Option<&'a mut ObjectPage<'a>>,
/// Number of elements in the list.
pub elements: usize,
}
impl<'a> ObjectPageList<'a> {
#[cfg(feature = "unstable")]
const fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
#[cfg(not(feature = "unstable"))]
fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
fn iter_mut<'b>(&'b mut self) -> ObjectPageIterMut<'a> {
let m = match self.head {
None => Rawlink::none(),
Some(ref mut m) => Rawlink::some(*m),
};
ObjectPageIterMut { head: m }
}
/// Inserts `new_head` at the front of the list.
fn insert_front<'b>(&'b mut self, mut new_head: &'a mut ObjectPage<'a>) {
match self.head {
None => {
new_head.prev = Rawlink::none();
self.head = Some(new_head);
}
Some(ref mut head) => {
new_head.prev = Rawlink::none();
head.prev = Rawlink::some(new_head);
mem::swap(head, &mut new_head);
head.next = Rawlink::some(new_head);
}
}
self.elements += 1;
}
/// Removes `slab_page` from the list.
fn remove_from_list<'b, 'c>(&'b mut self, slab_page: &'c mut ObjectPage<'a>) {
unsafe {
match slab_page.prev.resolve_mut() {
None => {
self.head = slab_page.next.resolve_mut();
}
Some(prev) => {
prev.next = match slab_page.next.resolve_mut() {
None => Rawlink::none(),
Some(next) => Rawlink::some(next),
};
}
}
match slab_page.next.resolve_mut() {
None => (),
Some(next) => {
next.prev = match slab_page.prev.resolve_mut() {
None => Rawlink::none(),
Some(prev) => Rawlink::some(prev),
};
}
}
}
self.elements -= 1;
}
/// Does the list contain `s`?
fn has_objectpage<'b>(&'b mut self, s: &'a ObjectPage<'a>) -> bool {
for | ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager), | random_line_split | |
ip6.py | 6s16sxBH', self.src, self.dst, self.nxt, len(p))
s = dpkt.in_cksum_add(0, s)
s = dpkt.in_cksum_add(s, p)
try:
self.data.sum = dpkt.in_cksum_done(s)
except AttributeError:
pass
return self.pack_hdr() + self.headers_str() + bytes(self.data)
@classmethod
def set_proto(cls, p, pktclass):
cls._protosw[p] = pktclass
@classmethod
def get_proto(cls, p):
return cls._protosw[p]
class IP6ExtensionHeader(dpkt.Packet):
"""
An extension header is very similar to a 'sub-packet'.
We just want to re-use all the hdr unpacking etc.
"""
pass
class IP6OptsHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol | self.length = (self.len + 1) * 8
options = []
index = 0
while index < self.length - 2:
opt_type = compat_ord(self.data[index])
# PAD1 option
if opt_type == 0:
index += 1
continue
opt_length = compat_ord(self.data[index + 1])
if opt_type == 1: # PADN option
# PADN uses opt_length bytes in total
index += opt_length + 2
continue
options.append(
{'type': opt_type, 'opt_length': opt_length, 'data': self.data[index + 2:index + 2 + opt_length]})
# add the two chars and the option_length, to move to the next option
index += opt_length + 2
self.options = options
self.data = buf[2:self.length] # keep raw data with all pad options, but not the following data
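# Illustrative sketch (editorial addition, not from dpkt): PAD1/PADN bytes are
# skipped above, so a header carrying only padding leaves `options == []`, while a
# hypothetical router-alert option would surface roughly as
#   [{'type': 5, 'opt_length': 2, 'data': b'\x00\x00'}]
# with the raw option bytes still kept in `.data`.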
class IP6HopOptsHeader(IP6OptsHeader):
pass
class IP6DstOptsHeader(IP6OptsHeader):
pass
class IP6RoutingHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # extension data length in 8 octet units (ignoring first 8 octets) (<= 46 for type 0)
('type', 'B', 0), # routing type (currently, only 0 is used)
('segs_left', 'B', 0), # remaining segments in route, until destination (<= 23)
('rsvd_sl_bits', 'I', 0), # reserved (1 byte), strict/loose bitmap for addresses
)
@property
def sl_bits(self):
return self.rsvd_sl_bits & 0xffffff
@sl_bits.setter
def sl_bits(self, v):
self.rsvd_sl_bits = (self.rsvd_sl_bits & ~0xffffff) | (v & 0xffffff)
def unpack(self, buf):
hdr_size = 8
addr_size = 16
dpkt.Packet.unpack(self, buf)
addresses = []
num_addresses = self.len // 2
buf = buf[hdr_size:hdr_size + num_addresses * addr_size]
for i in range(num_addresses):
addresses.append(buf[i * addr_size: i * addr_size + addr_size])
self.data = buf
self.addresses = addresses
self.length = self.len * 8 + 8
class IP6FragmentHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('resv', 'B', 0), # reserved, set to 0
('frag_off_resv_m', 'H', 0), # frag offset (13 bits), reserved zero (2 bits), More frags flag
('id', 'I', 0) # fragment id
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__
self.data = b''
@property
def frag_off(self):
return self.frag_off_resv_m >> 3
@frag_off.setter
def frag_off(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfff8) | (v << 3)
@property
def m_flag(self):
return self.frag_off_resv_m & 1
@m_flag.setter
def m_flag(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~1) | (v & 1)
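# Bit-layout note (editorial addition): the 16-bit frag_off_resv_m field packs the
# 13-bit fragment offset in bits 15..3, two reserved bits, and the M ("more
# fragments") flag in bit 0; e.g. a raw value of 0xfff9 decodes to frag_off == 8191
# and m_flag == 1.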
class IP6AHHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # length of header in 4 octet units (ignoring first 2 units)
('resv', 'H', 0), # reserved, 2 bytes of 0
('spi', 'I', 0), # SPI security parameter index
('seq', 'I', 0) # sequence no.
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 2) * 4
self.auth_data = self.data[:(self.len - 1) * 4]
class IP6ESPHeader(IP6ExtensionHeader):
__hdr__ = (
('spi', 'I', 0),
('seq', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__ + len(self.data)
ext_hdrs = [ip.IP_PROTO_HOPOPTS, ip.IP_PROTO_ROUTING, ip.IP_PROTO_FRAGMENT, ip.IP_PROTO_AH, ip.IP_PROTO_ESP,
ip.IP_PROTO_DSTOPTS]
ext_hdrs_cls = {ip.IP_PROTO_HOPOPTS: IP6HopOptsHeader,
ip.IP_PROTO_ROUTING: IP6RoutingHeader,
ip.IP_PROTO_FRAGMENT: IP6FragmentHeader,
ip.IP_PROTO_ESP: IP6ESPHeader,
ip.IP_PROTO_AH: IP6AHHeader,
ip.IP_PROTO_DSTOPTS: IP6DstOptsHeader}
# Unit tests
def test_ipg():
s = (b'\x60\x00\x00\x00\x00\x28\x06\x40\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11\x24\xff\xfe\x8c'
b'\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80\x72\xcd\xca\x00\x16'
b'\x04\x84\x46\xd5\x00\x00\x00\x00\xa0\x02\xff\xff\xf8\x09\x00\x00\x02\x04\x05\xa0\x01\x03'
b'\x03\x00\x01\x01\x08\x0a\x7d\x18\x35\x3f\x00\x00\x00\x00')
_ip = IP6(s)
# basic properties
assert _ip.v == 6
assert _ip.fc == 0
assert _ip.flow == 0
_ip.data.sum = 0
s2 = bytes(_ip)
assert s == s2
def test_ip6_routing_header():
s = (b'\x60\x00\x00\x00\x00\x3c\x2b\x40\x20\x48\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xde\xca\x20\x47\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02'
b'\x00\x00\x00\x00\x20\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x20\x22'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14 | ('len', 'B', 0) # option data length in 8 octect units (ignoring first 8 octets) so, len 0 == 64bit header
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf) | random_line_split |
ip6.py |
next_ext_hdr = self.nxt
while next_ext_hdr in ext_hdrs:
ext = ext_hdrs_cls[next_ext_hdr](buf)
self.extension_hdrs[next_ext_hdr] = ext
self.all_extension_headers.append(ext)
buf = buf[ext.length:]
next_ext_hdr = getattr(ext, 'nxt', None)
# set the payload protocol id
if next_ext_hdr is not None:
self.p = next_ext_hdr
try:
self.data = self._protosw[next_ext_hdr](buf)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
self.data = buf
def headers_str(self):
# If all_extension_headers is available, return the headers as they originally appeared
if self.all_extension_headers:
return b''.join(bytes(ext) for ext in self.all_extension_headers)
# Output extension headers in order defined in RFC1883 (except dest opts)
header_str = b""
for hdr in ext_hdrs:
if hdr in self.extension_hdrs:
header_str += bytes(self.extension_hdrs[hdr])
return header_str
def __bytes__(self):
if (self.p == 6 or self.p == 17 or self.p == 58) and not self.data.sum:
# XXX - set TCP, UDP, and ICMPv6 checksums
p = bytes(self.data)
s = dpkt.struct.pack('>16s16sxBH', self.src, self.dst, self.nxt, len(p))
s = dpkt.in_cksum_add(0, s)
s = dpkt.in_cksum_add(s, p)
try:
self.data.sum = dpkt.in_cksum_done(s)
except AttributeError:
pass
return self.pack_hdr() + self.headers_str() + bytes(self.data)
@classmethod
def set_proto(cls, p, pktclass):
cls._protosw[p] = pktclass
@classmethod
def get_proto(cls, p):
return cls._protosw[p]
class IP6ExtensionHeader(dpkt.Packet):
"""
An extension header is very similar to a 'sub-packet'.
We just want to re-use all the hdr unpacking etc.
"""
pass
class IP6OptsHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0) # option data length in 8 octet units (ignoring first 8 octets) so, len 0 == 64bit header
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 1) * 8
options = []
index = 0
while index < self.length - 2:
opt_type = compat_ord(self.data[index])
# PAD1 option
if opt_type == 0:
index += 1
continue
opt_length = compat_ord(self.data[index + 1])
if opt_type == 1: # PADN option
# PADN uses opt_length bytes in total
index += opt_length + 2
continue
options.append(
{'type': opt_type, 'opt_length': opt_length, 'data': self.data[index + 2:index + 2 + opt_length]})
# add the two chars and the option_length, to move to the next option
index += opt_length + 2
self.options = options
self.data = buf[2:self.length] # keep raw data with all pad options, but not the following data
class IP6HopOptsHeader(IP6OptsHeader):
pass
class IP6DstOptsHeader(IP6OptsHeader):
pass
class IP6RoutingHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # extension data length in 8 octet units (ignoring first 8 octets) (<= 46 for type 0)
('type', 'B', 0), # routing type (currently, only 0 is used)
('segs_left', 'B', 0), # remaining segments in route, until destination (<= 23)
('rsvd_sl_bits', 'I', 0), # reserved (1 byte), strict/loose bitmap for addresses
)
@property
def sl_bits(self):
return self.rsvd_sl_bits & 0xffffff
@sl_bits.setter
def sl_bits(self, v):
self.rsvd_sl_bits = (self.rsvd_sl_bits & ~0xffffff) | (v & 0xffffff)
def unpack(self, buf):
hdr_size = 8
addr_size = 16
dpkt.Packet.unpack(self, buf)
addresses = []
num_addresses = self.len // 2
buf = buf[hdr_size:hdr_size + num_addresses * addr_size]
for i in range(num_addresses):
addresses.append(buf[i * addr_size: i * addr_size + addr_size])
self.data = buf
self.addresses = addresses
self.length = self.len * 8 + 8
class IP6FragmentHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('resv', 'B', 0), # reserved, set to 0
('frag_off_resv_m', 'H', 0), # frag offset (13 bits), reserved zero (2 bits), More frags flag
('id', 'I', 0) # fragment id
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__
self.data = b''
@property
def frag_off(self):
return self.frag_off_resv_m >> 3
@frag_off.setter
def frag_off(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfff8) | (v << 3)
@property
def m_flag(self):
return self.frag_off_resv_m & 1
@m_flag.setter
def m_flag(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~1) | (v & 1)
class IP6AHHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # length of header in 4 octet units (ignoring first 2 units)
('resv', 'H', 0), # reserved, 2 bytes of 0
('spi', 'I', 0), # SPI security parameter index
('seq', 'I', 0) # sequence no.
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 2) * 4
self.auth_data = self.data[:(self.len - 1) * 4]
class IP6ESPHeader(IP6ExtensionHeader):
__hdr__ = (
('spi', 'I', 0),
('seq', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__ + len(self.data)
ext_hdrs = [ip.IP_PROTO_HOPOPTS, ip.IP_PROTO_ROUTING, ip.IP_PROTO_FRAGMENT, ip.IP_PROTO_AH, ip.IP_PROTO_ESP,
ip.IP_PROTO_DSTOPTS]
ext_hdrs_cls = {ip.IP_PROTO_HOPOPTS: IP6HopOptsHeader,
ip.IP_PROTO_ROUTING: IP6RoutingHeader,
ip.IP_PROTO_FRAGMENT: IP6FragmentHeader,
ip.IP_PROTO_ESP: IP6ESPHeader,
ip.IP_PROTO_AH: IP6AHHeader,
ip.IP_PROTO_DSTOPTS: IP6DstOptsHeader}
# Unit tests
def test_ipg():
s = (b'\x60\x00\x00\x00\x00\x28\x06\x40\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11\x24\xff\xfe\x8c'
b'\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80\x72\xcd\xca\x00\x16'
b'\x04\x84\x46\xd5\x00\x00\x00\x00\xa0\x02\xff\xff\xf8\x09\x00\x00\x02\x04\x05\xa0\x01\x03'
b'\x03\x00\x01\x01\x08\x0a\x7d\x18\x35\x3f\x00\x00\x00 | buf = self.data | conditional_block | |
ip6.py | b''
@property
def frag_off(self):
return self.frag_off_resv_m >> 3
@frag_off.setter
def frag_off(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfff8) | (v << 3)
@property
def m_flag(self):
return self.frag_off_resv_m & 1
@m_flag.setter
def m_flag(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~1) | (v & 1)
class IP6AHHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # length of header in 4 octet units (ignoring first 2 units)
('resv', 'H', 0), # reserved, 2 bytes of 0
('spi', 'I', 0), # SPI security parameter index
('seq', 'I', 0) # sequence no.
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 2) * 4
self.auth_data = self.data[:(self.len - 1) * 4]
class IP6ESPHeader(IP6ExtensionHeader):
__hdr__ = (
('spi', 'I', 0),
('seq', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__ + len(self.data)
ext_hdrs = [ip.IP_PROTO_HOPOPTS, ip.IP_PROTO_ROUTING, ip.IP_PROTO_FRAGMENT, ip.IP_PROTO_AH, ip.IP_PROTO_ESP,
ip.IP_PROTO_DSTOPTS]
ext_hdrs_cls = {ip.IP_PROTO_HOPOPTS: IP6HopOptsHeader,
ip.IP_PROTO_ROUTING: IP6RoutingHeader,
ip.IP_PROTO_FRAGMENT: IP6FragmentHeader,
ip.IP_PROTO_ESP: IP6ESPHeader,
ip.IP_PROTO_AH: IP6AHHeader,
ip.IP_PROTO_DSTOPTS: IP6DstOptsHeader}
# Unit tests
def test_ipg():
s = (b'\x60\x00\x00\x00\x00\x28\x06\x40\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11\x24\xff\xfe\x8c'
b'\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80\x72\xcd\xca\x00\x16'
b'\x04\x84\x46\xd5\x00\x00\x00\x00\xa0\x02\xff\xff\xf8\x09\x00\x00\x02\x04\x05\xa0\x01\x03'
b'\x03\x00\x01\x01\x08\x0a\x7d\x18\x35\x3f\x00\x00\x00\x00')
_ip = IP6(s)
# basic properties
assert _ip.v == 6
assert _ip.fc == 0
assert _ip.flow == 0
_ip.data.sum = 0
s2 = bytes(_ip)
assert s == s2
def test_ip6_routing_header():
s = (b'\x60\x00\x00\x00\x00\x3c\x2b\x40\x20\x48\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xde\xca\x20\x47\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02'
b'\x00\x00\x00\x00\x20\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x20\x22'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00\x50\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x50\x02\x20\x00\x91\x7f\x00\x00')
_ip = IP6(s)
s2 = bytes(_ip)
# 43 is Routing header id
assert len(_ip.extension_hdrs[43].addresses) == 2
assert _ip.tcp
assert s == s2
def test_ip6_fragment_header():
s = b'\x06\xee\xff\xfb\x00\x00\xff\xff'
fh = IP6FragmentHeader(s)
# s2 = str(fh) variable 's2' is not used
assert fh.nxt == 6
assert fh.id == 65535
assert fh.frag_off == 8191
assert fh.m_flag == 1
assert bytes(fh) == s
# IP6 with fragment header
s = (b'\x60\x00\x00\x00\x00\x10\x2c\x00\x02\x22\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x02\x03\x33\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x29\x00\x00\x01'
b'\x00\x00\x00\x00\x60\x00\x00\x00\x00\x10\x2c\x00')
_ip = IP6(s)
assert bytes(_ip) == s
def test_ip6_options_header():
s = (b'\x3b\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00')
options = IP6OptsHeader(s).options
assert len(options) == 3
assert bytes(IP6OptsHeader(s)) == s
def test_ip6_ah_header():
s = b'\x3b\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
ah = IP6AHHeader(s)
assert ah.length == 24
assert ah.auth_data == b'xxxxxxxx'
assert ah.spi == 0x2020202
assert ah.seq == 0x1010101
assert bytes(ah) == s
def test_ip6_esp_header():
| s = (b'\x00\x00\x01\x00\x00\x00\x00\x44\xe2\x4f\x9e\x68\xf3\xcd\xb1\x5f\x61\x65\x42\x8b\x78\x0b'
b'\x4a\xfd\x13\xf0\x15\x98\xf5\x55\x16\xa8\x12\xb3\xb8\x4d\xbc\x16\xb2\x14\xbe\x3d\xf9\x96'
b'\xd4\xa0\x39\x1f\x85\x74\x25\x81\x83\xa6\x0d\x99\xb6\xba\xa3\xcc\xb6\xe0\x9a\x78\xee\xf2'
b'\xaf\x9a')
esp = IP6ESPHeader(s)
assert esp.length == 68
assert esp.spi == 256
assert bytes(esp) == s | identifier_body | |
ip6.py | (self):
return (self._v_fc_flow >> 20) & 0xff
@fc.setter
def fc(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xff00000) | (v << 20)
@property
def flow(self):
return self._v_fc_flow & 0xfffff
@flow.setter
def flow(self, v):
self._v_fc_flow = (self._v_fc_flow & ~0xfffff) | (v & 0xfffff)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.extension_hdrs = {}
# NOTE: self.extension_hdrs is not accurate, as it doesn't support duplicate header types.
# According to RFC-1883 "Each extension header should occur at most once, except for the
# Destination Options header which should occur at most twice".
# Secondly, the .headers_str() method attempts to pack the extension headers in order as
# defined in the RFC, however it doesn't adjust the next header (nxt) pointer accordingly.
# Here we introduce the new field .all_extension_headers; it allows duplicate types and
# keeps the original order.
self.all_extension_headers = []
if self.plen:
buf = self.data[:self.plen]
else: # due to jumbo payload or TSO
buf = self.data
next_ext_hdr = self.nxt
while next_ext_hdr in ext_hdrs:
ext = ext_hdrs_cls[next_ext_hdr](buf)
self.extension_hdrs[next_ext_hdr] = ext
self.all_extension_headers.append(ext)
buf = buf[ext.length:]
next_ext_hdr = getattr(ext, 'nxt', None)
# set the payload protocol id
if next_ext_hdr is not None:
self.p = next_ext_hdr
try:
self.data = self._protosw[next_ext_hdr](buf)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
self.data = buf
def headers_str(self):
# If all_extension_headers is available, return the headers as they originally appeared
if self.all_extension_headers:
return b''.join(bytes(ext) for ext in self.all_extension_headers)
# Output extension headers in order defined in RFC1883 (except dest opts)
header_str = b""
for hdr in ext_hdrs:
if hdr in self.extension_hdrs:
header_str += bytes(self.extension_hdrs[hdr])
return header_str
def __bytes__(self):
if (self.p == 6 or self.p == 17 or self.p == 58) and not self.data.sum:
# XXX - set TCP, UDP, and ICMPv6 checksums
p = bytes(self.data)
s = dpkt.struct.pack('>16s16sxBH', self.src, self.dst, self.nxt, len(p))
s = dpkt.in_cksum_add(0, s)
s = dpkt.in_cksum_add(s, p)
try:
self.data.sum = dpkt.in_cksum_done(s)
except AttributeError:
pass
return self.pack_hdr() + self.headers_str() + bytes(self.data)
@classmethod
def set_proto(cls, p, pktclass):
cls._protosw[p] = pktclass
@classmethod
def get_proto(cls, p):
return cls._protosw[p]
class IP6ExtensionHeader(dpkt.Packet):
"""
An extension header is very similar to a 'sub-packet'.
We just want to re-use all the hdr unpacking etc.
"""
pass
class IP6OptsHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0) # option data length in 8 octet units (ignoring first 8 octets) so, len 0 == 64bit header
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 1) * 8
options = []
index = 0
while index < self.length - 2:
opt_type = compat_ord(self.data[index])
# PAD1 option
if opt_type == 0:
index += 1
continue
opt_length = compat_ord(self.data[index + 1])
if opt_type == 1: # PADN option
# PADN uses opt_length bytes in total
index += opt_length + 2
continue
options.append(
{'type': opt_type, 'opt_length': opt_length, 'data': self.data[index + 2:index + 2 + opt_length]})
# add the two chars and the option_length, to move to the next option
index += opt_length + 2
self.options = options
self.data = buf[2:self.length] # keep raw data with all pad options, but not the following data
class IP6HopOptsHeader(IP6OptsHeader):
pass
class IP6DstOptsHeader(IP6OptsHeader):
pass
class IP6RoutingHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # extension data length in 8 octet units (ignoring first 8 octets) (<= 46 for type 0)
('type', 'B', 0), # routing type (currently, only 0 is used)
('segs_left', 'B', 0), # remaining segments in route, until destination (<= 23)
('rsvd_sl_bits', 'I', 0), # reserved (1 byte), strict/loose bitmap for addresses
)
@property
def sl_bits(self):
return self.rsvd_sl_bits & 0xffffff
@sl_bits.setter
def sl_bits(self, v):
self.rsvd_sl_bits = (self.rsvd_sl_bits & ~0xffffff) | (v & 0xffffff)
def unpack(self, buf):
hdr_size = 8
addr_size = 16
dpkt.Packet.unpack(self, buf)
addresses = []
num_addresses = self.len // 2
buf = buf[hdr_size:hdr_size + num_addresses * addr_size]
for i in range(num_addresses):
addresses.append(buf[i * addr_size: i * addr_size + addr_size])
self.data = buf
self.addresses = addresses
self.length = self.len * 8 + 8
class IP6FragmentHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('resv', 'B', 0), # reserved, set to 0
('frag_off_resv_m', 'H', 0), # frag offset (13 bits), reserved zero (2 bits), More frags flag
('id', 'I', 0) # fragments id
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__
self.data = b''
@property
def frag_off(self):
return self.frag_off_resv_m >> 3
@frag_off.setter
def frag_off(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfff8) | (v << 3)
@property
def m_flag(self):
return self.frag_off_resv_m & 1
@m_flag.setter
def m_flag(self, v):
self.frag_off_resv_m = (self.frag_off_resv_m & ~1) | (v & 1)
class IP6AHHeader(IP6ExtensionHeader):
__hdr__ = (
('nxt', 'B', 0), # next extension header protocol
('len', 'B', 0), # length of header in 4 octet units (ignoring first 2 units)
('resv', 'H', 0), # reserved, 2 bytes of 0
('spi', 'I', 0), # SPI security parameter index
('seq', 'I', 0) # sequence no.
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = (self.len + 2) * 4
self.auth_data = self.data[:(self.len - 1) * 4]
class IP6ESPHeader(IP6ExtensionHeader):
__hdr__ = (
('spi', 'I', 0),
('seq', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.length = self.__hdr_len__ + len(self.data)
ext_hdrs = [ip.IP_PROTO_HOPOPTS, ip.IP_PROTO_ROUTING, ip.IP_PROTO_FRAGMENT, ip.IP_PROTO_AH, ip.IP_PROTO_ESP,
ip.IP_PROTO_DSTOPTS]
ext_hdrs_cls = {ip.IP_PROTO_HO | fc | identifier_name | |
schema.go | t *TableSchema) GetColumnIfNonNilDefault() []bool {
nonNilDefaultByColumn := make([]bool, len(t.Schema.Columns))
for columnID, column := range t.Schema.Columns {
nonNilDefaultByColumn[columnID] = column.DefaultValue != nil
}
return nonNilDefaultByColumn
}
// GetArchivingSortColumns makes a copy of the Schema.ArchivingSortColumns so
// callers don't have to hold a read lock to access it.
func (t *TableSchema) GetArchivingSortColumns() []int {
t.RLock()
defer t.RUnlock()
return t.Schema.ArchivingSortColumns
}
// FetchSchema fetches schema from metaStore and updates in-memory copy of table schema,
// and set up watch channels for metaStore schema changes, used for bootstrapping mem store.
func (m *memStoreImpl) FetchSchema() error {
tables, err := m.metaStore.ListTables()
if err != nil {
return utils.StackError(err, "Failed to list tables from meta")
}
for _, tableName := range tables {
err := m.fetchTable(tableName)
if err != nil {
return err
}
}
// watch table addition/modification
tableSchemaChangeEvents, done, err := m.metaStore.WatchTableSchemaEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableSchemaChange(tableSchemaChangeEvents, done)
// watch table deletion
tableListChangeEvents, done, err := m.metaStore.WatchTableListEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableListChange(tableListChangeEvents, done)
// watch enum cases appending
m.RLock()
for _, tableSchema := range m.TableSchemas {
for columnName, enumCases := range tableSchema.EnumDicts {
err := m.watchEnumCases(tableSchema.Schema.Name, columnName, len(enumCases.ReverseDict))
if err != nil {
return err
}
}
}
m.RUnlock()
return nil
}
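// Editorial summary (not in the original comments): FetchSchema is bootstrap-only.
// It loads every table synchronously, then installs three watchers - table schema
// changes, table-list changes, and enum-case appends - whose events are applied
// asynchronously by the handle* goroutines below.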
func (m *memStoreImpl) fetchTable(tableName string) error {
table, err := m.metaStore.GetTable(tableName)
if err != nil {
if err != metastore.ErrTableDoesNotExist {
return utils.StackError(err, "Failed to get table schema for table %s from meta", tableName)
}
} else {
tableSchema := NewTableSchema(table)
for columnID, column := range table.Columns {
if !column.Deleted {
if column.IsEnumColumn() {
enumCases, err := m.metaStore.GetEnumDict(tableName, column.Name)
if err != nil {
if err != metastore.ErrTableDoesNotExist && err != metastore.ErrColumnDoesNotExist {
return utils.StackError(err, "Failed to fetch enum cases for table: %s, column: %s", tableName, column.Name)
}
} else {
tableSchema.createEnumDict(column.Name, enumCases)
}
}
}
tableSchema.SetDefaultValue(columnID)
}
m.Lock()
m.TableSchemas[tableName] = tableSchema
m.Unlock()
}
return nil
}
// watchEnumCases will set up watch channels for each enum column.
func (m *memStoreImpl) watchEnumCases(tableName, columnName string, startCase int) error {
enumDictChangeEvents, done, err := m.metaStore.WatchEnumDictEvents(tableName, columnName, startCase)
if err != nil {
if err != metastore.ErrTableDoesNotExist && err != metastore.ErrColumnDoesNotExist {
return utils.StackError(err, "Failed to watch enum case events")
}
} else {
go m.handleEnumDictChange(tableName, columnName, enumDictChangeEvents, done)
}
return nil
}
// handleTableListChange handles table deletion events from metaStore.
func (m *memStoreImpl) handleTableListChange(tableListChangeEvents <-chan []string, done chan<- struct{}) {
for newTableList := range tableListChangeEvents {
m.applyTableList(newTableList)
done <- struct{}{}
}
close(done)
}
func (m *memStoreImpl) applyTableList(newTableList []string) {
m.Lock()
for tableName, tableSchema := range m.TableSchemas {
if utils.IndexOfStr(newTableList, tableName) < 0 {
// detach shards and schema from map
// to prevent new usage
tableShards := m.TableShards[tableName]
delete(m.TableSchemas, tableName)
delete(m.TableShards, tableName)
// only one table deletion at a time
m.Unlock()
for shardID, shard := range tableShards {
shard.Destruct()
m.diskStore.DeleteTableShard(tableName, shardID)
}
m.scheduler.DeleteTable(tableName, tableSchema.Schema.IsFactTable)
return
}
}
m.Unlock()
}
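// Editorial note on the locking above (not in the original comments): the write lock
// is held only while the schema/shard maps are detached; Destruct and the disk
// deletion run after m.Unlock(), so tearing down a large shard does not block
// readers of unrelated tables, and only one deleted table is handled per call.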
// handleTableSchemaChange handles table schema change event from metaStore including new table schema.
func (m *memStoreImpl) handleTableSchemaChange(tableSchemaChangeEvents <-chan *metaCom.Table, done chan<- struct{}) {
for table := range tableSchemaChangeEvents {
m.applyTableSchema(table)
done <- struct{}{}
}
close(done)
}
func (m *memStoreImpl) applyTableSchema(newTable *metaCom.Table) {
tableName := newTable.Name
newEnumColumns := []string{}
// default start watching from first enumCase
startEnumID := 0
defer func() {
for _, column := range newEnumColumns {
err := m.watchEnumCases(tableName, column, startEnumID)
if err != nil {
utils.GetLogger().With(
"error", err.Error(),
"table", tableName,
"column", column).
Panic("Failed to watch enum dict events")
}
}
}()
m.Lock()
tableSchema, tableExist := m.TableSchemas[tableName]
// new table
if !tableExist {
tableSchema = NewTableSchema(newTable)
for columnID, column := range newTable.Columns {
if !column.Deleted {
if column.IsEnumColumn() {
var enumCases []string
if column.DefaultValue != nil {
enumCases = append(enumCases, *column.DefaultValue)
// default value is already appended, start watching from 1
startEnumID = 1
}
tableSchema.createEnumDict(column.Name, enumCases)
newEnumColumns = append(newEnumColumns, column.Name)
}
}
tableSchema.SetDefaultValue(columnID)
}
m.TableSchemas[newTable.Name] = tableSchema
m.Unlock()
return
}
m.Unlock()
var columnsToDelete []int
tableSchema.Lock()
oldColumns := tableSchema.Schema.Columns
tableSchema.SetTable(newTable)
for columnID, column := range newTable.Columns {
tableSchema.SetDefaultValue(columnID)
if column.Deleted {
if columnID < len(oldColumns) && !oldColumns[columnID].Deleted { // new deletions only
delete(tableSchema.EnumDicts, column.Name)
columnsToDelete = append(columnsToDelete, columnID)
}
} else {
if column.IsEnumColumn() {
_, exist := tableSchema.EnumDicts[column.Name]
if !exist {
var enumCases []string
if column.DefaultValue != nil {
enumCases = append(enumCases, *column.DefaultValue)
// default value is already appended, start watching from 1
startEnumID = 1
}
tableSchema.createEnumDict(column.Name, enumCases)
newEnumColumns = append(newEnumColumns, column.Name)
}
}
var oldPreloadingDays int
newPreloadingDays := column.Config.PreloadingDays
// preloading will be triggered if
// 1. this is a new column and PreloadingDays > 0
// 2. this is a old column and PreloadingDays > oldPreloadingDays
if columnID < len(oldColumns) {
oldPreloadingDays = oldColumns[columnID].Config.PreloadingDays
}
m.HostMemManager.TriggerPreload(tableName, columnID, oldPreloadingDays, newPreloadingDays)
}
}
tableSchema.Unlock()
for _, columnID := range columnsToDelete {
var shards []*TableShard
m.RLock()
for _, shard := range m.TableShards[tableName] {
shard.Users.Add(1)
shards = append(shards, shard)
}
m.RUnlock()
for _, shard := range shards {
// May block for extended amount of time during archiving
shard.DeleteColumn(columnID)
shard.Users.Done()
}
}
}
// handleEnumDictChange handles enum dict change event from metaStore for specific table and column.
func (m *memStoreImpl) handleEnumDictChange(tableName, columnName string, enumDictChangeEvents <-chan string, done chan<- struct{}) {
for newEnumCase := range enumDictChangeEvents {
m.applyEnumCase(tableName, columnName, newEnumCase)
}
close(done)
}
func (m *memStoreImpl) applyEnumCase(tableName, columnName string, newEnumCase string) {
m.RLock()
tableSchema, tableExist := m.TableSchemas[tableName]
if !tableExist {
m.RUnlock() | return
}
| random_line_split | |
schema.go | the specified column with the
// specified initial cases, and attaches it to TableSchema object.
// Caller should acquire the schema lock before calling this function.
func (t *TableSchema) createEnumDict(columnName string, enumCases []string) {
columnID := t.ColumnIDs[columnName]
dataType := t.ValueTypeByColumn[columnID]
enumCapacity := 1 << uint(memCom.DataTypeBits(dataType))
enumDict := map[string]int{}
for id, enumCase := range enumCases {
enumDict[enumCase] = id
}
t.EnumDicts[columnName] = EnumDict{
Capacity: enumCapacity,
Dict: enumDict,
ReverseDict: enumCases,
}
}
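// Illustrative example (editorial addition): for a SmallEnum column the capacity is
// 1<<8 == 256 cases, so createEnumDict("status", []string{"open", "closed"}) would
// leave Dict == map[string]int{"open": 0, "closed": 1} with room for 254 more
// appended cases ("status" is a hypothetical column name).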
// GetValueTypeByColumn makes a copy of the ValueTypeByColumn so callers don't have to hold a read
// lock to access it.
func (t *TableSchema) GetValueTypeByColumn() []memCom.DataType {
t.RLock()
defer t.RUnlock()
return t.ValueTypeByColumn
}
// GetPrimaryKeyColumns makes a copy of the Schema.PrimaryKeyColumns so callers don't have to hold
// a read lock to access it.
func (t *TableSchema) GetPrimaryKeyColumns() []int {
t.RLock()
defer t.RUnlock()
return t.Schema.PrimaryKeyColumns
}
// GetColumnDeletions returns a boolean slice that indicates whether a column has been deleted. Callers
// need to hold a read lock.
func (t *TableSchema) GetColumnDeletions() []bool {
deletedByColumn := make([]bool, len(t.Schema.Columns))
for columnID, column := range t.Schema.Columns {
deletedByColumn[columnID] = column.Deleted
}
return deletedByColumn
}
// GetColumnIfNonNilDefault returns a boolean slice that indicates whether a column has non nil default value. Callers
// need to hold a read lock.
func (t *TableSchema) GetColumnIfNonNilDefault() []bool {
nonNilDefaultByColumn := make([]bool, len(t.Schema.Columns))
for columnID, column := range t.Schema.Columns {
nonNilDefaultByColumn[columnID] = column.DefaultValue != nil
}
return nonNilDefaultByColumn
}
// GetArchivingSortColumns makes a copy of the Schema.ArchivingSortColumns so
// callers don't have to hold a read lock to access it.
func (t *TableSchema) GetArchivingSortColumns() []int {
t.RLock()
defer t.RUnlock()
return t.Schema.ArchivingSortColumns
}
// FetchSchema fetches schema from metaStore and updates in-memory copy of table schema,
// and set up watch channels for metaStore schema changes, used for bootstrapping mem store.
func (m *memStoreImpl) FetchSchema() error {
tables, err := m.metaStore.ListTables()
if err != nil {
return utils.StackError(err, "Failed to list tables from meta")
}
for _, tableName := range tables {
err := m.fetchTable(tableName)
if err != nil {
return err
}
}
// watch table addition/modification
tableSchemaChangeEvents, done, err := m.metaStore.WatchTableSchemaEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableSchemaChange(tableSchemaChangeEvents, done)
// watch table deletion
tableListChangeEvents, done, err := m.metaStore.WatchTableListEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableListChange(tableListChangeEvents, done)
// watch enum cases appending
m.RLock()
for _, tableSchema := range m.TableSchemas {
for columnName, enumCases := range tableSchema.EnumDicts {
err := m.watchEnumCases(tableSchema.Schema.Name, columnName, len(enumCases.ReverseDict))
if err != nil {
return err
}
}
}
m.RUnlock()
return nil
}
func (m *memStoreImpl) fetchTable(tableName string) error {
table, err := m.metaStore.GetTable(tableName)
if err != nil {
if err != metastore.ErrTableDoesNotExist {
return utils.StackError(err, "Failed to get table schema for table %s from meta", tableName)
}
} else {
tableSchema := NewTableSchema(table)
for columnID, column := range table.Columns {
if !column.Deleted {
if column.IsEnumColumn() {
enumCases, err := m.metaStore.GetEnumDict(tableName, column.Name)
if err != nil {
if err != metastore.ErrTableDoesNotExist && err != metastore.ErrColumnDoesNotExist {
return utils.StackError(err, "Failed to fetch enum cases for table: %s, column: %s", tableName, column.Name)
}
} else {
tableSchema.createEnumDict(column.Name, enumCases)
}
}
}
tableSchema.SetDefaultValue(columnID)
}
m.Lock()
m.TableSchemas[tableName] = tableSchema
m.Unlock()
}
return nil
}
// watchEnumCases will set up watch channels for each enum column.
func (m *memStoreImpl) watchEnumCases(tableName, columnName string, startCase int) error {
enumDictChangeEvents, done, err := m.metaStore.WatchEnumDictEvents(tableName, columnName, startCase)
if err != nil {
if err != metastore.ErrTableDoesNotExist && err != metastore.ErrColumnDoesNotExist {
return utils.StackError(err, "Failed to watch enum case events")
}
} else {
go m.handleEnumDictChange(tableName, columnName, enumDictChangeEvents, done)
}
return nil
}
// handleTableListChange handles table deletion events from metaStore.
func (m *memStoreImpl) handleTableListChange(tableListChangeEvents <-chan []string, done chan<- struct{}) {
for newTableList := range tableListChangeEvents {
m.applyTableList(newTableList)
done <- struct{}{}
}
close(done)
}
func (m *memStoreImpl) applyTableList(newTableList []string) {
m.Lock()
for tableName, tableSchema := range m.TableSchemas {
if utils.IndexOfStr(newTableList, tableName) < 0 {
// detach shards and schema from map
// to prevent new usage
tableShards := m.TableShards[tableName]
delete(m.TableSchemas, tableName)
delete(m.TableShards, tableName)
// only one table deletion at a time
m.Unlock()
for shardID, shard := range tableShards {
shard.Destruct()
m.diskStore.DeleteTableShard(tableName, shardID)
}
m.scheduler.DeleteTable(tableName, tableSchema.Schema.IsFactTable)
return
}
}
m.Unlock()
}
// handleTableSchemaChange handles table schema change event from metaStore including new table schema.
func (m *memStoreImpl) handleTableSchemaChange(tableSchemaChangeEvents <-chan *metaCom.Table, done chan<- struct{}) {
for table := range tableSchemaChangeEvents {
m.applyTableSchema(table)
done <- struct{}{}
}
close(done)
}
func (m *memStoreImpl) applyTableSchema(newTable *metaCom.Table) {
tableName := newTable.Name
newEnumColumns := []string{}
// default start watching from first enumCase
startEnumID := 0
defer func() {
for _, column := range newEnumColumns {
err := m.watchEnumCases(tableName, column, startEnumID)
if err != nil {
utils.GetLogger().With(
"error", err.Error(),
"table", tableName,
"column", column).
Panic("Failed to watch enum dict events")
}
}
}()
m.Lock()
tableSchema, tableExist := m.TableSchemas[tableName]
// new table
if !tableExist {
tableSchema = NewTableSchema(newTable)
for columnID, column := range newTable.Columns {
if !column.Deleted {
if column.IsEnumColumn() {
var enumCases []string
if column.DefaultValue != nil {
enumCases = append(enumCases, *column.DefaultValue)
// default value is already appended, start watching from 1
startEnumID = 1
}
tableSchema.createEnumDict(column.Name, enumCases)
newEnumColumns = append(newEnumColumns, column.Name)
}
}
tableSchema.SetDefaultValue(columnID)
}
m.TableSchemas[newTable.Name] = tableSchema
m.Unlock()
return
}
m.Unlock()
var columnsToDelete []int
tableSchema.Lock()
oldColumns := tableSchema.Schema.Columns
tableSchema.SetTable(newTable)
for columnID, column := range newTable.Columns {
tableSchema.SetDefaultValue(columnID)
if column.Deleted {
if columnID < len(oldColumns) && !oldColumns[columnID].Deleted { // new deletions only
delete(tableSchema.EnumDicts, column.Name)
columnsToDelete = append(columnsToDelete, columnID)
}
} else {
if column.IsEnumColumn() {
_, exist := tableSchema.EnumDicts[column.Name]
if !exist {
var enumCases []string
if column.DefaultValue != nil | {
enumCases = append(enumCases, *column.DefaultValue)
// default value is already appended, start watching from 1
startEnumID = 1
} | conditional_block | |
schema.go | (table *metaCom.Table) *TableSchema {
tableSchema := &TableSchema{
Schema: *table,
ColumnIDs: make(map[string]int),
EnumDicts: make(map[string]EnumDict),
ValueTypeByColumn: make([]memCom.DataType, len(table.Columns)),
PrimaryKeyColumnTypes: make([]memCom.DataType, len(table.PrimaryKeyColumns)),
DefaultValues: make([]*memCom.DataValue, len(table.Columns)),
}
for id, column := range table.Columns {
if !column.Deleted {
tableSchema.ColumnIDs[column.Name] = id
}
tableSchema.ValueTypeByColumn[id] = memCom.DataTypeForColumn(column)
}
for i, columnID := range table.PrimaryKeyColumns {
columnType := tableSchema.ValueTypeByColumn[columnID]
tableSchema.PrimaryKeyColumnTypes[i] = columnType
dataBits := memCom.DataTypeBits(columnType)
if dataBits < 8 {
dataBits = 8
}
tableSchema.PrimaryKeyBytes += dataBits / 8
}
return tableSchema
}
// MarshalJSON marshals TableSchema into json.
func (t *TableSchema) MarshalJSON() ([]byte, error) {
// Use an alias type to avoid recursive json.Marshal calls.
type alias TableSchema
t.RLock()
defer t.RUnlock()
return json.Marshal((*alias)(t))
}
// SetTable sets a updated table and update TableSchema,
// should acquire lock before calling.
func (t *TableSchema) SetTable(table *metaCom.Table) {
t.Schema = *table
for id, column := range table.Columns {
if !column.Deleted {
t.ColumnIDs[column.Name] = id
} else {
delete(t.ColumnIDs, column.Name)
}
if id >= len(t.ValueTypeByColumn) {
t.ValueTypeByColumn = append(t.ValueTypeByColumn, memCom.DataTypeForColumn(column))
}
if id >= len(t.DefaultValues) {
t.DefaultValues = append(t.DefaultValues, nil)
}
}
}
// SetDefaultValue parses the default value string if present and sets to TableSchema.
// Schema lock should be acquired and release by caller and enum dict should already be
// created/update before this function.
func (t *TableSchema) SetDefaultValue(columnID int) {
// Default values are already set.
if t.DefaultValues[columnID] != nil {
return
}
column := t.Schema.Columns[columnID]
defStrVal := column.DefaultValue
if defStrVal == nil || column.Deleted {
t.DefaultValues[columnID] = &memCom.NullDataValue
return
}
dataType := t.ValueTypeByColumn[columnID]
dataTypeName := memCom.DataTypeName[dataType]
val := memCom.DataValue{
Valid: true,
DataType: dataType,
}
if dataType == memCom.SmallEnum || dataType == memCom.BigEnum {
enumDict, ok := t.EnumDicts[column.Name]
if !ok {
// Should not happen since the enum dict should already be created.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot find EnumDict for column")
}
enumVal, ok := enumDict.Dict[*defStrVal]
if !ok {
// Should not happen since the enum value should already be created.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot find enum value for column")
}
if dataType == memCom.SmallEnum {
enumValUint8 := uint8(enumVal)
val.OtherVal = unsafe.Pointer(&enumValUint8)
} else {
enumValUint16 := uint16(enumVal)
val.OtherVal = unsafe.Pointer(&enumValUint16)
}
} else {
dataValue, err := memCom.ValueFromString(*defStrVal, dataType)
if err != nil {
// Should not happen since the string value is already validated by schema handler.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot parse default value")
}
if dataType == memCom.Bool {
val.IsBool = true
val.BoolVal = dataValue.BoolVal
} else {
val.OtherVal = dataValue.OtherVal
}
}
val.CmpFunc = memCom.GetCompareFunc(dataType)
t.DefaultValues[columnID] = &val
return
}
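// Illustrative example (editorial addition): a Bool column declared with default
// "true" ends up with a valid DataValue carrying IsBool/BoolVal, while a SmallEnum
// default string is first translated through the already-created EnumDict - which is
// why enum dict creation must precede this call, as noted above.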
// createEnumDict creates the enum dictionary for the specified column with the
// specified initial cases, and attaches it to TableSchema object.
// Caller should acquire the schema lock before calling this function.
func (t *TableSchema) createEnumDict(columnName string, enumCases []string) {
columnID := t.ColumnIDs[columnName]
dataType := t.ValueTypeByColumn[columnID]
enumCapacity := 1 << uint(memCom.DataTypeBits(dataType))
enumDict := map[string]int{}
for id, enumCase := range enumCases {
enumDict[enumCase] = id
}
t.EnumDicts[columnName] = EnumDict{
Capacity: enumCapacity,
Dict: enumDict,
ReverseDict: enumCases,
}
}
// GetValueTypeByColumn makes a copy of the ValueTypeByColumn so callers don't have to hold a read
// lock to access it.
func (t *TableSchema) GetValueTypeByColumn() []memCom.DataType {
t.RLock()
defer t.RUnlock()
return t.ValueTypeByColumn
}
// GetPrimaryKeyColumns makes a copy of the Schema.PrimaryKeyColumns so callers don't have to hold
// a read lock to access it.
func (t *TableSchema) GetPrimaryKeyColumns() []int {
t.RLock()
defer t.RUnlock()
return t.Schema.PrimaryKeyColumns
}
// GetColumnDeletions returns a boolean slice that indicates whether a column has been deleted. Callers
// need to hold a read lock.
func (t *TableSchema) GetColumnDeletions() []bool {
deletedByColumn := make([]bool, len(t.Schema.Columns))
for columnID, column := range t.Schema.Columns {
deletedByColumn[columnID] = column.Deleted
}
return deletedByColumn
}
// GetColumnIfNonNilDefault returns a boolean slice that indicates whether a column has non nil default value. Callers
// need to hold a read lock.
func (t *TableSchema) GetColumnIfNonNilDefault() []bool {
nonNilDefaultByColumn := make([]bool, len(t.Schema.Columns))
for columnID, column := range t.Schema.Columns {
nonNilDefaultByColumn[columnID] = column.DefaultValue != nil
}
return nonNilDefaultByColumn
}
// GetArchivingSortColumns makes a copy of the Schema.ArchivingSortColumns so
// callers don't have to hold a read lock to access it.
func (t *TableSchema) GetArchivingSortColumns() []int {
t.RLock()
defer t.RUnlock()
return t.Schema.ArchivingSortColumns
}
// FetchSchema fetches schema from metaStore and updates in-memory copy of table schema,
// and set up watch channels for metaStore schema changes, used for bootstrapping mem store.
func (m *memStoreImpl) FetchSchema() error {
tables, err := m.metaStore.ListTables()
if err != nil {
return utils.StackError(err, "Failed to list tables from meta")
}
for _, tableName := range tables {
err := m.fetchTable(tableName)
if err != nil {
return err
}
}
// watch table addition/modification
tableSchemaChangeEvents, done, err := m.metaStore.WatchTableSchemaEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableSchemaChange(tableSchemaChangeEvents, done)
// watch table deletion
tableListChangeEvents, done, err := m.metaStore.WatchTableListEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableListChange(tableListChangeEvents, done)
// watch enum cases appending
m.RLock()
for _, tableSchema := range m.TableSchemas {
for columnName, enumCases := range tableSchema.EnumDicts {
err := m.watchEnumCases(tableSchema.Schema.Name, columnName, len(enumCases.ReverseDict))
if err != nil {
return err
}
}
}
m.RUnlock()
return nil
}
func (m *memStoreImpl) fetchTable(tableName string) error {
table, err := m.metaStore.GetTable(tableName)
if err != nil {
if err != metastore.ErrTableDoesNotExist {
return utils.StackError(err, "Failed to get table schema for table %s from meta", tableName)
}
} else {
tableSchema := NewTableSchema(table)
for columnID, column := range table.Columns {
if !column.Deleted {
if column.IsEnumColumn() {
enumCases, err := m.metaStore.GetEnumDict(tableName, column.Name)
if err != nil {
if err != metastore.ErrTableDoesNotExist && err != metastore.ErrColumnDoesNotExist {
return utils.Stack | NewTableSchema | identifier_name | |
schema.go | )),
DefaultValues: make([]*memCom.DataValue, len(table.Columns)),
}
for id, column := range table.Columns {
if !column.Deleted {
tableSchema.ColumnIDs[column.Name] = id
}
tableSchema.ValueTypeByColumn[id] = memCom.DataTypeForColumn(column)
}
for i, columnID := range table.PrimaryKeyColumns {
columnType := tableSchema.ValueTypeByColumn[columnID]
tableSchema.PrimaryKeyColumnTypes[i] = columnType
dataBits := memCom.DataTypeBits(columnType)
if dataBits < 8 {
dataBits = 8
}
tableSchema.PrimaryKeyBytes += dataBits / 8
}
return tableSchema
}
// MarshalJSON marshals TableSchema into json.
func (t *TableSchema) MarshalJSON() ([]byte, error) {
// Use an alias type to avoid recursive json.Marshal calls.
type alias TableSchema
t.RLock()
defer t.RUnlock()
return json.Marshal((*alias)(t))
}
// SetTable sets an updated table and updates TableSchema,
// should acquire lock before calling.
func (t *TableSchema) SetTable(table *metaCom.Table) {
t.Schema = *table
for id, column := range table.Columns {
if !column.Deleted {
t.ColumnIDs[column.Name] = id
} else {
delete(t.ColumnIDs, column.Name)
}
if id >= len(t.ValueTypeByColumn) {
t.ValueTypeByColumn = append(t.ValueTypeByColumn, memCom.DataTypeForColumn(column))
}
if id >= len(t.DefaultValues) {
t.DefaultValues = append(t.DefaultValues, nil)
}
}
}
// SetDefaultValue parses the default value string if present and sets it on TableSchema.
// The schema lock should be acquired and released by the caller, and the enum dict should already be
// created/updated before this function is called.
func (t *TableSchema) SetDefaultValue(columnID int) {
// Default values are already set.
if t.DefaultValues[columnID] != nil {
return
}
column := t.Schema.Columns[columnID]
defStrVal := column.DefaultValue
if defStrVal == nil || column.Deleted {
t.DefaultValues[columnID] = &memCom.NullDataValue
return
}
dataType := t.ValueTypeByColumn[columnID]
dataTypeName := memCom.DataTypeName[dataType]
val := memCom.DataValue{
Valid: true,
DataType: dataType,
}
if dataType == memCom.SmallEnum || dataType == memCom.BigEnum {
enumDict, ok := t.EnumDicts[column.Name]
if !ok {
// Should not happen since the enum dict should already be created.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot find EnumDict for column")
}
enumVal, ok := enumDict.Dict[*defStrVal]
if !ok {
// Should not happen since the enum value should already be created.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot find enum value for column")
}
if dataType == memCom.SmallEnum {
enumValUint8 := uint8(enumVal)
val.OtherVal = unsafe.Pointer(&enumValUint8)
} else {
enumValUint16 := uint16(enumVal)
val.OtherVal = unsafe.Pointer(&enumValUint16)
}
} else {
dataValue, err := memCom.ValueFromString(*defStrVal, dataType)
if err != nil {
// Should not happen since the string value is already validated by schema handler.
utils.GetLogger().With(
"data_type", dataTypeName,
"default_value", *defStrVal,
"column", t.Schema.Columns[columnID].Name,
).Panic("Cannot parse default value")
}
if dataType == memCom.Bool {
val.IsBool = true
val.BoolVal = dataValue.BoolVal
} else {
val.OtherVal = dataValue.OtherVal
}
}
val.CmpFunc = memCom.GetCompareFunc(dataType)
t.DefaultValues[columnID] = &val
return
}
// createEnumDict creates the enum dictionary for the specified column with the
// specified initial cases, and attaches it to the TableSchema object.
// Caller should acquire the schema lock before calling this function.
func (t *TableSchema) createEnumDict(columnName string, enumCases []string) {
columnID := t.ColumnIDs[columnName]
dataType := t.ValueTypeByColumn[columnID]
enumCapacity := 1 << uint(memCom.DataTypeBits(dataType))
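// e.g. a SmallEnum column is backed by uint8 (see SetDefaultValue), so this is
// 1<<8 = 256 possible cases; a BigEnum column (uint16) allows 1<<16 = 65536.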
enumDict := map[string]int{}
for id, enumCase := range enumCases {
enumDict[enumCase] = id
}
t.EnumDicts[columnName] = EnumDict{
Capacity: enumCapacity,
Dict: enumDict,
ReverseDict: enumCases,
}
}
// GetValueTypeByColumn makes a copy of the ValueTypeByColumn so callers don't have to hold a read
// lock to access it.
func (t *TableSchema) GetValueTypeByColumn() []memCom.DataType {
t.RLock()
defer t.RUnlock()
return t.ValueTypeByColumn
}
// GetPrimaryKeyColumns makes a copy of the Schema.PrimaryKeyColumns so callers don't have to hold
// a read lock to access it.
func (t *TableSchema) GetPrimaryKeyColumns() []int {
t.RLock()
defer t.RUnlock()
return t.Schema.PrimaryKeyColumns
}
// GetColumnDeletions returns a boolean slice that indicates whether a column has been deleted. Callers
// need to hold a read lock.
func (t *TableSchema) GetColumnDeletions() []bool {
deletedByColumn := make([]bool, len(t.Schema.Columns))
for columnID, column := range t.Schema.Columns {
deletedByColumn[columnID] = column.Deleted
}
return deletedByColumn
}
// GetColumnIfNonNilDefault returns a boolean slice that indicates whether a column has a non-nil default value. Callers
// need to hold a read lock.
func (t *TableSchema) GetColumnIfNonNilDefault() []bool |
// GetArchivingSortColumns makes a copy of the Schema.ArchivingSortColumns so
// callers don't have to hold a read lock to access it.
func (t *TableSchema) GetArchivingSortColumns() []int {
t.RLock()
defer t.RUnlock()
return t.Schema.ArchivingSortColumns
}
// FetchSchema fetches the schema from metaStore and updates the in-memory copy of the table schema,
// and sets up watch channels for metaStore schema changes; used for bootstrapping the mem store.
func (m *memStoreImpl) FetchSchema() error {
tables, err := m.metaStore.ListTables()
if err != nil {
return utils.StackError(err, "Failed to list tables from meta")
}
for _, tableName := range tables {
err := m.fetchTable(tableName)
if err != nil {
return err
}
}
// watch table addition/modification
tableSchemaChangeEvents, done, err := m.metaStore.WatchTableSchemaEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableSchemaChange(tableSchemaChangeEvents, done)
// watch table deletion
tableListChangeEvents, done, err := m.metaStore.WatchTableListEvents()
if err != nil {
return utils.StackError(err, "Failed to watch table list events")
}
go m.handleTableListChange(tableListChangeEvents, done)
// watch enum cases appending
m.RLock()
for _, tableSchema := range m.TableSchemas {
for columnName, enumCases := range tableSchema.EnumDicts {
err := m.watchEnumCases(tableSchema.Schema.Name, columnName, len(enumCases.ReverseDict))
if err != nil {
return err
}
}
}
m.RUnlock()
return nil
}
func (m *memStoreImpl) fetchTable(tableName string) error {
table, err := m.metaStore.GetTable(tableName)
if err != nil {
if err != metastore.ErrTableDoesNotExist {
return utils.StackError(err, "Failed to get table schema for table %s from meta", tableName)
}
} else {
tableSchema := NewTableSchema(table)
for columnID, column := range table.Columns {
if !column.Deleted {
if column.IsEnumColumn() {
enumCases, err := m.metaStore.GetEnumDict(tableName, column.Name)
if err != nil {
if err != metastore.ErrTableDoesNotExist && err != metastore.ErrColumnDoesNotExist {
return utils.StackError(err, "Failed to fetch enum cases for table: %s, column: %s", tableName, column.Name)
}
} else {
tableSchema.createEnumDict(column.Name, enumCases)
}
}
}
tableSchema.SetDefaultValue(columnID)
}
m.Lock()
m.TableSchemas[tableName] = tableSchema
m | {
nonNilDefaultByColumn := make([]bool, len(t.Schema.Columns))
for columnID, column := range t.Schema.Columns {
nonNilDefaultByColumn[columnID] = column.DefaultValue != nil
}
return nonNilDefaultByColumn
} | identifier_body |
util.py | =CMAP_DEFAULT):
"""Converts a depth map to an RGB image."""
# Convert to disparity.
disp = 1.0 / (depth + 1e-6)
if normalizer is not None:
disp /= normalizer
else:
disp /= (np.percentile(disp, pc) + 1e-6)
disp = np.clip(disp, 0, 1)
disp = gray2rgb(disp, cmap=cmap)
keep_h = int(disp.shape[0] * (1 - crop_percent))
disp = disp[:keep_h]
return disp
def get_seq_start_end(target_index, seq_length, sample_every=1):
"""Returns absolute seq start and end indices for a given target frame."""
half_offset = int((seq_length - 1) / 2) * sample_every
end_index = target_index + half_offset
start_index = end_index - (seq_length - 1) * sample_every
return start_index, end_index
def get_seq_middle(seq_length):
"""Returns relative index for the middle frame in sequence."""
half_offset = int((seq_length - 1) / 2)
return seq_length - 1 - half_offset
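# A quick worked example for the two helpers above: with seq_length=3,
# sample_every=1 and target frame 10, half_offset is 1, so
# get_seq_start_end(10, 3) returns (9, 11) and get_seq_middle(3) returns 1,
# i.e. the target sits in the middle of the 3-frame window.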
def info(obj):
"""Return info on shape and dtype of a numpy array or TensorFlow tensor."""
if obj is None:
return 'None.'
elif isinstance(obj, list):
if obj:
return 'List of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty list.'
elif isinstance(obj, tuple):
if obj:
return 'Tuple of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty tuple.'
else:
if is_a_numpy_array(obj):
return 'Array with shape: %s, dtype: %s' % (obj.shape, obj.dtype)
else:
return str(obj)
def is_a_numpy_array(obj):
"""Returns true if obj is a numpy array."""
return type(obj).__module__ == np.__name__
def count_parameters(also_print=True):
"""Count the number of parameters in the model.
Args:
also_print: Boolean. If True also print the numbers.
Returns:
The total number of parameters.
"""
total = 0
if also_print:
logging.info('Model Parameters:')
for (_, v) in get_vars_to_save_and_restore().items():
shape = v.get_shape()
if also_print:
logging.info('%s %s: %s', v.op.name, shape,
format_number(shape.num_elements()))
total += shape.num_elements()
if also_print:
logging.info('Total: %s', format_number(total))
return total
def get_vars_to_save_and_restore(ckpt=None):
"""Returns list of variables that should be saved/restored.
Args:
ckpt: Path to existing checkpoint. If present, returns only the subset of
variables that exist in given checkpoint.
Returns:
List of all variables that need to be saved/restored.
"""
model_vars = tf.trainable_variables()
# Add batchnorm variables.
bn_vars = [v for v in tf.global_variables()
if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name or
'mu' in v.op.name or 'sigma' in v.op.name or
'global_scale_var' in v.op.name]
model_vars.extend(bn_vars)
model_vars = sorted(model_vars, key=lambda x: x.op.name)
mapping = {}
if ckpt is not None:
ckpt_var = tf.contrib.framework.list_variables(ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var]
ckpt_var_shapes = [shape for (unused_name, shape) in ckpt_var]
not_loaded = list(ckpt_var_names)
for v in model_vars:
if v.op.name not in ckpt_var_names:
# For backward compatibility, try additional matching.
v_additional_name = v.op.name.replace('egomotion_prediction/', '')
if v_additional_name in ckpt_var_names:
# Check if shapes match.
ind = ckpt_var_names.index(v_additional_name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v_additional_name] = v
not_loaded.remove(v_additional_name)
continue
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
logging.warning('Did not find var %s in checkpoint: %s', v.op.name,
os.path.basename(ckpt))
else:
# Check if shapes match.
ind = ckpt_var_names.index(v.op.name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v.op.name] = v
not_loaded.remove(v.op.name)
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
if not_loaded:
logging.warning('The following variables in the checkpoint were not loaded:')
for varname_not_loaded in not_loaded:
logging.info('%s', varname_not_loaded)
else: # just get model vars.
for v in model_vars:
mapping[v.op.name] = v
return mapping
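# Illustrative sketch of how the returned name->variable map is typically
# consumed (assumes a TF1 session `sess` and checkpoint path `ckpt_path`):
#
#   vars_to_restore = get_vars_to_save_and_restore(ckpt_path)
#   saver = tf.train.Saver(var_list=vars_to_restore)
#   saver.restore(sess, ckpt_path)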
def get_imagenet_vars_to_restore(imagenet_ckpt):
"""Returns dict of variables to restore from ImageNet-checkpoint."""
vars_to_restore_imagenet = {}
ckpt_var_names = tf.contrib.framework.list_variables(imagenet_ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var_names]
model_vars = tf.global_variables()
for v in model_vars:
if 'global_step' in v.op.name:
continue
mvname_noprefix = v.op.name.replace('depth_prediction/', '')
mvname_noprefix = mvname_noprefix.replace('moving_mean', 'mu')
mvname_noprefix = mvname_noprefix.replace('moving_variance', 'sigma')
if mvname_noprefix in ckpt_var_names:
vars_to_restore_imagenet[mvname_noprefix] = v
else:
logging.info('The following variable will not be restored from '
'pretrained ImageNet-checkpoint: %s', mvname_noprefix)
return vars_to_restore_imagenet
def format_number(n):
"""Formats number with thousands commas."""
# locale.setlocale(locale.LC_ALL, 'en_US') # commented by me
# return locale.format('%d', n, grouping=True)
return n
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
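# Example: sorted(['img10.png', 'img2.png', 'img1.png'], key=natural_keys)
# yields ['img1.png', 'img2.png', 'img10.png'] (numeric-aware ordering),
# whereas a plain lexicographic sort would put 'img10.png' before 'img2.png'.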
def read_text_lines(filepath):
with tf.gfile.Open(filepath, 'r') as f:
lines = f.readlines()
lines = [l.rstrip() for l in lines]
return lines
def save_flags(FLAGS, save_path):
import json
check_path(save_path)
save_path = os.path.join(save_path, 'flags.json')
with open(save_path, 'w') as f:
json.dump(FLAGS.flag_values_dict(), f, indent=4, sort_keys=False)
def check_path(path):
if not os.path.exists(path):
os.makedirs(path)
def save_command(save_path):
check_path(save_path)
import sys
command = sys.argv
save_file = os.path.join(save_path, 'command.txt')
with open(save_file, 'w') as f:
f.write(' '.join(command))
def make_intrinsics_matrix(fx, fy, cx, cy):
r1 = np.stack([fx, 0, cx])
r2 = np.stack([0, fy, cy])
r3 = np.array([0., 0., 1.])
intrinsics = np.stack([r1, r2, r3])
return intrinsics
def get_multi_scale_intrinsics(intrinsics, num_scales):
"""Returns multiple intrinsic matrices for different scales."""
intrinsics_multi_scale = []
# Scale the intrinsics accordingly for each scale
for s in range(num_scales):
fx = intrinsics[0, 0] / (2 ** s)
fy = intrinsics[1, 1] / (2 ** s)
cx = intrinsics[0, 2] / (2 ** s)
cy = intrinsics[1, 2] / (2 ** s)
intrinsics_multi_scale.append(make_intrinsics_matrix(fx, fy, cx, cy))
intrinsics_multi_scale = np.stack(intrinsics_multi_scale) # [num_scales, 3, 3]
return intrinsics_multi_scale
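# Worked example: with fx=1000, fy=1000, cx=640, cy=240 and num_scales=3, the
# returned stack holds the full-resolution matrix at scale 0, then
# (500, 500, 320, 120) at scale 1 and (250, 250, 160, 60) at scale 2, matching
# images downsampled by a factor of 2 per scale.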
def pack_pred_depths(pred_dir, test_file):
| """Pack depth predictions as a single .npy file"""
test_images = read_text_lines(test_file)
save_name = 'pred_depth.npy'
output_file = os.path.join(pred_dir, save_name)
img_height = 128
img_width = 416
all_pred = np.zeros((len(test_images), img_height, img_width))
for i, img_path in enumerate(test_images):
npy_path = os.path.join(pred_dir, img_path.replace('png', 'npy'))
depth = np.load(npy_path)
all_pred[i] = np.squeeze(depth)
np.save(output_file, all_pred) | identifier_body | |
util.py | s %s: %s', v.op.name, shape,
format_number(shape.num_elements()))
total += shape.num_elements()
if also_print:
logging.info('Total: %s', format_number(total))
return total
def get_vars_to_save_and_restore(ckpt=None):
"""Returns list of variables that should be saved/restored.
Args:
ckpt: Path to existing checkpoint. If present, returns only the subset of
variables that exist in given checkpoint.
Returns:
List of all variables that need to be saved/restored.
"""
model_vars = tf.trainable_variables()
# Add batchnorm variables.
bn_vars = [v for v in tf.global_variables()
if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name or
'mu' in v.op.name or 'sigma' in v.op.name or
'global_scale_var' in v.op.name]
model_vars.extend(bn_vars)
model_vars = sorted(model_vars, key=lambda x: x.op.name)
mapping = {}
if ckpt is not None:
ckpt_var = tf.contrib.framework.list_variables(ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var]
ckpt_var_shapes = [shape for (unused_name, shape) in ckpt_var]
not_loaded = list(ckpt_var_names)
for v in model_vars:
if v.op.name not in ckpt_var_names:
# For backward compatibility, try additional matching.
v_additional_name = v.op.name.replace('egomotion_prediction/', '')
if v_additional_name in ckpt_var_names:
# Check if shapes match.
ind = ckpt_var_names.index(v_additional_name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v_additional_name] = v
not_loaded.remove(v_additional_name)
continue
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
logging.warning('Did not find var %s in checkpoint: %s', v.op.name,
os.path.basename(ckpt))
else:
# Check if shapes match.
ind = ckpt_var_names.index(v.op.name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v.op.name] = v
not_loaded.remove(v.op.name)
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
if not_loaded:
logging.warning('The following variables in the checkpoint were not loaded:')
for varname_not_loaded in not_loaded:
logging.info('%s', varname_not_loaded)
else: # just get model vars.
for v in model_vars:
mapping[v.op.name] = v
return mapping
def get_imagenet_vars_to_restore(imagenet_ckpt):
"""Returns dict of variables to restore from ImageNet-checkpoint."""
vars_to_restore_imagenet = {}
ckpt_var_names = tf.contrib.framework.list_variables(imagenet_ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var_names]
model_vars = tf.global_variables()
for v in model_vars:
if 'global_step' in v.op.name:
continue
mvname_noprefix = v.op.name.replace('depth_prediction/', '')
mvname_noprefix = mvname_noprefix.replace('moving_mean', 'mu')
mvname_noprefix = mvname_noprefix.replace('moving_variance', 'sigma')
if mvname_noprefix in ckpt_var_names:
vars_to_restore_imagenet[mvname_noprefix] = v
else:
logging.info('The following variable will not be restored from '
'pretrained ImageNet-checkpoint: %s', mvname_noprefix)
return vars_to_restore_imagenet
def format_number(n):
"""Formats number with thousands commas."""
# locale.setlocale(locale.LC_ALL, 'en_US') # commented by me
# return locale.format('%d', n, grouping=True)
return n
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
def read_text_lines(filepath):
with tf.gfile.Open(filepath, 'r') as f:
lines = f.readlines()
lines = [l.rstrip() for l in lines]
return lines
def save_flags(FLAGS, save_path):
import json
check_path(save_path)
save_path = os.path.join(save_path, 'flags.json')
with open(save_path, 'w') as f:
json.dump(FLAGS.flag_values_dict(), f, indent=4, sort_keys=False)
def check_path(path):
if not os.path.exists(path):
os.makedirs(path)
def save_command(save_path):
check_path(save_path)
import sys
command = sys.argv
save_file = os.path.join(save_path, 'command.txt')
with open(save_file, 'w') as f:
f.write(' '.join(command))
def make_intrinsics_matrix(fx, fy, cx, cy):
r1 = np.stack([fx, 0, cx])
r2 = np.stack([0, fy, cy])
r3 = np.array([0., 0., 1.])
intrinsics = np.stack([r1, r2, r3])
return intrinsics
def get_multi_scale_intrinsics(intrinsics, num_scales):
"""Returns multiple intrinsic matrices for different scales."""
intrinsics_multi_scale = []
# Scale the intrinsics accordingly for each scale
for s in range(num_scales):
fx = intrinsics[0, 0] / (2 ** s)
fy = intrinsics[1, 1] / (2 ** s)
cx = intrinsics[0, 2] / (2 ** s)
cy = intrinsics[1, 2] / (2 ** s)
intrinsics_multi_scale.append(make_intrinsics_matrix(fx, fy, cx, cy))
intrinsics_multi_scale = np.stack(intrinsics_multi_scale) # [num_scales, 3, 3]
return intrinsics_multi_scale
def pack_pred_depths(pred_dir, test_file):
"""Pack depth predictions as a single .npy file"""
test_images = read_text_lines(test_file)
save_name = 'pred_depth.npy'
output_file = os.path.join(pred_dir, save_name)
img_height = 128
img_width = 416
all_pred = np.zeros((len(test_images), img_height, img_width))
for i, img_path in enumerate(test_images):
npy_path = os.path.join(pred_dir, img_path.replace('png', 'npy'))
depth = np.load(npy_path)
all_pred[i] = np.squeeze(depth)
np.save(output_file, all_pred)
# Depth evaluation utils
# Mostly based on the code written by Clement Godard:
# https://github.com/mrharicot/monodepth/blob/master/utils/evaluation_utils.py
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
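# The returned values are the standard monocular-depth evaluation metrics:
#   abs_rel  = mean(|gt - pred| / gt)
#   sq_rel   = mean((gt - pred)^2 / gt)
#   rmse     = sqrt(mean((gt - pred)^2))
#   rmse_log = sqrt(mean((log(gt) - log(pred))^2))
#   a_k      = fraction of pixels with max(gt/pred, pred/gt) < 1.25^k, k = 1, 2, 3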
# EIGEN split
def read_file_data(files, data_root):
gt_files = []
gt_calib = []
im_sizes = []
im_files = []
cams = []
num_probs = 0
for filename in files:
filename = filename.split()[0]
splits = filename.split('/')
# camera_id = filename[-1] # 2 is left, 3 is right
date = splits[0]
im_id = splits[4][:10]
file_root = '{}/{}'
im = filename
vel = '{}/{}/velodyne_points/data/{}.bin'.format(splits[0], splits[1], im_id)
if os.path.isfile(data_root + im):
gt_files.append(data_root + vel)
gt_calib.append(data_root + date + '/')
im_sizes.append(cv2.imread(data_root + im).shape[:2])
im_files.append(data_root + im)
cams.append(2)
else:
num_probs += 1
print('{} missing'.format(data_root + im))
# print(num_probs, 'files missing')
return gt_files, gt_calib, im_sizes, im_files, cams
def load_velodyne_points(file_name):
# adapted from https://github.com/hunse/kitti
points = np.fromfile(file_name, dtype=np.float32).reshape(-1, 4)
points[:, 3] = 1.0 # homogeneous
return points
def | read_calib_file | identifier_name | |
util.py | (im, cv2.COLOR_RGB2BGR)
_, im_data = cv2.imencode('.%s' % file_extension, im)
f.write(im_data.tostring())
def normalize_depth_for_display(depth, pc=95, crop_percent=0, normalizer=None,
cmap=CMAP_DEFAULT):
"""Converts a depth map to an RGB image."""
# Convert to disparity.
disp = 1.0 / (depth + 1e-6)
if normalizer is not None:
disp /= normalizer
else:
disp /= (np.percentile(disp, pc) + 1e-6)
disp = np.clip(disp, 0, 1)
disp = gray2rgb(disp, cmap=cmap)
keep_h = int(disp.shape[0] * (1 - crop_percent))
disp = disp[:keep_h]
return disp
def get_seq_start_end(target_index, seq_length, sample_every=1):
"""Returns absolute seq start and end indices for a given target frame."""
half_offset = int((seq_length - 1) / 2) * sample_every
end_index = target_index + half_offset
start_index = end_index - (seq_length - 1) * sample_every
return start_index, end_index
def get_seq_middle(seq_length):
"""Returns relative index for the middle frame in sequence."""
half_offset = int((seq_length - 1) / 2)
return seq_length - 1 - half_offset
def info(obj):
"""Return info on shape and dtype of a numpy array or TensorFlow tensor."""
if obj is None:
return 'None.'
elif isinstance(obj, list):
if obj:
|
else:
return 'Empty list.'
elif isinstance(obj, tuple):
if obj:
return 'Tuple of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty tuple.'
else:
if is_a_numpy_array(obj):
return 'Array with shape: %s, dtype: %s' % (obj.shape, obj.dtype)
else:
return str(obj)
def is_a_numpy_array(obj):
"""Returns true if obj is a numpy array."""
return type(obj).__module__ == np.__name__
def count_parameters(also_print=True):
"""Count the number of parameters in the model.
Args:
also_print: Boolean. If True also print the numbers.
Returns:
The total number of parameters.
"""
total = 0
if also_print:
logging.info('Model Parameters:')
for (_, v) in get_vars_to_save_and_restore().items():
shape = v.get_shape()
if also_print:
logging.info('%s %s: %s', v.op.name, shape,
format_number(shape.num_elements()))
total += shape.num_elements()
if also_print:
logging.info('Total: %s', format_number(total))
return total
def get_vars_to_save_and_restore(ckpt=None):
"""Returns list of variables that should be saved/restored.
Args:
ckpt: Path to existing checkpoint. If present, returns only the subset of
variables that exist in given checkpoint.
Returns:
List of all variables that need to be saved/restored.
"""
model_vars = tf.trainable_variables()
# Add batchnorm variables.
bn_vars = [v for v in tf.global_variables()
if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name or
'mu' in v.op.name or 'sigma' in v.op.name or
'global_scale_var' in v.op.name]
model_vars.extend(bn_vars)
model_vars = sorted(model_vars, key=lambda x: x.op.name)
mapping = {}
if ckpt is not None:
ckpt_var = tf.contrib.framework.list_variables(ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var]
ckpt_var_shapes = [shape for (unused_name, shape) in ckpt_var]
not_loaded = list(ckpt_var_names)
for v in model_vars:
if v.op.name not in ckpt_var_names:
# For backward compatibility, try additional matching.
v_additional_name = v.op.name.replace('egomotion_prediction/', '')
if v_additional_name in ckpt_var_names:
# Check if shapes match.
ind = ckpt_var_names.index(v_additional_name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v_additional_name] = v
not_loaded.remove(v_additional_name)
continue
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
logging.warning('Did not find var %s in checkpoint: %s', v.op.name,
os.path.basename(ckpt))
else:
# Check if shapes match.
ind = ckpt_var_names.index(v.op.name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v.op.name] = v
not_loaded.remove(v.op.name)
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
if not_loaded:
logging.warning('The following variables in the checkpoint were not loaded:')
for varname_not_loaded in not_loaded:
logging.info('%s', varname_not_loaded)
else: # just get model vars.
for v in model_vars:
mapping[v.op.name] = v
return mapping
def get_imagenet_vars_to_restore(imagenet_ckpt):
"""Returns dict of variables to restore from ImageNet-checkpoint."""
vars_to_restore_imagenet = {}
ckpt_var_names = tf.contrib.framework.list_variables(imagenet_ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var_names]
model_vars = tf.global_variables()
for v in model_vars:
if 'global_step' in v.op.name:
continue
mvname_noprefix = v.op.name.replace('depth_prediction/', '')
mvname_noprefix = mvname_noprefix.replace('moving_mean', 'mu')
mvname_noprefix = mvname_noprefix.replace('moving_variance', 'sigma')
if mvname_noprefix in ckpt_var_names:
vars_to_restore_imagenet[mvname_noprefix] = v
else:
logging.info('The following variable will not be restored from '
'pretrained ImageNet-checkpoint: %s', mvname_noprefix)
return vars_to_restore_imagenet
def format_number(n):
"""Formats number with thousands commas."""
# locale.setlocale(locale.LC_ALL, 'en_US') # commented by me
# return locale.format('%d', n, grouping=True)
return n
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
def read_text_lines(filepath):
with tf.gfile.Open(filepath, 'r') as f:
lines = f.readlines()
lines = [l.rstrip() for l in lines]
return lines
def save_flags(FLAGS, save_path):
import json
check_path(save_path)
save_path = os.path.join(save_path, 'flags.json')
with open(save_path, 'w') as f:
json.dump(FLAGS.flag_values_dict(), f, indent=4, sort_keys=False)
def check_path(path):
if not os.path.exists(path):
os.makedirs(path)
def save_command(save_path):
check_path(save_path)
import sys
command = sys.argv
save_file = os.path.join(save_path, 'command.txt')
with open(save_file, 'w') as f:
f.write(' '.join(command))
def make_intrinsics_matrix(fx, fy, cx, cy):
r1 = np.stack([fx, 0, cx])
r2 = np.stack([0, fy, cy])
r3 = np.array([0., 0., 1.])
intrinsics = np.stack([r1, r2, r3])
return intrinsics
def get_multi_scale_intrinsics(intrinsics, num_scales):
"""Returns multiple intrinsic matrices for different scales."""
intrinsics_multi_scale = []
# Scale the intrinsics accordingly for each scale
for s in range(num_scales):
fx = intrinsics[0, 0] / (2 ** s)
fy = intrinsics[1, 1] / (2 ** s)
cx = intrinsics[0, 2] / (2 ** s)
cy = intrinsics[1, 2] / (2 ** s)
intrinsics_multi_scale.append(make_intrinsics_matrix(fx, fy, cx, cy))
intrinsics_multi_scale = np.stack(intrinsics_multi_scale) # [num_scales, 3, 3]
return intrinsics_multi_scale
def pack_pred_depths(pred_dir, test_file):
"""Pack depth predictions as a single .npy file"""
test_images = read_text_lines(test_file)
save_name = 'pred_depth.npy'
output_file = os.path.join(pred_dir, save_name)
img_height = 128
img_width = 416
all_pred = np.zeros((len(test_images), img_height, img_width | return 'List of %d... %s' % (len(obj), info(obj[0])) | conditional_block |
util.py | sequence."""
half_offset = int((seq_length - 1) / 2)
return seq_length - 1 - half_offset
def info(obj):
"""Return info on shape and dtype of a numpy array or TensorFlow tensor."""
if obj is None:
return 'None.'
elif isinstance(obj, list):
if obj:
return 'List of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty list.'
elif isinstance(obj, tuple):
if obj:
return 'Tuple of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty tuple.'
else:
if is_a_numpy_array(obj):
return 'Array with shape: %s, dtype: %s' % (obj.shape, obj.dtype)
else:
return str(obj)
def is_a_numpy_array(obj):
"""Returns true if obj is a numpy array."""
return type(obj).__module__ == np.__name__
def count_parameters(also_print=True):
"""Count the number of parameters in the model.
Args:
also_print: Boolean. If True also print the numbers.
Returns:
The total number of parameters.
"""
total = 0
if also_print:
logging.info('Model Parameters:')
for (_, v) in get_vars_to_save_and_restore().items():
shape = v.get_shape()
if also_print:
logging.info('%s %s: %s', v.op.name, shape,
format_number(shape.num_elements()))
total += shape.num_elements()
if also_print:
logging.info('Total: %s', format_number(total))
return total
def get_vars_to_save_and_restore(ckpt=None):
"""Returns list of variables that should be saved/restored.
Args:
ckpt: Path to existing checkpoint. If present, returns only the subset of
variables that exist in given checkpoint.
Returns:
List of all variables that need to be saved/restored.
"""
model_vars = tf.trainable_variables()
# Add batchnorm variables.
bn_vars = [v for v in tf.global_variables()
if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name or
'mu' in v.op.name or 'sigma' in v.op.name or
'global_scale_var' in v.op.name]
model_vars.extend(bn_vars)
model_vars = sorted(model_vars, key=lambda x: x.op.name)
mapping = {}
if ckpt is not None:
ckpt_var = tf.contrib.framework.list_variables(ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var]
ckpt_var_shapes = [shape for (unused_name, shape) in ckpt_var]
not_loaded = list(ckpt_var_names)
for v in model_vars:
if v.op.name not in ckpt_var_names:
# For backward compatibility, try additional matching.
v_additional_name = v.op.name.replace('egomotion_prediction/', '')
if v_additional_name in ckpt_var_names:
# Check if shapes match.
ind = ckpt_var_names.index(v_additional_name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v_additional_name] = v
not_loaded.remove(v_additional_name)
continue
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
logging.warning('Did not find var %s in checkpoint: %s', v.op.name,
os.path.basename(ckpt))
else:
# Check if shapes match.
ind = ckpt_var_names.index(v.op.name)
if ckpt_var_shapes[ind] == v.get_shape():
mapping[v.op.name] = v
not_loaded.remove(v.op.name)
else:
logging.warning('Shape mismatch, will not restore %s.', v.op.name)
if not_loaded:
logging.warning('The following variables in the checkpoint were not loaded:')
for varname_not_loaded in not_loaded:
logging.info('%s', varname_not_loaded)
else: # just get model vars.
for v in model_vars:
mapping[v.op.name] = v
return mapping
def get_imagenet_vars_to_restore(imagenet_ckpt):
"""Returns dict of variables to restore from ImageNet-checkpoint."""
vars_to_restore_imagenet = {}
ckpt_var_names = tf.contrib.framework.list_variables(imagenet_ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var_names]
model_vars = tf.global_variables()
for v in model_vars:
if 'global_step' in v.op.name:
continue
mvname_noprefix = v.op.name.replace('depth_prediction/', '')
mvname_noprefix = mvname_noprefix.replace('moving_mean', 'mu')
mvname_noprefix = mvname_noprefix.replace('moving_variance', 'sigma')
if mvname_noprefix in ckpt_var_names:
vars_to_restore_imagenet[mvname_noprefix] = v
else:
logging.info('The following variable will not be restored from '
'pretrained ImageNet-checkpoint: %s', mvname_noprefix)
return vars_to_restore_imagenet
def format_number(n):
"""Formats number with thousands commas."""
# locale.setlocale(locale.LC_ALL, 'en_US') # commented by me
# return locale.format('%d', n, grouping=True)
return n
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
def read_text_lines(filepath):
with tf.gfile.Open(filepath, 'r') as f:
lines = f.readlines()
lines = [l.rstrip() for l in lines]
return lines
def save_flags(FLAGS, save_path):
import json
check_path(save_path)
save_path = os.path.join(save_path, 'flags.json')
with open(save_path, 'w') as f:
json.dump(FLAGS.flag_values_dict(), f, indent=4, sort_keys=False)
def check_path(path):
if not os.path.exists(path):
os.makedirs(path)
def save_command(save_path):
check_path(save_path)
import sys
command = sys.argv
save_file = os.path.join(save_path, 'command.txt')
with open(save_file, 'w') as f:
f.write(' '.join(command))
def make_intrinsics_matrix(fx, fy, cx, cy):
r1 = np.stack([fx, 0, cx])
r2 = np.stack([0, fy, cy])
r3 = np.array([0., 0., 1.])
intrinsics = np.stack([r1, r2, r3])
return intrinsics
def get_multi_scale_intrinsics(intrinsics, num_scales):
"""Returns multiple intrinsic matrices for different scales."""
intrinsics_multi_scale = []
# Scale the intrinsics accordingly for each scale
for s in range(num_scales):
fx = intrinsics[0, 0] / (2 ** s)
fy = intrinsics[1, 1] / (2 ** s)
cx = intrinsics[0, 2] / (2 ** s)
cy = intrinsics[1, 2] / (2 ** s)
intrinsics_multi_scale.append(make_intrinsics_matrix(fx, fy, cx, cy))
intrinsics_multi_scale = np.stack(intrinsics_multi_scale) # [num_scales, 3, 3]
return intrinsics_multi_scale
def pack_pred_depths(pred_dir, test_file):
"""Pack depth predictions as a single .npy file"""
test_images = read_text_lines(test_file)
save_name = 'pred_depth.npy'
output_file = os.path.join(pred_dir, save_name)
img_height = 128
img_width = 416
all_pred = np.zeros((len(test_images), img_height, img_width))
for i, img_path in enumerate(test_images):
npy_path = os.path.join(pred_dir, img_path.replace('png', 'npy'))
depth = np.load(npy_path)
all_pred[i] = np.squeeze(depth)
np.save(output_file, all_pred)
# Depth evaluation utils
# Mostly based on the code written by Clement Godard:
# https://github.com/mrharicot/monodepth/blob/master/utils/evaluation_utils.py
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
# EIGEN split
| def read_file_data(files, data_root): | random_line_split | |
gogo_fast_api.pb.go |
func (m *Http) GetFullyDecodeReservedExpansion() bool {
if m != nil {
return m.FullyDecodeReservedExpansion
}
return false
}
func (m *Http) GetAnyData() *types.Any {
if m != nil {
return m.AnyData
}
return nil
}
// HttpRule .
type HttpRule struct {
Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
// Types that are valid to be assigned to Pattern:
// *HttpRule_Get
// *HttpRule_Put
// *HttpRule_Post
// *HttpRule_Delete
// *HttpRule_Patch
// *HttpRule_Custom
Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"`
AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HttpRule) Reset() { *m = HttpRule{} }
func (m *HttpRule) String() string { return proto.CompactTextString(m) }
func (*HttpRule) ProtoMessage() {}
func (*HttpRule) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{1}
}
func (m *HttpRule) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HttpRule.Unmarshal(m, b)
}
func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic)
}
func (m *HttpRule) XXX_Merge(src proto.Message) {
xxx_messageInfo_HttpRule.Merge(m, src)
}
func (m *HttpRule) XXX_Size() int {
return xxx_messageInfo_HttpRule.Size(m)
}
func (m *HttpRule) XXX_DiscardUnknown() {
xxx_messageInfo_HttpRule.DiscardUnknown(m)
}
var xxx_messageInfo_HttpRule proto.InternalMessageInfo
type isHttpRule_Pattern interface {
isHttpRule_Pattern()
}
type HttpRule_Get struct {
Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof" json:"get,omitempty"`
}
type HttpRule_Put struct {
Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof" json:"put,omitempty"`
}
type HttpRule_Post struct {
Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof" json:"post,omitempty"`
}
type HttpRule_Delete struct {
Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof" json:"delete,omitempty"`
}
type HttpRule_Patch struct {
Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof" json:"patch,omitempty"`
}
type HttpRule_Custom struct {
Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof" json:"custom,omitempty"`
}
func (*HttpRule_Get) isHttpRule_Pattern() {}
func (*HttpRule_Put) isHttpRule_Pattern() {}
func (*HttpRule_Post) isHttpRule_Pattern() {}
func (*HttpRule_Delete) isHttpRule_Pattern() {}
func (*HttpRule_Patch) isHttpRule_Pattern() {}
func (*HttpRule_Custom) isHttpRule_Pattern() {}
func (m *HttpRule) GetPattern() isHttpRule_Pattern {
if m != nil {
return m.Pattern
}
return nil
}
func (m *HttpRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *HttpRule) GetGet() string {
if x, ok := m.GetPattern().(*HttpRule_Get); ok {
return x.Get
}
return ""
}
func (m *HttpRule) GetPut() string {
if x, ok := m.GetPattern().(*HttpRule_Put); ok {
return x.Put
}
return ""
}
func (m *HttpRule) GetPost() string {
if x, ok := m.GetPattern().(*HttpRule_Post); ok {
return x.Post
}
return ""
}
func (m *HttpRule) GetDelete() string {
if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
return x.Delete
}
return ""
}
func (m *HttpRule) GetPatch() string {
if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
return x.Patch
}
return ""
}
func (m *HttpRule) GetCustom() *CustomHttpPattern {
if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
return x.Custom
}
return nil
}
func (m *HttpRule) GetBody() string {
if m != nil {
return m.Body
}
return ""
}
func (m *HttpRule) GetResponseBody() string {
if m != nil {
return m.ResponseBody
}
return ""
}
func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
if m != nil {
return m.AdditionalBindings
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*HttpRule) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*HttpRule_Get)(nil),
(*HttpRule_Put)(nil),
(*HttpRule_Post)(nil),
(*HttpRule_Delete)(nil),
(*HttpRule_Patch)(nil),
(*HttpRule_Custom)(nil),
}
}
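// A hypothetical construction using the oneof pattern above (sketch only; the
// selector and path strings are made up). Exactly one Pattern wrapper may be
// assigned at a time:
//
//	rule := &HttpRule{
//		Selector: "example.Service.GetItem",
//		Pattern:  &HttpRule_Get{Get: "/v1/items/{id}"},
//	}
//	if get, ok := rule.Pattern.(*HttpRule_Get); ok {
//		_ = get.Get // "/v1/items/{id}"
//	}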
// CustomHttpPattern .
type CustomHttpPattern struct {
Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
func (*CustomHttpPattern) ProtoMessage() {}
func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{2}
}
func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b)
}
func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic)
}
func (m *CustomHttpPattern) XXX_Merge(src proto.Message) {
xxx_messageInfo_CustomHttpPattern.Merge(m, src)
}
func (m *CustomHttpPattern) XXX_Size() int {
return xxx_messageInfo_CustomHttpPattern.Size(m)
}
func (m *CustomHttpPattern) XXX_DiscardUnknown() {
xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m)
}
var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo
func (m *CustomHttpPattern) GetKind() string {
if m != nil {
return m.Kind
}
return ""
}
func (m *CustomHttpPattern) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func init() {
proto.RegisterType((*Http)(nil), "gogoapi.Http")
proto.RegisterType((*HttpRule)(nil), "gogoapi.HttpRule")
proto.RegisterType((*CustomHttpPattern)(nil), "gogoapi.CustomHttpPattern")
}
func init() { proto.RegisterFile("gogo_fast_api.proto", fileDescriptor_08ec8be04814436d) }
var fileDescriptor_08ec8be04814436d = []byte{
// 438 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x4f, 0x8f, 0xd3, 0x3c,
0x10, 0x87, 0xdf, 0x6c, 0xff, 0xa5, 0xee, 0xbe, 0x87, 0xf5, 0xae, 0x90, 0xa9, 0x90, 0xa8, 0xca,
0x81, 0x9e, 0x12, 0x69, 0xe1, 0x82 | {
if m != nil {
return m.Rules
}
return nil
} | identifier_body | |
gogo_fast_api.pb.go |
return nil
}
func (m *Http) GetFullyDecodeReservedExpansion() bool {
if m != nil {
return m.FullyDecodeReservedExpansion
}
return false
}
func (m *Http) GetAnyData() *types.Any {
if m != nil {
return m.AnyData
}
return nil
}
// HttpRule .
type HttpRule struct {
Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
// Types that are valid to be assigned to Pattern:
// *HttpRule_Get
// *HttpRule_Put
// *HttpRule_Post
// *HttpRule_Delete
// *HttpRule_Patch
// *HttpRule_Custom
Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"`
AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HttpRule) Reset() { *m = HttpRule{} }
func (m *HttpRule) String() string { return proto.CompactTextString(m) }
func (*HttpRule) ProtoMessage() {}
func (*HttpRule) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{1}
}
func (m *HttpRule) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HttpRule.Unmarshal(m, b)
}
func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic)
}
func (m *HttpRule) XXX_Merge(src proto.Message) {
xxx_messageInfo_HttpRule.Merge(m, src)
}
func (m *HttpRule) XXX_Size() int {
return xxx_messageInfo_HttpRule.Size(m)
}
func (m *HttpRule) XXX_DiscardUnknown() {
xxx_messageInfo_HttpRule.DiscardUnknown(m)
}
var xxx_messageInfo_HttpRule proto.InternalMessageInfo
type isHttpRule_Pattern interface {
isHttpRule_Pattern()
}
type HttpRule_Get struct {
Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof" json:"get,omitempty"`
}
type HttpRule_Put struct {
Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof" json:"put,omitempty"`
}
type HttpRule_Post struct {
Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof" json:"post,omitempty"`
}
type HttpRule_Delete struct {
Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof" json:"delete,omitempty"`
}
type HttpRule_Patch struct {
Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof" json:"patch,omitempty"`
}
type HttpRule_Custom struct {
Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof" json:"custom,omitempty"`
}
func (*HttpRule_Get) isHttpRule_Pattern() {}
func (*HttpRule_Put) isHttpRule_Pattern() {}
func (*HttpRule_Post) isHttpRule_Pattern() {}
func (*HttpRule_Delete) isHttpRule_Pattern() {}
func (*HttpRule_Patch) isHttpRule_Pattern() {}
func (*HttpRule_Custom) isHttpRule_Pattern() {}
func (m *HttpRule) GetPattern() isHttpRule_Pattern {
if m != nil {
return m.Pattern
}
return nil
}
func (m *HttpRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *HttpRule) GetGet() string {
if x, ok := m.GetPattern().(*HttpRule_Get); ok {
return x.Get
}
return ""
}
func (m *HttpRule) GetPut() string {
if x, ok := m.GetPattern().(*HttpRule_Put); ok {
return x.Put
}
return ""
}
func (m *HttpRule) GetPost() string {
if x, ok := m.GetPattern().(*HttpRule_Post); ok {
return x.Post
}
return ""
}
func (m *HttpRule) GetDelete() string {
if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
return x.Delete
}
return ""
}
func (m *HttpRule) GetPatch() string {
if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
return x.Patch
}
return ""
}
func (m *HttpRule) GetCustom() *CustomHttpPattern {
if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
return x.Custom
}
return nil
}
func (m *HttpRule) GetBody() string {
if m != nil {
return m.Body
}
return ""
}
func (m *HttpRule) GetResponseBody() string {
if m != nil {
return m.ResponseBody
}
return ""
}
func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
if m != nil {
return m.AdditionalBindings
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*HttpRule) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*HttpRule_Get)(nil),
(*HttpRule_Put)(nil),
(*HttpRule_Post)(nil),
(*HttpRule_Delete)(nil),
(*HttpRule_Patch)(nil),
(*HttpRule_Custom)(nil),
}
}
// CustomHttpPattern .
type CustomHttpPattern struct {
Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
func (*CustomHttpPattern) ProtoMessage() {}
func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{2}
}
func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b)
}
func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic)
}
func (m *CustomHttpPattern) XXX_Merge(src proto.Message) {
xxx_messageInfo_CustomHttpPattern.Merge(m, src)
}
func (m *CustomHttpPattern) XXX_Size() int {
return xxx_messageInfo_CustomHttpPattern.Size(m)
}
func (m *CustomHttpPattern) XXX_DiscardUnknown() {
xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m)
}
var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo
func (m *CustomHttpPattern) GetKind() string {
if m != nil {
return m.Kind
}
return ""
}
func (m *CustomHttpPattern) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func init() {
proto.RegisterType((*Http)(nil), "gogoapi.Http")
proto.RegisterType((*HttpRule)(nil), "gogoapi.HttpRule")
proto.RegisterType((*CustomHttpPattern)(nil), "gogoapi.CustomHttpPattern")
}
func init() { proto.RegisterFile("gogo_fast_api.proto", fileDescriptor_08ec8be04814436d) }
var fileDescriptor_08ec8be04814436d = []byte{
// 438 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x4f, 0x8f, 0xd3, 0x3c,
0x10, 0x87, 0xdf, 0x6c, 0xff, 0xa5, 0xee, 0xbe, 0x87, 0xf5, 0xae, 0x90, 0xa9, 0x90, 0xa8, 0xca,
0x81, 0x9e, 0x12, 0x69, 0xe1, 0x82, 0x7 | {
return m.Rules
} | conditional_block | |
gogo_fast_api.pb.go | _bindings,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HttpRule) Reset() { *m = HttpRule{} }
func (m *HttpRule) String() string { return proto.CompactTextString(m) }
func (*HttpRule) ProtoMessage() {}
func (*HttpRule) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{1}
}
func (m *HttpRule) XXX_Unmarshal(b []byte) error { | func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic)
}
func (m *HttpRule) XXX_Merge(src proto.Message) {
xxx_messageInfo_HttpRule.Merge(m, src)
}
func (m *HttpRule) XXX_Size() int {
return xxx_messageInfo_HttpRule.Size(m)
}
func (m *HttpRule) XXX_DiscardUnknown() {
xxx_messageInfo_HttpRule.DiscardUnknown(m)
}
var xxx_messageInfo_HttpRule proto.InternalMessageInfo
type isHttpRule_Pattern interface {
isHttpRule_Pattern()
}
type HttpRule_Get struct {
Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof" json:"get,omitempty"`
}
type HttpRule_Put struct {
Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof" json:"put,omitempty"`
}
type HttpRule_Post struct {
Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof" json:"post,omitempty"`
}
type HttpRule_Delete struct {
Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof" json:"delete,omitempty"`
}
type HttpRule_Patch struct {
Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof" json:"patch,omitempty"`
}
type HttpRule_Custom struct {
Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof" json:"custom,omitempty"`
}
func (*HttpRule_Get) isHttpRule_Pattern() {}
func (*HttpRule_Put) isHttpRule_Pattern() {}
func (*HttpRule_Post) isHttpRule_Pattern() {}
func (*HttpRule_Delete) isHttpRule_Pattern() {}
func (*HttpRule_Patch) isHttpRule_Pattern() {}
func (*HttpRule_Custom) isHttpRule_Pattern() {}
func (m *HttpRule) GetPattern() isHttpRule_Pattern {
if m != nil {
return m.Pattern
}
return nil
}
func (m *HttpRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *HttpRule) GetGet() string {
if x, ok := m.GetPattern().(*HttpRule_Get); ok {
return x.Get
}
return ""
}
func (m *HttpRule) GetPut() string {
if x, ok := m.GetPattern().(*HttpRule_Put); ok {
return x.Put
}
return ""
}
func (m *HttpRule) GetPost() string {
if x, ok := m.GetPattern().(*HttpRule_Post); ok {
return x.Post
}
return ""
}
func (m *HttpRule) GetDelete() string {
if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
return x.Delete
}
return ""
}
func (m *HttpRule) GetPatch() string {
if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
return x.Patch
}
return ""
}
func (m *HttpRule) GetCustom() *CustomHttpPattern {
if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
return x.Custom
}
return nil
}
func (m *HttpRule) GetBody() string {
if m != nil {
return m.Body
}
return ""
}
func (m *HttpRule) GetResponseBody() string {
if m != nil {
return m.ResponseBody
}
return ""
}
func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
if m != nil {
return m.AdditionalBindings
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*HttpRule) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*HttpRule_Get)(nil),
(*HttpRule_Put)(nil),
(*HttpRule_Post)(nil),
(*HttpRule_Delete)(nil),
(*HttpRule_Patch)(nil),
(*HttpRule_Custom)(nil),
}
}
// CustomHttpPattern .
type CustomHttpPattern struct {
Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
func (*CustomHttpPattern) ProtoMessage() {}
func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{2}
}
func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b)
}
func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic)
}
func (m *CustomHttpPattern) XXX_Merge(src proto.Message) {
xxx_messageInfo_CustomHttpPattern.Merge(m, src)
}
func (m *CustomHttpPattern) XXX_Size() int {
return xxx_messageInfo_CustomHttpPattern.Size(m)
}
func (m *CustomHttpPattern) XXX_DiscardUnknown() {
xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m)
}
var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo
func (m *CustomHttpPattern) GetKind() string {
if m != nil {
return m.Kind
}
return ""
}
func (m *CustomHttpPattern) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func init() {
proto.RegisterType((*Http)(nil), "gogoapi.Http")
proto.RegisterType((*HttpRule)(nil), "gogoapi.HttpRule")
proto.RegisterType((*CustomHttpPattern)(nil), "gogoapi.CustomHttpPattern")
}
func init() { proto.RegisterFile("gogo_fast_api.proto", fileDescriptor_08ec8be04814436d) }
var fileDescriptor_08ec8be04814436d = []byte{
// 438 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x4f, 0x8f, 0xd3, 0x3c,
0x10, 0x87, 0xdf, 0x6c, 0xff, 0xa5, 0xee, 0xbe, 0x87, 0xf5, 0xae, 0x90, 0xa9, 0x90, 0xa8, 0xca,
0x81, 0x9e, 0x12, 0x69, 0xe1, 0x82, 0x7a, 0x22, 0xec, 0x4a, 0x7b, 0x44, 0x39, 0x72, 0x89, 0x26,
0xf1, 0x34, 0xb5, 0x9a, 0xda, 0x56, 0x3c, 0x41, 0xe4, 0x13, 0xf1, 0x85, 0xf8, 0x40, 0x28, 0x8e,
0x5b, 0x0e, 0x88, 0xdb, 0xcc, 0x6f, 0x9e, 0x24, 0x4f, 0xc6, 0x66, 0xf7, 0xb5, 0xa9, 0x4d, 0x71,
0x00, 0x47, 0x05, 0x58, 0x95, 0xd8, 0 | return xxx_messageInfo_HttpRule.Unmarshal(m, b)
} | random_line_split |
gogo_fast_api.pb.go | ,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HttpRule) Reset() { *m = HttpRule{} }
func (m *HttpRule) String() string { return proto.CompactTextString(m) }
func (*HttpRule) ProtoMessage() {}
func (*HttpRule) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{1}
}
func (m *HttpRule) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HttpRule.Unmarshal(m, b)
}
func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic)
}
func (m *HttpRule) XXX_Merge(src proto.Message) {
xxx_messageInfo_HttpRule.Merge(m, src)
}
func (m *HttpRule) XXX_Size() int {
return xxx_messageInfo_HttpRule.Size(m)
}
func (m *HttpRule) XXX_DiscardUnknown() {
xxx_messageInfo_HttpRule.DiscardUnknown(m)
}
var xxx_messageInfo_HttpRule proto.InternalMessageInfo
type isHttpRule_Pattern interface {
isHttpRule_Pattern()
}
type HttpRule_Get struct {
Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof" json:"get,omitempty"`
}
type HttpRule_Put struct {
Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof" json:"put,omitempty"`
}
type HttpRule_Post struct {
Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof" json:"post,omitempty"`
}
type HttpRule_Delete struct {
Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof" json:"delete,omitempty"`
}
type HttpRule_Patch struct {
Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof" json:"patch,omitempty"`
}
type HttpRule_Custom struct {
Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof" json:"custom,omitempty"`
}
func (*HttpRule_Get) isHttpRule_Pattern() {}
func (*HttpRule_Put) isHttpRule_Pattern() {}
func (*HttpRule_Post) isHttpRule_Pattern() {}
func (*HttpRule_Delete) isHttpRule_Pattern() {}
func (*HttpRule_Patch) isHttpRule_Pattern() {}
func (*HttpRule_Custom) isHttpRule_Pattern() {}
func (m *HttpRule) GetPattern() isHttpRule_Pattern {
if m != nil {
return m.Pattern
}
return nil
}
func (m *HttpRule) | () string {
if m != nil {
return m.Selector
}
return ""
}
func (m *HttpRule) GetGet() string {
if x, ok := m.GetPattern().(*HttpRule_Get); ok {
return x.Get
}
return ""
}
func (m *HttpRule) GetPut() string {
if x, ok := m.GetPattern().(*HttpRule_Put); ok {
return x.Put
}
return ""
}
func (m *HttpRule) GetPost() string {
if x, ok := m.GetPattern().(*HttpRule_Post); ok {
return x.Post
}
return ""
}
func (m *HttpRule) GetDelete() string {
if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
return x.Delete
}
return ""
}
func (m *HttpRule) GetPatch() string {
if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
return x.Patch
}
return ""
}
func (m *HttpRule) GetCustom() *CustomHttpPattern {
if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
return x.Custom
}
return nil
}
func (m *HttpRule) GetBody() string {
if m != nil {
return m.Body
}
return ""
}
func (m *HttpRule) GetResponseBody() string {
if m != nil {
return m.ResponseBody
}
return ""
}
func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
if m != nil {
return m.AdditionalBindings
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*HttpRule) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*HttpRule_Get)(nil),
(*HttpRule_Put)(nil),
(*HttpRule_Post)(nil),
(*HttpRule_Delete)(nil),
(*HttpRule_Patch)(nil),
(*HttpRule_Custom)(nil),
}
}
// CustomHttpPattern .
type CustomHttpPattern struct {
Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
func (*CustomHttpPattern) ProtoMessage() {}
func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
return fileDescriptor_08ec8be04814436d, []int{2}
}
func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b)
}
func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic)
}
func (m *CustomHttpPattern) XXX_Merge(src proto.Message) {
xxx_messageInfo_CustomHttpPattern.Merge(m, src)
}
func (m *CustomHttpPattern) XXX_Size() int {
return xxx_messageInfo_CustomHttpPattern.Size(m)
}
func (m *CustomHttpPattern) XXX_DiscardUnknown() {
xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m)
}
var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo
func (m *CustomHttpPattern) GetKind() string {
if m != nil {
return m.Kind
}
return ""
}
func (m *CustomHttpPattern) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func init() {
proto.RegisterType((*Http)(nil), "gogoapi.Http")
proto.RegisterType((*HttpRule)(nil), "gogoapi.HttpRule")
proto.RegisterType((*CustomHttpPattern)(nil), "gogoapi.CustomHttpPattern")
}
func init() { proto.RegisterFile("gogo_fast_api.proto", fileDescriptor_08ec8be04814436d) }
var fileDescriptor_08ec8be04814436d = []byte{
// 438 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x4f, 0x8f, 0xd3, 0x3c,
0x10, 0x87, 0xdf, 0x6c, 0xff, 0xa5, 0xee, 0xbe, 0x87, 0xf5, 0xae, 0x90, 0xa9, 0x90, 0xa8, 0xca,
0x81, 0x9e, 0x12, 0x69, 0xe1, 0x82, 0x7a, 0x22, 0xec, 0x4a, 0x7b, 0x44, 0x39, 0x72, 0x89, 0x26,
0xf1, 0x34, 0xb5, 0x9a, 0xda, 0x56, 0x3c, 0x41, 0xe4, 0x13, 0xf1, 0x85, 0xf8, 0x40, 0x28, 0x8e,
0x5b, 0x0e, 0x88, 0xdb, 0xcc, 0x6f, 0x9e, 0x24, 0x4f, 0xc6, 0x66, 0xf7, 0xb5, 0xa9, 0x4d, 0x71,
0x00, 0x47, 0x05, 0x58, 0x95, 0xd8, 0 | GetSelector | identifier_name |
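The generated accessors in the row above follow the standard protobuf oneof pattern: each variant gets a small wrapper struct implementing isHttpRule_Pattern, and each getter type-asserts on the wrapper before returning either the stored value or a zero value. A rough Python analogue of that check-then-unwrap idea, purely illustrative and not generated from any .proto:

from dataclasses import dataclass
from typing import Optional

@dataclass
class PatternVariant:
    # Tagged-union style: exactly one variant kind is stored at a time.
    kind: str    # "get", "put", "post", "delete", "patch", or "custom"
    value: str

@dataclass
class HttpRuleSketch:
    selector: str = ""
    pattern: Optional[PatternVariant] = None

    def get_get(self) -> str:
        # Mirrors GetGet(): only return the value if the "get" variant is set.
        if self.pattern is not None and self.pattern.kind == "get":
            return self.pattern.value
        return ""

rule = HttpRuleSketch(selector="example.Service.Method",
                      pattern=PatternVariant("get", "/v1/items/{id}"))
print(rule.get_get())              # -> /v1/items/{id}
print(HttpRuleSketch().get_get())  # -> "" (zero value when the variant is unset)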
app.js | if(type == void 0){
this.listeners = {}
return this
}
if(handler == void 0){
delete this.listeners[type]
return this
}
let handlers = this.listeners[type] || [];
let id = handlers.indexOf(handler);
if(id != -1){
handlers.splice(id,1);
return this;
}
if(handlers.length == 0){
delete this.listeners[type]
}
return this;
}
}
/**
* 游戏地图
*/
class GridMap {
constructor(selector) {
this.$el = document.querySelector(selector);
}
create(rows, cols) {
let html = '';
| (let i = 0; i < rows; i++) {
html += '<tr>';
for (let j = 0; j < cols; j++) {
html += '<td class="map-box" data-type="empty">';
}
html += '</tr>';
}
this.rows = rows;
this.cols = cols;
this.$el.innerHTML = html;
this.$boxes = this.$el.getElementsByTagName('td');
}
clear() {
for (let i = 0; i < this.rows; i++) {
for (let j = 0; j < this.cols; j++) {
this.type([i, j], 'empty');
}
}
}
type([x,y], type) {
if (type == void 0) {
return this.$boxes[y * this.cols + x].dataset.type;
} else {
this.$boxes[y * this.cols + x].dataset.type = type;
}
}
}
class PathFinder {
constructor(map,cfg){
this.map = map;
this.rows = map.rows;
this.cols = map.cols;
this.search_type = cfg.search_type;
this.path = [[0,1],[-1,0],[0,-1],[1,0]]
}
setSearchType(type) {
this.search_type = type;
}
/**
* 返回路径
* @param src
* @param dst
* @returns {*|Array}
*/
find_path(src, dst) {
switch (this.search_type) {
case 'dfs':
return this.dfs(src, dst, {});
case 'bfs':
return this.bfs(src, dst, {});
case 'astar':
return this.astar(src, dst,{});
}
}
/**
* 判断坐标是否在地图内
* @param i
* @param j
* @returns {boolean}
*/
isValid(i, j) {
return i >= 0 && i < this.rows && j >= 0 && j < this.cols;
}
/**
* 深度优先搜索
* @param src
* @param dst
* @param visited
* @returns {*}
*/
dfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y};
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true;
var path = this.dfs(next, dst, visited);
if (path) {
path.unshift(next);
return path;
}
}
}
}
}
/**
* 广度优先搜索
*/
bfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
let queue = [];
queue.push(src);
let path = []
while (queue.length > 0) {
src = queue.shift();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y}
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true
queue.push(next);
}
}
}
}
}
/**
* A* 寻路算法
* @param src
* @param dst
* @param visited
* @returns {*[]}
*/
astar(src,dst,visited){
if(src.x == dst.x && src.y == dst.y){
return [dst]
}
let dist = (s1,s2)=>{
return Math.abs(s1.x - s2.x) + Math.abs(s1.y - s2.y);
};
let hashPos = s => s.x + '-' + s.y;
let cmp = (el1,el2) => dist(el1,dst) - dist(el2,dst);
let heap = new MinHeap(cmp);
heap.push(src);
let path = []
while(!heap.empty()){
src = heap.pop();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for(let [i,j] of this.path){
let x = src.x + i;
let y = src.y + j;
let next = {x,y};
if(this.isValid(x,y) && !visited[x + '-' + y] && this.map.type([x,y]) == 'empty'){
visited[x + '-' + y] = true;
heap.push(next);
}
}
}
}
}
class Character{
constructor(selector){
if(typeof selector == 'string'){
this.$el = document.querySelector(selector)
}else if(selector.nodeType){
this.$el = selector
}else{
this.$el = null
}
}
setPos([x,y]){
this.x = x;
this.y = y;
this.$el.style.left = x * 20 + 'px';
this.$el.style.top = y * 20 + 'px';
}
getPos() {
return {x: this.x, y: this.y};
}
}
/**
* 玩家类
*/
class Player extends Character{
constructor(selector) {
super(selector)
}
/**
* 异步移动,实现动画效果
* @param pos
*/
goto(pos) {
this.x = pos.x;
this.y = pos.y;
this.$el.style.left = this.x * 20 + 'px';
this.$el.style.top = this.y * 20 + 'px';
}
}
class Enemy extends Character{
}
class Target extends Character{
}
/**
* 主游戏类
*/
class Game extends Event{
constructor(cfg) {
super()
//初始化配置
this.cfg = {
rows: 20,
cols: 20,
search_type: 'dfs',
duration: 100
}
Object.assign(this.cfg,cfg);
//初始化数据
this.map = new GridMap('#kingsman-map');
this.player = new Player('#kingsman-player');
this.target = new Target('#kingsman-target');
this.map.create(this.cfg.rows, this.cfg.cols);
this.pathFinder = new PathFinder(this.map,{
search_type: this.cfg.search_type
});
//重置游戏状态
this.reset();
}
/**
* 设置玩家和目标
*/
setPlayerAndTarget() {
let positions = [];
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (this.map.type([i, j]) == 'empty') {
positions.push([i, j]);
}
}
}
let len = positions.length;
if (len < 2) {
throw new Error('map is full');
}
Utils.shuffle(positions);
let player = positions[0];
let target = positions[1];
this.player.setPos(player);
this.target.setPos(target);
}
setEnemy(){
}
/**
* 随机的修建障碍物 //todo 建筑迷宫算法
*/
randBuild() {
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (Math.random() > 0.9) {
this.map.type([i, j], 'wall');
}
}
}
}
/**
* 设置间隔时间
* @param duration
*/
setDuration(duration) {
this.cfg.duration = duration;
this.player.$el.style.transitionDuration = duration + 'ms';
}
/**
* 设置地图尺寸
* @param n
*/
setSize(n) {
this.cfg.rows = this.cfg.cols = n;
this.map.create(this.cfg.cols, this.cfg.rows);
this.randBuild();
}
/**
* 设置寻路算法
* @param search_type
*/
setSearchType(search_type) | for | identifier_name |
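The heap-based search in the row above ranks cells by the Manhattan distance to the destination alone, so it behaves as a greedy best-first search rather than full A* (which would rank by distance travelled plus the heuristic); the MinHeap(cmp) helper it relies on is assumed to be defined elsewhere in app.js with push/pop/empty. A minimal Python sketch of the same idea on a boolean grid, using the standard-library heapq in place of that helper:

import heapq

def greedy_best_first(grid, src, dst):
    """Greedy best-first search on a grid of True (walkable) / False (wall).

    Mirrors the structure of the JS version: cells are popped in order of
    their Manhattan distance to dst; the returned list is the sequence of
    expanded cells, not necessarily a shortest path.
    """
    rows, cols = len(grid), len(grid[0])
    h = lambda p: abs(p[0] - dst[0]) + abs(p[1] - dst[1])  # Manhattan heuristic
    heap = [(h(src), src)]
    visited = {src}
    expanded = []
    while heap:
        _, cur = heapq.heappop(heap)
        expanded.append(cur)
        if cur == dst:
            return expanded
        for di, dj in ((0, 1), (-1, 0), (0, -1), (1, 0)):
            nxt = (cur[0] + di, cur[1] + dj)
            if (0 <= nxt[0] < rows and 0 <= nxt[1] < cols
                    and nxt not in visited and grid[nxt[0]][nxt[1]]):
                visited.add(nxt)
                heapq.heappush(heap, (h(nxt), nxt))
    return None  # destination unreachable

grid = [[True, True, False],
        [False, True, True],
        [True, True, True]]
print(greedy_best_first(grid, (0, 0), (2, 2)))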
app.js | if(type == void 0){
this.listeners = {}
return this
}
if(handler == void 0){
delete this.listeners[type]
return this
}
let handlers = this.listeners[type] || [];
let id = handlers.indexOf(handler);
if(id != -1){
handlers.splice(id,1);
return this;
}
if(handlers.length == 0){
delete this.listeners[type]
}
return this;
}
}
/**
* 游戏地图
*/
class GridMap {
constructor(selector) {
this.$el = document.querySelector(selector);
}
create(rows, cols) {
let html = '';
for (let i = 0; i < rows; i++) {
html += '<tr>';
for (let j = 0; j < cols; j++) {
html += '<td class="map-box" data-type="empty">';
}
html += '</tr>';
}
this.rows = rows;
this.cols = cols;
this.$el.innerHTML = html;
this.$boxes = this.$el.getElementsByTagName('td');
}
clear() {
for (let i = 0; i < this.rows; i++) {
for (let j = 0; j < this.cols; j++) {
this.type([i, j], 'empty');
}
}
}
type([x,y], type) {
if (type == void 0) {
return this.$boxes[y * this.cols + x].dataset.type;
} else {
this.$boxes[y * this.cols + x].dataset.type = type;
}
}
}
class PathFinder {
constructor(map,cfg){
this.map = map;
this.rows = map.rows;
this.cols = map.cols;
this.search_type = cfg.search_type;
this.path = [[0,1],[-1,0],[0,-1],[1,0]]
}
setSearchType(type) {
this.search_type = type;
}
/**
* 返回路径
* @param src
* @param dst
* @returns {*|Array}
*/
find_path(src, dst) {
switch (this.search_type) {
case 'dfs':
return this.dfs(src, dst, {});
case 'bfs':
return this.bfs(src, dst, {});
case 'astar':
return this.astar(src, dst,{});
}
}
/**
* 判断坐标是否在地图内
* @param i
* @param j
* @returns {boolean}
*/
isValid(i, j) {
return i >= 0 && i < this.rows && j >= 0 && j < this.cols;
}
/**
* 深度优先搜索
* @param src
* @param dst
* @param visited
* @returns {*}
*/
dfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y};
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true;
var path = this.dfs(next, dst, visited);
if (path) {
path.unshift(next);
return path;
}
}
}
}
}
/**
* 广度优先搜索
*/
bfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
let queue = [];
queue.push(src);
let path = []
while (queue.length > 0) {
src = queue.shift();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y}
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true
queue.push(next);
}
}
}
}
}
/**
* A* 寻路算法
* @param src
* @param dst
* @param visited
* @returns {*[]}
*/
astar(src,dst,visited){
if(src.x == dst.x && src.y == dst.y){
return [dst]
}
let dist = (s1,s2)=>{
return Math.abs(s1.x - s2.x) + Math.abs(s1.y - s2.y);
};
let hashPos = s => s.x + '-' + s.y;
let cmp = (el1,el2) => dist(el1,dst) - dist(el2,dst);
let heap = new MinHeap(cmp);
heap.push(src);
let path = []
while(!heap.empty()){
src = heap.pop();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for(let [i,j] of this.path){
let x = src.x + i;
let y = src.y + j;
let next = {x,y};
if(this.isValid(x,y) && !visited[x + '-' + y] && this.map.type([x,y]) == 'empty'){
visited[x + '-' + y] = true;
heap.push(next);
}
}
}
}
}
class Character{
constructor(selector){
if(typeof selector == 'string'){
this.$el = document.querySelector(selector)
}else if(selector.nodeType){
this.$el = selector
}else{
this.$el = null
}
}
setPos([x,y]){
this.x = x;
this.y = y;
|
this.$el.style.top = y * 20 + 'px';
}
getPos() {
return {x: this.x, y: this.y};
}
}
/**
* 玩家类
*/
class Player extends Character{
constructor(selector) {
super(selector)
}
/**
* 异步移动,实现动画效果
* @param pos
*/
goto(pos) {
this.x = pos.x;
this.y = pos.y;
this.$el.style.left = this.x * 20 + 'px';
this.$el.style.top = this.y * 20 + 'px';
}
}
class Enemy extends Character{
}
class Target extends Character{
}
/**
* 主游戏类
*/
class Game extends Event{
constructor(cfg) {
super()
//初始化配置
this.cfg = {
rows: 20,
cols: 20,
search_type: 'dfs',
duration: 100
}
Object.assign(this.cfg,cfg);
//初始化数据
this.map = new GridMap('#kingsman-map');
this.player = new Player('#kingsman-player');
this.target = new Target('#kingsman-target');
this.map.create(this.cfg.rows, this.cfg.cols);
this.pathFinder = new PathFinder(this.map,{
search_type: this.cfg.search_type
});
//重置游戏状态
this.reset();
}
/**
* 设置玩家和目标
*/
setPlayerAndTarget() {
let positions = [];
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (this.map.type([i, j]) == 'empty') {
positions.push([i, j]);
}
}
}
let len = positions.length;
if (len < 2) {
throw new Error('map is full');
}
Utils.shuffle(positions);
let player = positions[0];
let target = positions[1];
this.player.setPos(player);
this.target.setPos(target);
}
setEnemy(){
}
/**
* 随机的修建障碍物 //todo 建筑迷宫算法
*/
randBuild() {
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (Math.random() > 0.9) {
this.map.type([i, j], 'wall');
}
}
}
}
/**
* 设置间隔时间
* @param duration
*/
setDuration(duration) {
this.cfg.duration = duration;
this.player.$el.style.transitionDuration = duration + 'ms';
}
/**
* 设置地图尺寸
* @param n
*/
setSize(n) {
this.cfg.rows = this.cfg.cols = n;
this.map.create(this.cfg.cols, this.cfg.rows);
this.randBuild();
}
/**
* 设置寻路算法
* @param search_type
*/
setSearchType(search_type | this.$el.style.left = x * 20 + 'px'; | conditional_block |
app.js | this.$el.getElementsByTagName('td');
}
clear() {
for (let i = 0; i < this.rows; i++) {
for (let j = 0; j < this.cols; j++) {
this.type([i, j], 'empty');
}
}
}
type([x,y], type) {
if (type == void 0) {
return this.$boxes[y * this.cols + x].dataset.type;
} else {
this.$boxes[y * this.cols + x].dataset.type = type;
}
}
}
class PathFinder {
constructor(map,cfg){
this.map = map;
this.rows = map.rows;
this.cols = map.cols;
this.search_type = cfg.search_type;
this.path = [[0,1],[-1,0],[0,-1],[1,0]]
}
setSearchType(type) {
this.search_type = type;
}
/**
* 返回路径
* @param src
* @param dst
* @returns {*|Array}
*/
find_path(src, dst) {
switch (this.search_type) {
case 'dfs':
return this.dfs(src, dst, {});
case 'bfs':
return this.bfs(src, dst, {});
case 'astar':
return this.astar(src, dst,{});
}
}
/**
* 判断坐标是否在地图内
* @param i
* @param j
* @returns {boolean}
*/
isValid(i, j) {
return i >= 0 && i < this.rows && j >= 0 && j < this.cols;
}
/**
* 深度优先搜索
* @param src
* @param dst
* @param visited
* @returns {*}
*/
dfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y};
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true;
var path = this.dfs(next, dst, visited);
if (path) {
path.unshift(next);
return path;
}
}
}
}
}
/**
* 广度优先搜索
*/
bfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
let queue = [];
queue.push(src);
let path = []
while (queue.length > 0) {
src = queue.shift();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y}
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true
queue.push(next);
}
}
}
}
}
/**
* A* 寻路算法
* @param src
* @param dst
* @param visited
* @returns {*[]}
*/
astar(src,dst,visited){
if(src.x == dst.x && src.y == dst.y){
return [dst]
}
let dist = (s1,s2)=>{
return Math.abs(s1.x - s2.x) + Math.abs(s1.y - s2.y);
};
let hashPos = s => s.x + '-' + s.y;
let cmp = (el1,el2) => dist(el1,dst) - dist(el2,dst);
let heap = new MinHeap(cmp);
heap.push(src);
let path = []
while(!heap.empty()){
src = heap.pop();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for(let [i,j] of this.path){
let x = src.x + i;
let y = src.y + j;
let next = {x,y};
if(this.isValid(x,y) && !visited[x + '-' + y] && this.map.type([x,y]) == 'empty'){
visited[x + '-' + y] = true;
heap.push(next);
}
}
}
}
}
class Character{
constructor(selector){
if(typeof selector == 'string'){
this.$el = document.querySelector(selector)
}else if(selector.nodeType){
this.$el = selector
}else{
this.$el = null
}
}
setPos([x,y]){
this.x = x;
this.y = y;
this.$el.style.left = x * 20 + 'px';
this.$el.style.top = y * 20 + 'px';
}
getPos() {
return {x: this.x, y: this.y};
}
}
/**
* 玩家类
*/
class Player extends Character{
constructor(selector) {
super(selector)
}
/**
* 异步移动,实现动画效果
* @param pos
*/
goto(pos) {
this.x = pos.x;
this.y = pos.y;
this.$el.style.left = this.x * 20 + 'px';
this.$el.style.top = this.y * 20 + 'px';
}
}
class Enemy extends Character{
}
class Target extends Character{
}
/**
* 主游戏类
*/
class Game extends Event{
constructor(cfg) {
super()
//初始化配置
this.cfg = {
rows: 20,
cols: 20,
search_type: 'dfs',
duration: 100
}
Object.assign(this.cfg,cfg);
//初始化数据
this.map = new GridMap('#kingsman-map');
this.player = new Player('#kingsman-player');
this.target = new Target('#kingsman-target');
this.map.create(this.cfg.rows, this.cfg.cols);
this.pathFinder = new PathFinder(this.map,{
search_type: this.cfg.search_type
});
//重置游戏状态
this.reset();
}
/**
* 设置玩家和目标
*/
setPlayerAndTarget() {
let positions = [];
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (this.map.type([i, j]) == 'empty') {
positions.push([i, j]);
}
}
}
let len = positions.length;
if (len < 2) {
throw new Error('map is full');
}
Utils.shuffle(positions);
let player = positions[0];
let target = positions[1];
this.player.setPos(player);
this.target.setPos(target);
}
setEnemy(){
}
/**
* 随机的修建障碍物 //todo 建筑迷宫算法
*/
randBuild() {
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (Math.random() > 0.9) {
this.map.type([i, j], 'wall');
}
}
}
}
/**
* 设置间隔时间
* @param duration
*/
setDuration(duration) {
this.cfg.duration = duration;
this.player.$el.style.transitionDuration = duration + 'ms';
}
/**
* 设置地图尺寸
* @param n
*/
setSize(n) {
this.cfg.rows = this.cfg.cols = n;
this.map.create(this.cfg.cols, this.cfg.rows);
this.randBuild();
}
/**
* 设置寻路算法
* @param search_type
*/
setSearchType(search_type) {
this.pathFinder.setSearchType(search_type);
}
move(pos) {
this.player.goto(pos);
}
/**
* 寻路
* @param target
*/
goto(dst) {
//自动寻路
if (dst == void 0) {
dst = this.target.getPos();
}
let player = this.player.getPos();
let path = this.find_path(player, dst);
for (let next of path) {
this.run_async(this.move, | [next])
.catch(err =>{
console.error(err)
})
}
let target = this.target.getPos()
if(dst.x == target.x && dst.y == target.y) {
this.run_async(function(){
this.fire('gameover'); // 游戏结束加载下一关卡
});
}
}
/**
* 寻找路径
* @param src
* @param target
* @returns {*|Array}
*/
find_path(src, target) {
return this.pathFinder.find_path(src, target); | identifier_body | |
app.js | if(type == void 0){
this.listeners = {}
return this
}
if(handler == void 0){
delete this.listeners[type]
return this
}
let handlers = this.listeners[type] || [];
let id = handlers.indexOf(handler);
if(id != -1){
handlers.splice(id,1);
return this;
}
if(handlers.length == 0){
delete this.listeners[type]
}
return this;
}
}
/**
* 游戏地图
*/
class GridMap {
constructor(selector) {
this.$el = document.querySelector(selector);
}
create(rows, cols) {
let html = '';
for (let i = 0; i < rows; i++) {
html += '<tr>';
for (let j = 0; j < cols; j++) {
html += '<td class="map-box" data-type="empty">';
}
html += '</tr>';
}
this.rows = rows;
this.cols = cols;
this.$el.innerHTML = html;
this.$boxes = this.$el.getElementsByTagName('td');
}
clear() {
for (let i = 0; i < this.rows; i++) {
for (let j = 0; j < this.cols; j++) {
this.type([i, j], 'empty');
}
}
}
type([x,y], type) {
if (type == void 0) {
return this.$boxes[y * this.cols + x].dataset.type;
} else {
this.$boxes[y * this.cols + x].dataset.type = type;
}
}
}
class PathFinder {
constructor(map,cfg){
this.map = map;
this.rows = map.rows;
this.cols = map.cols;
this.search_type = cfg.search_type;
this.path = [[0,1],[-1,0],[0,-1],[1,0]]
}
setSearchType(type) {
this.search_type = type;
}
/**
* 返回路径
* @param src
* @param dst
* @returns {*|Array}
*/
find_path(src, dst) {
switch (this.search_type) {
case 'dfs':
return this.dfs(src, dst, {});
case 'bfs':
return this.bfs(src, dst, {});
case 'astar':
return this.astar(src, dst,{});
}
}
/**
* 判断坐标是否在地图内
* @param i
* @param j
* @returns {boolean}
*/
isValid(i, j) {
return i >= 0 && i < this.rows && j >= 0 && j < this.cols;
}
/**
* 深度优先搜索
* @param src
* @param dst
* @param visited
* @returns {*}
*/
dfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y};
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true;
var path = this.dfs(next, dst, visited);
if (path) {
path.unshift(next);
return path;
}
}
}
}
}
/**
* 广度优先搜索
*/
bfs(src, dst, visited) {
if (src.x == dst.x && src.y == dst.y) {
return [dst];
} else {
let queue = [];
queue.push(src);
let path = []
while (queue.length > 0) {
src = queue.shift();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for (let [i,j] of this.path) {
let x = src.x + i;
let y = src.y + j;
let next = {x, y}
if (this.isValid(x, y) && !visited[x + '-' + y] && this.map.type([x, y]) === 'empty') {
visited[x + '-' + y] = true
queue.push(next);
}
}
}
}
}
/**
* A* 寻路算法
* @param src
* @param dst
* @param visited
* @returns {*[]}
*/
astar(src,dst,visited){
if(src.x == dst.x && src.y == dst.y){
return [dst]
}
let dist = (s1,s2)=>{
return Math.abs(s1.x - s2.x) + Math.abs(s1.y - s2.y);
};
let hashPos = s => s.x + '-' + s.y;
let cmp = (el1,el2) => dist(el1,dst) - dist(el2,dst);
let heap = new MinHeap(cmp);
heap.push(src);
let path = []
while(!heap.empty()){
src = heap.pop();
path.push(src);
if(src.x == dst.x && src.y == dst.y){
return path;
}
for(let [i,j] of this.path){
let x = src.x + i;
let y = src.y + j;
let next = {x,y};
if(this.isValid(x,y) && !visited[x + '-' + y] && this.map.type([x,y]) == 'empty'){
visited[x + '-' + y] = true;
heap.push(next);
}
}
}
}
}
class Character{
constructor(selector){
if(typeof selector == 'string'){
this.$el = document.querySelector(selector)
}else if(selector.nodeType){
this.$el = selector
}else{
this.$el = null
}
}
setPos([x,y]){
this.x = x;
this.y = y;
this.$el.style.left = x * 20 + 'px';
this.$el.style.top = y * 20 + 'px';
}
getPos() {
return {x: this.x, y: this.y};
}
}
/**
* 玩家类
*/
class Player extends Character{
constructor(selector) {
super(selector)
}
/**
* 异步移动,实现动画效果
* @param pos
*/
goto(pos) {
this.x = pos.x;
this.y = pos.y;
this.$el.style.left = this.x * 20 + 'px';
this.$el.style.top = this.y * 20 + 'px';
}
}
class Enemy extends Character{
}
class Target extends Character{
}
/**
* 主游戏类
*/
class Game extends Event{
constructor(cfg) {
super()
//初始化配置
this.cfg = {
rows: 20,
cols: 20,
search_type: 'dfs',
duration: 100
}
Object.assign(this.cfg,cfg);
//初始化数据
this.map = new GridMap('#kingsman-map');
this.player = new Player('#kingsman-player');
this.target = new Target('#kingsman-target');
this.map.create(this.cfg.rows, this.cfg.cols);
this.pathFinder = new PathFinder(this.map,{
search_type: this.cfg.search_type
});
//重置游戏状态
this.reset();
}
/**
* 设置玩家和目标
*/
setPlayerAndTarget() {
let positions = [];
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (this.map.type([i, j]) == 'empty') {
positions.push([i, j]);
} | throw new Error('map is full');
}
Utils.shuffle(positions);
let player = positions[0];
let target = positions[1];
this.player.setPos(player);
this.target.setPos(target);
}
setEnemy(){
}
/**
* 随机的修建障碍物 //todo 建筑迷宫算法
*/
randBuild() {
for (let i = 0; i < this.cfg.rows; i++) {
for (let j = 0; j < this.cfg.cols; j++) {
if (Math.random() > 0.9) {
this.map.type([i, j], 'wall');
}
}
}
}
/**
* 设置间隔时间
* @param duration
*/
setDuration(duration) {
this.cfg.duration = duration;
this.player.$el.style.transitionDuration = duration + 'ms';
}
/**
* 设置地图尺寸
* @param n
*/
setSize(n) {
this.cfg.rows = this.cfg.cols = n;
this.map.create(this.cfg.cols, this.cfg.rows);
this.randBuild();
}
/**
* 设置寻路算法
* @param search_type
*/
setSearchType(search_type) | }
}
let len = positions.length;
if (len < 2) { | random_line_split |
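In the bfs method of the rows above, path.push(src) records every dequeued cell, so the returned list is the visit order rather than the actual route from source to destination. A common alternative is to keep a parent map and walk it back once the destination is dequeued; a small Python sketch of that reconstruction on a boolean grid:

from collections import deque

def bfs_path(grid, src, dst):
    """Breadth-first search that returns the actual src -> dst route.

    grid is a list of lists of booleans (True = walkable). Keeping a
    parent map and walking it back yields a path that is shortest in
    number of moves, unlike accumulating every dequeued cell.
    """
    rows, cols = len(grid), len(grid[0])
    parent = {src: None}
    queue = deque([src])
    while queue:
        cur = queue.popleft()
        if cur == dst:
            path = []
            while cur is not None:  # walk parents back to the source
                path.append(cur)
                cur = parent[cur]
            return path[::-1]
        for di, dj in ((0, 1), (-1, 0), (0, -1), (1, 0)):
            nxt = (cur[0] + di, cur[1] + dj)
            if (0 <= nxt[0] < rows and 0 <= nxt[1] < cols
                    and nxt not in parent and grid[nxt[0]][nxt[1]]):
                parent[nxt] = cur
                queue.append(nxt)
    return None  # unreachable

print(bfs_path([[True, True], [False, True]], (0, 0), (1, 1)))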
parse_tweet_data.py | efficient import of other processes.
Inputs: *.csv file
Outputs: *.pkl file in the input directory
'''
# construct absolute path
filepath = os.path.join(path, infile)
# import *.csv file to data frame
df = pd.read_csv(filepath)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save data frame to *.pkl file of same name
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
def convert_comb(files):
'''Takes a list of *.csv file inputs, creates a data frame from each, and combines them into a single data frame.
Inputs: Input *.csv files to be combined into a single data frame
Outputs: A data frame for each input and a single output combined data frame
'''
dfs = list()
for file in files:
ext_path = os.path.join(path, '1_DataFrames')
df = pd.read_pickle(os.path.join(ext_path, file))
dfs.append(df)
new_df = merge_df(dfs)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
new_df.to_pickle(os.path.join(ext_path, outfile))
return new_df
def sort_df(df, field='tweet_time'):
''' Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It
also adds a column called "unique_id_ida" with formatted ID numbers for each tweet.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)
Outputs: *.pkl file in the input directory with "sorted" label sorted, containing additional column "unique_id_ida"
with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)
'''
# turn any time fields into datetime objects
if field == 'tweet_time':
df['tweet_time'] = pd.to_datetime(df.tweet_time)
# sort data frame by field column
df = df.sort_values(by=field)
# generate list of unique ID numbers with 7-digits (leading zeros for smaller numbers)
in_list = list()
for i in range(0, len(df.index)):
i = str(i)
while len(i) != 7:
i = "0" + i
in_list.append(i)
# add column ID numbers to data frame
df['unique_id_ida'] = np.array(in_list)
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
return df
def split_df(df, num=30):
'''Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls
the sort function to sort the data frame by the default (date).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Number of inventories to split into. User's discretion depending on size of the data set.
Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a
way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are
sorted by date with the ranges of dates in each inventory in the file name
(i.e. AA_Twitter10M_090509_130214.csv)
'''
df = sort_df(df)
alphabets = string.ascii_lowercase
a_list = list()
for i in alphabets:
for j in alphabets:
a_list.append(i.upper() + j.upper())
# splits data set into 30 different data frames of equal size, which will each represent an individual inventory.
df_split = np.array_split(df, num)
subpath = os.path.join(path, '2_Inventories')
alpha_index = 0
last_i = len(df_split) - 1
for item in df_split:
df_sub = pd.concat([item.head(1), item.tail(1)])
date_bounds = pd.Series(df_sub['tweet_time'].tolist())
date_bounds_format = (date_bounds.dt.strftime('%Y%m%d')).tolist()
to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds[1].strftime('%Y%m%d')]
if alpha_index == 0:
comb_df = to_file
elif alpha_index == last_i:
comb_df = pd.concat([extra_rows, item], axis=0)
else:
comb_df = pd.concat([extra_rows, to_file], axis=0)
prevdate = str(int(date_bounds_format[1]) - 1)
filename = a_list[alpha_index] + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_format[0][2:] + '_' + prevdate[2:] + '.csv'
print(filename)
filepath = os.path.join(subpath,filename)
comb_df.to_csv(filepath)
extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') == date_bounds[1].strftime('%Y%m%d')]
alpha_index += 1
def get_lang(df, lang):
'''Takes an input data frame and generates a data frame with only a specific language's tweets (user specified).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language code for language of interest
Outputs: Pandas data frame with a subset of tweets from that specific language
'''
lang_df = df.loc[df['tweet_language'] == lang]
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
lang_df.to_pickle(os.path.join(ext_path, outfile))
return lang_df
def strip_formatting(df, lim, lang='allLang'):
'''Takes an input data frame and removes emojis, punctuation, HTML entities like &, links, and handles.
Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-
data frame.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Character limit for parsing after strip functionality implemented
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removing entities
of interest
'''
tweets = df['tweet_text'].to_list()
edit_tweets = list()
include = list()
for tweet in tweets:
strip_tweet = strip_accounts(remove_punctuation(strip_html_entities(strip_links(strip_emoji(tweet)))))
edit_tweets.append(strip_tweet)
if is_length(strip_tweet, lim):
include.append('1')
else:
|
df['stripped_tweet'] = edit_tweets
df['tweet_length'] = df['tweet_text'].str.len()
df['include_topic_model'] = include
df['stripped_tweet_length'] = df['include_topic_model'].str.len()
sub_df = df.loc[df['include_topic_model'] == '1']
outfile = path_split[1] + '_' + path_split[3] + '_sorted_strip_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
sub_df.to_pickle(os.path.join(ext_path, outfile))
return sub_df
def extract_content(df, label='All_Languages'):
'''Takes an input data frame and extracts the individual Tweets and places it in chronological directories
incremented by intervals based on a month.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content
only in the file. Each file is named from the tweet's unique ID, the dataset path components, and the tweet's date and time (YYMMDD_HHMM).
'''
date_bounds = pd.Series(df['tweet_time'].tolist())
date_bounds_ymd = (date_bounds.dt.strftime('%Y%m%d')).tolist()
date_bounds_hms = (date_bounds.dt.strftime('%H%M')).tolist()
content = pd.Series(df['stripped_tweet'].tolist())
unid = pd.Series(df['unique_id_ida'].tolist())
print('Total Files to process: ' + str(len(date_bounds_ymd)))
parentdir = os.path.join(path, label)
os.mkdir(parentdir)
for i in range(0, len(date_bounds_ymd)):
dir = date_bounds_ | include.append('0') | conditional_block |
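The sort_df docstring above describes building 7-digit, zero-padded IDs with an explicit padding loop; the same step can be expressed with a vectorised zfill. A minimal sketch, using made-up sample data and the same column names as the script:

import pandas as pd

# Minimal sketch of the ID-padding step from sort_df above, using a
# vectorised zfill instead of the explicit while loop. The column names
# match the script; the sample data is made up for illustration.
df = pd.DataFrame({
    "tweet_time": pd.to_datetime(["2013-02-14 09:00", "2009-05-09 12:30"]),
    "tweet_text": ["second tweet", "first tweet"],
})
df = df.sort_values(by="tweet_time").reset_index(drop=True)
df["unique_id_ida"] = pd.Series(range(len(df))).astype(str).str.zfill(7)
print(df[["tweet_time", "unique_id_ida"]])
# unique_id_ida comes out as 0000000, 0000001, ... as in the original loop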
parse_tweet_data.py | efficient import of other processes.
Inputs: *.csv file
Outputs: *.pkl file in the input directory
'''
# construct absolute path
filepath = os.path.join(path, infile)
# import *.csv file to data frame
df = pd.read_csv(filepath)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save data frame to *.pkl file of same name
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
def convert_comb(files):
'''Takes a list of *.csv file inputs, creates a data frame from each, and combines them into a single data frame.
Inputs: Input *.csv files to be combined into a single data frame
Outputs: A data frame for each input and a single output combined data frame
'''
dfs = list()
for file in files:
ext_path = os.path.join(path, '1_DataFrames')
df = pd.read_pickle(os.path.join(ext_path, file))
dfs.append(df)
new_df = merge_df(dfs)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
new_df.to_pickle(os.path.join(ext_path, outfile))
return new_df
def sort_df(df, field='tweet_time'):
''' Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It
also adds a column called "unique_id_ida" with formatted ID numbers for each tweet.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)
Outputs: *.pkl file in the input directory with "sorted" label sorted, containing additional column "unique_id_ida"
with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)
'''
# turn any time fields into datetime objects
if field == 'tweet_time':
df['tweet_time'] = pd.to_datetime(df.tweet_time)
# sort data frame by field column
df = df.sort_values(by=field)
# generate list of unique ID numbers with 7-digits (leading zeros for smaller numbers)
in_list = list()
for i in range(0, len(df.index)):
i = str(i)
while len(i) != 7:
i = "0" + i
in_list.append(i)
# add column ID numbers to data frame
df['unique_id_ida'] = np.array(in_list)
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
return df
def | (df, num=30):
'''Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls
the sort function to sort the data frame by the default (date).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Number of inventories to split into. User's discretion depending on size of the data set.
Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a
way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are
sorted by date with the ranges of dates in each inventory in the file name
(i.e. AA_Twitter10M_090509_130214.csv)
'''
df = sort_df(df)
alphabets = string.ascii_lowercase
a_list = list()
for i in alphabets:
for j in alphabets:
a_list.append(i.upper() + j.upper())
# splits data set into 30 different data frames of equal size, which will each represent an individual inventory.
df_split = np.array_split(df, num)
subpath = os.path.join(path, '2_Inventories')
alpha_index = 0
last_i = len(df_split) - 1
for item in df_split:
df_sub = pd.concat([item.head(1), item.tail(1)])
date_bounds = pd.Series(df_sub['tweet_time'].tolist())
date_bounds_format = (date_bounds.dt.strftime('%Y%m%d')).tolist()
to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds[1].strftime('%Y%m%d')]
if alpha_index == 0:
comb_df = to_file
elif alpha_index == last_i:
comb_df = pd.concat([extra_rows, item], axis=0)
else:
comb_df = pd.concat([extra_rows, to_file], axis=0)
prevdate = str(int(date_bounds_format[1]) - 1)
filename = a_list[alpha_index] + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_format[0][2:] + '_' + prevdate[2:] + '.csv'
print(filename)
filepath = os.path.join(subpath,filename)
comb_df.to_csv(filepath)
extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') == date_bounds[1].strftime('%Y%m%d')]
alpha_index += 1
def get_lang(df, lang):
'''Takes an input data frame and generates a data frame with only a specific language's tweets (user specified).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language code for language of interest
Outputs: Pandas data frame with a subset of tweets from that specific language
'''
lang_df = df.loc[df['tweet_language'] == lang]
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
lang_df.to_pickle(os.path.join(ext_path, outfile))
return lang_df
def strip_formatting(df, lim, lang='allLang'):
'''Takes an input data frame and removes emojis, punctuation, HTML entities like &, links, and handles.
Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-
data frame.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Character limit for parsing after strip functionality implemented
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removing entities
of interest
'''
tweets = df['tweet_text'].to_list()
edit_tweets = list()
include = list()
for tweet in tweets:
strip_tweet = strip_accounts(remove_punctuation(strip_html_entities(strip_links(strip_emoji(tweet)))))
edit_tweets.append(strip_tweet)
if is_length(strip_tweet, lim):
include.append('1')
else:
include.append('0')
df['stripped_tweet'] = edit_tweets
df['tweet_length'] = df['tweet_text'].str.len()
df['include_topic_model'] = include
df['stripped_tweet_length'] = df['include_topic_model'].str.len()
sub_df = df.loc[df['include_topic_model'] == '1']
outfile = path_split[1] + '_' + path_split[3] + '_sorted_strip_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
sub_df.to_pickle(os.path.join(ext_path, outfile))
return sub_df
def extract_content(df, label='All_Languages'):
'''Takes an input data frame and extracts the individual Tweets and places it in chronological directories
incremented by intervals based on a month.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content
only in the file. Each file is named from the tweet's unique ID, the dataset path components, and the tweet's date and time (YYMMDD_HHMM).
'''
date_bounds = pd.Series(df['tweet_time'].tolist())
date_bounds_ymd = (date_bounds.dt.strftime('%Y%m%d')).tolist()
date_bounds_hms = (date_bounds.dt.strftime('%H%M')).tolist()
content = pd.Series(df['stripped_tweet'].tolist())
unid = pd.Series(df['unique_id_ida'].tolist())
print('Total Files to process: ' + str(len(date_bounds_ymd)))
parentdir = os.path.join(path, label)
os.mkdir(parentdir)
for i in range(0, len(date_bounds_ymd)):
dir = date_bounds_ | split_df | identifier_name |
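The split_df logic above leans on np.array_split to cut the sorted frame into num nearly equal chunks (leftover rows go to the earlier chunks) before re-balancing the boundaries onto whole days. A small sketch of just that chunking step, with made-up sample data:

import numpy as np
import pandas as pd

# Sketch of the chunking that split_df builds on: np.array_split divides a
# frame into `num` nearly equal pieces, which the script then re-balances so
# inventories break on whole days.
df = pd.DataFrame({"tweet_time": pd.date_range("2009-05-09", periods=10, freq="D"),
                   "tweet_text": [f"tweet {i}" for i in range(10)]})
chunks = np.array_split(df, 3)
for i, chunk in enumerate(chunks):
    print(i, len(chunk), chunk["tweet_time"].min().date(), chunk["tweet_time"].max().date())
# lengths come out 4, 3, 3 for 10 rows split into 3 chunks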
parse_tweet_data.py | efficient import of other processes.
Inputs: *.csv file
Outputs: *.pkl file in the input directory
'''
# construct absolute path
filepath = os.path.join(path, infile)
# import *.csv file to data frame
df = pd.read_csv(filepath)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save data frame to *.pkl file of same name
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
def convert_comb(files):
'''Takes a list of *.csv file inputs, creates a data frame from each, and combines them into a single data frame.
Inputs: Input *.csv files to be combined into a single data frame
Outputs: A data frame for each input and a single output combined data frame
'''
dfs = list()
for file in files:
ext_path = os.path.join(path, '1_DataFrames')
df = pd.read_pickle(os.path.join(ext_path, file))
dfs.append(df)
new_df = merge_df(dfs)
outfile = path_split[1] + '_' + path_split[3] + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
new_df.to_pickle(os.path.join(ext_path, outfile))
return new_df
def sort_df(df, field='tweet_time'):
''' Takes a Pandas data frame of Twitter data and sorts by a specified field to prep for data frame parsing steps. It
also adds a column called "unique_id_ida" with formatted ID numbers for each tweet.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)
Outputs: *.pkl file in the input directory with "sorted" label sorted, containing additional column "unique_id_ida"
with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)
'''
# turn any time fields into datetime objects
if field == 'tweet_time':
df['tweet_time'] = pd.to_datetime(df.tweet_time)
# sort data frame by field column
df = df.sort_values(by=field)
# generate list of unique ID numbers with 7-digits (leading zeros for smaller numbers)
in_list = list()
for i in range(0, len(df.index)):
i = str(i)
while len(i) != 7:
i = "0" + i
in_list.append(i)
# add column ID numbers to data frame
df['unique_id_ida'] = np.array(in_list)
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
return df
def split_df(df, num=30):
'''Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls
the sort function to sort the data frame by the default (date).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Number of inventories to split into. User's discretion depending on size of the data set.
Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a
way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are
sorted by date with the ranges of dates in each inventory in the file name
(i.e. AA_Twitter10M_090509_130214.csv)
'''
df = sort_df(df)
alphabets = string.ascii_lowercase
a_list = list()
for i in alphabets:
for j in alphabets:
a_list.append(i.upper() + j.upper())
# splits data set into 30 different data frames of equal size, which will each represent an individual inventory.
df_split = np.array_split(df, num)
subpath = os.path.join(path, '2_Inventories')
alpha_index = 0
last_i = len(df_split) - 1
for item in df_split:
df_sub = pd.concat([item.head(1), item.tail(1)])
date_bounds = pd.Series(df_sub['tweet_time'].tolist())
date_bounds_format = (date_bounds.dt.strftime('%Y%m%d')).tolist()
to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds[1].strftime('%Y%m%d')]
if alpha_index == 0:
comb_df = to_file
elif alpha_index == last_i:
comb_df = pd.concat([extra_rows, item], axis=0)
else:
comb_df = pd.concat([extra_rows, to_file], axis=0)
prevdate = str(int(date_bounds_format[1]) - 1)
filename = a_list[alpha_index] + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_format[0][2:] + '_' + prevdate[2:] + '.csv'
print(filename)
filepath = os.path.join(subpath,filename)
comb_df.to_csv(filepath)
extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') == date_bounds[1].strftime('%Y%m%d')]
alpha_index += 1
def get_lang(df, lang):
'''Takes an input data frame and generates a data frame with only a specific language's tweets (user specified).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language code for language of interest
Outputs: Pandas data frame with a subset of tweets from that specific language
'''
lang_df = df.loc[df['tweet_language'] == lang]
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
lang_df.to_pickle(os.path.join(ext_path, outfile))
return lang_df
def strip_formatting(df, lim, lang='allLang'):
'''Takes an input data frame and removes emojis, punctuation, HTML entities like &, links, and handles.
Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-
data frame.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Character limit for parsing after strip functionality implemented
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removing entities
of interest
'''
tweets = df['tweet_text'].to_list()
edit_tweets = list()
include = list()
for tweet in tweets:
strip_tweet = strip_accounts(remove_punctuation(strip_html_entities(strip_links(strip_emoji(tweet)))))
edit_tweets.append(strip_tweet)
if is_length(strip_tweet, lim):
include.append('1')
else:
include.append('0')
df['stripped_tweet'] = edit_tweets
df['tweet_length'] = df['tweet_text'].str.len()
df['include_topic_model'] = include | outfile = path_split[1] + '_' + path_split[3] + '_sorted_strip_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
sub_df.to_pickle(os.path.join(ext_path, outfile))
return sub_df
def extract_content(df, label='All_Languages'):
'''Takes an input data frame and extracts the individual Tweets and places it in chronological directories
incremented by intervals based on a month.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content
only in the file. Each file is named from the tweet's unique ID, the dataset path components, and the tweet's date and time (YYMMDD_HHMM).
'''
date_bounds = pd.Series(df['tweet_time'].tolist())
date_bounds_ymd = (date_bounds.dt.strftime('%Y%m%d')).tolist()
date_bounds_hms = (date_bounds.dt.strftime('%H%M')).tolist()
content = pd.Series(df['stripped_tweet'].tolist())
unid = pd.Series(df['unique_id_ida'].tolist())
print('Total Files to process: ' + str(len(date_bounds_ymd)))
parentdir = os.path.join(path, label)
os.mkdir(parentdir)
for i in range(0, len(date_bounds_ymd)):
dir = date_bounds_ymd | df['stripped_tweet_length'] = df['include_topic_model'].str.len()
sub_df = df.loc[df['include_topic_model'] == '1']
| random_line_split |
parse_tweet_data.py | Pandas data frame imported from *.csv or *.pkl file
Field in data frame to be sorted (OPTIONAL: tweet_time, aka sort by date, is the default)
Outputs: *.pkl file in the input directory with "sorted" label sorted, containing additional column "unique_id_ida"
with 7-digit ID numbers beginning with 0000000 (i.e. 1234567, 0000134)
'''
# turn any time fields into datetime objects
if field == 'tweet_time':
df['tweet_time'] = pd.to_datetime(df.tweet_time)
# sort data frame by field column
df = df.sort_values(by=field)
# generate list of unique ID numbers with 7-digits (leading zeros for smaller numbers)
in_list = list()
for i in range(0, len(df.index)):
i = str(i)
while len(i) != 7:
i = "0" + i
in_list.append(i)
# add column ID numbers to data frame
df['unique_id_ida'] = np.array(in_list)
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
df.to_pickle(os.path.join(ext_path, outfile))
return df
def split_df(df, num=30):
'''Takes an input data frame and creates inventories for that data frame, sorted by date. First automatically calls
the sort function to sort the data frame by the default (date).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Number of inventories to split into. User's discretion depending on size of the data set.
Outputs: Directory of inventories containing Tweet content and metadata. These inventories are divided in such a
way to keep them between 130 and 160 MB, labelled with alphabetical characters to order them. They are
sorted by date with the ranges of dates in each inventory in the file name
(i.e. AA_Twitter10M_090509_130214.csv)
'''
df = sort_df(df)
alphabets = string.ascii_lowercase
a_list = list()
for i in alphabets:
for j in alphabets:
a_list.append(i.upper() + j.upper())
# splits data set into 30 different data frames of equal size, which will each represent an individual inventory.
df_split = np.array_split(df, num)
subpath = os.path.join(path, '2_Inventories')
alpha_index = 0
last_i = len(df_split) - 1
for item in df_split:
df_sub = pd.concat([item.head(1), item.tail(1)])
date_bounds = pd.Series(df_sub['tweet_time'].tolist())
date_bounds_format = (date_bounds.dt.strftime('%Y%m%d')).tolist()
to_file = item[item.tweet_time.dt.strftime('%Y%m%d') != date_bounds[1].strftime('%Y%m%d')]
if alpha_index == 0:
comb_df = to_file
elif alpha_index == last_i:
comb_df = pd.concat([extra_rows, item], axis=0)
else:
comb_df = pd.concat([extra_rows, to_file], axis=0)
prevdate = str(int(date_bounds_format[1]) - 1)
filename = a_list[alpha_index] + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_format[0][2:] + '_' + prevdate[2:] + '.csv'
print(filename)
filepath = os.path.join(subpath,filename)
comb_df.to_csv(filepath)
extra_rows = item[item.tweet_time.dt.strftime('%Y%m%d') == date_bounds[1].strftime('%Y%m%d')]
alpha_index += 1
def get_lang(df, lang):
'''Takes an input data frame and generates a data frame with only a specific language's tweets (user specified).
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language code for language of interest
Outputs: Pandas data frame with a subset of tweets from that specific language
'''
lang_df = df.loc[df['tweet_language'] == lang]
outfile = path_split[1] + '_' + path_split[3] + '_sorted_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
lang_df.to_pickle(os.path.join(ext_path, outfile))
return lang_df
def strip_formatting(df, lim, lang='allLang'):
'''Takes an input data frame and removes emojis, punctuation, HTML entities like &, links, and handles.
Then based on a user specified character limit, it removes the tweets that are below that limit and returns the sub-
data frame.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Character limit for parsing after strip functionality implemented
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Pandas data frame with subset of tweets that satisfied the character limit after removing entities
of interest
'''
tweets = df['tweet_text'].to_list()
edit_tweets = list()
include = list()
for tweet in tweets:
strip_tweet = strip_accounts(remove_punctuation(strip_html_entities(strip_links(strip_emoji(tweet)))))
edit_tweets.append(strip_tweet)
if is_length(strip_tweet, lim):
include.append('1')
else:
include.append('0')
df['stripped_tweet'] = edit_tweets
df['tweet_length'] = df['tweet_text'].str.len()
df['include_topic_model'] = include
df['stripped_tweet_length'] = df['include_topic_model'].str.len()
sub_df = df.loc[df['include_topic_model'] == '1']
outfile = path_split[1] + '_' + path_split[3] + '_sorted_strip_' + lang + '_' + today + '.pkl'
# save new data frame under *.pkl file
ext_path = os.path.join(path, '1_DataFrames')
sub_df.to_pickle(os.path.join(ext_path, outfile))
return sub_df
def extract_content(df, label='All_Languages'):
'''Takes an input data frame and extracts the individual Tweets and places it in chronological directories
incremented by intervals based on a month.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Language label user provides for file naming (if it is a data frame describing a particular language)
Outputs: Directories of binned tweets by month. Each tweet is in its own text file with the stripped tweet content
only in the file. Each file is named from the tweet's unique ID, the dataset path components, and the tweet's date and time (YYMMDD_HHMM).
'''
date_bounds = pd.Series(df['tweet_time'].tolist())
date_bounds_ymd = (date_bounds.dt.strftime('%Y%m%d')).tolist()
date_bounds_hms = (date_bounds.dt.strftime('%H%M')).tolist()
content = pd.Series(df['stripped_tweet'].tolist())
unid = pd.Series(df['unique_id_ida'].tolist())
print('Total Files to process: ' + str(len(date_bounds_ymd)))
parentdir = os.path.join(path, label)
os.mkdir(parentdir)
for i in range(0, len(date_bounds_ymd)):
dir = date_bounds_ymd[i][:4] + '-' + date_bounds_ymd[i][4:6]
fulldir = os.path.join(parentdir, dir)
filename = str(unid[i]) + '_' + path_split[1] + '_' + path_split[3] + '_' + date_bounds_ymd[i][2:] + '_' + date_bounds_hms[i][:4] + '.txt'
outpath = os.path.join(fulldir, filename)
if os.path.exists(outpath):
pass
else:
if os.path.isdir(fulldir):
pass
else:
os.mkdir(fulldir)
if int(i) % 10000 == 0:
print('Files up to ' + str(i) + ' processed.')
f = open(outpath, 'w', encoding='utf-8')
f.write(content[i])
f.close()
def generate_freq(df):
| '''Takes an input data frame and generates a histogram of number of tweets binned by month.
Inputs: Pandas data frame imported from *.csv or *.pkl file
Input parameter called "increment", which determined by what time interval the tweets are organized
Outputs: Histogram
'''
date_bounds = pd.Series(df['tweet_time'].tolist())
date_bounds_ym = (date_bounds.dt.strftime('%Y-%m')).tolist()
df['date_md'] = np.array(date_bounds_ym)
sort = df.sort_values(by=['date_md'])
frq = sort['date_md'].value_counts().to_dict()
frq_df = sort['date_md'].value_counts()
od = collections.OrderedDict(sorted(frq.items()))
rf_dates = list()
for item in list(od.keys()): | identifier_body | |
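The generate_freq function above bins tweets by month by formatting tweet_time as YYYY-MM and counting occurrences. A minimal sketch of the same binning with made-up sample data; sorting the index keeps the counts in chronological order:

import pandas as pd

# Sketch of the monthly binning that generate_freq performs: format each
# tweet_time as YYYY-MM, count occurrences, and keep the counts ordered.
df = pd.DataFrame({"tweet_time": pd.to_datetime(
    ["2009-05-09", "2009-05-20", "2009-06-01", "2009-07-15"])})
df["date_md"] = df["tweet_time"].dt.strftime("%Y-%m")
monthly_counts = df["date_md"].value_counts().sort_index()
print(monthly_counts)
# 2009-05    2
# 2009-06    1
# 2009-07    1
# monthly_counts.plot(kind="bar") would give the histogram described above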
09_XGBC_img.py | (data_dir+'\\std0_std45_sh0-arr_sh45-arr_bow-bool_train.npy')
################################################
# ADD COLUMN NAMES AND CONVERT TO PANDAS DF
################################################
c1=['std0','std45']
c2=['sh0_{0}'.format(str(i)) for i in range(64)]
c3=['sh45_{0}'.format(str(i)) for i in range(64)]
c4=['bowties']
column_names=c1+c2+c3+c4
seeking=True
if seeking:
| y_train=train['bowties']
X_train=train.drop(columns='bowties')
y_test=test['bowties']
X_test=test.drop(columns='bowties')
y_val=train_val['bowties']
X_val=train_val.drop(columns='bowties')
pipeline=Pipeline([('Imputer',SimpleImputer(strategy='mean'))])
X_train_trans=pipeline.fit_transform(X_train)
X_val_trans=pipeline.fit_transform(X_val)
X_test_trans=pipeline.fit_transform(X_test)
# =========================================================================
# Convert back to a pandas dataframe because XGBoost and SciPy don't play nice
# =========================================================================
column_names_xgb=['f{}'.format(int(i)) for i in range(130)]
X_train_trans=dp.numpy_to_pd(X_train_trans,column_names_xgb)
X_val_trans=dp.numpy_to_pd(X_val_trans,column_names_xgb)
X_test_trans=dp.numpy_to_pd(X_test_trans,column_names_xgb)
param_grid={ 'gamma':[0.05],
'learning_rate':[0.1],
'max_depth':[7],
'min_child_weight':[1],
'n_estimators':[50],
'n_jobs':[-1],
'objective':['binary:logistic'],
'random_state':[42],
'reg_alpha':[0],
'reg_lambda':[1],
'scale_pos_weight':[1],
'subsample':[1],
'verbosity':[1]
}
xgb_clf=xgboost.XGBClassifier()
grid_search=GridSearchCV(xgb_clf,param_grid=param_grid,cv=5,scoring='f1',verbose=2,n_jobs=-1,iid=True)
grid_search.fit(X_train_trans,y_train)
params=grid_search.best_params_
#clf=xgboost.XGBRFClassifier(**params,n_estimators=100,n_jobs=-1,random_state=42)
clf=xgboost.XGBClassifier(**params)
clf.fit(X_train_trans,y_train,early_stopping_rounds=10,eval_set=[(X_val_trans,y_val)])
y_test=test['bowties']
X_test=test.drop(columns='bowties')
X_test=pipeline.fit_transform(X_test)
y_preds=clf.predict(X_test)
F_CV=grid_search.best_score_
P,R,F=precision_score(y_test,y_preds),recall_score(y_test,y_preds),f1_score(y_test,y_preds)
print(P,R,F,F_CV,params)
#0.8374384236453202 0.8808290155440415 0.8585858585858585 0.8345152519028066 {'_Booster': None, 'base_score': 0.5, 'colsample_bylevel': 1, 'colsample_bynode': 0.8, 'colsample_bytree': 1, 'gamma': 0, 'importance_type': 'gain', 'learning_rate': 0.05, 'max_delta_step': 0, 'max_depth': 7, 'min_child_weight': 5, 'n_estimators': 100, 'n_jobs': 1, 'nthread': None, 'objective': 'binary:logistic', 'random_state': 42, 'reg_alpha': 0, 'reg_lambda': 1, 'scale_pos_weight': 1, 'silent': None, 'subsample': 0.8, 'verbosity': 1}
#0.8366336633663366 0.8756476683937824 0.8556962025316456 0.8352625965436676 {'_Booster': None, 'base_score': 0.5, 'colsample_bylevel': 1, 'colsample_bynode': 0.8, 'colsample_bytree': 1, 'gamma': 0, 'importance_type': 'gain', 'learning_rate': 0.05, 'max_delta_step': 0, 'max_depth': 6, 'min_child_weight': 5, 'n_estimators': 100, 'n_jobs': 1, 'nthread': None, 'objective': 'binary:logistic', 'random_state': 42, 'reg_alpha': 0, 'reg_lambda': 1, 'scale_pos_weight': 1, 'silent': None, 'subsample': 0.8, 'verbosity': 1}
#0.8439024390243902 0.8963730569948186 0.8693467336683416 0.8497798540354795 {'_Booster': None, 'base_score': 0.5, 'colsample_bylevel': 1, 'colsample_bynode': 0.2, 'colsample_bytree': 1, 'gamma': 0, 'importance_type': 'gain', 'learning_rate': 0.05, 'max_delta_step': 0, 'max_depth': 7, 'min_child_weight': 5, 'n_estimators': 100, 'n_jobs': 1, 'nthread': None, 'objective': 'binary:logistic', 'random_state': 42, 'reg_alpha': 0, 'reg_lambda': 1, 'scale_pos_weight': 1, 'silent': None, 'subsample': 0.8, 'verbosity': 1}
# =============================================================================
# Run again with more estimators and early stopping to check for over fitting
# =============================================================================
params['n_estimators']=100
clf=xgboost.XGBClassifier(**params)
eval_set=[(X_train_trans,y_train),(X_val_trans,y_val),(X_test_trans,y_test)]
eval_metric=['error','logloss']
clf.fit(X_train_trans,y_train,eval_metric=eval_metric,eval_set=eval_set,verbose=10)
evals_result=clf.evals_result()
#Errors
train_errors=evals_result['validation_0']['error']
val_errors=evals_result['validation_1']['error']
test_errors=evals_result['validation_2']['error']
#Logloss Errors
train_errors_log=evals_result['validation_0']['logloss']
val_errors_log=evals_result['validation_1']['logloss']
test_errors_log=evals_result['validation_2']['logloss']
N=np.linspace(1,params['n_estimators'],params['n_estimators'])
plt.close('all')
#Plot error
plt.figure(1)
plt.plot(N,val_errors,'b-')
plt.plot(N,train_errors,'r-')
plt.plot(N,test_errors,'g-')
plt.legend(['Validation','Training','Testing'])
plt.xlabel('Number of Estimators')
plt.ylabel('Error')
#Plot logloss error
plt.figure(2)
plt.plot(N,val_errors_log,'b-')
plt.plot(N,train_errors_log,'r-')
plt.plot(N,test_errors_log,'g-')
plt.legend(['Validation','Training','Testing'])
plt.xlabel('Number of Estimators')
plt.ylabel('Logloss Error')
y_preds=clf.predict(X_test)
F_CV=grid_search.best_score_
P,R,F=precision_score(y_test,y_preds),recall_score(y_test,y_preds),f1_score(y_test,y_preds)
print(P,R,F,F_CV,params)
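# =============================================================================
# Hedged sketch: reading the "optimal number of estimators" off the validation
# logloss curve collected above (evals_result layout as produced by XGBoost's
# sklearn API; +1 because boosting rounds are counted from 1 in the plots).
# =============================================================================
best_n_estimators=int(np.argmin(val_errors_log))+1
print('validation logloss is minimised at',best_n_estimators,'estimators')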
# =============================================================================
# Based on the logloss curves the optimal number of estimators is
# between 46 and 58 so we will run it for 70 and use early | X=dp.numpy_to_pd(X_raw,column_names)
# =============================================================================
# SPLIT DATA INTO TEST AND TRAIN BOTH BALANCED
# WITH RESPECT TO (NON)BOWTIES
# =============================================================================
split=StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)
for train_index, test_index in split.split(X,X['bowties']):
train=X.loc[train_index]
test=X.loc[test_index]
# =========================================================================
# Split the training set into training and validation subsets
# =========================================================================
split=StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)
for train_index, test_index in split.split(train,train['bowties']):
train=X.loc[train_index]
train_val=X.loc[test_index]
| conditional_block |
09_XGBC_img.py | (data_dir+'\\std0_std45_sh0-arr_sh45-arr_bow-bool_train.npy')
################################################
# ADD COLUMN NAMES AND CONVERT TO PANDAS DF
################################################
c1=['std0','std45']
c2=['sh0_{0}'.format(str(i)) for i in range(64)]
c3=['sh45_{0}'.format(str(i)) for i in range(64)]
c4=['bowties'] |
seeking=True
if seeking:
X=dp.numpy_to_pd(X_raw,column_names)
# =============================================================================
# SPLIT DATA INTO TEST AND TRAIN BOTH BALANCED
# WITH RESPECT TO (NON)BOWTIES
# =============================================================================
split=StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)
for train_index, test_index in split.split(X,X['bowties']):
train=X.loc[train_index]
test=X.loc[test_index]
# =========================================================================
# Split the training set into training and validation subsets
# =========================================================================
split=StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)
for train_index, test_index in split.split(train,train['bowties']):
train=X.loc[train_index]
train_val=X.loc[test_index]
y_train=train['bowties']
X_train=train.drop(columns='bowties')
y_test=test['bowties']
X_test=test.drop(columns='bowties')
y_val=train_val['bowties']
X_val=train_val.drop(columns='bowties')
pipeline=Pipeline([('Imputer',SimpleImputer(strategy='mean'))])
X_train_trans=pipeline.fit_transform(X_train)
X_val_trans=pipeline.transform(X_val) # reuse the imputer fitted on the training set
X_test_trans=pipeline.transform(X_test) # reuse the imputer fitted on the training set
# =========================================================================
# Convert back to a pandas DataFrame because XGBoost and SciPy arrays don't play nicely together
# =========================================================================
column_names_xgb=['f{}'.format(int(i)) for i in range(130)]
X_train_trans=dp.numpy_to_pd(X_train_trans,column_names_xgb)
X_val_trans=dp.numpy_to_pd(X_val_trans,column_names_xgb)
X_test_trans=dp.numpy_to_pd(X_test_trans,column_names_xgb)
param_grid={ 'gamma':[0.05],
'learning_rate':[0.1],
'max_depth':[7],
'min_child_weight':[1],
'n_estimators':[50],
'n_jobs':[-1],
'objective':['binary:logistic'],
'random_state':[42],
'reg_alpha':[0],
'reg_lambda':[1],
'scale_pos_weight':[1],
'subsample':[1],
'verbosity':[1]
}
xgb_clf=xgboost.XGBClassifier()
grid_search=GridSearchCV(xgb_clf,param_grid=param_grid,cv=5,scoring='f1',verbose=2,n_jobs=-1,iid=True)
grid_search.fit(X_train_trans,y_train)
params=grid_search.best_params_
#clf=xgboost.XGBRFClassifier(**params,n_estimators=100,n_jobs=-1,random_state=42)
clf=xgboost.XGBClassifier(**params)
clf.fit(X_train_trans,y_train,early_stopping_rounds=10,eval_set=[(X_val_trans,y_val)])
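# =============================================================================
# Hedged check: with early_stopping_rounds set, XGBoost's sklearn wrapper
# records the best boosting round and validation score on the fitted model.
# =============================================================================
print('best_iteration:',clf.best_iteration,'best_score:',clf.best_score)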
y_test=test['bowties']
X_test=test.drop(columns='bowties')
X_test=pipeline.transform(X_test) # reuse the imputer fitted on the training set
y_preds=clf.predict(X_test)
F_CV=grid_search.best_score_
P,R,F=precision_score(y_test,y_preds),recall_score(y_test,y_preds),f1_score(y_test,y_preds)
print(P,R,F,F_CV,params)
#0.8374384236453202 0.8808290155440415 0.8585858585858585 0.8345152519028066 {'_Booster': None, 'base_score': 0.5, 'colsample_bylevel': 1, 'colsample_bynode': 0.8, 'colsample_bytree': 1, 'gamma': 0, 'importance_type': 'gain', 'learning_rate': 0.05, 'max_delta_step': 0, 'max_depth': 7, 'min_child_weight': 5, 'n_estimators': 100, 'n_jobs': 1, 'nthread': None, 'objective': 'binary:logistic', 'random_state': 42, 'reg_alpha': 0, 'reg_lambda': 1, 'scale_pos_weight': 1, 'silent': None, 'subsample': 0.8, 'verbosity': 1}
#0.8366336633663366 0.8756476683937824 0.8556962025316456 0.8352625965436676 {'_Booster': None, 'base_score': 0.5, 'colsample_bylevel': 1, 'colsample_bynode': 0.8, 'colsample_bytree': 1, 'gamma': 0, 'importance_type': 'gain', 'learning_rate': 0.05, 'max_delta_step': 0, 'max_depth': 6, 'min_child_weight': 5, 'n_estimators': 100, 'n_jobs': 1, 'nthread': None, 'objective': 'binary:logistic', 'random_state': 42, 'reg_alpha': 0, 'reg_lambda': 1, 'scale_pos_weight': 1, 'silent': None, 'subsample': 0.8, 'verbosity': 1}
#0.8439024390243902 0.8963730569948186 0.8693467336683416 0.8497798540354795 {'_Booster': None, 'base_score': 0.5, 'colsample_bylevel': 1, 'colsample_bynode': 0.2, 'colsample_bytree': 1, 'gamma': 0, 'importance_type': 'gain', 'learning_rate': 0.05, 'max_delta_step': 0, 'max_depth': 7, 'min_child_weight': 5, 'n_estimators': 100, 'n_jobs': 1, 'nthread': None, 'objective': 'binary:logistic', 'random_state': 42, 'reg_alpha': 0, 'reg_lambda': 1, 'scale_pos_weight': 1, 'silent': None, 'subsample': 0.8, 'verbosity': 1}
# =============================================================================
# Run again with more estimators and early stopping to check for overfitting
# =============================================================================
params['n_estimators']=100
clf=xgboost.XGBClassifier(**params)
eval_set=[(X_train_trans,y_train),(X_val_trans,y_val),(X_test_trans,y_test)]
eval_metric=['error','logloss']
clf.fit(X_train_trans,y_train,eval_metric=eval_metric,eval_set=eval_set,verbose=10)
evals_result=clf.evals_result()
#Errors
train_errors=evals_result['validation_0']['error']
val_errors=evals_result['validation_1']['error']
test_errors=evals_result['validation_2']['error']
#Logloss Errors
train_errors_log=evals_result['validation_0']['logloss']
val_errors_log=evals_result['validation_1']['logloss']
test_errors_log=evals_result['validation_2']['logloss']
N=np.linspace(1,params['n_estimators'],params['n_estimators'])
plt.close('all')
#Plot error
plt.figure(1)
plt.plot(N,val_errors,'b-')
plt.plot(N,train_errors,'r-')
plt.plot(N,test_errors,'g-')
plt.legend(['Validation','Training','Testing'])
plt.xlabel('Number of Estimators')
plt.ylabel('Error')
#Plot logloss error
plt.figure(2)
plt.plot(N,val_errors_log,'b-')
plt.plot(N,train_errors_log,'r-')
plt.plot(N,test_errors_log,'g-')
plt.legend(['Validation','Training','Testing'])
plt.xlabel('Number of Estimators')
plt.ylabel('Logloss Error')
y_preds=clf.predict(X_test)
F_CV=grid_search.best_score_
P,R,F=precision_score(y_test,y_preds),recall_score(y_test,y_preds),f1_score(y_test,y_preds)
print(P,R,F,F_CV,params)
# =============================================================================
# Based on the logloss curves the optimal number of estimators is
# between 46 and 58 so we will run it for 70 and use | column_names=c1+c2+c3+c4
| random_line_split |
data_tensorboard.py | used in code after central_crop, which squares the image, the
ratio will be 1. We then take a random value from the segment [1-delta, 1+delta]
"""
def random_ratio_resize(img, prob=0.3, delta=0.1):
if np.random.rand() >= prob: # if the draw is bigger than prob, do nothing
return img
ratio = img.shape[0] / img.shape[1] # in our case 1
ratio = np.random.uniform(max(ratio - delta, 0.01), ratio + delta) # random value from [1-delta, 1+delta]; if delta
# changes, this prevents the left end of the segment from becoming non-positive
if ratio * img.shape[1] <= img.shape[1]:
size = (int(img.shape[1] * ratio), img.shape[1]) # e.g. shape (474, 480) after this operation
else:
size = (img.shape[0], int(img.shape[0] / ratio)) # e.g. shape (480, 472) after this operation
dh = img.shape[0] - size[1] # either zero or (480 minus a number smaller than 480)
top, bot = dh // 2, dh - dh // 2 # either zeros or two values that sum to dh, e.g. dh = 9 gives top = 4, bot = 5
dw = img.shape[1] - size[0] # similar to the above
left, right = dw // 2, dw - dw // 2
if size[0] > 480 or size[1] > 480: # should not happen because one of the coordinates should always be 480
print(img.shape, size, ratio)
img = cv2.resize(img, size) # resize image
img = cv2.copyMakeBorder(img, top, bot, left, right, cv2.BORDER_CONSTANT,
(0, 0, 0)) # this call pads the image back to shape (480, 480, 3); however, it adds a black frame
# around the image
if img.shape[0] != 480 or img.shape[1] != 480: # should not happen, since the output shape after copyMakeBorder is supposed to be
# (480, 480, 3)
raise ValueError(img.shape, size) # in case of error raise exception
return img
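# Hedged usage sketch (illustrative only, not part of the original pipeline):
# random_ratio_resize expects a 480x480 BGR image; prob=1.0 forces the resize
# branch, and the result is padded back to 480x480. _demo_img is an assumed name.
_demo_img = np.zeros((480, 480, 3), dtype=np.uint8)
assert random_ratio_resize(_demo_img, prob=1.0).shape == (480, 480, 3)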
_augmentation_transform = ImageDataGenerator(
featurewise_center=False, # Boolean. Set input mean to 0 over the dataset, feature-wise.
featurewise_std_normalization=False, # Boolean. Divide inputs by std of the dataset, feature-wise.
rotation_range=10, # Int. Degree range for random rotations.
width_shift_range=0.1, # Float, 1-D array-like or int
# float: fraction of total width, if < 1, or pixels if >= 1.
# 1-D array-like: random elements from the array.
# int: integer number of pixels from interval (-width_shift_range, +width_shift_range)
# With width_shift_range=2 possible values are integers [-1, 0, +1], same as
# with width_shift_range=[-1, 0, +1], while with width_shift_range=1.0 possible values are
# floats in the interval [-1.0, +1.0).
height_shift_range=0.1, # Float, 1-D array-like or int
# float: fraction of total height, if < 1, or pixels if >= 1.
# 1-D array-like: random elements from the array.
# int: integer number of pixels from interval (-height_shift_range, +height_shift_range)
#With height_shift_range=2 possible values are integers [-1, 0, +1], same as
# with height_shift_range=[-1, 0, +1], while with height_shift_range=1.0 possible values are
# floats in the interval [-1.0, +1.0).
horizontal_flip=True, # Boolean. Randomly flip inputs horizontally.
brightness_range=(0.9, 1.1), # Tuple or list of two floats. Range for picking a brightness shift value from.
zoom_range=(0.85, 1.15), # Float or [lower, upper]. Range for random zoom. If a float, [lower, upper] = [1-zoom_range, 1+zoom_range].
fill_mode='constant', # One of {"constant", "nearest", "reflect" or "wrap"}. Default is 'nearest'. Points outside
# the boundaries of the input are filled according to the given mode:
#'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
#'nearest': aaaaaaaa|abcd|dddddddd
#'reflect': abcddcba|abcd|dcbaabcd
#'wrap': abcdabcd|abcd|abcdabcd
cval=0., # Float or Int. Value used for points outside the boundaries when fill_mode = "constant".
)
"""
apply_augmentation takes as input img which is an array of shape (x, y, 3)
"""
def apply_augmentation(img):
img = random_ratio_resize(img) # defined above
img = _augmentation_transform.random_transform(img) # Applies a random transformation to an image.
return img
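# Hedged usage sketch: apply_augmentation composes the aspect-ratio jitter above
# with a random ImageDataGenerator transform; any 480x480x3 array will do here
# (assumes the usual Keras image backend, e.g. Pillow, is installed).
_aug_demo = apply_augmentation(np.zeros((480, 480, 3), dtype=np.uint8))
print('augmented sample shape:', _aug_demo.shape)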
"""
_process_csv_file takes as input a file; in our case these are train_split.txt and test_split.txt (names may differ)
"""
def _process_csv_file(file):
with open(file, 'r') as fr: # open file
files = fr.readlines() # read lines
return files
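# Hedged example of the expected split-file line format: whitespace-separated
# fields whose third entry is the class label, matching the l.split()[2] lookup
# in BalanceCovidDataset below (the real files may carry extra columns).
_example_line = 'patient123 patient123_xray.png COVID-19\n'
print(_example_line.split()[2]) # -> COVID-19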
class BalanceCovidDataset(keras.utils.Sequence):
'Generates data for Keras'
def __init__(
self,
data_dir,
csv_file,
is_training=True,
batch_size=8,
input_shape=(224, 224), # here the default shape is (224, 224) because these values were used by earlier models;
# in another file we set it to (480, 480)
n_classes=3, # normal, pneumonia, COVID-19
num_channels=3, # depth of the image. Although the images are greyscale we keep three channels (with the possibility of dropping this)
mapping={
'normal': 0,
'pneumonia': 1,
'COVID-19': 2
},
shuffle=True,
augmentation=apply_augmentation,
covid_percent=0.3,
class_weights=[1., 1., 6.], # default class weights. The least numerous class is weighted more heavily than the others,
# in this case COVID-19
top_percent=0.08 # here set to 0.08, though in the functions above it was set to 0.15
):
'Initialization' # setting values in the constructor
self.datadir = data_dir
self.dataset = _process_csv_file(csv_file)
self.is_training = is_training
self.batch_size = batch_size
self.N = len(self.dataset)
self.input_shape = input_shape
self.n_classes = n_classes
self.num_channels = num_channels
self.mapping = mapping
self.shuffle = True
self.covid_percent = covid_percent
self.class_weights = class_weights
self.n = 0
self.augmentation = augmentation
self.top_percent = top_percent
datasets = {'normal': [], 'pneumonia': [], 'COVID-19': []} #dictionary for classes
for l in self.dataset: # iterate over the dataset
datasets[l.split()[2]].append(l) # the third whitespace-separated field is the class name, e.g. l.split()[2] == 'normal';
# append the whole line to the dictionary.
self.datasets = [
datasets['normal'] + datasets['pneumonia'],
datasets['COVID-19'],
] # set datasets to a list of lists where the first is the concatenation of the 'normal' and 'pneumonia' lists, and the
# second is COVID-19
print(len(self.datasets[0]), len(self.datasets[1]))
self.on_epoch_end() # is triggered once at the very beginning as well as at the end of each epoch.
# If the shuffle parameter is set to True, we will get a new order of exploration at
# each pass (or just keep a linear exploration scheme otherwise).
def __next__(self): # method that needs to be implemented for a Keras generator
# Get one batch of data
batch_x, batch_y, weights = self.__getitem__(self.n) # if the batch index is less than the number of batches we call the
# __getitem__ method
# Batch index
self.n += 1
# If we have processed the entire dataset then
if self.n >= self.__len__(): # it means that we have fed all of our training examples in this epoch
| self.on_epoch_end() # shuffle the training set
self.n = 0 # set to zero | conditional_block | |
data_tensorboard.py | (img, percent=0.15):
offset = int(img.shape[0] * percent) #cut the top portion of image
return img[offset:] # return image
"""
central_crop takes as an input img which is an array of shape (x, y, 3)
"""
def central_crop(img):
size = min(img.shape[0], img.shape[1]) # min of height and width
offset_h = int((img.shape[0] - size) / 2) # vertical offset
offset_w = int((img.shape[1] - size) / 2) # horizontal offset
return img[offset_h:offset_h + size, offset_w:offset_w + size] # return a centred square crop
"""
process_image_file takes as input the path of the photo, for example data/train/1-s2.0-S0929664620300449-gr2_lrg-a.jpg,
top_percent, e.g. top_percent = 0.08, and the size of one axis of the image; in our case this will be 480
"""
def process_image_file(filepath, top_percent, size):
img = cv2.imread(filepath) # load image as array of shape (x, y , 3)
img = crop_top(img, percent=top_percent) # use the function defined above
img = central_crop(img) # use the function defined above
img = cv2.resize(img, (size, size)) # resize the image from (min(x, y), min(x, y)) to (480, 480); note that it keeps
# its 3 channels, i.e. shape (480, 480, 3)
return img
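# Hedged usage sketch of the preprocessing chain above; the path is purely
# illustrative and is only used if it actually exists on disk.
import os
_example_path = 'data/train/example_xray.jpg' # assumed name, not shipped here
if os.path.exists(_example_path):
    _img = process_image_file(_example_path, top_percent=0.08, size=480)
    print('preprocessed shape:', _img.shape) # expected (480, 480, 3)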
"""
random_ratio_resize takes as input an image array, prob (the probability of applying the resize; if the random draw is bigger than
prob we do nothing) and delta, set by default to 0.1. As this function is used in code after central_crop, which squares the image, the
ratio will be 1. We then take a random value from the segment [1-delta, 1+delta]
"""
def random_ratio_resize(img, prob=0.3, delta=0.1):
if np.random.rand() >= prob: # if the draw is bigger than prob, do nothing
return img
ratio = img.shape[0] / img.shape[1] # in our case 1
ratio = np.random.uniform(max(ratio - delta, 0.01), ratio + delta) # random value from [1-delta, 1+delta]; if delta
# changes, this prevents the left end of the segment from becoming non-positive
if ratio * img.shape[1] <= img.shape[1]:
size = (int(img.shape[1] * ratio), img.shape[1]) # e.g. shape (474, 480) after this operation
else:
size = (img.shape[0], int(img.shape[0] / ratio)) # e.g. shape (480, 472) after this operation
dh = img.shape[0] - size[1] # either zero or (480 minus a number smaller than 480)
top, bot = dh // 2, dh - dh // 2 # either zeros or two values that sum to dh, e.g. dh = 9 gives top = 4, bot = 5
dw = img.shape[1] - size[0] # similar to the above
left, right = dw // 2, dw - dw // 2
if size[0] > 480 or size[1] > 480: # should not happen because one of the coordinates should always be 480
print(img.shape, size, ratio)
img = cv2.resize(img, size) # resize image
img = cv2.copyMakeBorder(img, top, bot, left, right, cv2.BORDER_CONSTANT,
(0, 0, 0)) # this call pads the image back to shape (480, 480, 3); however, it adds a black frame
# around the image
if img.shape[0] != 480 or img.shape[1] != 480: # should not happen, since the output shape after copyMakeBorder is supposed to be
# (480, 480, 3)
raise ValueError(img.shape, size) # in case of error raise exception
return img
_augmentation_transform = ImageDataGenerator(
featurewise_center=False, # Boolean. Set input mean to 0 over the dataset, feature-wise.
featurewise_std_normalization=False, # Boolean. Divide inputs by std of the dataset, feature-wise.
rotation_range=10, # Int. Degree range for random rotations.
width_shift_range=0.1, # Float, 1-D array-like or int
# float: fraction of total width, if < 1, or pixels if >= 1.
# 1-D array-like: random elements from the array.
# int: integer number of pixels from interval (-width_shift_range, +width_shift_range)
# With width_shift_range=2 possible values are integers [-1, 0, +1], same as
# with width_shift_range=[-1, 0, +1], while with width_shift_range=1.0 possible values are
# floats in the interval [-1.0, +1.0).
height_shift_range=0.1, # Float, 1-D array-like or int
# float: fraction of total height, if < 1, or pixels if >= 1.
# 1-D array-like: random elements from the array.
# int: integer number of pixels from interval (-height_shift_range, +height_shift_range)
#With height_shift_range=2 possible values are integers [-1, 0, +1], same as
# with height_shift_range=[-1, 0, +1], while with height_shift_range=1.0 possible values are
# floats in the interval [-1.0, +1.0).
horizontal_flip=True, # Boolean. Randomly flip inputs horizontally.
brightness_range=(0.9, 1.1), # Tuple or list of two floats. Range for picking a brightness shift value from.
zoom_range=(0.85, 1.15), # Float or [lower, upper]. Range for random zoom. If a float, [lower, upper] = [1-zoom_range, 1+zoom_range].
fill_mode='constant', # One of {"constant", "nearest", "reflect" or "wrap"}. Default is 'nearest'. Points outside
# the boundaries of the input are filled according to the given mode:
#'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
#'nearest': aaaaaaaa|abcd|dddddddd
#'reflect': abcddcba|abcd|dcbaabcd
#'wrap': abcdabcd|abcd|abcdabcd
cval=0., # Float or Int. Value used for points outside the boundaries when fill_mode = "constant".
)
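# Hedged sanity-check sketch for the generator settings above: draw a few random
# transforms of one dummy image and confirm the shape is preserved (illustrative
# only; not part of the training code).
_probe = np.random.randint(0, 256, size=(480, 480, 3)).astype('float32')
_previews = [_augmentation_transform.random_transform(_probe) for _ in range(4)]
print([p.shape for p in _previews])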
"""
apply_augmentation takes as input img which is an array of shape (x, y, 3)
"""
def apply_augmentation(img):
img = random_ratio_resize(img) # defined above
img = _augmentation_transform.random_transform(img) # Applies a random transformation to an image.
return img
"""
_process_csv_file takes as input a file; in our case these are train_split.txt and test_split.txt (names may differ)
"""
def _process_csv_file(file):
with open(file, 'r') as fr: # open file
files = fr.readlines() # read lines
return files
class BalanceCovidDataset(keras.utils.Sequence):
'Generates data for Keras'
def __init__(
self,
data_dir,
csv_file,
is_training=True,
batch_size=8,
input_shape=(224, 224), # here the default shape is (224, 224) because these values were used by earlier models;
# in another file we set it to (480, 480)
n_classes=3, # normal, pneumonia, COVID-19
num_channels=3, # depth of the image. Although the images are greyscale we keep three channels (with the possibility of dropping this)
mapping={
'normal': 0,
'pneumonia': 1,
'COVID-19': 2
},
shuffle=True,
augmentation=apply_augmentation,
covid_percent=0.3,
class_weights=[1., 1., 6.], # default class weights. The least numerous class is weighted more heavily than the others,
# in this case COVID-19
top_percent=0.08 # here set to 0.08, though in the functions above it was set to 0.15
):
'Initialization' # setting values in the constructor
self.datadir = data_dir
self.dataset = _process_csv_file(csv_file)
self.is_training = is_training
self.batch_size = batch_size
self.N = len(self.dataset)
self.input_shape = input_shape
self.n_classes = n_classes
self.num_channels = | crop_top | identifier_name | |
data_tensorboard.py | As this function is used in code after central_crop, which squares the image, the
ratio will be 1. We then take a random value from the segment [1-delta, 1+delta]
"""
def random_ratio_resize(img, prob=0.3, delta=0.1):
if np.random.rand() >= prob: # if the draw is bigger than prob, do nothing
return img
ratio = img.shape[0] / img.shape[1] # in our case 1
ratio = np.random.uniform(max(ratio - delta, 0.01), ratio + delta) # random value from [1-delta, 1+delta]; if delta
# changes, this prevents the left end of the segment from becoming non-positive
if ratio * img.shape[1] <= img.shape[1]:
size = (int(img.shape[1] * ratio), img.shape[1]) # e.g. shape (474, 480) after this operation
else:
size = (img.shape[0], int(img.shape[0] / ratio)) # e.g. shape (480, 472) after this operation
dh = img.shape[0] - size[1] # either zero or (480 minus a number smaller than 480)
top, bot = dh // 2, dh - dh // 2 # either zeros or two values that sum to dh, e.g. dh = 9 gives top = 4, bot = 5
dw = img.shape[1] - size[0] # similar to the above
left, right = dw // 2, dw - dw // 2
if size[0] > 480 or size[1] > 480: # should not happen because one of the coordinates should always be 480
print(img.shape, size, ratio)
img = cv2.resize(img, size) # resize image
img = cv2.copyMakeBorder(img, top, bot, left, right, cv2.BORDER_CONSTANT,
(0, 0, 0)) # this call pads the image back to shape (480, 480, 3); however, it adds a black frame
# around the image
if img.shape[0] != 480 or img.shape[1] != 480: # should not happen, since the output shape after copyMakeBorder is supposed to be
# (480, 480, 3)
raise ValueError(img.shape, size) # in case of error raise exception
return img
_augmentation_transform = ImageDataGenerator(
featurewise_center=False, # Boolean. Set input mean to 0 over the dataset, feature-wise.
featurewise_std_normalization=False, # Boolean. Divide inputs by std of the dataset, feature-wise.
rotation_range=10, # Int. Degree range for random rotations.
width_shift_range=0.1, # Float, 1-D array-like or int
# float: fraction of total width, if < 1, or pixels if >= 1.
# 1-D array-like: random elements from the array.
# int: integer number of pixels from interval (-width_shift_range, +width_shift_range)
# With width_shift_range=2 possible values are integers [-1, 0, +1], same as
# with width_shift_range=[-1, 0, +1], while with width_shift_range=1.0 possible values are
# floats in the interval [-1.0, +1.0).
height_shift_range=0.1, # Float, 1-D array-like or int
# float: fraction of total height, if < 1, or pixels if >= 1.
# 1-D array-like: random elements from the array.
# int: integer number of pixels from interval (-height_shift_range, +height_shift_range)
#With height_shift_range=2 possible values are integers [-1, 0, +1], same as
# with height_shift_range=[-1, 0, +1], while with height_shift_range=1.0 possible values are
# floats in the interval [-1.0, +1.0).
horizontal_flip=True, # Boolean. Randomly flip inputs horizontally.
brightness_range=(0.9, 1.1), # Tuple or list of two floats. Range for picking a brightness shift value from.
zoom_range=(0.85, 1.15), # Float or [lower, upper]. Range for random zoom. If a float, [lower, upper] = [1-zoom_range, 1+zoom_range].
fill_mode='constant', # One of {"constant", "nearest", "reflect" or "wrap"}. Default is 'nearest'. Points outside
# the boundaries of the input are filled according to the given mode:
#'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
#'nearest': aaaaaaaa|abcd|dddddddd
#'reflect': abcddcba|abcd|dcbaabcd
#'wrap': abcdabcd|abcd|abcdabcd
cval=0., # Float or Int. Value used for points outside the boundaries when fill_mode = "constant".
)
"""
apply_augmentation takes as input img which is an array of shape (x, y, 3)
"""
def apply_augmentation(img):
|
"""
_process_csv_file takes as input a file; in our case these are train_split.txt and test_split.txt (names may differ)
"""
def _process_csv_file(file):
with open(file, 'r') as fr: # open file
files = fr.readlines() # read lines
return files
class BalanceCovidDataset(keras.utils.Sequence):
'Generates data for Keras'
def __init__(
self,
data_dir,
csv_file,
is_training=True,
batch_size=8,
input_shape=(224, 224), # here the default shape is (224, 224) because these values were used by earlier models;
# in another file we set it to (480, 480)
n_classes=3, # normal, pneumonia, COVID-19
num_channels=3, # depth of the image. Although the images are greyscale we keep three channels (with the possibility of dropping this)
mapping={
'normal': 0,
'pneumonia': 1,
'COVID-19': 2
},
shuffle=True,
augmentation=apply_augmentation,
covid_percent=0.3,
class_weights=[1., 1., 6.], # default class weights. The least numerous class is weighted more heavily than the others,
# in this case COVID-19
top_percent=0.08 # here set to 0.08, though in the functions above it was set to 0.15
):
'Initialization' # setting values in the constructor
self.datadir = data_dir
self.dataset = _process_csv_file(csv_file)
self.is_training = is_training
self.batch_size = batch_size
self.N = len(self.dataset)
self.input_shape = input_shape
self.n_classes = n_classes
self.num_channels = num_channels
self.mapping = mapping
self.shuffle = True
self.covid_percent = covid_percent
self.class_weights = class_weights
self.n = 0
self.augmentation = augmentation
self.top_percent = top_percent
datasets = {'normal': [], 'pneumonia': [], 'COVID-19': []} #dictionary for classes
for l in self.dataset: # iterate over the dataset
datasets[l.split()[2]].append(l) # the third whitespace-separated field is the class name, e.g. l.split()[2] == 'normal';
# append the whole line to the dictionary.
self.datasets = [
datasets['normal'] + datasets['pneumonia'],
datasets['COVID-19'],
] # set datasets to a list of lists where the first is the concatenation of the 'normal' and 'pneumonia' lists, and the
# second is COVID-19
print(len(self.datasets[0]), len(self.datasets[1]))
self.on_epoch_end() # is triggered once at the very beginning as well as at the end of each epoch.
# If the shuffle parameter is set to True, we will get a new order of exploration at
# each pass (or just keep a linear exploration scheme otherwise).
def __next__(self): # method that needs to be implemented for a Keras generator
# Get one batch of data
batch_x, batch_y, weights = self.__getitem__(self.n) # if the batch index is less than the number of batches we call the
# __getitem__ method
# Batch index
self.n += 1
# If we have processed the entire dataset then
if self.n >= self.__len__(): # it means that we have fed all of our training examples in this epoch
self.on_epoch_end() # shuffle the training set
self.n = 0 | img = random_ratio_resize(img) # defined above
img = _augmentation_transform.random_transform(img) # Applies a random transformation to an image.
return img | identifier_body |
data_tensorboard.py | As this function is used in code after central_crop, which squares the image, the
ratio will be 1. We then take a random value from the segment [1-delta, 1+delta]
"""
def random_ratio_resize(img, prob=0.3, delta=0.1):
if np.random.rand() >= prob: # if the draw is bigger than prob, do nothing
return img
ratio = img.shape[0] / img.shape[1] # in our case 1
ratio = np.random.uniform(max(ratio - delta, 0.01), ratio + delta) # random value from [1-delta, 1+delta]; if delta
# changes, this prevents the left end of the segment from becoming non-positive
if ratio * img.shape[1] <= img.shape[1]:
size = (int(img.shape[1] * ratio), img.shape[1]) # e.g. shape (474, 480) after this operation
else:
size = (img.shape[0], int(img.shape[0] / ratio)) # e.g. shape (480, 472) after this operation
dh = img.shape[0] - size[1] # either zero or (480 minus a number smaller than 480)
top, bot = dh // 2, dh - dh // 2 # either zeros or two values that sum to dh, e.g. dh = 9 gives top = 4, bot = 5
dw = img.shape[1] - size[0] # similar to the above
left, right = dw // 2, dw - dw // 2
if size[0] > 480 or size[1] > 480: # should not happen because one of the coordinates should always be 480
print(img.shape, size, ratio)
img = cv2.resize(img, size) # resize image
img = cv2.copyMakeBorder(img, top, bot, left, right, cv2.BORDER_CONSTANT,
(0, 0, 0)) # this call pads the image back to shape (480, 480, 3); however, it adds a black frame
# around the image
if img.shape[0] != 480 or img.shape[1] != 480: # should not happen, since the output shape after copyMakeBorder is supposed to be
# (480, 480, 3)
raise ValueError(img.shape, size) # in case of error raise exception
return img
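# Hedged worked example of the padding arithmetic above: if the resize step
# returned size = (472, 480), then dh = 0 and dw = 8, so copyMakeBorder pads
# 4 pixels on the left and 4 on the right to restore the 480x480 frame.
_dw_example = 480 - 472
print(_dw_example // 2, _dw_example - _dw_example // 2) # -> 4 4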
_augmentation_transform = ImageDataGenerator(
featurewise_center=False, # Boolean. Set input mean to 0 over the dataset, feature-wise.
featurewise_std_normalization=False, # Boolean. Divide inputs by std of the dataset, feature-wise.
rotation_range=10, # Int. Degree range for random rotations.
width_shift_range=0.1, # Float, 1-D array-like or int
# float: fraction of total width, if < 1, or pixels if >= 1.
# 1-D array-like: random elements from the array.
# int: integer number of pixels from interval (-width_shift_range, +width_shift_range)
# With width_shift_range=2 possible values are integers [-1, 0, +1], same as
# with width_shift_range=[-1, 0, +1], while with width_shift_range=1.0 possible values are
# floats in the interval [-1.0, +1.0).
height_shift_range=0.1, # Float, 1-D array-like or int
# float: fraction of total height, if < 1, or pixels if >= 1.
# 1-D array-like: random elements from the array.
# int: integer number of pixels from interval (-height_shift_range, +height_shift_range)
#With height_shift_range=2 possible values are integers [-1, 0, +1], same as
# with height_shift_range=[-1, 0, +1], while with height_shift_range=1.0 possible values are
# floats in the interval [-1.0, +1.0).
horizontal_flip=True, # Boolean. Randomly flip inputs horizontally.
brightness_range=(0.9, 1.1), # Tuple or list of two floats. Range for picking a brightness shift value from.
zoom_range=(0.85, 1.15), # Float or [lower, upper]. Range for random zoom. If a float, [lower, upper] = [1-zoom_range, 1+zoom_range].
fill_mode='constant', # One of {"constant", "nearest", "reflect" or "wrap"}. Default is 'nearest'. Points outside
# the boundaries of the input are filled according to the given mode:
#'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
#'nearest': aaaaaaaa|abcd|dddddddd
#'reflect': abcddcba|abcd|dcbaabcd
#'wrap': abcdabcd|abcd|abcdabcd
cval=0., # Float or Int. Value used for points outside the boundaries when fill_mode = "constant".
)
"""
apply_augmentation takes as input img which is an array of shape (x, y, 3)
"""
def apply_augmentation(img):
img = random_ratio_resize(img) # defined above
img = _augmentation_transform.random_transform(img) # Applies a random transformation to an image.
return img
"""
_process_csv_file takes as input a file; in our case these are train_split.txt and test_split.txt (names may differ)
"""
def _process_csv_file(file):
with open(file, 'r') as fr: # open file
files = fr.readlines() # read lines
return files
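# Hedged usage sketch for the generator defined below; the directory and file
# names are illustrative, and the keyword values simply mirror the documented
# defaults, so this is left commented out rather than executed here.
# generator = BalanceCovidDataset(data_dir='data', csv_file='train_split.txt',
#                                 batch_size=8, input_shape=(480, 480),
#                                 covid_percent=0.3, top_percent=0.08)
# batch_x, batch_y, weights = next(generator)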
class BalanceCovidDataset(keras.utils.Sequence):
'Generates data for Keras'
def __init__(
self,
data_dir,
csv_file,
is_training=True,
batch_size=8,
input_shape=(224, 224), # here the default shape is (224, 224) because these values were used by earlier models;
# in another file we set it to (480, 480)
n_classes=3, # normal, pneumonia, COVID-19
num_channels=3, # depth of image. Although the images are grey we keep this chanel (with possibility to delete this) | },
shuffle=True,
augmentation=apply_augmentation,
covid_percent=0.3,
class_weights=[1., 1., 6.], # default class weights. The least numerous class is weighted more heavily than the others,
# in this case COVID-19
top_percent=0.08 # here set to 0.08, though in the functions above it was set to 0.15
):
'Initialization' # setting values in the constructor
self.datadir = data_dir
self.dataset = _process_csv_file(csv_file)
self.is_training = is_training
self.batch_size = batch_size
self.N = len(self.dataset)
self.input_shape = input_shape
self.n_classes = n_classes
self.num_channels = num_channels
self.mapping = mapping
self.shuffle = True
self.covid_percent = covid_percent
self.class_weights = class_weights
self.n = 0
self.augmentation = augmentation
self.top_percent = top_percent
datasets = {'normal': [], 'pneumonia': [], 'COVID-19': []} #dictionary for classes
for l in self.dataset: # iterate over the dataset
datasets[l.split()[2]].append(l) # the third whitespace-separated field is the class name, e.g. l.split()[2] == 'normal';
# append the whole line to the dictionary.
self.datasets = [
datasets['normal'] + datasets['pneumonia'],
datasets['COVID-19'],
] # set datasets to a list of lists where the first is the concatenation of the 'normal' and 'pneumonia' lists, and the
# second is COVID-19
print(len(self.datasets[0]), len(self.datasets[1]))
self.on_epoch_end() # is triggered once at the very beginning as well as at the end of each epoch.
# If the shuffle parameter is set to True, we will get a new order of exploration at
# each pass (or just keep a linear exploration scheme otherwise).
def __next__(self): # method that needs to be implemented for a Keras generator
# Get one batch of data
batch_x, batch_y, weights = self.__getitem__(self.n) # if the batch index is less than the number of batches we call the
# __getitem__ method
# Batch index
self.n += 1
# If we have processed the entire dataset then
if self.n >= self.__len__(): # it means that we have fed all of our training examples in this epoch
self.on_epoch_end() # shuffle the training set
self.n = 0 # | mapping={
'normal': 0,
'pneumonia': 1,
'COVID-19': 2 | random_line_split |