text
stringlengths 3
1.05M
|
|---|
/**
* Copyright IBM Corp. 2019, 2020
*
* This source code is licensed under the Apache-2.0 license found in the
* LICENSE file in the root directory of this source tree.
*
* Code generated by @carbon/icon-build-helpers. DO NOT EDIT.
*/
'use strict';

// Generated module (see license header: DO NOT EDIT): exposes the 32x32
// "SnowHeavy" pictogram as a React component built on the shared Icon helper.
var Icon = require('../Icon-399ca71f.js');
var React = require('react');
require('@carbon/icon-helpers');
require('prop-types');

// Normalize a CJS/ESM module so the default export is always at `.default`.
function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }

var React__default = /*#__PURE__*/_interopDefaultLegacy(React);

// Lazily-created, module-cached <path> elements: built on first render and
// reused on every subsequent render of any SnowHeavy32 instance.
var _path, _path2;

// forwardRef component: all props except `children` are spread onto the shared
// <Icon>, so the width/height/viewBox/fill defaults below can be overridden by
// callers; `ref` is forwarded to the underlying SVG.
var SnowHeavy32 = /*#__PURE__*/React__default['default'].forwardRef(function SnowHeavy32(_ref, ref) {
  var children = _ref.children,
      rest = Icon._objectWithoutProperties(_ref, ["children"]);
  return /*#__PURE__*/React__default['default'].createElement(Icon.Icon, Icon._extends({
    width: 32,
    height: 32,
    viewBox: "0 0 32 32",
    xmlns: "http://www.w3.org/2000/svg",
    fill: "currentColor",
    ref: ref
  }, rest), _path || (_path = /*#__PURE__*/React__default['default'].createElement("path", {
    d: "M4 26H6V28H4zM6 28H8V30H6zM8 30H10V32H8zM8 26H10V28H8zM4 30H6V32H4zM8 18H10V20H8zM10 20H12V22H10zM12 22H14V24H12zM12 18H14V20H12zM8 22H10V24H8zM18 18H20V20H18zM20 20H22V22H20zM22 22H24V24H22zM22 18H24V20H22zM18 22H20V24H18zM14 26H16V28H14zM16 28H18V30H16zM18 30H20V32H18zM18 26H20V28H18zM14 30H16V32H14z"
  })), _path2 || (_path2 = /*#__PURE__*/React__default['default'].createElement("path", {
    d: "M24.8008,9.1362a8.9943,8.9943,0,0,0-17.6006,0A6.4957,6.4957,0,0,0,6,21.4985v-2.26A4.4943,4.4943,0,0,1,8.144,11.019l.8155-.0639.0991-.812a6.9938,6.9938,0,0,1,13.8838,0l.0986.812.8154.0639A4.4944,4.4944,0,0,1,26,19.2383v2.26A6.4958,6.4958,0,0,0,24.8008,9.1362Z"
  })), children);
});

module.exports = SnowHeavy32;
|
# Copyright (c) 2012-2016 The PlanBcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Planbcoin base58 encoding and decoding.
Based on https://planbcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
# for compatibility with following code...
# Expose a PyCrypto-style `SHA256.new(...)` interface on top of hashlib.
class SHA256:
    new = hashlib.sha256

if str != bytes:
    # Python 3.x: shadow the ord/chr builtins so the base58 code below works
    # on bytes objects, whose iteration/indexing already yields ints.
    def ord(c):
        # `c` is already an int when taken from a bytes object.
        return c
    def chr(n):
        # Produce a single byte rather than a 1-char str.
        return bytes( (n,) )
# Base58 alphabet (Bitcoin-style: no 0, O, I, l).
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars

def b58encode(v):
    """Encode v, a string of bytes, to base58.

    Leading zero bytes are compressed into leading '1' characters, as in
    Bitcoin addresses.
    """
    # bytearray yields ints under both Python 2 and Python 3, so we no longer
    # need the ord() shim.  The previous code compared `c == '\0'` in the
    # zero-padding loop, which never matched ints under Python 3 and silently
    # dropped the leading-1 compression.
    data = bytearray(v)
    # Interpret the bytes as a big-endian integer (Horner's rule).
    long_value = 0
    for byte in data:
        long_value = long_value * 256 + byte
    # Convert the integer to base58 digits, most significant first.
    result = ''
    while long_value >= __b58base:
        long_value, mod = divmod(long_value, __b58base)
        result = __b58chars[mod] + result
    result = __b58chars[long_value] + result
    # Leading-zero compression: each leading 0-byte becomes a leading '1'.
    nPad = 0
    for byte in data:
        if byte == 0:
            nPad += 1
        else:
            break
    return (__b58chars[0] * nPad) + result
def b58decode(v, length = None):
    """Decode base58 string v into bytes.

    If `length` is given and the decoded result does not have exactly that
    many bytes, None is returned instead.
    """
    # Interpret v as a big-endian base58 number via Horner's rule.
    long_value = 0
    for c in v:
        long_value = long_value * __b58base + __b58chars.find(c)
    # Serialize the integer back into big-endian bytes.
    decoded = bytearray()
    while long_value >= 256:
        long_value, mod = divmod(long_value, 256)
        decoded.insert(0, mod)
    decoded.insert(0, long_value)
    # Each leading '1' in the input encodes one leading zero byte.
    nPad = 0
    for c in v:
        if c == __b58chars[0]:
            nPad += 1
        else:
            break
    result = bytes(bytearray(nPad) + decoded)
    if length is not None and len(result) != length:
        return None
    return result
def checksum(v):
    """Return 32-bit checksum based on SHA256 (first 4 bytes of double-SHA256)."""
    first_round = SHA256.new(v).digest()
    return SHA256.new(first_round).digest()[:4]
def b58encode_chk(v):
    """b58encode a string, with 32-bit checksum appended before encoding."""
    payload = v + checksum(v)
    return b58encode(payload)
def b58decode_chk(v):
    """Decode a base58 string, check and remove the 4-byte checksum.

    Returns the payload bytes, or None if decoding or the checksum fails.
    """
    decoded = b58decode(v)
    if decoded is None:
        return None
    payload, check = decoded[:-4], decoded[-4:]
    if check != checksum(payload):
        return None
    return payload
def get_bcaddress_version(strAddress):
    """Returns None if strAddress is invalid. Otherwise returns integer version of address."""
    addr = b58decode_chk(strAddress)
    # A valid payload is exactly 21 bytes: 1 version byte + 20-byte hash160.
    if addr is None or len(addr) != 21:
        return None
    return ord(addr[0])
if __name__ == '__main__':
    # Test case (from http://gitorious.org/planbcoin/python-base58.git)
    # Use '==' rather than 'is': identity comparison against an int literal is
    # implementation-dependent and raises SyntaxWarning on Python >= 3.8.
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
|
import "./styles.css";
import GetAppIcon from "@material-ui/icons/GetApp";
import MoreVertIcon from "@material-ui/icons/MoreVert";
// App header for "BaixaTube": a download-icon logo with a two-tone wordmark,
// plus an overflow ("more") link that opens in a new tab.
// NOTE(review): the link currently points at google.com.br — presumably a
// placeholder; confirm the intended destination.
function Header() {
  return (
    <div className="header-container">
      <div className="header-content-container">
        <div className="logo-container">
          <div className="logo-icon">
            <GetAppIcon fontSize="large" />
          </div>
          <div className="logo-text">
            <h2 className="first">Baixa</h2>
            <h2 className="second">Tube</h2>
          </div>
        </div>
        {/* rel="noopener noreferrer" guards the target="_blank" window. */}
        <a
          className="more"
          target="_blank"
          href="https://www.google.com.br/"
          rel="noopener noreferrer"
        >
          <MoreVertIcon fontSize="large" className="more" />
        </a>
      </div>
    </div>
  );
}
export default Header;
|
import React from 'react';
import PropTypes from 'prop-types';
import ComponentFactory from './ComponentFactory';
const Subscript = ({ nodeData, ...rest }) => (
<sub>
{nodeData.children.map((child, index) => (
<ComponentFactory {...rest} key={index} nodeData={child} />
))}
</sub>
);
Subscript.propTypes = {
nodeData: PropTypes.shape({
children: PropTypes.arrayOf(PropTypes.object).isRequired,
}).isRequired,
};
export default Subscript;
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Restrained Amber Minimization of a structure."""
import io
import time
from typing import Collection, Optional, Sequence
from absl import logging
import ml_collections
import numpy as np
from simtk import openmm
from simtk import unit
from simtk.openmm import app as openmm_app
from simtk.openmm.app.internal.pdbstructure import PdbStructure
from alphafold.common import protein
from alphafold.common import residue_constants
from alphafold.model import folding
from alphafold.relax import cleanup
from alphafold.relax import utils
ENERGY = unit.kilocalories_per_mole
LENGTH = unit.angstroms
def will_restrain(atom: openmm_app.Atom, rset: str) -> bool:
  """Returns True if the atom will be restrained by the given restraint set.

  Args:
    atom: An OpenMM topology atom.
    rset: Restraint set name, either "non_hydrogen" or "c_alpha".

  Returns:
    Whether this atom belongs to the restraint set.

  Raises:
    ValueError: If `rset` is not a known restraint set.
  """
  if rset == "non_hydrogen":
    return atom.element.name != "hydrogen"
  elif rset == "c_alpha":
    return atom.name == "CA"
  else:
    # Previously an unknown rset silently fell through and returned None;
    # fail loudly instead so misconfiguration is caught at the first atom.
    raise ValueError(f"Unknown restraint set: {rset}")
def _add_restraints(
    system: openmm.System,
    reference_pdb: openmm_app.PDBFile,
    stiffness: unit.Unit,
    rset: str,
    exclude_residues: Sequence[int]):
  """Adds a harmonic potential that restrains the end-to-end distance."""
  assert rset in ["non_hydrogen", "c_alpha"]

  # Isotropic harmonic well centred on each restrained atom's reference
  # position, with a single global spring constant k.
  restraint_force = openmm.CustomExternalForce(
      "0.5 * k * ((x-x0)^2 + (y-y0)^2 + (z-z0)^2)")
  restraint_force.addGlobalParameter("k", stiffness)
  for param_name in ("x0", "y0", "z0"):
    restraint_force.addPerParticleParameter(param_name)

  for atom_index, atom in enumerate(reference_pdb.topology.atoms()):
    if atom.residue.index in exclude_residues:
      continue
    if will_restrain(atom, rset):
      restraint_force.addParticle(atom_index,
                                  reference_pdb.positions[atom_index])
  logging.info("Restraining %d / %d particles.",
               restraint_force.getNumParticles(), system.getNumParticles())
  system.addForce(restraint_force)
def _openmm_minimize(
    pdb_str: str,
    max_iterations: int,
    tolerance: unit.Unit,
    stiffness: unit.Unit,
    restraint_set: str,
    exclude_residues: Sequence[int]):
  """Minimize energy via openmm.

  Args:
    pdb_str: PDB text of the structure to minimize.
    max_iterations: Maximum number of L-BFGS iterations; 0 means no limit.
    tolerance: Energy tolerance for L-BFGS convergence (energy units).
    stiffness: Restraint spring constant (energy / length**2). Positional
      restraints are only added when stiffness > 0.
    restraint_set: Which atoms to restrain ("non_hydrogen" or "c_alpha").
    exclude_residues: Zero-indexed residues excluded from restraints.

  Returns:
    Dict with pre-/post-minimization potential energies ("einit"/"efinal"),
    positions ("posinit"/"pos") and the minimized structure as PDB text
    ("min_pdb").
  """
  pdb_file = io.StringIO(pdb_str)
  pdb = openmm_app.PDBFile(pdb_file)
  force_field = openmm_app.ForceField("amber99sb.xml")
  constraints = openmm_app.HBonds
  system = force_field.createSystem(
      pdb.topology, constraints=constraints)
  if stiffness > 0 * ENERGY / (LENGTH**2):
    _add_restraints(system, pdb, stiffness, restraint_set, exclude_residues)

  # Only energy minimization is run (the integrator is never stepped), so the
  # integrator parameters are irrelevant here.
  integrator = openmm.LangevinIntegrator(0, 0.01, 0.0)
  platform = openmm.Platform.getPlatformByName("CPU")
  simulation = openmm_app.Simulation(
      pdb.topology, system, integrator, platform)
  simulation.context.setPositions(pdb.positions)

  ret = {}
  # Record state before minimization.
  state = simulation.context.getState(getEnergy=True, getPositions=True)
  ret["einit"] = state.getPotentialEnergy().value_in_unit(ENERGY)
  ret["posinit"] = state.getPositions(asNumpy=True).value_in_unit(LENGTH)
  simulation.minimizeEnergy(maxIterations=max_iterations,
                            tolerance=tolerance)
  # Record state after minimization.
  state = simulation.context.getState(getEnergy=True, getPositions=True)
  ret["efinal"] = state.getPotentialEnergy().value_in_unit(ENERGY)
  ret["pos"] = state.getPositions(asNumpy=True).value_in_unit(LENGTH)
  ret["min_pdb"] = _get_pdb_string(simulation.topology, state.getPositions())
  return ret
def _get_pdb_string(topology: openmm_app.Topology, positions: unit.Quantity):
  """Returns a pdb string provided OpenMM topology and positions."""
  buf = io.StringIO()
  try:
    openmm_app.PDBFile.writeFile(topology, positions, buf)
    return buf.getvalue()
  finally:
    buf.close()
def _check_cleaned_atoms(pdb_cleaned_string: str, pdb_ref_string: str):
  """Checks that no atom positions have been altered by cleaning.

  Args:
    pdb_cleaned_string: PDB text after cleanup.
    pdb_ref_string: Original PDB text to compare against.

  Raises:
    ValueError: If an atom present in both structures has moved.
  """
  cleaned = openmm_app.PDBFile(io.StringIO(pdb_cleaned_string))
  reference = openmm_app.PDBFile(io.StringIO(pdb_ref_string))

  cl_xyz = np.array(cleaned.getPositions().value_in_unit(LENGTH))
  ref_xyz = np.array(reference.getPositions().value_in_unit(LENGTH))

  # Residues are compared pairwise in file order; atoms are matched by name.
  # NOTE(review): the inner scan is O(atoms^2) per residue; residues are small
  # so this is acceptable, but a name->atom dict would be linear.
  for ref_res, cl_res in zip(reference.topology.residues(),
                             cleaned.topology.residues()):
    assert ref_res.name == cl_res.name
    for rat in ref_res.atoms():
      for cat in cl_res.atoms():
        if cat.name == rat.name:
          if not np.array_equal(cl_xyz[cat.index], ref_xyz[rat.index]):
            raise ValueError(f"Coordinates of cleaned atom {cat} do not match "
                             f"coordinates of reference atom {rat}.")
def _check_residues_are_well_defined(prot: protein.Protein):
  """Checks that all residues contain non-empty atom sets."""
  atoms_per_residue = prot.atom_mask.sum(axis=-1)
  if np.any(atoms_per_residue == 0):
    raise ValueError("Amber minimization can only be performed on proteins with"
                     " well-defined residues. This protein contains at least"
                     " one residue with no atoms.")
def _check_atom_mask_is_ideal(prot):
  """Sanity-check the atom mask is ideal, up to a possible OXT."""
  ideal_mask = protein.ideal_atom_mask(prot)
  utils.assert_equal_nonterminal_atom_types(prot.atom_mask, ideal_mask)
def clean_protein(
    prot: protein.Protein,
    checks: bool = True):
  """Adds missing atoms to Protein instance.

  Args:
    prot: A `protein.Protein` instance.
    checks: A `bool` specifying whether to add additional checks to the
      cleaning process.

  Returns:
    pdb_string: A string of the cleaned protein.
  """
  _check_atom_mask_is_ideal(prot)

  # Clean pdb.
  prot_pdb_string = protein.to_pdb(prot)
  pdb_file = io.StringIO(prot_pdb_string)
  # alterations_info is filled in by the cleanup helpers with a record of the
  # changes they made (presumably; see the cleanup module for details).
  alterations_info = {}
  fixed_pdb = cleanup.fix_pdb(pdb_file, alterations_info)
  fixed_pdb_file = io.StringIO(fixed_pdb)
  pdb_structure = PdbStructure(fixed_pdb_file)
  cleanup.clean_structure(pdb_structure, alterations_info)
  logging.info("alterations info: %s", alterations_info)

  # Write pdb file of cleaned structure.
  as_file = openmm_app.PDBFile(pdb_structure)
  pdb_string = _get_pdb_string(as_file.getTopology(), as_file.getPositions())
  if checks:
    # Verify cleaning did not move any atom present in the original.
    _check_cleaned_atoms(pdb_string, prot_pdb_string)
  return pdb_string
def make_atom14_positions(prot):
  """Constructs denser atom positions (14 dimensions instead of 37).

  Args:
    prot: A dict-like batch with "aatype", "all_atom_positions" and
      "all_atom_mask" entries in the sparse atom37 representation.
      Mutated in place.

  Returns:
    The same mapping, extended with atom14 masks/positions, atom14<->atom37
    gather indices, alternative (name-swapped) ground truth for ambiguous
    atoms, and an ambiguity mask.
  """
  restype_atom14_to_atom37 = []  # mapping (restype, atom14) --> atom37
  restype_atom37_to_atom14 = []  # mapping (restype, atom37) --> atom14
  restype_atom14_mask = []

  # Build per-restype lookup tables from the residue constants.
  for rt in residue_constants.restypes:
    atom_names = residue_constants.restype_name_to_atom14_names[
        residue_constants.restype_1to3[rt]]

    restype_atom14_to_atom37.append([
        (residue_constants.atom_order[name] if name else 0)
        for name in atom_names
    ])

    atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
    restype_atom37_to_atom14.append([
        (atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0)
        for name in residue_constants.atom_types
    ])

    restype_atom14_mask.append([(1. if name else 0.) for name in atom_names])

  # Add dummy mapping for restype 'UNK'.
  restype_atom14_to_atom37.append([0] * 14)
  restype_atom37_to_atom14.append([0] * 37)
  restype_atom14_mask.append([0.] * 14)

  restype_atom14_to_atom37 = np.array(restype_atom14_to_atom37, dtype=np.int32)
  restype_atom37_to_atom14 = np.array(restype_atom37_to_atom14, dtype=np.int32)
  restype_atom14_mask = np.array(restype_atom14_mask, dtype=np.float32)

  # Create the mapping for (residx, atom14) --> atom37, i.e. an array
  # with shape (num_res, 14) containing the atom37 indices for this protein.
  residx_atom14_to_atom37 = restype_atom14_to_atom37[prot["aatype"]]
  residx_atom14_mask = restype_atom14_mask[prot["aatype"]]

  # Create a mask for known ground truth positions.
  residx_atom14_gt_mask = residx_atom14_mask * np.take_along_axis(
      prot["all_atom_mask"], residx_atom14_to_atom37, axis=1).astype(np.float32)

  # Gather the ground truth positions.
  residx_atom14_gt_positions = residx_atom14_gt_mask[:, :, None] * (
      np.take_along_axis(prot["all_atom_positions"],
                         residx_atom14_to_atom37[..., None],
                         axis=1))

  prot["atom14_atom_exists"] = residx_atom14_mask
  prot["atom14_gt_exists"] = residx_atom14_gt_mask
  prot["atom14_gt_positions"] = residx_atom14_gt_positions

  prot["residx_atom14_to_atom37"] = residx_atom14_to_atom37

  # Create the gather indices for mapping back.
  residx_atom37_to_atom14 = restype_atom37_to_atom14[prot["aatype"]]
  prot["residx_atom37_to_atom14"] = residx_atom37_to_atom14

  # Create the corresponding mask.
  restype_atom37_mask = np.zeros([21, 37], dtype=np.float32)
  for restype, restype_letter in enumerate(residue_constants.restypes):
    restype_name = residue_constants.restype_1to3[restype_letter]
    atom_names = residue_constants.residue_atoms[restype_name]
    for atom_name in atom_names:
      atom_type = residue_constants.atom_order[atom_name]
      restype_atom37_mask[restype, atom_type] = 1

  residx_atom37_mask = restype_atom37_mask[prot["aatype"]]
  prot["atom37_atom_exists"] = residx_atom37_mask

  # As the atom naming is ambiguous for 7 of the 20 amino acids, provide
  # alternative ground truth coordinates where the naming is swapped
  restype_3 = [
      residue_constants.restype_1to3[res] for res in residue_constants.restypes
  ]
  restype_3 += ["UNK"]

  # Matrices for renaming ambiguous atoms (identity for unambiguous restypes).
  all_matrices = {res: np.eye(14, dtype=np.float32) for res in restype_3}
  for resname, swap in residue_constants.residue_atom_renaming_swaps.items():
    correspondences = np.arange(14)
    for source_atom_swap, target_atom_swap in swap.items():
      source_index = residue_constants.restype_name_to_atom14_names[
          resname].index(source_atom_swap)
      target_index = residue_constants.restype_name_to_atom14_names[
          resname].index(target_atom_swap)
      correspondences[source_index] = target_index
      correspondences[target_index] = source_index
      renaming_matrix = np.zeros((14, 14), dtype=np.float32)
      for index, correspondence in enumerate(correspondences):
        renaming_matrix[index, correspondence] = 1.
    all_matrices[resname] = renaming_matrix.astype(np.float32)
  renaming_matrices = np.stack([all_matrices[restype] for restype in restype_3])

  # Pick the transformation matrices for the given residue sequence
  # shape (num_res, 14, 14).
  renaming_transform = renaming_matrices[prot["aatype"]]

  # Apply it to the ground truth positions. shape (num_res, 14, 3).
  alternative_gt_positions = np.einsum("rac,rab->rbc",
                                       residx_atom14_gt_positions,
                                       renaming_transform)
  prot["atom14_alt_gt_positions"] = alternative_gt_positions

  # Create the mask for the alternative ground truth (differs from the
  # ground truth mask, if only one of the atoms in an ambiguous pair has a
  # ground truth position).
  alternative_gt_mask = np.einsum("ra,rab->rb",
                                  residx_atom14_gt_mask,
                                  renaming_transform)
  prot["atom14_alt_gt_exists"] = alternative_gt_mask

  # Create an ambiguous atoms mask. shape: (21, 14).
  restype_atom14_is_ambiguous = np.zeros((21, 14), dtype=np.float32)
  for resname, swap in residue_constants.residue_atom_renaming_swaps.items():
    for atom_name1, atom_name2 in swap.items():
      restype = residue_constants.restype_order[
          residue_constants.restype_3to1[resname]]
      atom_idx1 = residue_constants.restype_name_to_atom14_names[resname].index(
          atom_name1)
      atom_idx2 = residue_constants.restype_name_to_atom14_names[resname].index(
          atom_name2)
      restype_atom14_is_ambiguous[restype, atom_idx1] = 1
      restype_atom14_is_ambiguous[restype, atom_idx2] = 1

  # From this create an ambiguous_mask for the given sequence.
  prot["atom14_atom_is_ambiguous"] = (
      restype_atom14_is_ambiguous[prot["aatype"]])

  return prot
def find_violations(prot_np: protein.Protein):
  """Analyzes a protein and returns structural violation information.

  Args:
    prot_np: A protein.

  Returns:
    violations: A `dict` of structure components with structural violations.
    violation_metrics: A `dict` of violation metrics.
  """
  batch = {
      "aatype": prot_np.aatype,
      "all_atom_positions": prot_np.atom_positions.astype(np.float32),
      "all_atom_mask": prot_np.atom_mask.astype(np.float32),
      "residue_index": prot_np.residue_index,
  }

  batch["seq_mask"] = np.ones_like(batch["aatype"], np.float32)
  # Densify to the atom14 representation expected by the folding module.
  batch = make_atom14_positions(batch)

  violations = folding.find_structural_violations(
      batch=batch,
      atom14_pred_positions=batch["atom14_gt_positions"],
      config=ml_collections.ConfigDict(
          {"violation_tolerance_factor": 12,  # Taken from model config.
           "clash_overlap_tolerance": 1.5,  # Taken from model config.
          }))
  violation_metrics = folding.compute_violation_metrics(
      batch=batch,
      atom14_pred_positions=batch["atom14_gt_positions"],
      violations=violations,
  )

  return violations, violation_metrics
def get_violation_metrics(prot: protein.Protein):
  """Computes violation and alignment metrics."""
  violations, metrics = find_violations(prot)
  # Indices of residues that participate in any structural violation.
  offending_residues = np.flatnonzero(
      violations["total_per_residue_violations_mask"])
  metrics["residue_violations"] = offending_residues
  metrics["num_residue_violations"] = len(offending_residues)
  metrics["structural_violations"] = violations
  return metrics
def _run_one_iteration(
    *,
    pdb_string: str,
    max_iterations: int,
    tolerance: float,
    stiffness: float,
    restraint_set: str,
    max_attempts: int,
    exclude_residues: Optional[Collection[int]] = None):
  """Runs the minimization pipeline.

  Args:
    pdb_string: A pdb string.
    max_iterations: An `int` specifying the maximum number of L-BFGS
      iterations. A value of 0 specifies no limit.
    tolerance: kcal/mol, the energy tolerance of L-BFGS.
    stiffness: kcal/mol A**2, spring constant of heavy atom restraining
      potential.
    restraint_set: The set of atoms to restrain.
    max_attempts: The maximum number of minimization attempts.
    exclude_residues: An optional list of zero-indexed residues to exclude from
      restraints.

  Returns:
    A `dict` of minimization info.

  Raises:
    ValueError: If minimization fails on every one of max_attempts tries.
  """
  exclude_residues = exclude_residues or []

  # Assign physical dimensions.
  tolerance = tolerance * ENERGY
  stiffness = stiffness * ENERGY / (LENGTH**2)

  start = time.time()
  minimized = False
  attempts = 0
  while not minimized and attempts < max_attempts:
    attempts += 1
    try:
      logging.info("Minimizing protein, attempt %d of %d.",
                   attempts, max_attempts)
      ret = _openmm_minimize(
          pdb_string, max_iterations=max_iterations,
          tolerance=tolerance, stiffness=stiffness,
          restraint_set=restraint_set,
          exclude_residues=exclude_residues)
      minimized = True
    except Exception as e:  # pylint: disable=broad-except
      # Minimization can fail sporadically; log and retry up to max_attempts.
      logging.info(e)
  if not minimized:
    raise ValueError(f"Minimization failed after {max_attempts} attempts.")
  ret["opt_time"] = time.time() - start
  ret["min_attempts"] = attempts
  return ret
def run_pipeline(
    prot: protein.Protein,
    stiffness: float,
    max_outer_iterations: int = 1,
    place_hydrogens_every_iteration: bool = True,
    max_iterations: int = 0,
    tolerance: float = 2.39,
    restraint_set: str = "non_hydrogen",
    max_attempts: int = 100,
    checks: bool = True,
    exclude_residues: Optional[Sequence[int]] = None):
  """Run iterative amber relax.

  Successive relax iterations are performed until all violations have been
  resolved. Each iteration involves a restrained Amber minimization, with
  restraint exclusions determined by violation-participating residues.

  Args:
    prot: A protein to be relaxed.
    stiffness: kcal/mol A**2, the restraint stiffness.
    max_outer_iterations: The maximum number of iterative minimization.
    place_hydrogens_every_iteration: Whether hydrogens are re-initialized
      prior to every minimization.
    max_iterations: An `int` specifying the maximum number of L-BFGS steps
      per relax iteration. A value of 0 specifies no limit.
    tolerance: kcal/mol, the energy tolerance of L-BFGS.
      The default value is the OpenMM default.
    restraint_set: The set of atoms to restrain.
    max_attempts: The maximum number of minimization attempts per iteration.
    checks: Whether to perform cleaning checks.
    exclude_residues: An optional list of zero-indexed residues to exclude from
      restraints.

  Returns:
    out: A dictionary of output values.
  """

  # `protein.to_pdb` will strip any poorly-defined residues so we need to
  # perform this check before `clean_protein`.
  _check_residues_are_well_defined(prot)
  pdb_string = clean_protein(prot, checks=checks)

  exclude_residues = exclude_residues or []
  exclude_residues = set(exclude_residues)
  violations = np.inf
  iteration = 0

  # NOTE(review): assumes max_outer_iterations >= 1; with 0 the loop never
  # runs and `ret` below would be unbound.
  while violations > 0 and iteration < max_outer_iterations:
    ret = _run_one_iteration(
        pdb_string=pdb_string,
        exclude_residues=exclude_residues,
        max_iterations=max_iterations,
        tolerance=tolerance,
        stiffness=stiffness,
        restraint_set=restraint_set,
        max_attempts=max_attempts)
    prot = protein.from_pdb_string(ret["min_pdb"])
    if place_hydrogens_every_iteration:
      # Re-clean (which re-adds hydrogens) starting from the minimized model.
      pdb_string = clean_protein(prot, checks=True)
    else:
      pdb_string = ret["min_pdb"]
    ret.update(get_violation_metrics(prot))
    ret.update({
        "num_exclusions": len(exclude_residues),
        "iteration": iteration,
    })
    violations = ret["violations_per_residue"]
    # Residues that violated this round are excluded from restraints in the
    # next round, letting the minimizer move them more freely.
    exclude_residues = exclude_residues.union(ret["residue_violations"])

    logging.info("Iteration completed: Einit %.2f Efinal %.2f Time %.2f s "
                 "num residue violations %d num residue exclusions %d ",
                 ret["einit"], ret["efinal"], ret["opt_time"],
                 ret["num_residue_violations"], ret["num_exclusions"])
    iteration += 1
  return ret
def get_initial_energies(pdb_strs: Sequence[str],
                         stiffness: float = 0.0,
                         restraint_set: str = "non_hydrogen",
                         exclude_residues: Optional[Sequence[int]] = None):
  """Returns initial potential energies for a sequence of PDBs.

  Assumes the input PDBs are ready for minimization, and all have the same
  topology.
  Allows time to be saved by not pdbfixing / rebuilding the system.

  Args:
    pdb_strs: List of PDB strings.
    stiffness: kcal/mol A**2, spring constant of heavy atom restraining
      potential.
    restraint_set: Which atom types to restrain.
    exclude_residues: An optional list of zero-indexed residues to exclude from
      restraints.

  Returns:
    A list of initial energies in the same order as pdb_strs.
  """
  exclude_residues = exclude_residues or []
  openmm_pdbs = [openmm_app.PDBFile(PdbStructure(io.StringIO(p)))
                 for p in pdb_strs]
  force_field = openmm_app.ForceField("amber99sb.xml")
  # The system and simulation are built once from the first PDB and reused for
  # every structure (the identical-topology assumption above).
  system = force_field.createSystem(openmm_pdbs[0].topology,
                                    constraints=openmm_app.HBonds)
  stiffness = stiffness * ENERGY / (LENGTH**2)
  if stiffness > 0 * ENERGY / (LENGTH**2):
    _add_restraints(system, openmm_pdbs[0], stiffness, restraint_set,
                    exclude_residues)

  simulation = openmm_app.Simulation(openmm_pdbs[0].topology,
                                     system,
                                     openmm.LangevinIntegrator(0, 0.01, 0.0),
                                     openmm.Platform.getPlatformByName("CPU"))
  energies = []
  for pdb in openmm_pdbs:
    try:
      simulation.context.setPositions(pdb.positions)
      state = simulation.context.getState(getEnergy=True)
      energies.append(state.getPotentialEnergy().value_in_unit(ENERGY))
    except Exception as e:  # pylint: disable=broad-except
      # Best-effort: a failed evaluation yields a sentinel large value rather
      # than aborting the whole batch.
      logging.error("Error getting initial energy, returning large value %s", e)
      energies.append(unit.Quantity(1e20, ENERGY))
  return energies
|
import React from "react";
export default function SurveyQuestionnaire() {
return (
<div className="newUserForm">
<h1>Survey</h1>
<p>
Take a quick survey to find your perfect match! These
questions are answered by the numbers 1 through 5. If you
really, really disagree with the statement, your answer would
be 1. If you find a statement to hit extremely close to home,
put in a 5.
</p>
<form>
<div class="form-group">
<input
type="text"
class="form-control"
placeholder="Name (Required)"
/>
</div>
<div class="form-group">
<input
id="photo"
type="text"
class="form-control"
placeholder="Link to Profile Photo (Required)"
/>
</div>
<div class="form-group">
<input
type="text"
class="form-control"
placeholder="Location"
/>
</div>
<div class="form-group">
<input
type="text"
class="form-control"
placeholder="Gender"
/>
</div>
<div class="form-group">
<label for="question1">I like to do janitorial work.</label>
<select class="form-control">
<option value="empty"></option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>
</div>
<div class="form-group">
<label for="question2">
I have some bird-like features (or so I've been told).
</label>
<select class="form-control">
<option value="empty"></option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>
</div>
<div class="form-group">
<label for="question3">
I think very highly of myself.
</label>
<select class="form-control">
<option value="empty"></option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>
</div>
<div class="form-group">
<label for="question4">I enjoy martial arts.</label>
<select class="form-control">
<option value="empty"></option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>
</div>
<div class="form-group">
<label for="question5">I read as a hobby.</label>
<select class="form-control">
<option value="empty"></option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>
</div>
<div class="form-group">
<label for="question6">
I am God's gift to the opposite sex.
</label>
<select class="form-control">
<option value="empty"></option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>
</div>
<div class="form-group">
<label for="question7">
I don't so well with the opposite sex.
</label>
<select class="form-control">
<option value="empty"></option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>
</div>
<div class="form-group">
<label for="question8">I am good with money.</label>
<select class="form-control">
<option value="empty"></option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>
</div>
<div class="form-group">
<label for="question9">I work out at the gym often.</label>
<select class="form-control">
<option value="empty"></option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>
</div>
<div class="form-group">
<label for="question10">
I consider myself aggressive.
</label>
<select class="form-control">
<option value="empty"></option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>
</div>
</form>
<div class="text-center">
<button type="submit" class="btn">
Submit
</button>
</div>
</div>
);
}
|
// 7.2.1. RequireObjectCoercible ( argument )
// The abstract operation RequireObjectCoercible throws a TypeError if its argument is a value
// that cannot be converted to an Object using ToObject. The conversions follow Table 12:
// Table 12: ToObject Conversions
/*
|----------------------------------------------------------------------------------------------------------------------------------------------------|
| Argument Type | Result |
|----------------------------------------------------------------------------------------------------------------------------------------------------|
| Undefined | Throw a TypeError exception. |
| Null | Throw a TypeError exception. |
| Boolean | Return argument. |
| Number | Return argument. |
| String | Return argument. |
| Symbol | Return argument. |
| Object | Return argument. |
|----------------------------------------------------------------------------------------------------------------------------------------------------|
*/
// Throws for null/undefined (the only non-object-coercible values) and
// returns every other value unchanged.
function RequireObjectCoercible(argument) { // eslint-disable-line no-unused-vars
  if (argument === null || argument === undefined) {
    // Use `new` and give a message: bare `TypeError()` worked by accident
    // (Error constructors are callable) but produced an empty, unhelpful
    // error.
    throw new TypeError('Cannot convert undefined or null to object');
  }
  return argument;
}
|
# This program has been developed by students from the bachelor Computer Science at Utrecht University within the
# Software and Game project course
# ©Copyright Utrecht University Department of Information and Computing Sciences.
"""
Django settings for mofa project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import logging
import os
from dotenv import load_dotenv
# Load environment variables from a local .env file (if present) so that the
# os.getenv() lookups further down in this module can be configured per
# deployment without changing this file.
load_dotenv()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# The key can now be supplied via the DJANGO_SECRET_KEY environment variable
# (loaded from .env above); the hard-coded value is kept only as a
# development fallback so existing setups keep working unchanged.
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY',
                       's52+4q(4zx)w9xw=@a^yagzq@79$^7=!&h+!v@)o*qzhn%xhe+')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Set JQUERY_URL to true in order to work with smart_selects
JQUERY_URL = "https://code.jquery.com/jquery-3.4.1.min.js"
# ALL ALLOWED HOSTS NEED TO BE ADDED HERE
ALLOWED_HOSTS = ['mofa.boxinabox.nl', 'box.science.uu.nl', 'host.docker.internal', 'dockerhost', 'localhost',
                 '127.0.0.1', '[::1]', '0.0.0.0',
                 '172.20.0.16', '172.21.0.3']
# Application definition
INSTALLED_APPS = [
    # Project-local apps.
    'analytics.apps.AnalyticsConfig',
    'database_API.apps.DatabaseApiConfig',
    'assistants.apps.AssistantsConfig',
    'scheduler.apps.SchedulerConfig',
    'courses.apps.CoursesConfig',
    # Django contrib apps.
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party apps: chained selects in the admin and the REST API
    # (token auth table comes from rest_framework.authtoken).
    'smart_selects',
    'rest_framework',
    'rest_framework.authtoken',
]
# Django REST framework defaults: every API endpoint requires a valid token
# and an authenticated user unless a view overrides these.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
}
# Standard Django middleware stack (order matters: security first, then
# sessions before auth, CSRF before views that need it).
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mofa.urls'
# Template engine: project-level templates live in mofa/templates; APP_DIRS
# also searches each installed app's templates/ directory.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'mofa', 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mofa.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# NOTE(review): credentials are hard-coded; HOST 'db' is presumably the
# docker-compose service name — acceptable for local development, but confirm
# that production deployments override these.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'postgres',
        'USER': 'postgres',
        'PASSWORD': 'postgres',
        'HOST': 'db',
        'PORT': '5432',
    }
}
# Using SQLite3 for development is also possible. Uncomment the code below
#
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#     }
# }
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# The project uses a custom user model defined in the `courses` app.
AUTH_USER_MODEL = 'courses.User'
# When executing django-nose, include these arguments
NOSE_ARGS = [
    '--with-coverage',
    '--cover-package=assistants,courses,scheduler,analytics',
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
# Time zone comes from the environment (.env). NOTE(review): if the TIME_ZONE
# variable is unset this evaluates to None, which Django rejects at startup —
# confirm that every deployment defines it.
TIME_ZONE = os.getenv("TIME_ZONE")
USE_I18N = True
USE_L10N = True
# Naive (non-timezone-aware) datetimes are used throughout the project.
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "mofa/static"),
]
# Logging
if os.getenv('TESTING'):
    # Silence (almost) all logging while the test suite runs.
    logging.basicConfig(level=logging.CRITICAL)
else:
    log_format = '%(asctime)s: %(message)s'
    logging.basicConfig(filename='../log.log', level=logging.WARNING, format=log_format)
# External service configuration. NOTE(review): none of the settings below
# exist when TESTING is set — any code importing them must handle that.
if not os.getenv('TESTING'):
    # Moodle
    # Normalize the base URL/IP by stripping a single trailing slash, if any.
    MOODLE_BASE_URL = os.getenv("MOODLE_BASE_URL") if not str(os.getenv("MOODLE_BASE_URL")).endswith('/') \
        else os.getenv("MOODLE_BASE_URL")[:-1]
    MOODLE_BASE_IP = os.getenv("MOODLE_BASE_IP") if not str(os.getenv("MOODLE_BASE_IP")).endswith('/') \
        else os.getenv("MOODLE_BASE_IP")[:-1]
    MOODLE_WEBSERVICE_URL = os.getenv("MOODLE_WEBSERVICE_URL")
    MOODLE_TOKEN = os.getenv("MOODLE_TOKEN")
    # Learning Locker
    LL_URL = os.getenv("LL_URL")
    LL_AUTH_KEY = os.getenv("LL_AUTH_KEY")
    ORGANISATION = os.getenv("ORGANISATION")
    # Django
    DJANGO_PORT = os.getenv("DJANGO_PORT")
    DJANGO_URL = os.getenv("DJANGO_URL")
    # Endpoints the sync agents are reachable at, keyed by entity type.
    SYNC_AGENT_URLS = {'course': f'{DJANGO_URL}:{DJANGO_PORT}/assistants/api/course_sync_agent/',
                       'user': f'{DJANGO_URL}:{DJANGO_PORT}/assistants/api/user_sync_agent/',
                       'question': f'{DJANGO_URL}:{DJANGO_PORT}/assistants/api/question_sync_agent/'}
|
import sqlalchemy
from .db_session import SqlAlchemyBase
class DBGame(SqlAlchemyBase):
    """ORM model persisting the full state of one game session."""
    __tablename__ = 'games'
    # Surrogate primary key.
    id = sqlalchemy.Column(sqlalchemy.Integer,
                           primary_key=True, autoincrement=True)
    # Chat this game is bound to. NOTE(review): presumably a messenger chat
    # id — some platforms' chat ids exceed 32-bit Integer; confirm and
    # consider BigInteger if so.
    chat_id = sqlalchemy.Column(sqlalchemy.Integer, nullable=True)
    player_name = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    # Current location of the player within the game world.
    current_location = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    spaceship_name = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    # Opaque game-state discriminator (integer code; semantics defined by the
    # game logic elsewhere in the project).
    state = sqlalchemy.Column(sqlalchemy.Integer, nullable=True)
    # Inventory stored as a string (serialization format defined by callers).
    player_inventory = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    player_money = sqlalchemy.Column(sqlalchemy.Integer, nullable=True)
    player_hp = sqlalchemy.Column(sqlalchemy.Integer, nullable=True)
    player_armor = sqlalchemy.Column(sqlalchemy.Integer, nullable=True)
    player_weapon = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    player_armor_set = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    player_laser_ammo = sqlalchemy.Column(sqlalchemy.Integer, nullable=True)
    # Fight-subsystem state: current enemy and action-point budget.
    fight_system_enemy = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    fight_system_max_action_points = sqlalchemy.Column(sqlalchemy.Integer, nullable=True)
    fight_system_action_points = sqlalchemy.Column(sqlalchemy.Integer, nullable=True)
    player_quest_items = sqlalchemy.Column(sqlalchemy.String, nullable=True)
|
#!/usr/bin/env python3
"""Execute a submission"""
import argparse
import os
import signal
import subprocess
import tempfile
import time
import json
import traceback
import socket
import logging
import pathlib
import sys
# Maximum number of robot actions per episode (passed to the backend via
# --max-number-of-actions). 2 * 60 * 1000 suggests two minutes at a 1 kHz
# control rate — TODO confirm.
episode_length = 2 * 60 * 1000
class LocalExecutionConfig:
    """Configuration for local execution."""
    # Class-level defaults; most are overwritten from the CLI in __init__.
    singularity_user_image = None
    singularity_backend_image = None
    host_output_dir = None
    robot_data_log_path = "/output/robot_data.dat"
    camera_data_log_path = "/output/camera_data.dat"
    singularity_binary = "singularity"
    visual = False
    git_repo = None
    git_branch = None
    git_ssh_command = None
    def __init__(self):
        """Populate the configuration from command line arguments."""
        options = self._parse_arguments()
        self.host_output_dir = os.path.abspath(options.output_dir)
        self.visual = options.visualize
        self.nv = options.nv
        self.git_repo = options.repository
        self.git_branch = options.branch
        self.singularity_backend_image = os.path.abspath(options.backend_image)
        # If no dedicated user image is given, fall back to the backend image.
        if options.user_image:
            self.singularity_user_image = os.path.abspath(options.user_image)
        else:
            self.singularity_user_image = self.singularity_backend_image
    @staticmethod
    def _parse_arguments():
        """Build the argument parser and parse sys.argv."""
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--output-dir",
            "-o",
            type=str,
            required=True,
            help="""Path to the output directory. All output files will be
            stored there.""",
        )
        parser.add_argument(
            "--repository",
            "-r",
            type=str,
            required=True,
            help="""Git repository with the user code.""",
        )
        parser.add_argument(
            "--branch",
            "-b",
            type=str,
            default="master",
            help="""Branch of the Git repository that is used.""",
        )
        parser.add_argument(
            "--backend-image",
            type=str,
            required=True,
            help="""Path to the Singularity image for the backend.""",
        )
        parser.add_argument(
            "--user-image",
            type=str,
            help="""Path to the Singularity image for the user code. If not
            specified, the same image as for the backend is used.""",
        )
        parser.add_argument(
            "--visualize",
            "-v",
            action="store_true",
            help="""Show visualization of the simulation.""",
        )
        parser.add_argument(
            "--nv",
            action="store_true",
            help="""Set the '--nv' flag when running Singularity for enabling
                Nvidia support. This may be needed when running with
                visualization on a machine that uses Nvidia drivers. See the
                documentation of Singularity for more information.
            """,
        )
        return parser.parse_args()
class SubmissionRunner:
    """Run a submission.

    Drives the whole pipeline: clone the user repository, load/sample the
    goal, build the user code in a Singularity container, start the
    simulation backend, run the user code, and write report files into
    ``config.host_output_dir``.
    """
    def __init__(self, config):
        """Initialize.
        Args:
            config: Configuration structure, either SubmissionSystemConfig or
                LocalExecutionConfig.
        """
        self.config = config
        # Set later by load_goal(); run_user_code() asserts it is present.
        self.goal = None
    def clone_user_repository(self):
        """Clone the user repository."""
        logging.info(
            "Clone user git repository %s (%s)",
            self.config.git_repo,
            self.config.git_branch,
        )
        if self.config.git_ssh_command:
            # Allow custom SSH options (e.g. a deploy key) for the clone.
            os.environ["GIT_SSH_COMMAND"] = self.config.git_ssh_command
        # Clones into ./usercode relative to the current working directory
        # (run() chdirs into the workspace's src directory beforehand).
        git_cmd = [
            "git",
            "clone",
            "--recurse-submodules",
            "-b",
            self.config.git_branch,
            self.config.git_repo,
            "usercode",
        ]
        subprocess.run(git_cmd, check=True)
        # get current revision (recorded later by store_info())
        git_cmd = [
            "git",
            "--git-dir",
            "usercode/.git",
            "rev-parse",
            "HEAD",
        ]
        revision_bytes = subprocess.check_output(git_cmd)
        self.git_revision = revision_bytes.decode("utf-8").strip()
    def _sample_goal(self, difficulty: int):
        """Sample goal of the given difficulty level.

        Returns:
            str: the sampled goal as a JSON string.
        Raises:
            RuntimeError: if the sampling command fails or produces no output.
        """
        # sample the goal using the move_cube module in the container
        cmd = [
            self.config.singularity_binary,
            "run",
            "-eC",
            self.config.singularity_backend_image,
            "python3 -m trifinger_simulation.tasks move_cube sample_goal {:d}".format(
                difficulty
            ),
        ]
        try:
            output_bytes = subprocess.check_output(cmd)
        except subprocess.CalledProcessError as e:
            raise RuntimeError(e.stdout.decode("utf-8"))
        # convert bytes to string
        output = output_bytes.decode("utf-8")
        goal_json = None
        # pybullet prints a "pybullet build time" banner before the actual
        # output; skip it and take the first remaining line as the goal JSON.
        for line in output.split("\n"):
            if line.startswith("pybullet build time"):
                continue
            else:
                goal_json = line
                break
        if not goal_json:
            raise RuntimeError("Failed to sample goal.")
        return goal_json
    def _validate_goal_file(self, source_path, filename):
        """Validate the user-provided goal file inside the backend container.

        Raises:
            RuntimeError: with the validator's combined output on failure.
        """
        cmd = [
            self.config.singularity_binary,
            "run",
            "-eC",
            "-B",
            source_path,
            self.config.singularity_backend_image,
            (
                "python3 -m trifinger_simulation.tasks move_cube"
                " validate_goal_file {}".format(filename)
            ),
        ]
        try:
            subprocess.run(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                check=True,
            )
        except subprocess.CalledProcessError as e:
            raise RuntimeError(e.stdout.decode("utf-8"))
    def load_goal(self, source_path: str):
        """Sample or load the goal for this submission.

        Sets ``self.difficulty`` and ``self.goal`` (a JSON string). When the
        user's goal.json only gives a difficulty, a goal of that difficulty
        is sampled instead.
        """
        # expect the user to provide a file "goal.json" at the root of the repository
        goal_file = os.path.join(source_path, "usercode/goal.json")
        try:
            self._validate_goal_file(source_path, goal_file)
            with open(goal_file, "r") as fh:
                goalconfig = json.load(fh)
            self.difficulty = int(goalconfig["difficulty"])
            if "goal" in goalconfig:
                goal = goalconfig["goal"]
                self.goal = json.dumps(goal)
            else:
                self.goal = self._sample_goal(self.difficulty)
        except Exception as e:
            raise RuntimeError(
                "Failed to load goal configuration. Make sure you provide a valid"
                " 'goal.json' in your code repository.\n"
                " Error: %s" % e
            )
    def build_workspace(self, workspace_path):
        """Build the workspace with the user code.

        Build output is captured and written to build_output.txt in the host
        output directory.
        """
        logging.info("Build the user code")
        # NOTE(review): 'catbuild' is presumably a catkin-build alias provided
        # by the image's /setup.bash — confirm.
        build_cmd = [
            self.config.singularity_binary,
            "exec",
            "--cleanenv",
            "--contain",
            "-B",
            "{}:/ws".format(workspace_path),
            self.config.singularity_user_image,
            "bash",
            "-c",
            ". /setup.bash; cd /ws; catbuild",
        ]
        proc = subprocess.run(
            build_cmd,
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        # store output
        stdout_file = os.path.join(
            self.config.host_output_dir, "build_output.txt"
        )
        with open(stdout_file, "wb") as fh:
            fh.write(proc.stdout)
    def start_backend(self):
        """Start the backend.

        Returns:
            bool: True once the backend signalled readiness (by creating the
            indicator file), False if it did not do so within 60 seconds.
        """
        logging.info("Run the backend")
        # The backend creates this file inside /output when it is ready and
        # removes it when it shuts down; stop_backend() polls it too.
        indicator_file_name = "backend_ready_indicator"
        self.backend_indicator_file = pathlib.Path(
            self.config.host_output_dir, indicator_file_name
        )
        # make sure the file does not exist in the beginning
        if self.backend_indicator_file.exists():
            self.backend_indicator_file.unlink()
        backend_rosrun_cmd = " ".join(
            [
                "rosrun trifinger_simulation pybullet_backend.py",
                "--finger-type trifingerpro",
                "--add-cube",
                "--real-time-mode",
                "--visualize" if self.config.visual else "",
                "--robot-logfile {}".format(self.config.robot_data_log_path),
                "--camera-logfile {}".format(self.config.camera_data_log_path),
                "--max-number-of-actions {}".format(episode_length),
                "--ready-indicator /output/{}".format(indicator_file_name),
            ]
        )
        singularity_flags = [
            "--cleanenv",
            "--contain",
            "-B",
            "/dev,/run,{}:/output".format(self.config.host_output_dir),
        ]
        if self.config.nv:
            singularity_flags.append("--nv")
        run_backend_cmd = [
            self.config.singularity_binary,
            "exec",
        ]
        run_backend_cmd += singularity_flags
        run_backend_cmd += [
            self.config.singularity_backend_image,
            "bash",
            "-c",
            ". /setup.bash; {}".format(backend_rosrun_cmd),
        ]
        logging.info("Start backend")
        logging.debug(" ".join(run_backend_cmd))
        # start_new_session=True puts the backend in its own process group so
        # stop_backend() can signal the whole group.
        self.backend_process = subprocess.Popen(
            run_backend_cmd, start_new_session=True
        )
        logging.info("Wait until backend is ready...")
        start_time = time.time()
        while not self.backend_indicator_file.is_file():
            time.sleep(5)
            # if the backend takes too long to initialize, abort
            if (time.time() - start_time) > 60:
                logging.critical("FAILURE: Backend did not start in time.")
                return False
        logging.info("Backend is ready.")
        return True
    def stop_backend(self):
        """Stop the backend process.

        Waits for the backend to remove its ready-indicator file, then
        escalates SIGINT -> SIGTERM -> SIGKILL on the whole process group
        until it terminates.

        Returns:
            bool: False if the backend died without removing the indicator
            file (treated as a backend error), True otherwise.
        """
        # TODO timeout
        logging.info("Wait until backend has stopped")
        while self.backend_indicator_file.is_file():
            time.sleep(5)
            if self.backend_process.poll() is not None:
                # backend terminated without deleting the indicator file
                return False
        logging.info("Backend has stopped. Give some time to store logs.")
        try:
            self.backend_process.wait(60)
            logging.info("Backend process terminated.")
            return True
        except subprocess.TimeoutExpired:
            pass
        logging.info("Backend still running. Send SIGINT.")
        # the backend spawns several subprocesses by itself, so kill the whole process
        # group instead of just the main process (otherwise some processes will keep
        # running in the background).
        backend_pgid = os.getpgid(self.backend_process.pid)
        os.killpg(backend_pgid, signal.SIGINT)
        try:
            self.backend_process.wait(10)
        except subprocess.TimeoutExpired:
            logging.warning("Backend still running. Send SIGTERM.")
            try:
                os.killpg(backend_pgid, signal.SIGTERM)
                self.backend_process.wait(3)
            except subprocess.TimeoutExpired:
                logging.error("Backend still running. Send SIGKILL.")
                # FIXME this does not seem to kill everything, the pybullet gui is still
                # running when this script terminates...
                os.killpg(backend_pgid, signal.SIGKILL)
                self.backend_process.wait()
        return True
    def run_user_code(self, workspace_path):
        """Run the user script.

        Writes the goal to goal.json in the host output directory, executes
        the user's `run` executable inside the user container and captures
        its stdout/stderr into user_stdout.txt / user_stderr.txt.

        Returns:
            int: the user process's return code.
        """
        assert self.goal is not None
        logging.info("Run the user code.")
        # create user output directory if it does not yet exist
        user_output_dir = os.path.join(self.config.host_output_dir, "user")
        if not os.path.exists(user_output_dir):
            os.mkdir(user_output_dir)
        # store the goal to a file
        goal_file = os.path.join(self.config.host_output_dir, "goal.json")
        goal_info = {
            "difficulty": self.difficulty,
            "goal": json.loads(self.goal),
        }
        with open(goal_file, "w") as fh:
            json.dump(goal_info, fh, indent=4)
        # binding full /dev as only binding /dev/shm does not work with --contain
        exec_cmd = (
            ". /setup.bash;"
            ". /ws/devel/setup.bash;"
            "/ws/src/usercode/run {:d} {!r}"
        )
        run_user_cmd = [
            self.config.singularity_binary,
            "exec",
            "--cleanenv",
            "--contain",
            "-B",
            "{}:/ws,/dev,/run,{}:/output".format(workspace_path, user_output_dir),
            self.config.singularity_user_image,
            "bash",
            "-c",
            exec_cmd.format(self.difficulty, self.goal),
        ]
        try:
            # TODO make sure the user cannot spawn processes that keep running after the
            # main one terminates (probably same method as for backend should be used).
            proc = subprocess.run(
                run_user_cmd,
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            logging.info("User code terminated.")
            stdout = proc.stdout
            stderr = proc.stderr
            returncode = proc.returncode
        except subprocess.CalledProcessError as e:
            # NOTE(review): "exist" is a typo for "exit" in this log message
            # (left unchanged here because it is runtime output).
            logging.error(
                "User code exited with non-zero exist status: %d",
                e.returncode,
            )
            stdout = e.stdout
            stderr = e.stderr
            returncode = e.returncode
            # TODO: indicate this somehow to the user
        # store output
        stdout_file = os.path.join(
            self.config.host_output_dir, "user_stdout.txt"
        )
        stderr_file = os.path.join(
            self.config.host_output_dir, "user_stderr.txt"
        )
        with open(stdout_file, "wb") as fh:
            fh.write(stdout)
        with open(stderr_file, "wb") as fh:
            fh.write(stderr)
        return returncode
    def store_info(self):
        """Store some information about this submission into a file."""
        info = {
            "git_revision": self.git_revision,
            "robot_name": socket.gethostname(),
            "timestamp": time.asctime(),
        }
        info_file = os.path.join(self.config.host_output_dir, "info.json")
        with open(info_file, "w") as fh:
            json.dump(info, fh, indent=4)
    def store_report(self, backend_error, user_returncode):
        """Store a "report" file with some information about the result.
        This file contains some information whether execution was successful or
        if there was some error. It is created at the very end, so it also
        serves as a indicator that the execution is over.
        """
        report = {
            "backend_error": backend_error,
        }
        if not backend_error:
            report["user_returncode"] = user_returncode
        report_file = os.path.join(self.config.host_output_dir, "report.json")
        with open(report_file, "w") as fh:
            json.dump(report, fh, indent=4)
    def run(self):
        """Run the whole pipeline.

        Any unhandled exception is logged and written to error_report.txt in
        the host output directory instead of propagating.
        """
        try:
            with tempfile.TemporaryDirectory(
                prefix="run_submission-"
            ) as ws_dir:
                logging.info("Use temporary workspace %s", ws_dir)
                user_returncode = None
                # create "src" directory and cd into it
                src_dir = os.path.join(ws_dir, "src")
                os.mkdir(src_dir)
                os.chdir(src_dir)
                self.clone_user_repository()
                self.load_goal(src_dir)
                self.store_info()
                os.chdir(ws_dir)
                self.build_workspace(ws_dir)
                backend_okay = self.start_backend()
                if backend_okay:
                    user_returncode = self.run_user_code(ws_dir)
                    backend_okay = self.stop_backend()
                # create the report last, so it can be used as indicator that
                # the execution is over
                self.store_report(not backend_okay, user_returncode)
                logging.info("Finished.")
        except Exception as e:
            logging.critical("FAILURE: %s", e)
            # FIXME just for debugging, remove later
            traceback.print_exc()
            error_report_file = os.path.join(
                self.config.host_output_dir, "error_report.txt"
            )
            with open(error_report_file, "w") as fh:
                fh.write(
                    "Submission failed with the following error:\n{}\n".format(
                        e
                    )
                )
def main():
    """Entry point: configure logging, then execute the submission pipeline."""
    stdout_handler = logging.StreamHandler(sys.stdout)
    logging.basicConfig(
        format="[SUBMISSION %(levelname)s %(asctime)s] %(message)s",
        level=logging.DEBUG,
        handlers=[stdout_handler],
    )
    SubmissionRunner(LocalExecutionConfig()).run()
if __name__ == "__main__":
    main()
|
import json
import os
import pytest
import shutil
import subprocess
from eth_utils import (
is_checksum_address,
is_dict,
to_text,
)
from web3.utils.toolz import (
assoc,
)
from .utils import (
kill_proc_gracefully,
)
# Password protecting the test keyfiles. NOTE(review): this value is
# duplicated in the 'keyfile_pw' entries of the dicts below — keep in sync.
KEYFILE_PW = 'web3py-test'
# Pre-baked chain data (addresses and transaction/block hashes) for the
# geth 1.6 fixture datadir checked into the repository.
GETH_16_FIXTURE = {
    'datadir': 'geth-16-datadir-fixture',
    'block_hash_with_log': '0x5d84bd72195aacbbf6f3ed66be7a16495ed470cbc3e4764c69e4be75ab084148',
    'block_with_txn_hash': '0x4000549a8a573ed2e436de3a9014fdf71922f59aa11753870baa2ad03a32ebfc',
    'emitter_address': '0x4aA591a07989b4F810E2F5cE97e769D60710f168',
    'emitter_deploy_txn_hash': '0x1f676a3d88a8eb3210df677f3dca96edd78b646f8dcecab82d186d7394c8ab6c',
    'empty_block_hash': '0xd09336bcc6164d8d958914f7800356a3bb0cf05f98e20aefc00ce23d9ca62d2d',
    'keyfile_pw': 'web3py-test',
    'math_address': '0xd794C821fCCFF5D96F5Db44af7e29977630A9dc2',
    'math_deploy_txn_hash': '0xbefcf394f431fd983901d16c155da2d009da720b7b88cb9c7dce66f5d3ac44e7',
    'mined_txn_hash': '0x95110dd5943f513a1fd29767b48fe2178b973e99f5d73693d889081d7bdcd0c2',
    'raw_txn_account': '0x39EEed73fb1D3855E90Cbd42f348b3D7b340aAA6',
    'txn_hash_with_log': '0x2fd8dcd6ab1318245f8423df8e31f66f5d0fac2db34d7ab4a2a21a71037beae1',
}
# Same set of values for the geth 1.7 fixture datadir; only the datadir and
# the block hashes differ from the 1.6 fixture.
GETH_17_FIXTURE = {
    'datadir': 'geth-17-datadir-fixture',
    'block_hash_with_log': '0x78a60c6b31c7af5e5ce87bad73b595dfe5b8715b161f4d3ded468ddcb14b5aeb',
    'block_with_txn_hash': '0x034faac7d0932774d9d837a97d55061a2dca9724c9779427a075f0a475aa3f43',
    'emitter_address': '0x4aA591a07989b4F810E2F5cE97e769D60710f168',
    'emitter_deploy_txn_hash': '0x1f676a3d88a8eb3210df677f3dca96edd78b646f8dcecab82d186d7394c8ab6c',
    'empty_block_hash': '0xc7a1b4c19f6c1d830a743f7a93a58bab129f4671f1eb1a82ae77e6643d733b9b',
    'keyfile_pw': 'web3py-test',
    'math_address': '0xd794C821fCCFF5D96F5Db44af7e29977630A9dc2',
    'math_deploy_txn_hash': '0xbefcf394f431fd983901d16c155da2d009da720b7b88cb9c7dce66f5d3ac44e7',
    'mined_txn_hash': '0x95110dd5943f513a1fd29767b48fe2178b973e99f5d73693d889081d7bdcd0c2',
    'raw_txn_account': '0x39EEed73fb1D3855E90Cbd42f348b3D7b340aAA6',
    'txn_hash_with_log': '0x2fd8dcd6ab1318245f8423df8e31f66f5d0fac2db34d7ab4a2a21a71037beae1',
}
# The geth 1.8.1 fixture ships its values in a config.json inside the
# datadir instead (loaded via load_fixture_data()).
GETH_181_DIRECTORY_NAME = 'geth-1.8.1-datadir-fixture'
@pytest.fixture(scope='module')
def geth_binary():
    """Locate the geth executable: explicit path, pinned version, or PATH."""
    from geth.install import (
        get_executable_path,
        install_geth,
    )
    # 1. An explicitly given binary path always wins.
    if 'GETH_BINARY' in os.environ:
        return os.environ['GETH_BINARY']
    # 2. A pinned version is installed on demand if missing.
    if 'GETH_VERSION' in os.environ:
        version = os.environ['GETH_VERSION']
        executable = get_executable_path(version)
        if not os.path.exists(executable):
            install_geth(version)
        assert os.path.exists(executable)
        return executable
    # 3. Fall back to whatever `geth` resolves to on the PATH.
    return 'geth'
def absolute_datadir(directory_name):
    """Resolve ``directory_name`` (located next to this file's parent
    directory) to an absolute path."""
    parent_dir = os.path.join(os.path.dirname(__file__), '..')
    return os.path.abspath(os.path.join(parent_dir, directory_name))
def load_fixture_data(fixture_path):
    """Read ``config.json`` from the given fixture directory and return it
    with the absolute 'datadir' path merged in."""
    datadir = absolute_datadir(fixture_path)
    with open(os.path.join(datadir, 'config.json')) as config_file:
        fixture_config = json.loads(config_file.read())
    return assoc(fixture_config, 'datadir', datadir)
@pytest.fixture(scope="module")
def geth_fixture_data(geth_binary):
    """Return the fixture data matching the installed geth version.

    Raises:
        AssertionError: if the geth major/minor version is unsupported.
    """
    from geth import get_geth_version
    version = get_geth_version(geth_executable=os.path.expanduser(geth_binary))
    if version.major == 1:
        if version.minor == 6:
            return GETH_16_FIXTURE
        elif version.minor == 7:
            return GETH_17_FIXTURE
        elif version.minor == 8:
            return load_fixture_data(GETH_181_DIRECTORY_NAME)
    # `assert False` would be stripped under `python -O`, letting the function
    # silently fall through and return None; raise explicitly instead (keeping
    # AssertionError so any callers catching it behave the same).
    raise AssertionError("Unsupported geth version")
@pytest.fixture(scope='module')
def datadir(tmpdir_factory, geth_fixture_data):
    """Copy the fixture data directory into a fresh temp dir and return it,
    so tests never mutate the checked-in fixture."""
    source_dir = absolute_datadir(geth_fixture_data['datadir'])
    destination = os.path.join(str(tmpdir_factory.mktemp('goethereum')), 'datadir')
    shutil.copytree(source_dir, destination)
    return destination
@pytest.fixture(scope='module')
def genesis_file(datadir):
    """Path to the genesis.json inside the (copied) data directory."""
    return os.path.join(datadir, 'genesis.json')
@pytest.fixture(scope='module')
def geth_process(geth_binary, datadir, genesis_file, geth_command_arguments):
    """Initialize the datadir from the genesis file, run geth for the
    duration of the module, then shut it down and print its output."""
    # `geth init` writes the genesis block into the fresh datadir.
    init_datadir_command = (
        geth_binary,
        '--datadir', str(datadir),
        'init',
        str(genesis_file),
    )
    subprocess.check_output(
        init_datadir_command,
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    proc = subprocess.Popen(
        geth_command_arguments,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        bufsize=1,
    )
    try:
        yield proc
    finally:
        # Always stop the node, then drain and print its output so failures
        # in dependent tests are easier to diagnose.
        kill_proc_gracefully(proc)
        output, errors = proc.communicate()
        print(
            "Geth Process Exited:\n"
            "stdout:{0}\n\n"
            "stderr:{1}\n\n".format(
                to_text(output),
                to_text(errors),
            )
        )
@pytest.fixture(scope='module')
def coinbase(web3):
    """The node's coinbase (etherbase) account."""
    account = web3.eth.coinbase
    return account
@pytest.fixture(scope="module")
def math_contract_deploy_txn_hash(geth_fixture_data):
    """Transaction hash of the math contract deployment (from fixture data)."""
    txn_hash = geth_fixture_data['math_deploy_txn_hash']
    return txn_hash
@pytest.fixture(scope="module")
def math_contract(web3, math_contract_factory, geth_fixture_data):
    """Math contract instance bound to its pre-deployed fixture address."""
    deployed_address = geth_fixture_data['math_address']
    return math_contract_factory(address=deployed_address)
@pytest.fixture(scope="module")
def math_contract_address(math_contract, address_conversion_func):
    """Math contract address in the parametrized address representation."""
    raw_address = math_contract.address
    return address_conversion_func(raw_address)
@pytest.fixture(scope="module")
def emitter_contract(web3, emitter_contract_factory, geth_fixture_data):
    """Emitter contract instance bound to its pre-deployed fixture address."""
    deployed_address = geth_fixture_data['emitter_address']
    return emitter_contract_factory(address=deployed_address)
@pytest.fixture(scope="module")
def emitter_contract_address(emitter_contract, address_conversion_func):
    """Emitter contract address in the parametrized address representation."""
    raw_address = emitter_contract.address
    return address_conversion_func(raw_address)
@pytest.fixture
def unlocked_account(web3, unlockable_account, unlockable_account_pw):
    """Yield the unlockable account in an unlocked state; re-lock it after
    the test finishes."""
    personal = web3.personal
    personal.unlockAccount(unlockable_account, unlockable_account_pw)
    yield unlockable_account
    personal.lockAccount(unlockable_account)
@pytest.fixture(scope='module')
def unlockable_account_pw(geth_fixture_data):
    """Keyfile password for the unlockable account."""
    password = geth_fixture_data['keyfile_pw']
    return password
@pytest.fixture(scope="module")
def unlockable_account(web3, coinbase):
    """The account that can be unlocked in tests (the node's coinbase)."""
    account = coinbase
    yield account
@pytest.fixture()
def unlockable_account_dual_type(unlockable_account, address_conversion_func):
    """Unlockable account in the parametrized address representation."""
    converted = address_conversion_func(unlockable_account)
    return converted
# `pytest.yield_fixture` is deprecated; plain `pytest.fixture` supports yield
# fixtures and is what the sibling `unlocked_account` fixture already uses.
@pytest.fixture
def unlocked_account_dual_type(web3, unlockable_account_dual_type, unlockable_account_pw):
    """Yield the dual-type account in an unlocked state; re-lock it after
    the test finishes."""
    web3.personal.unlockAccount(unlockable_account_dual_type, unlockable_account_pw)
    yield unlockable_account_dual_type
    web3.personal.lockAccount(unlockable_account_dual_type)
@pytest.fixture(scope="module")
def funded_account_for_raw_txn(geth_fixture_data):
    """Pre-funded account used for raw-transaction tests; sanity-check that
    the fixture value is a checksummed address."""
    funded = geth_fixture_data['raw_txn_account']
    assert is_checksum_address(funded)
    return funded
@pytest.fixture(scope="module")
def empty_block(web3, geth_fixture_data):
    """A block known (from fixture data) to contain no transactions."""
    fetched = web3.eth.getBlock(geth_fixture_data['empty_block_hash'])
    assert is_dict(fetched)
    return fetched
@pytest.fixture(scope="module")
def block_with_txn(web3, geth_fixture_data):
    """A block known (from fixture data) to contain a transaction."""
    fetched = web3.eth.getBlock(geth_fixture_data['block_with_txn_hash'])
    assert is_dict(fetched)
    return fetched
@pytest.fixture(scope="module")
def mined_txn_hash(geth_fixture_data):
    """Hash of a transaction that is already mined in the fixture chain."""
    txn_hash = geth_fixture_data['mined_txn_hash']
    return txn_hash
@pytest.fixture(scope="module")
def block_with_txn_with_log(web3, geth_fixture_data):
    """A block containing a transaction that emitted a log."""
    fetched = web3.eth.getBlock(geth_fixture_data['block_hash_with_log'])
    assert is_dict(fetched)
    return fetched
@pytest.fixture(scope="module")
def txn_hash_with_log(geth_fixture_data):
    """Hash of the transaction that emitted a log."""
    txn_hash = geth_fixture_data['txn_hash_with_log']
    return txn_hash
|
# Generated by Django 3.0.5 on 2020-04-18 23:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the app: creates the Session,
    # Round and Play tables. Generated code — prefer new migrations over
    # editing this one.
    initial = True
    dependencies = [
        # The user model is swappable, so depend on whatever AUTH_USER_MODEL
        # points at rather than a concrete app/model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # A game session with its set of participating users.
        migrations.CreateModel(
            name='Session',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
                ('modified_at', models.DateTimeField(auto_now=True, verbose_name='Modified at')),
                ('participants', models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='Participants')),
            ],
            options={
                'verbose_name': 'Session',
                'verbose_name_plural': 'Sessions',
            },
        ),
        # One round of a session; a letter may only be played once per
        # session (unique_together on letter+session).
        migrations.CreateModel(
            name='Round',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('letter', models.TextField(max_length=1, verbose_name='Letter')),
                ('session', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basta.Session', verbose_name='Session')),
            ],
            options={
                'verbose_name': 'Round',
                'verbose_name_plural': 'Rounds',
                'unique_together': {('letter', 'session')},
            },
        ),
        # A single user's answers for one round, plus their (non-editable)
        # score for it.
        migrations.CreateModel(
            name='Play',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=15, verbose_name='Name')),
                ('surname', models.CharField(max_length=15, verbose_name='Surname')),
                ('plant', models.CharField(max_length=15, verbose_name='Flower / Fruit / Vegetable')),
                ('animal', models.CharField(max_length=15, verbose_name='Animal')),
                ('place', models.CharField(max_length=15, verbose_name='City / Country')),
                ('film', models.TextField(max_length=40, verbose_name='Movie / Series')),
                ('obj', models.CharField(max_length=15, verbose_name='Object')),
                ('brand', models.CharField(max_length=15, verbose_name='Brand')),
                ('score', models.IntegerField(default=0, editable=False)),
                ('cur_round', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basta.Round')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Play',
                'verbose_name_plural': 'Plays',
            },
        ),
    ]
|
# Read a target value, then keep reading numbers until their running sum
# reaches (or exceeds) the target, and print the final sum.
start_number = int(input())
sum_numbers = 0
# The original used `while ... else:` here; since the loop body contains no
# `break`, the `else` branch always ran, so a plain statement after the loop
# is equivalent and not misleading.
while sum_numbers < start_number:
    sum_numbers += int(input())
print(sum_numbers)
|
import os
import sys
import unittest
from torch.testing._internal.common_utils import GRAPH_EXECUTOR, ProfilingMode, \
num_profiled_runs, enable_profiling_mode_for_profiling_tests
from torch.testing._internal.common_jit import check_against_reference
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, disable_autodiff_subgraph_inlining
from torch.testing import FileCheck
from typing import List, Tuple, Optional
# This module is collected by the main JIT test suite; running it directly
# would bypass the shared harness, so fail fast with instructions.
if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.SIMPLE, "Simple Executor doesn't support gradients")
class TestAutodiffSubgraphSlicing(JitTestCase):
# TODO: It is better if we can test directly on graphs instead of the current
# end-to-end fashion.
def _perform_ad_subgraph_slicing(self, fn, *input_sizes):
with disable_autodiff_subgraph_inlining():
with enable_profiling_mode_for_profiling_tests():
ge = torch.jit.script(fn)
inputs = [torch.randn(size, requires_grad=True) for size in input_sizes]
ge(*inputs, profile_and_replay=True)
return ge.graph_for(*inputs)
def assertGraphSize(self, graph, size):
nodes = list(filter(lambda n: (n.kind() != "prim::BailOut" and
n.kind() != "prim::BailoutTemplate" and
n.kind() != "prim::TypeCheck" and
n.kind() != "prim::RequiresGradCheck"),
graph.nodes()))
self.assertEqual(len(list(nodes)), size)
def test_chunk_constant_script_ad(self):
@torch.jit.script
def func(x):
x1, x2 = torch.chunk(x, 2)
return (x1, x2)
input = torch.rand(6, 10).requires_grad_()
with disable_autodiff_subgraph_inlining():
with enable_profiling_mode_for_profiling_tests():
output = func(input, profile_and_replay=True)
self.assertAutodiffNode(func.graph_for(input), True, ['prim::ConstantChunk'], [])
    @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "This threshold is only valid for Profiling Executor")
    def test_diff_graph_inline_threshold(self):
        """A differentiable subgraph is only kept if it has at least two
        nodes: `foo` (two sigmoids) gets one DifferentiableGraph, `bar`
        (a single sigmoid) gets none."""
        with enable_profiling_mode_for_profiling_tests():
            NUM_RUNS = 1
            with num_profiled_runs(NUM_RUNS):
                @torch.jit.script
                def foo(x):
                    # two nodes should be fused
                    # see https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/runtime/graph_executor_impl.h#L49
                    return torch.sigmoid(torch.sigmoid(x))
                @torch.jit.script
                def bar(x):
                    # two nodes should NOT be fused
                    return torch.sigmoid(x)
                input = torch.rand([4, 4], requires_grad=True)
                # Each function is called twice: presumably one profiled run
                # (NUM_RUNS = 1) plus one run of the optimized plan — confirm.
                foo(input)
                foo(input)
                bar(input)
                bar(input)
                print(foo.graph_for(input))
                self.assertGraphContainsExactly(foo.graph_for(input), 'prim::DifferentiableGraph', 1)
                self.assertGraphContainsExactly(bar.graph_for(input), 'prim::DifferentiableGraph', 0)
    def test_bias_as_module_attr(self):
        """Autodiff of Linear must handle both a present and an absent bias
        when the bias is a module attribute."""
        with enable_profiling_mode_for_profiling_tests():
            class M(torch.nn.Module):
                def __init__(self, has_bias):
                    super(M, self).__init__()
                    self.ll = torch.nn.Linear(10, 10, has_bias)
                def forward(self, x, y):
                    return self.ll(x + y) * x + y
            x = torch.rand(10, 10, requires_grad=True)
            no_bias = M(False)
            scripted_no_bias = torch.jit.script(no_bias)
            # Three warm-up calls — presumably to exhaust the profiling runs
            # before checking against the reference; confirm.
            scripted_no_bias(x, x)
            scripted_no_bias(x, x)
            scripted_no_bias(x, x)
            has_bias = M(True)
            check_against_reference(self, scripted_no_bias, no_bias, lambda x: x, (x, x,), check_types=False)
            scripted_has_bias = torch.jit.script(has_bias)
            # Same warm-up sequence for the biased variant.
            scripted_has_bias(x, x)
            scripted_has_bias(x, x)
            scripted_has_bias(x, x)
            check_against_reference(self, scripted_has_bias, has_bias, lambda x: x, (x, x,), check_types=False)
def test_constructed_bias(self):
    """Autodiff when the linear() bias is computed inside the scripted function."""
    with enable_profiling_mode_for_profiling_tests():
        def method1(x, weight, b1, b2):
            bias = b1 * b2
            return torch.nn.functional.linear(x, weight, bias)
        N = 10
        x = torch.rand(N, N, requires_grad=True)
        weight = torch.rand(N, N, requires_grad=True)
        b1 = torch.rand(N, N, requires_grad=True)
        b2 = torch.rand(N, N, requires_grad=True)
        scripted = self.checkScript(method1, (x, weight, b1, b2))
        # check_types requires last_graph on scripted to be set, so we just skip it
        check_against_reference(self, scripted, method1, lambda x: x, (x, weight, b1, b2), check_types=False)
def test_bias_as_arg(self):
    """Autodiff with an Optional bias argument: both None and a real tensor."""
    with enable_profiling_mode_for_profiling_tests():
        def method1(x, weight, bias: Optional[torch.Tensor]):
            return torch.nn.functional.linear(x, weight, bias).relu() + 2
        N = 10
        x = torch.rand(N, N, requires_grad=True)
        weight = torch.rand(N, N, requires_grad=True)
        # First pass: bias absent (None).
        bias = None
        scripted = self.checkScript(method1, (x, weight, bias))
        # check_types requires last_graph on scripted to be set, so we just skip it
        check_against_reference(self, scripted, method1, lambda x: x, (x, weight, bias), check_types=False)
        # Second pass: bias present and differentiable.
        bias = torch.rand(N, N, requires_grad=True)
        scripted = self.checkScript(method1, (x, weight, bias))
        # check_types requires last_graph on scripted to be set, so we just skip it
        check_against_reference(self, scripted, method1, lambda x: x, (x, weight, bias), check_types=False)
def test_requires_grad_for_tensor_list(self):
    """requires_grad must propagate through tensor lists returned from a scripted fn."""
    with enable_profiling_mode_for_profiling_tests():
        # output & var_list[0] should have requires_grad set to True
        def func(input0: torch.Tensor, input1: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
            var_list = [input0, input1]
            var = torch.cat(var_list)
            output = var + 1.0
            return output, var_list
        jit_f = torch.jit.script(func)
        input0 = torch.randn((2,), requires_grad=True)
        input1 = torch.randn((2,))
        output_ref = func(input0, input1)
        # Run twice: the second iteration exercises the profiled/optimized plan.
        for i in range(2):
            output = jit_f(input0, input1)
            assert(output_ref[0].requires_grad == output[0].requires_grad)
            assert(output_ref[1][0].requires_grad == output[1][0].requires_grad)
            assert(output_ref[1][1].requires_grad == output[1][1].requires_grad)
@unittest.skip("disable until we properly handle tensor lists with undefined gradients")
def test_differentiable_graph_ops_requires_grad(self):
    """DifferentiableGraph outputs must carry the same requires_grad as eager mode.

    Fixed the typo ("property" -> "properly") in the skip reason.
    """
    x = torch.randn(8, 2, dtype=torch.float).requires_grad_()
    y = torch.randn(8, 2, dtype=torch.float)
    def t(x : torch.Tensor, y : torch.Tensor, flag : bool):
        o = x + 1.0
        o1 = torch.relu(o)
        o = y + 1.5
        o2 = torch.relu(o)
        o3 = o1 + o2
        if flag:
            o = o1 + 1.0
            oo1 = torch.relu(o)
            o = o2 + 2.5
            oo2 = torch.relu(o)
            oo3 = oo1 + oo2
        else:
            o = o1 * 1.0
            oo1 = torch.relu(o)
            o = o2 * 2.0
            oo2 = torch.relu(o)
            oo3 = oo1 + oo2
        return o1, o2, o3, oo1, oo2, oo3
    with enable_profiling_mode_for_profiling_tests():
        t_jit = torch.jit.script(t)
        jit_o = t_jit(x, y, False)
        jit_o = t_jit(x, y, False)
        o = t(x, y, False)
        FileCheck().check("prim::DifferentiableGraph").run(t_jit.graph_for(x, y, False))
        # validate the differentiableGraphOps are marking proper requires_grad
        for oo, jit_oo in zip(o, jit_o):
            self.assertEqual(oo.requires_grad, jit_oo.requires_grad)
            self.assertEqual(oo, jit_oo)
        # one more run to trigger fusion
        jit_o = t_jit(x, y, False)
        for oo, jit_oo in zip(o, jit_o):
            self.assertEqual(oo.dtype, jit_oo.dtype)
            self.assertEqual(oo.requires_grad, jit_oo.requires_grad)
            self.assertEqual(oo, jit_oo)
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.PROFILING, "Simple Executor doesn't support gradients")
def test_prune_grad(self):
    """A non-differentiable input (bias) must be pruned from the backward graph."""
    @torch.jit.script
    def t(input, bias):
        return torch.nn.functional.relu(input + bias)
    input = torch.randn(2, 8, requires_grad=True)
    bias = torch.randn(8, requires_grad=False)  # bias does NOT require grad
    NUM_PROFILED_RUNS = 1
    with num_profiled_runs(NUM_PROFILED_RUNS):
        WARMUP = 3  # 2 runs to reach backward + 1 to optimize it
        for x in range(WARMUP):
            o = t(input, bias)
            o.sum().backward()
        # Dig the backward graph out of the executor's debug state and
        # verify it produces a gradient for only one input.
        fwd_plan = list(t.get_debug_state().execution_plans.values())[0]
        bwd_graph = list(fwd_plan.code.grad_executor_states()[0].execution_plans.values())[0].graph
        tup = next(bwd_graph.outputs())
        self.assertEqual(len(list(tup.node().inputs())), 1)
def test_simple_merge(self):
    """Two chained differentiable ops merge into one DifferentiableGraph."""
    # o --> o
    def fn(x, y, z):
        a = x * y
        b = a * z
        return b
    graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
    self.assertGraphSize(graph, 1)
    self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
def test_simple_no_merge(self):
    """A non-differentiable op must stay outside the DifferentiableGraph."""
    # o: autodiff supported. x: not autodiff supported.
    # o --> x
    def fn(x, y, z):
        a = x * y
        b = torch.zeros([abs(int(y))])
        return a, b
    graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
    g_str = str(graph)
    # Int/zeros remain in the outer graph; mul is inside the subgraph.
    FileCheck().check("aten::Int").check("aten::zeros").check_not("aten::mul").run(g_str[0:g_str.find("return")])
    self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
def test_does_not_merge_unrelated(self):
    """Independent differentiable ops form separate DifferentiableGraphs."""
    # o  o
    def fn(w, x, y, z):
        a = x * y
        b = w * z
        return a, b
    graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1, 1)
    self.assertGraphSize(graph, 3)
    self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 2)
def test_merges_without_cycles(self):
    """A diamond-shaped dependency merges into one subgraph without creating cycles."""
    # o --> o --> o
    # |           ^
    #  \_________/
    def fn(w, x, y):
        a = w * x
        b = a * y
        c = a * b
        return c
    graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
    self.assertGraphSize(graph, 1)
    self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
def test_merges_dense(self):
    """Cross-wired producers/consumers still merge into a single subgraph."""
    #   o      o
    #   |\    /|
    #   | \  / |
    #   |  /\  |
    #   vv    vv
    #   o      o
    def fn(x, y):
        a, b = x.chunk(2)
        c, d = y.chunk(2)
        return a + c, b + d
    graph = self._perform_ad_subgraph_slicing(fn, 2, 2)
    self.assertGraphSize(graph, 2)
    self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
def test_does_not_create_cycles(self):
    """Merging across a non-differentiable op would create a cycle, so two subgraphs remain."""
    # o --> x --> o
    # |           ^
    #  \_________/
    def fn(w, x, y):
        a = w * x
        b = torch.zeros(abs(int(a)))
        c = a * b
        return c
    graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
    self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 2)
def test_merges_up(self):
    """A later differentiable op merges upward past an unrelated non-differentiable op."""
    # o --> x     o
    # |           ^
    #  \_________/
    def fn(w, x, y, z):
        a = w * x
        b = torch.zeros(abs(int(y)))
        c = a * z
        return b, c
    graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1, 1)
    g_str = str(graph)
    FileCheck().check_not("aten::add").run(g_str[0:g_str.find("return")])
    self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
def test_merges_down(self):
    """An earlier differentiable op merges downward past a non-differentiable op.

    Removed the unused `num_nodes` local (leftover from a deleted
    assertGraphSize check).
    """
    # o     x --> o
    # |           ^
    #  \_________/
    def fn(v, w, x, y):
        a = v * w
        b = torch.ones(int(y))
        c = b * a
        return a, c
    graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1, 1)
    # add moved down
    g_str = str(graph)
    FileCheck().check_not("aten::add").run(g_str[0:g_str.find("return")])
    self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
def test_respects_lexical_scoping(self):
    """Ops inside an if-block must not merge with ops outside it."""
    def fn(x, k):
        y = x * 1.1
        if bool(k):
            k = k + y
        z = y * k
        return z, k
    graph = self._perform_ad_subgraph_slicing(fn, 1, 1)
    # We should not have combined the two multiplications into
    # the same group; they should each be a separate DiffGraph
    self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 3)
def test_merge_respects_aliasing(self):
    """In-place mutation of a view (z1.add_) must keep aliased ops out of the subgraph."""
    def fn(x, k, cond):
        y = x * 1.1
        y = y * k
        y = y * 2.2
        if bool(cond):
            z1 = y[0]
            z2 = y[1]
            z1.add_(3)
            out = z2 + k + 3.3
            out = out * out
            return out
    graph = self._perform_ad_subgraph_slicing(fn, [2, 2], [2, 2], 1)
    # z2 did not get merged into the subgraph
    FileCheck().check("prim::If").check("aten::select").check_next("aten::select")\
        .check_next("aten::add_").check("Differentiable").run(graph)
    self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 2)
|
# -*- coding: utf-8 -*-
"""
@author: KBK
"""
import os
import random
import sys

from editdistance import eval as lev
from scipy.stats import norm
def readLyrics(idir, fw_freqdict=None, bw_freqdict=None):
    """Build forward and backward bigram probability tables from lyric files.

    Reads every file in `idir`, normalizes the text with fixChars(), and
    counts word-bigram occurrences in both directions.

    Returns:
        (fw_probdict, bw_probdict) where fw_probdict maps
        word -> {successor: probability} and bw_probdict maps
        word -> {predecessor: probability}.

    Fixes: module-level `os` usage now works because `os` is imported at
    the top of the file; mutable default arguments were replaced with
    None sentinels; the backward table is now updated for every pair
    (the original nested if/else could skip bw counts when `curr` was
    new but `succ` was already known).
    """
    if fw_freqdict is None:
        fw_freqdict = {}
    if bw_freqdict is None:
        bw_freqdict = {}
    for fl in os.listdir(idir):
        with open(os.path.join(idir, fl), "r") as f:
            corpus = fixChars(f.read())
        lines = corpus.replace("\n\n", "\n").strip().lower().split("\n")
        for l in lines:
            words = l.strip().split()
            # set() keeps each (curr, succ) pair unique per song so a
            # repeated chorus does not dominate the counts.
            for curr, succ in set(zip(words[:-1], words[1:])):
                # Count the pair independently in both directions.
                fw_freqdict.setdefault(curr, {})
                fw_freqdict[curr][succ] = fw_freqdict[curr].get(succ, 0) + 1
                bw_freqdict.setdefault(succ, {})
                bw_freqdict[succ][curr] = bw_freqdict[succ].get(curr, 0) + 1
    # Normalize frequencies into probabilities.
    fw_probdict = {}
    for curr, curr_dict in fw_freqdict.items():
        curr_total = sum(curr_dict.values())
        fw_probdict[curr] = {s: float(c) / curr_total for s, c in curr_dict.items()}
    bw_probdict = {}
    for succ, succ_dict in bw_freqdict.items():
        succ_total = sum(succ_dict.values())
        bw_probdict[succ] = {p: float(c) / succ_total for p, c in succ_dict.items()}
    return fw_probdict, bw_probdict
def markov_next(curr, prob_dict):
    """Sample a successor of `curr` from the forward probability table.

    Unknown words fall back to a uniformly random known word.

    Fix: the original could fall off the end of the loop and implicitly
    return None when floating-point rounding left the cumulative
    probability just below `rand_prob`; we now return the last candidate.
    """
    if curr not in prob_dict:
        return random.choice(list(prob_dict.keys()))
    succ_probs = prob_dict[curr]
    rand_prob = random.random()
    curr_prob = 0.0
    for succ in succ_probs:
        curr_prob += succ_probs[succ]
        if rand_prob <= curr_prob:
            return succ
    # Rounding guard: probabilities may sum to slightly under 1.0.
    return succ
def markov_prev(curr, prob_dict):
    """Sample a predecessor of `curr` from the backward probability table.

    Mirrors markov_next(); unknown words fall back to a uniformly random
    known word.

    Fix: the original could implicitly return None when floating-point
    rounding left the cumulative probability just below `rand_prob`; we
    now return the last candidate.
    """
    if curr not in prob_dict:
        return random.choice(list(prob_dict.keys()))
    pred_probs = prob_dict[curr]
    rand_prob = random.random()
    curr_prob = 0.0
    for prev in pred_probs:
        curr_prob += pred_probs[prev]
        if rand_prob <= curr_prob:
            return prev
    # Rounding guard: probabilities may sum to slightly under 1.0.
    return prev
def makeSerdarOrtac(curr, fw_probdict, bw_probdict, nlines = 4):
    """Generate roughly `nlines` lines of lyrics seeded with the word `curr`.

    The first and last lines are grown forwards with markov_next().
    Middle lines start from a word chosen for Levenshtein similarity to
    the previous line's last word (a loose rhyme) and are then grown
    BACKWARDS with markov_prev().  Line length is sampled against a
    normal CDF centred on 4 words.  Returns one space-joined string with
    "\n" tokens marking line breaks.
    """
    lyrics = [curr]
    l = 1  # current line number
    w = len(lyrics)  # number of words in the current line so far
    while l <= nlines:
        if l == 1:
            cur_rand = random.random()
            threshold = norm.cdf(w, loc=4, scale=1.2)  # we want roughly 4 words per line
            if cur_rand > threshold:
                lyrics.append(markov_next(lyrics[-1], fw_probdict))
                w += 1
            else:
                lyrics.append("\n")
                w = 0
                l += 1
        elif l < nlines:
            if lyrics[-1] == "\n":
                # New line: pick an ending word similar to the previous
                # line's last word; relax the bar over ~500 tries, then
                # accept anything.
                prev = random.choice(list(fw_probdict.keys()))
                sim = levenshteinSimilarity(lyrics[-2], prev)
                eps = 0
                while sim < 0.75 - eps + random.random() / 4 and eps < 0.50:  # 500 trials else random
                    prev = random.choice(list(fw_probdict.keys()))
                    sim = levenshteinSimilarity(lyrics[-2], prev)
                    eps += 0.001
                lyrics.append(prev)
                w += 1
            else:
                cur_rand = random.random()
                threshold = norm.cdf(w, loc=4, scale=1.2)  # we want roughly 4 words per line
                if cur_rand > threshold:
                    # Grow the line backwards from its ending word.
                    lyrics.insert(-w, markov_prev(lyrics[-w], bw_probdict))
                    w += 1
                else:
                    lyrics.append("\n")
                    w = 0
                    l += 1
        elif l == nlines:
            cur_rand = random.random()
            threshold = norm.cdf(w, loc=4, scale=1.2)  # we want roughly 4 words per line
            if cur_rand > threshold:
                lyrics.append(markov_next(lyrics[-1], fw_probdict))
                w += 1
            else:
                lyrics.append("\n")
                w = 0
                l += 1
    return " ".join(lyrics)
def fixChars(text):
    """Normalize raw lyric text: blank out punctuation, drop digits, fold accents.

    NOTE(review): the literal "x" is stripped along with punctuation —
    presumably intentional for Turkish lyrics; confirm before reuse.
    """
    for mark in [".", ",", "!", "?", ":", ";", "x", "(", ")"]:
        text = text.replace(mark, " ")
    for digit in "0123456789":
        text = text.replace(digit, "")
    replacements = [("â", "a"), ("û", "u"), ("Ş", "ş"), ("Ç", "ç"), ("Ü", "ü"), ("İ", "i"), ("Ö", "ö")]
    for old, new in replacements:
        text = text.replace(old, new)
    return text
def levenshteinSimilarity(a, b):
    """Return normalized edit-distance similarity of two strings, in [0, 1].

    1.0 means identical; 0.0 means completely different.

    Fix: guards against ZeroDivisionError when both strings are empty
    (two empty strings are identical, so return 1.0).
    """
    longest = max(len(a), len(b))
    if longest == 0:
        return 1.0
    return (longest - lev(a, b)) / float(longest)
if __name__ == '__main__':
    import os
    # Expected layout: ../serdarortac/lyrics/*.txt relative to this script.
    # os.path.join avoids the hard-coded Windows backslash of the original.
    os.chdir(os.path.join("..", "serdarortac"))
    idir = os.path.join(os.getcwd(), "lyrics")
    ortac_fwprobdict, ortac_bwprobdict = readLyrics(idir)
    # Fix: `raw_input` only exists on Python 2; `input` is the Python 3
    # spelling (the rest of the file already uses Python 3 print()).
    start_word = input("What do you want to start your song with?\n > ")
    print("Here's your Serdar Ortac lyrics:\n")
    print(makeSerdarOrtac(start_word, ortac_fwprobdict, ortac_bwprobdict))
|
// List component: renders each task from local state as a <task> item.
Vue.component('task-list', {
    // Fix: added `:key` so Vue has a stable identity per row for list
    // diffing (required by vue/require-v-for-key; avoids patch bugs).
    template: `
        <ul>
            <task v-for="(task, index) in tasks" :key="index">{{ task.task }}</task>
        </ul>
    `,
    // `data` must be a function so each component instance gets its own copy.
    data() {
        return {
            tasks: [
                { task: 'Go to the store', complete: true },
                { task: 'Collect mail', complete: false },
                { task: 'Buy pet food', complete: false },
                { task: 'Wash car', complete: false },
                { task: 'Buy birthday card', complete: false },
            ],
        };
    },
});
// Presentational component: renders its slotted content as a list item.
Vue.component('task', {
    template: '<li><slot></slot></li>'
});
// Root instance: mounts the component tree onto the #example element.
new Vue({
    el: '#example'
});
|
/*
Software Uart (Lite)
By Liyanboy74
https://github.com/liyanboy74
*/
#include "main.h"

/* Number of software UART channels managed by this driver. */
#define Number_Of_SoftUarts 2
/* Per-channel transmit / receive buffer sizes, in bytes. */
#define SoftUartTxBufferSize 32
#define SoftUartRxBufferSize 64

/* Status code returned by the SoftUart* API functions. */
typedef enum {
    SoftUart_OK,
    SoftUart_Error
} SoftUartState_E;

/* Raw TX/RX byte storage for one software UART channel. */
typedef struct {
    uint8_t Tx[SoftUartTxBufferSize];
    uint8_t Rx[SoftUartRxBufferSize];
} SoftUartBuffer_S;

/* Per-channel state for one bit-banged UART. */
typedef struct {
    __IO uint8_t TxNComplated;       /* nonzero while a TX frame is in flight
                                        (name misspelled; kept for API compat) */
    uint8_t TxEnable;                /* TX active flag */
    uint8_t RxEnable;                /* RX active flag */
    uint8_t TxBitShift, TxBitCounter; /* current TX bit position / count */
    uint8_t RxBitShift, RxBitCounter; /* current RX bit position / count */
    uint8_t TxIndex, TxSize;         /* progress through the TX buffer */
    uint8_t RxIndex;                 /* write position in the RX buffer */
    SoftUartBuffer_S *Buffer;        /* backing TX/RX storage */
    GPIO_TypeDef *TxPort;            /* TX GPIO port/pin */
    uint16_t TxPin;
    GPIO_TypeDef *RxPort;            /* RX GPIO port/pin */
    uint16_t RxPin;
    uint8_t RxTimingFlag;            /* RX bit-sampling bookkeeping */
    uint8_t RxBitOffset;
} SoftUart_S;

/* Timer used to pace bit sampling (defined elsewhere). */
extern TIM_HandleTypeDef htim3;

// SoftUartHandler must be called from a timer interrupt every 1*(1/BR)
// e.g. if BR=9600 then 1*(1/9600) = 104.16 us
void SoftUartHandler(void);
/* Busy-wait until the channel's TX frame finishes.
   (Name misspelled "Complate"; kept for API compatibility.) */
void SoftUartWaitUntilTxComplate(uint8_t SoftUartNumber);
/* Number of received bytes available.
   (Name misspelled "Alavailable"; kept for API compatibility.) */
uint8_t SoftUartRxAlavailable(uint8_t SoftUartNumber);
SoftUartState_E SoftUartPuts(uint8_t SoftUartNumber, uint8_t *Str, uint8_t Len);
SoftUartState_E SoftUartEnableRx(uint8_t SoftUartNumber);
SoftUartState_E SoftUartDisableRx(uint8_t SoftUartNumber);
SoftUartState_E SoftUartInit(uint8_t SoftUartNumber, GPIO_TypeDef *TxPort, uint16_t TxPin, GPIO_TypeDef *RxPort, uint16_t RxPin);
SoftUartState_E SoftUartReadRxBuffer(uint8_t SoftUartNumber, uint8_t *Buffer, uint8_t Len);

// Call from the RX falling-edge external interrupt.
// ID is the software-UART RX channel number (0 to Number_Of_SoftUarts).
void SoftUartRxHelper(uint8_t ID);
|
// Copyright 2020 Phyronnaz
#pragma once
#include "CoreMinimal.h"
#include "VoxelMinimal.h"
#include "IVoxelPool.generated.h"
// Categories of asynchronous work submitted to a voxel thread pool.
// Used to pick per-type scheduling priorities (see the namespaces below).
UENUM(BlueprintType)
enum class EVoxelTaskType : uint8
{
	// Meshing of chunks that don't have collisions and are not visible
	ChunksMeshing,
	// Meshing of not visible chunks that have collisions
	CollisionsChunksMeshing,
	// Meshing of visible chunks that don't have collisions
	VisibleChunksMeshing,
	// Meshing of visible chunks that have collisions
	VisibleCollisionsChunksMeshing,
	// PhysX collision cooking, once the meshing task is done
	CollisionCooking,
	// Height spawners
	FoliageBuild,
	// Building of the instanced mesh components culling tree, used for spawners
	// The meshes are not updated until the build is done
	HISMBuild,
	// Async edit functions such as AddSphereAsync
	AsyncEditFunctions,
	// Mesh merge tasks are used after meshing to create the render buffers
	// Note: they are also used if bMergeChunks = false!
	MeshMerge,
	// The render octree is used to determine the LODs to display
	// Should be done as fast as possible to start meshing tasks
	RenderOctree
};
// Default priority *category* per task type. Values range [Min, Max];
// presumably higher categories are scheduled first — confirm against the
// pool implementation.
namespace EVoxelTaskType_DefaultPriorityCategories
{
	enum Type : int32
	{
		Min = 0,
		Max = 1000000,

		ChunksMeshing = 0,
		CollisionsChunksMeshing = 1,
		VisibleChunksMeshing = 10,
		VisibleCollisionsChunksMeshing = 100,
		CollisionCooking = 100,
		FoliageBuild = 100,
		HISMBuild = 1000,
		AsyncEditFunctions = 50,
		MeshMerge = 100000,
		RenderOctree = 1000000
	};
}

// Fine-grained priority *offset* applied within a category.
namespace EVoxelTaskType_DefaultPriorityOffsets
{
	enum Type : int32
	{
		ChunksMeshing = 0,
		CollisionsChunksMeshing = 0,
		VisibleChunksMeshing = 0,
		// By default, do collision cooking slightly before collision meshing, and foliage slightly after
		VisibleCollisionsChunksMeshing = 0,
		CollisionCooking = +32,
		FoliageBuild = -32,
		HISMBuild = 0,
		AsyncEditFunctions = 0,
		MeshMerge = 0,
		RenderOctree = 0
	};
}

class FVoxelQueuedThreadPool;
class FQueuedThreadPool;
class IVoxelQueuedWork;
class UWorld;

// Abstract interface for the thread pool that executes voxel tasks.
// Pools can be registered globally or per-UWorld via the static setters.
class VOXEL_API IVoxelPool
{
public:
	virtual ~IVoxelPool() {}

	//~ Begin IVoxelPool Interface
	// Queue a single task (or a batch) of the given type.
	virtual void QueueTask(EVoxelTaskType Type, IVoxelQueuedWork* Task) = 0;
	virtual void QueueTasks(EVoxelTaskType Type, const TArray<IVoxelQueuedWork*>& Tasks) = 0;
	// Number of tasks currently queued.
	virtual int32 GetNumTasks() const = 0;
	//~ End IVoxelPool Interface

public:
	// Lookup: per-world pool, global pool, or per-world falling back to global.
	static TVoxelSharedPtr<IVoxelPool> GetWorldPool(UWorld* World);
	static TVoxelSharedPtr<IVoxelPool> GetGlobalPool();
	static TVoxelSharedPtr<IVoxelPool> GetPoolForWorld(UWorld* World);

public:
	// Registration; `Creator` identifies who installed the pool.
	static void SetWorldPool(UWorld* World, const TVoxelSharedRef<IVoxelPool>& Pool, const FString& Creator);
	static void SetGlobalPool(const TVoxelSharedRef<IVoxelPool>& Pool, const FString& Creator);

public:
	static void DestroyWorldPool(UWorld* World);
	static void DestroyGlobalPool();
	static void Shutdown();

private:
	static TMap<TWeakObjectPtr<UWorld>, TVoxelSharedPtr<IVoxelPool>> WorldsPools;
	static TVoxelSharedPtr<IVoxelPool> GlobalPool;
};
|
/**
 * Extract the full-name portion of an identity string of the form
 * "<id>:<fullName>".
 *
 * @param {string} identityStr - e.g. "42:Ada Lovelace"
 * @returns {string|undefined} the segment between the first and second
 *   ':' (or to the end), or undefined when the string contains no ':'.
 */
export default function fullNameFromIdentityString(identityStr) {
  const [, fullName] = identityStr.split(':');
  return fullName;
}
|
import React, { Component } from "react";
import Fade from "@material-ui/core/Fade";
import { PropTypes } from "prop-types";
import ReactMarkdown from "react-markdown";
import SectionHeader from "./SectionHeader";
import appState from "../store/appState";
import { getListOf } from "./util";
import { observer } from "mobx-react";
import { toJS } from "mobx";
// Renders a single education entry: name, date range, and markdown notes.
class EducationItem extends Component {
  // Format "start to end", appending "(expected)" while still enrolled.
  getDate() {
    let result = `${this.props.startDate} to ${this.props.endDate}`;
    if (this.props.isCurrent) {
      result += " (expected)";
    }
    return result;
  }
  render() {
    return (
      <section className="education-item item">
        <h3 className="education-name name">{this.props.name}</h3>
        <div className="education-date date">{this.getDate()}</div>
        <ReactMarkdown
          className="education-notes notes markdown-body"
          source={this.props.notes.join("\n")}
        />
      </section>
    );
  }
}
// Section listing all education entries from the MobX appState store.
// Renders an empty placeholder until the data has loaded, and nothing
// at all when the section is toggled off (appState.education.show).
class Education extends Component {
  render() {
    if (!toJS(appState.education.data)) {
      return <div />;
    } else {
      return (
        appState.education.show && (
          <Fade in={true} timeout={500}>
            <section className="education" id="education">
              <SectionHeader title={"Education"} />
              {getListOf(
                EducationItem,
                toJS(appState.education.data),
                appState.featured
              )}
            </section>
          </Fade>
        )
      );
    }
  }
}
// Runtime prop validation for EducationItem (all props optional).
EducationItem.propTypes = {
  startDate: PropTypes.string,
  endDate: PropTypes.string,
  isCurrent: PropTypes.bool,
  name: PropTypes.string,
  notes: PropTypes.array,
};

// Wrapped in `observer` so MobX re-renders the section on state changes.
export default observer(Education);
|
# Life-like cellular-automaton rule string (B3/S23 is Conway's Life:
# born with 3 neighbours, survives with 2 or 3).
RULES = 'B3/S23'
# Grid dimensions, in cells (16:9 aspect ratio).
WIDTH = 160
HEIGHT = 90
CELL_SIZE = 8  # for windowed mode
# Fraction of cells alive in the initial random seeding.
DENSITY = .2
# Delay between generations, in seconds.
CYCLE_SLEEP = .1
# Show a frames-per-second readout.
DISPLAY_FPS = True
FULLSCREEN = False
|
/**
 * Toggle visibility of the #extra section, flipping the `.button`
 * label between 'More' (collapsed) and 'Less' (expanded).
 */
function toggle() {
  const button = document.querySelector('.button');
  const divExtra = document.getElementById('extra');
  const expanding = button.textContent === 'More';
  divExtra.style.display = expanding ? 'block' : 'none';
  button.textContent = expanding ? 'Less' : 'More';
}
|
//
//  MHHTTPService+Live.h
//  WeChat
//
//  Created by senba on 2017/10/19.
//  Copyright © 2017 CoderMikeHe. All rights reserved.
//  Live-streaming related API endpoints.

#import "MHHTTPService.h"
#import "MHLiveRoom.h"

@interface MHHTTPService (Live)

/// Sample request: https://live.9158.com/Room/GetHotLive_v2?cache=3&lat=22.54192103514200&lon=113.96939828211362&page=1&province=%E5%B9%BF%E4%B8%9C%E7%9C%81&type=0&useridx=61856069
/**
 Fetch the list of live rooms.

 @param useridx The current user's idstr
 @param type Category filter; type = 0 means "hot" (trending)
 @param page Which page of results to fetch
 @param lat Latitude; pass nil to use the device's located latitude
 @param lon Longitude; pass nil to use the device's located longitude
 @param province Province; pass nil to use the device's located province
 @return Returns a signal which will send complete, or error.
 */
- (RACSignal *)fetchLivesWithUseridx:(NSString *)useridx type:(NSInteger)type page:(NSInteger)page lat:(NSNumber *)lat lon:(NSNumber *)lon province:(NSString *)province;

@end
|
import pytest
import requests
from airtech_api.utils import success_messages
from mock import Mock
from airtech_api.utils.constants import PAYSTACK_INITIALIZE_URL
from airtech_api.utils.error_messages import serialization_errors
from airtech_api.booking.models import Booking
from django.utils import timezone
from tests.helpers.assertion_helpers import assert_missing_header, assert_invalid_token_format
from datetime import datetime
from dateutil.parser import parse
USER_BOOKING_URL = '/api/v1/user/bookings/{}/payment'
class RequestsResponseMock:
    """Minimal stand-in for a `requests` response object.

    Keyword arguments become the payload returned by ``json()``.
    Passing ``raise_exception=True`` raises from the constructor to
    simulate a transport failure.

    Fix: corrected the "occured" -> "occurred" typo in the default
    exception message.
    """

    def __init__(self,
                 status_code,
                 raise_exception=False,
                 exception_msg='An error occurred',
                 **kwargs):
        if raise_exception:
            raise Exception(exception_msg)
        self._json = kwargs
        self.status_code = status_code

    def json(self):
        # Mirrors requests.Response.json(): return the decoded payload.
        return self._json
@pytest.mark.django_db
class TestPayForFlightTicketRoute:
    """Tests for POST /api/v1/user/bookings/<id>/payment (payment initiation)."""

    def test_user_pays_for_booking_ticket_succeeds(
            self, client, valid_user_one_token, saved_valid_user_one,
            saved_bulk_inserted_bookings_for_user_one):
        """A valid, unpaid booking yields a Paystack authorization URL."""
        booking = saved_bulk_inserted_bookings_for_user_one[0]
        booking.paid_at = None
        booking.save()
        # Fix: removed an earlier verification-style mock dict that was
        # immediately overwritten (dead code).
        paystack_res_mock = {
            'status': True,
            'data': {
                'authorization_url': 'http://payment-link.com',
            }
        }
        paystack_response = RequestsResponseMock(200, False,
                                                 **paystack_res_mock)
        requests.post = Mock(return_value=paystack_response)
        client_callback = 'https://test.com'
        response = client.post(
            USER_BOOKING_URL.format(booking.id),
            content_type='application/json',
            HTTP_AUTHORIZATION='Bearer {}'.format(valid_user_one_token),
            data={
                'callbackURL': client_callback,
            })
        response_body = response.data
        response_data = response_body['data']
        (url, ), res = requests.post.call_args
        data = res['data']
        # Request assertions
        assert response.status_code == 200
        assert response_body['status'] == 'success'
        assert response_body['message'] == success_messages[
            'payment_url_created']
        assert response_data['paymentLink'] == paystack_res_mock['data'][
            'authorization_url']
        # NOTE(review): metadata is serialized as a Python literal, so
        # eval() round-trips it here; safe only because it's test data.
        metadata = eval(data['metadata'])
        # Post-call assertions
        assert url == PAYSTACK_INITIALIZE_URL
        assert data['amount'] == booking.ticket_price
        assert data['email'] == booking.created_by.email
        assert metadata['callbackURL'] == client_callback
        assert metadata['bookingId'] == str(booking.id)
        assert metadata['username'] == saved_valid_user_one.username
        assert metadata['email'] == saved_valid_user_one.email

    def test_user_pays_for_ticket_that_has_already_been_bought_fails(
            self, client, valid_user_one_token,
            saved_bulk_inserted_bookings_for_user_one):
        """Paying for an already-paid booking returns 400 without calling Paystack."""
        booking = saved_bulk_inserted_bookings_for_user_one[0]
        booking.paid_at = timezone.now()
        booking.save()
        paystack_res_mock = {
            'status': True,
            'data': {
                'authorization_url': 'http://payment-link.com',
            }
        }
        paystack_response = RequestsResponseMock(200, False,
                                                 **paystack_res_mock)
        requests.post = Mock(return_value=paystack_response)
        client_callback = 'https://test.com'
        response = client.post(
            USER_BOOKING_URL.format(booking.id),
            content_type='application/json',
            HTTP_AUTHORIZATION='Bearer {}'.format(valid_user_one_token),
            data={
                'callbackURL': client_callback,
            })
        response_body = response.data
        # Request assertions
        assert response.status_code == 400
        assert response_body['status'] == 'error'
        assert response_body['message'] == serialization_errors[
            'booking_already_paid']
        assert requests.post.called is False

    def test_user_pays_for_ticket_that_is_expired_fails(
            self, client, valid_user_one_token, expired_booking):
        """Paying for an expired booking returns 400 without calling Paystack."""
        paystack_res_mock = {
            'status': True,
            'data': {
                'authorization_url': 'http://payment-link.com',
            }
        }
        paystack_response = RequestsResponseMock(200, False,
                                                 **paystack_res_mock)
        requests.post = Mock(return_value=paystack_response)
        client_callback = 'https://test.com'
        response = client.post(
            USER_BOOKING_URL.format(expired_booking.id),
            content_type='application/json',
            HTTP_AUTHORIZATION='Bearer {}'.format(valid_user_one_token),
            data={
                'callbackURL': client_callback,
            })
        response_body = response.data
        # Request assertions
        assert response.status_code == 400
        assert response_body['status'] == 'error'
        assert response_body['message'] == serialization_errors[
            'booking_expired']
        assert requests.post.called is False

    def test_make_payment_with_invalid_callback_url_fails(
            self, client, valid_user_one_token,
            saved_bulk_inserted_bookings_for_user_one):
        """A callbackURL with an unsupported scheme is rejected with 400."""
        booking = saved_bulk_inserted_bookings_for_user_one[0]
        paystack_res_mock = {
            'status': True,
            'data': {
                'authorization_url': 'http://payment-link.com',
            }
        }
        paystack_response = RequestsResponseMock(200, False,
                                                 **paystack_res_mock)
        requests.post = Mock(return_value=paystack_response)
        client_callback = 'utc://test.com'
        response = client.post(
            USER_BOOKING_URL.format(booking.id),
            content_type='application/json',
            HTTP_AUTHORIZATION='Bearer {}'.format(valid_user_one_token),
            data={
                'callbackURL': client_callback,
            })
        response_body = response.data
        # Request assertions
        assert response.status_code == 400
        assert response_body['status'] == 'error'
        assert response_body['message'] == serialization_errors[
            'invalid_url'].format('callbackURL')
        assert requests.post.called is False

    def test_payment_fails_when_paystack_throws_an_error(
            self, client, valid_user_one_token,
            saved_bulk_inserted_bookings_for_user_one):
        """An unusable Paystack response surfaces as a payment_link_error."""
        booking = saved_bulk_inserted_bookings_for_user_one[0]
        # An Exception instance (not a response) makes any attribute
        # access on the "response" fail inside the view.
        requests.post = Mock(return_value=Exception())
        client_callback = 'https://test.com'
        response = client.post(
            USER_BOOKING_URL.format(booking.id),
            content_type='application/json',
            HTTP_AUTHORIZATION='Bearer {}'.format(valid_user_one_token),
            data={
                'callbackURL': client_callback,
            })
        response_body = response.data
        # Request assertions
        assert response.status_code == 400
        assert response_body['status'] == 'error'
        assert response_body['message'] == serialization_errors[
            'payment_link_error']
        assert requests.post.called

    def test_make_payment_without_invalid_token_fails(
            self, client, saved_bulk_inserted_bookings_for_user_one):
        """A malformed Authorization header (no 'Bearer') is rejected."""
        booking = saved_bulk_inserted_bookings_for_user_one[0]
        response = client.post(
            USER_BOOKING_URL.format(booking.id),
            content_type='application/json',
            HTTP_AUTHORIZATION='{}'.format('invalid-token'),
        )
        assert_invalid_token_format(response)

    def test_make_payment_with_missing_token_fails(
            self, client, saved_bulk_inserted_bookings_for_user_one):
        """A request with no Authorization header is rejected."""
        booking = saved_bulk_inserted_bookings_for_user_one[0]
        response = client.post(
            USER_BOOKING_URL.format(booking.id),
            content_type='application/json',
        )
        assert_missing_header(response)
@pytest.mark.django_db
class TestPaymentRedirectRoute:
    """Tests for GET /api/v1/user/bookings/<id>/payment (Paystack redirect verification)."""

    def test_get_call_raises_an_exception(
            self, client, saved_bulk_inserted_bookings_for_user_one):
        """An exception while verifying with Paystack yields paystack_threw_error."""
        booking = saved_bulk_inserted_bookings_for_user_one[0]
        requests.get = Mock(side_effect=Exception())
        response = client.get(
            USER_BOOKING_URL.format(booking.id) + '?reference=blah')
        assert response.status_code == 400
        assert response.data['status'] == 'error'
        assert response.data['message'] == \
            serialization_errors['paystack_threw_error']

    def test_paystack_returns_failure_when_verifying_user(
            self, client, saved_bulk_inserted_bookings_for_user_one):
        """A top-level Paystack failure redirects to the callback with success=false."""
        booking = saved_bulk_inserted_bookings_for_user_one[0]
        booking.paid_at = None
        booking.save()
        payment_time = datetime.now()
        client_callback = 'https://test.com'
        paystack_res_mock = {
            'status': False,
            'data': {
                'status': 'failure',
                'gateway_response': 'Payment was made to account',
                'paid_at': payment_time,
                'metadata': {
                    'callbackURL': client_callback,
                    'bookingId': str(booking.id)
                },
            }
        }
        paystack_response = RequestsResponseMock(200, False,
                                                 **paystack_res_mock)
        requests.get = Mock(return_value=paystack_response)
        reference_mock = 'reference-101'
        response = client.get(
            USER_BOOKING_URL.format(booking.id) +
            f'?reference={reference_mock}', )
        # Parse the redirect URL's query string into a dict.
        query_params = {
            query.split('=')[0]: query.split('=')[1]
            for query in response.url.split('?')[1].split('&')
        }
        assert response.status_code == 303
        assert response.url.startswith(client_callback)
        assert query_params['success'] == 'false'
        assert query_params['bookingId'] == str(booking.id)

    def test_paystack_returns_insufficient_funds_error(
            self, client, saved_bulk_inserted_bookings_for_user_one):
        """A gateway-level failure (insufficient funds) also redirects with success=false."""
        booking = saved_bulk_inserted_bookings_for_user_one[0]
        booking.paid_at = None
        booking.save()
        payment_time = datetime.now()
        client_callback = 'https://test.com'
        paystack_res_mock = {
            'status': True,
            'data': {
                'status': 'failure',
                'gateway_response': 'Insufficient funds',
                'paid_at': payment_time,
                'metadata': {
                    'callbackURL': client_callback,
                    'bookingId': str(booking.id)
                },
            }
        }
        paystack_response = RequestsResponseMock(200, False,
                                                 **paystack_res_mock)
        requests.get = Mock(return_value=paystack_response)
        reference_mock = 'reference-101'
        response = client.get(
            USER_BOOKING_URL.format(booking.id) +
            f'?reference={reference_mock}', )
        # Parse the redirect URL's query string into a dict.
        query_params = {
            query.split('=')[0]: query.split('=')[1]
            for query in response.url.split('?')[1].split('&')
        }
        assert response.status_code == 303
        assert response.url.startswith(client_callback)
        assert query_params['success'] == 'false'
        assert query_params['bookingId'] == str(booking.id)

    def test_successful_payment_redirects_user_succeeds(
            self, client, valid_user_one_token, saved_valid_user_one,
            saved_bulk_inserted_bookings_for_user_one):
        """A verified payment marks the booking paid and redirects with success=true."""
        booking = saved_bulk_inserted_bookings_for_user_one[0]
        booking.paid_at = None
        booking.save()
        payment_time = timezone.now()
        client_callback = 'https://test.com'
        paystack_res_mock = {
            'status': True,
            'data': {
                'status': 'success',
                'gateway_response': 'Payment was made to account',
                'paid_at': payment_time,
                'metadata': {
                    'callbackURL': client_callback,
                    'bookingId': str(booking.id)
                },
            }
        }
        paystack_response = RequestsResponseMock(200, False,
                                                 **paystack_res_mock)
        requests.get = Mock(return_value=paystack_response)
        reference_mock = 'sample-reference'
        response = client.get(
            USER_BOOKING_URL.format(booking.id) +
            f'?reference={reference_mock}',
            # content_type='application/json',
        )
        # Parse the redirect URL's query string into a dict.
        query_params = {
            query.split('=')[0]: query.split('=')[1]
            for query in response.url.split('?')[1].split('&')
        }
        # Re-fetch to observe the persisted paid_at update.
        booking = Booking.objects.get(pk=booking.id)
        # Request assertions
        assert response.status_code == 303
        assert query_params['success'] == 'true'
        assert query_params['bookingId'] == str(booking.id)
        assert response.url.startswith(client_callback)
        assert booking.paid_at == payment_time
|
from .foo import inc
|
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
* Thread management for memcached.
*/
#include "memcached.h"
#include <assert.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#ifdef __sun
#include <atomic.h>
#endif
/* Number of CQ_ITEM structs allocated per batch when growing the
 * freelist (presumably — the allocation code is elsewhere). */
#define ITEMS_PER_ALLOC 64

/* An item in the connection queue. */
typedef struct conn_queue_item CQ_ITEM;
struct conn_queue_item {
    int               sfd;              /* accepted socket descriptor */
    enum conn_states  init_state;       /* state the new connection starts in */
    int               event_flags;      /* libevent flags to register */
    int               read_buffer_size;
    enum network_transport     transport;
    CQ_ITEM          *next;             /* queue / freelist link */
};

/* A connection queue. */
typedef struct conn_queue CQ;
struct conn_queue {
    CQ_ITEM *head;
    CQ_ITEM *tail;
    pthread_mutex_t lock;               /* protects head/tail */
};

/* Locks for cache LRU operations */
pthread_mutex_t lru_locks[POWER_LARGEST];

/* Connection lock around accepting new connections */
pthread_mutex_t conn_lock = PTHREAD_MUTEX_INITIALIZER;

#if !defined(HAVE_GCC_ATOMICS) && !defined(__sun)
/* Fallback serialization for refcount_incr/decr on platforms with no
 * native atomics. */
pthread_mutex_t atomics_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

/* Lock for global stats */
static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

/* Lock to cause worker threads to hang up after being woken */
static pthread_mutex_t worker_hang_lock;

/* Free list of CQ_ITEM structs */
static CQ_ITEM *cqi_freelist;
static pthread_mutex_t cqi_freelist_lock;

/* Striped bucket locks for hash-table items; sized by
 * item_lock_hashpower (see hashsize/hashmask below). */
static pthread_mutex_t *item_locks;
/* size of the item lock hash table */
static uint32_t item_lock_count;
unsigned int item_lock_hashpower;
#define hashsize(n) ((unsigned long int)1<<(n))
#define hashmask(n) (hashsize(n)-1)

static LIBEVENT_DISPATCHER_THREAD dispatcher_thread;

/*
 * Each libevent instance has a wakeup pipe, which other threads
 * can use to signal that they've put a new connection on its queue.
 */
static LIBEVENT_THREAD *threads;

/*
 * Number of worker threads that have finished setting themselves up.
 */
static int init_count = 0;
static pthread_mutex_t init_lock;
static pthread_cond_t init_cond;

static void thread_libevent_process(int fd, short which, void *arg);
/* Atomically increment *refcount and return the new value.
 * Uses GCC builtins or Solaris atomics when available; otherwise falls
 * back to serializing through the global atomics_mutex. */
unsigned short refcount_incr(unsigned short *refcount) {
#ifdef HAVE_GCC_ATOMICS
    return __sync_add_and_fetch(refcount, 1);
#elif defined(__sun)
    return atomic_inc_ushort_nv(refcount);
#else
    unsigned short res;
    mutex_lock(&atomics_mutex);
    (*refcount)++;
    res = *refcount;
    mutex_unlock(&atomics_mutex);
    return res;
#endif
}
/* Atomically decrement *refcount and return the new value.
 * Mirror of refcount_incr(); same platform fallbacks. */
unsigned short refcount_decr(unsigned short *refcount) {
#ifdef HAVE_GCC_ATOMICS
    return __sync_sub_and_fetch(refcount, 1);
#elif defined(__sun)
    return atomic_dec_ushort_nv(refcount);
#else
    unsigned short res;
    mutex_lock(&atomics_mutex);
    (*refcount)--;
    res = *refcount;
    mutex_unlock(&atomics_mutex);
    return res;
#endif
}
/* item_lock() must be held for an item before any modifications to either its
 * associated hash bucket, or the structure itself.
 * LRU modifications must hold the item lock, and the LRU lock.
 * LRU's accessing items must item_trylock() before modifying an item.
 * Items accessable from an LRU must not be freed or modified
 * without first locking and removing from the LRU.
 */
/* Lock the hash-bucket mutex covering hash value hv (blocking). */
void item_lock(uint32_t hv) {
    mutex_lock(&item_locks[hv & hashmask(item_lock_hashpower)]);
}
/* Try to take the bucket mutex for hv without blocking.
 * Returns the locked mutex (pass to item_trylock_unlock()) or NULL if it
 * was already held. */
void *item_trylock(uint32_t hv) {
    pthread_mutex_t *lock = &item_locks[hv & hashmask(item_lock_hashpower)];
    return (pthread_mutex_trylock(lock) == 0) ? (void *) lock : NULL;
}
/* Release a bucket mutex previously returned by item_trylock(). */
void item_trylock_unlock(void *lock) {
    mutex_unlock((pthread_mutex_t *) lock);
}
/* Release the hash-bucket mutex covering hash value hv. */
void item_unlock(uint32_t hv) {
    mutex_unlock(&item_locks[hv & hashmask(item_lock_hashpower)]);
}
/* Sleep on init_cond until init_count reaches nthreads.
 * Caller must hold init_lock (pthread_cond_wait releases it while waiting). */
static void wait_for_thread_registration(int nthreads) {
    while (init_count < nthreads) {
        pthread_cond_wait(&init_cond, &init_lock);
    }
}
/* Called by a worker to report it is set up (or has paused): bumps
 * init_count and wakes wait_for_thread_registration().  The lock/unlock of
 * worker_hang_lock at the end makes the worker block here whenever
 * pause_threads() is holding that mutex. */
static void register_thread_initialized(void) {
    pthread_mutex_lock(&init_lock);
    init_count++;
    pthread_cond_signal(&init_cond);
    pthread_mutex_unlock(&init_lock);
    /* Force worker threads to pile up if someone wants us to */
    pthread_mutex_lock(&worker_hang_lock);
    pthread_mutex_unlock(&worker_hang_lock);
}
/* Must not be called with any deeper locks held */
/* Pause or resume the background threads and/or all worker threads.
 * For the PAUSE_* cases this returns only after every worker has checked
 * in (via register_thread_initialized), so callers know all workers are
 * parked on worker_hang_lock. */
void pause_threads(enum pause_thread_types type) {
    char buf[1];
    int i;
    buf[0] = 0;
    switch (type) {
        case PAUSE_ALL_THREADS:
            slabs_rebalancer_pause();
            lru_crawler_pause();
            lru_maintainer_pause();
            /* fall through: also pause the workers */
        case PAUSE_WORKER_THREADS:
            buf[0] = 'p';
            pthread_mutex_lock(&worker_hang_lock);
            break;
        case RESUME_ALL_THREADS:
            slabs_rebalancer_resume();
            lru_crawler_resume();
            lru_maintainer_resume();
            /* fall through: also release the workers */
        case RESUME_WORKER_THREADS:
            pthread_mutex_unlock(&worker_hang_lock);
            break;
        default:
            fprintf(stderr, "Unknown lock type: %d\n", type);
            assert(1 == 0);
            break;
    }
    /* Only send a message if we have one. */
    if (buf[0] == 0) {
        return;
    }
    /* Wake every worker over its notify pipe, then wait until each one has
     * reported in (and therefore blocked on worker_hang_lock). */
    pthread_mutex_lock(&init_lock);
    init_count = 0;
    for (i = 0; i < settings.num_threads; i++) {
        if (write(threads[i].notify_send_fd, buf, 1) != 1) {
            perror("Failed writing to notify pipe");
            /* TODO: This is a fatal problem. Can it ever happen temporarily? */
        }
    }
    wait_for_thread_registration(settings.num_threads);
    pthread_mutex_unlock(&init_lock);
}
/*
 * Initializes a connection queue: empty list, fresh mutex.
 */
static void cq_init(CQ *cq) {
    cq->head = NULL;
    cq->tail = NULL;
    pthread_mutex_init(&cq->lock, NULL);
}
/*
 * Looks for an item on a connection queue, but doesn't block if there isn't
 * one.
 * Returns the item, or NULL if no item is available.
 */
static CQ_ITEM *cq_pop(CQ *cq) {
    CQ_ITEM *item = NULL;
    pthread_mutex_lock(&cq->lock);
    if (cq->head != NULL) {
        item = cq->head;
        cq->head = item->next;
        if (cq->head == NULL) {
            /* Queue drained; tail must not dangle. */
            cq->tail = NULL;
        }
    }
    pthread_mutex_unlock(&cq->lock);
    return item;
}
/*
 * Adds an item to the tail of a connection queue.
 */
static void cq_push(CQ *cq, CQ_ITEM *item) {
    item->next = NULL;
    pthread_mutex_lock(&cq->lock);
    if (cq->tail != NULL) {
        cq->tail->next = item;
    } else {
        /* Empty queue: item becomes both head and tail. */
        cq->head = item;
    }
    cq->tail = item;
    pthread_mutex_unlock(&cq->lock);
}
/*
 * Returns a fresh connection queue item.
 * Pops from the shared freelist when possible; otherwise allocates
 * ITEMS_PER_ALLOC items in one malloc, returns the first and splices the
 * rest onto the freelist.  The batch allocation is never freed (by design:
 * items recycle through the freelist for the process lifetime).
 */
static CQ_ITEM *cqi_new(void) {
    CQ_ITEM *item = NULL;
    pthread_mutex_lock(&cqi_freelist_lock);
    if (cqi_freelist) {
        item = cqi_freelist;
        cqi_freelist = item->next;
    }
    pthread_mutex_unlock(&cqi_freelist_lock);
    if (NULL == item) {
        int i;
        /* Allocate a bunch of items at once to reduce fragmentation */
        item = malloc(sizeof(CQ_ITEM) * ITEMS_PER_ALLOC);
        if (NULL == item) {
            STATS_LOCK();
            stats.malloc_fails++;
            STATS_UNLOCK();
            return NULL;
        }
        /*
         * Link together all the new items except the first one
         * (which we'll return to the caller) for placement on
         * the freelist.
         */
        for (i = 2; i < ITEMS_PER_ALLOC; i++)
            item[i - 1].next = &item[i];
        pthread_mutex_lock(&cqi_freelist_lock);
        item[ITEMS_PER_ALLOC - 1].next = cqi_freelist;
        cqi_freelist = &item[1];
        pthread_mutex_unlock(&cqi_freelist_lock);
    }
    return item;
}
/*
 * Frees a connection queue item (adds it to the freelist.)
 * The memory is never returned to the allocator; see cqi_new().
 */
static void cqi_free(CQ_ITEM *item) {
    pthread_mutex_lock(&cqi_freelist_lock);
    item->next = cqi_freelist;
    cqi_freelist = item;
    pthread_mutex_unlock(&cqi_freelist_lock);
}
/*
 * Creates a worker thread.
 * Stores the new thread id into ((LIBEVENT_THREAD*)arg)->thread_id and
 * terminates the process if the thread cannot be created.
 */
static void create_worker(void *(*func)(void *), void *arg) {
    pthread_attr_t attr;
    int ret;
    pthread_attr_init(&attr);
    if ((ret = pthread_create(&((LIBEVENT_THREAD*)arg)->thread_id, &attr, func, arg)) != 0) {
        fprintf(stderr, "Can't create thread: %s\n",
                strerror(ret));
        exit(1);
    }
    /* Release the attr's resources; POSIX requires a matching destroy for
     * every pthread_attr_init even though it is a no-op on many platforms. */
    pthread_attr_destroy(&attr);
}
/*
 * Sets whether or not we accept new connections.
 * Thin wrapper serializing do_accept_new_conns() under conn_lock.
 */
void accept_new_conns(const bool do_accept) {
    pthread_mutex_lock(&conn_lock);
    do_accept_new_conns(do_accept);
    pthread_mutex_unlock(&conn_lock);
}
/****************************** LIBEVENT THREADS *****************************/
/*
 * Set up a thread's information.
 * Creates the worker's private libevent base, hooks the notify-pipe read
 * event to thread_libevent_process(), and allocates its connection queue,
 * stats mutex and suffix cache.  Runs on the main thread, before the
 * worker itself is started; any failure is fatal.
 */
static void setup_thread(LIBEVENT_THREAD *me) {
    me->base = event_init();
    if (! me->base) {
        fprintf(stderr, "Can't allocate event base\n");
        exit(1);
    }
    /* Listen for notifications from other threads */
    event_set(&me->notify_event, me->notify_receive_fd,
              EV_READ | EV_PERSIST, thread_libevent_process, me);
    event_base_set(me->base, &me->notify_event);
    if (event_add(&me->notify_event, 0) == -1) {
        fprintf(stderr, "Can't monitor libevent notify pipe\n");
        exit(1);
    }
    me->new_conn_queue = malloc(sizeof(struct conn_queue));
    if (me->new_conn_queue == NULL) {
        perror("Failed to allocate memory for connection queue");
        exit(EXIT_FAILURE);
    }
    cq_init(me->new_conn_queue);
    if (pthread_mutex_init(&me->stats.mutex, NULL) != 0) {
        perror("Failed to initialize mutex");
        exit(EXIT_FAILURE);
    }
    me->suffix_cache = cache_create("suffix", SUFFIX_SIZE, sizeof(char*),
                                    NULL, NULL);
    if (me->suffix_cache == NULL) {
        fprintf(stderr, "Failed to create suffix cache\n");
        exit(EXIT_FAILURE);
    }
}
/*
 * Worker thread: main event loop.
 * Registers itself as initialized, then runs the libevent loop until the
 * event base is broken out of.
 */
static void *worker_libevent(void *arg) {
    LIBEVENT_THREAD *me = arg;
    /* Any per-thread setup can happen here; memcached_thread_init() will block until
     * all threads have finished initializing.
     */
    me->l = logger_create();
    if (me->l == NULL) {
        /* A worker without a logger cannot proceed safely. */
        abort();
    }
    register_thread_initialized();
    event_base_loop(me->base, 0);
    return NULL;
}
/*
 * Processes an incoming "handle a new connection" item. This is called when
 * input arrives on the libevent wakeup pipe.
 * One command byte is read from the pipe:
 *   'c' - pop a CQ_ITEM off this worker's queue and build a conn for it
 *   'p' - pause request: check in via register_thread_initialized()
 *   't' - a client socket timed out; its fd follows on the pipe
 */
static void thread_libevent_process(int fd, short which, void *arg) {
    LIBEVENT_THREAD *me = arg;
    CQ_ITEM *item;
    char buf[1];
    unsigned int timeout_fd;
    if (read(fd, buf, 1) != 1) {
        if (settings.verbose > 0)
            fprintf(stderr, "Can't read from libevent pipe\n");
        return;
    }
    switch (buf[0]) {
    case 'c':
        item = cq_pop(me->new_conn_queue);
        if (NULL != item) {
            conn *c = conn_new(item->sfd, item->init_state, item->event_flags,
                               item->read_buffer_size, item->transport,
                               me->base);
            if (c == NULL) {
                if (IS_UDP(item->transport)) {
                    /* UDP listeners are set up once at startup, so a failure
                     * here cannot be retried later. */
                    fprintf(stderr, "Can't listen for events on UDP socket\n");
                    exit(1);
                } else {
                    if (settings.verbose > 0) {
                        fprintf(stderr, "Can't listen for events on fd %d\n",
                                item->sfd);
                    }
                    close(item->sfd);
                }
            } else {
                c->thread = me;
            }
            cqi_free(item);
        }
        break;
    /* we were told to pause and report in */
    case 'p':
        register_thread_initialized();
        break;
    /* a client socket timed out */
    case 't':
        if (read(fd, &timeout_fd, sizeof(timeout_fd)) != sizeof(timeout_fd)) {
            if (settings.verbose > 0)
                fprintf(stderr, "Can't read timeout fd from libevent pipe\n");
            return;
        }
        /* NOTE(review): timeout_fd indexes the global conns table without a
         * bounds check - presumably the sender only writes valid fds; verify. */
        conn_close_idle(conns[timeout_fd]);
        break;
    }
}
/* Which thread we assigned a connection to most recently. */
static int last_thread = -1;
/*
 * Dispatches a new connection to another thread. This is only ever called
 * from the main thread, either during initialization (for UDP) or because
 * of an incoming connection.
 * Workers are chosen round-robin; the CQ_ITEM is queued and the worker is
 * woken with a one-byte 'c' write to its notify pipe.
 */
void dispatch_conn_new(int sfd, enum conn_states init_state, int event_flags,
                       int read_buffer_size, enum network_transport transport) {
    CQ_ITEM *item = cqi_new();
    char buf[1];
    if (item == NULL) {
        close(sfd);
        /* given that malloc failed this may also fail, but let's try */
        fprintf(stderr, "Failed to allocate memory for connection object\n");
        return ;
    }
    int tid = (last_thread + 1) % settings.num_threads;
    LIBEVENT_THREAD *thread = threads + tid;
    last_thread = tid;
    item->sfd = sfd;
    item->init_state = init_state;
    item->event_flags = event_flags;
    item->read_buffer_size = read_buffer_size;
    item->transport = transport;
    cq_push(thread->new_conn_queue, item);
    MEMCACHED_CONN_DISPATCH(sfd, thread->thread_id);
    buf[0] = 'c';
    if (write(thread->notify_send_fd, buf, 1) != 1) {
        perror("Writing to thread notify pipe");
    }
}
/*
 * Re-dispatches a connection back to the original thread. Can be called from
 * any side thread borrowing a connection.
 * TODO: Look into this. too complicated?
 */
/* NOTE(review): this function is compiled out (BOGUS_DEFINE is never
 * defined).  As written it is incomplete: `sfd` is undefined (should be
 * c->sfd) and the item is neither pushed onto a queue nor is the target
 * thread notified.  Keep disabled until finished. */
#ifdef BOGUS_DEFINE
void redispatch_conn(conn *c) {
    CQ_ITEM *item = cqi_new();
    char buf[1];
    if (item == NULL) {
        /* Can't cleanly redispatch connection. close it forcefully. */
        /* FIXME: is conn_cleanup() necessary?
         * if conn was handed off to a side thread it should be clean.
         * could also put it into a "clean_me" state?
         */
        c->state = conn_closed;
        close(c->sfd);
        return;
    }
    LIBEVENT_THREAD *thread = c->thread;
    item->sfd = sfd;
    /* pass in the state somehow?
    item->init_state = conn_closing; */
    item->event_flags = c->event_flags;
    item->conn = c;
}
#endif
/* This misses the allow_new_conns flag :( */
/* Close a connection from a side thread: mark it closed, close the socket
 * and drop the global open-connection counter. */
void sidethread_conn_close(conn *c) {
    c->state = conn_closed;
    if (settings.verbose > 1) {
        fprintf(stderr, "<%d connection closed from side thread.\n", c->sfd);
    }
    close(c->sfd);
    STATS_LOCK();
    stats_state.curr_conns--;
    STATS_UNLOCK();
}
/*
 * Returns true (nonzero) if the calling thread is the thread that listens
 * for new TCP connections (the dispatcher/main thread).
 */
int is_listen_thread() {
    /* pthread_t is an opaque type; POSIX specifies pthread_equal() for
     * comparison - using == is non-portable (pthread_t may be a struct). */
    return pthread_equal(pthread_self(), dispatcher_thread.thread_id);
}
/********************************* ITEM ACCESS *******************************/
/*
 * Allocates a new item.
 */
item *item_alloc(char *key, size_t nkey, int flags, rel_time_t exptime, int nbytes) {
    /* do_item_alloc handles its own locks, so no bucket lock is needed here */
    return do_item_alloc(key, nkey, flags, exptime, nbytes);
}
/*
 * Returns an item if it hasn't been marked as expired,
 * lazy-expiring as needed.  Takes and releases the item's bucket lock.
 */
item *item_get(const char *key, const size_t nkey, conn *c) {
    uint32_t bucket = hash(key, nkey);
    item *found;
    item_lock(bucket);
    found = do_item_get(key, nkey, bucket, c);
    item_unlock(bucket);
    return found;
}
/* Refresh an item's expiration time under its bucket lock, returning the
 * item (or NULL) like item_get(). */
item *item_touch(const char *key, size_t nkey, uint32_t exptime, conn *c) {
    uint32_t bucket = hash(key, nkey);
    item *found;
    item_lock(bucket);
    found = do_item_touch(key, nkey, exptime, bucket, c);
    item_unlock(bucket);
    return found;
}
/*
 * Links an item into the LRU and hashtable, under its bucket lock.
 */
int item_link(item *item) {
    uint32_t bucket = hash(ITEM_key(item), item->nkey);
    int linked;
    item_lock(bucket);
    linked = do_item_link(item, bucket);
    item_unlock(bucket);
    return linked;
}
/*
 * Decrements the reference count on an item and adds it to the freelist if
 * needed.  The bucket lock serializes against concurrent hash/LRU changes.
 */
void item_remove(item *item) {
    uint32_t bucket = hash(ITEM_key(item), item->nkey);
    item_lock(bucket);
    do_item_remove(item);
    item_unlock(bucket);
}
/*
 * Replaces one item with another in the hashtable.
 * Unprotected by a mutex lock since the core server does not require
 * it to be thread-safe.  Callers supply the precomputed hash value hv.
 */
int item_replace(item *old_it, item *new_it, const uint32_t hv) {
    return do_item_replace(old_it, new_it, hv);
}
/*
 * Unlinks an item from the LRU and hashtable, under its bucket lock.
 */
void item_unlink(item *item) {
    uint32_t bucket = hash(ITEM_key(item), item->nkey);
    item_lock(bucket);
    do_item_unlink(item, bucket);
    item_unlock(bucket);
}
/*
 * Moves an item to the back of the LRU queue, under its bucket lock.
 */
void item_update(item *item) {
    uint32_t bucket = hash(ITEM_key(item), item->nkey);
    item_lock(bucket);
    do_item_update(item);
    item_unlock(bucket);
}
/*
 * Does arithmetic on a numeric item value (incr/decr), under the key's
 * bucket lock.  The textual result is written into buf.
 */
enum delta_result_type add_delta(conn *c, const char *key,
                                 const size_t nkey, int incr,
                                 const int64_t delta, char *buf,
                                 uint64_t *cas) {
    uint32_t bucket = hash(key, nkey);
    enum delta_result_type result;
    item_lock(bucket);
    result = do_add_delta(c, key, nkey, incr, delta, buf, cas, bucket);
    item_unlock(bucket);
    return result;
}
/*
 * Stores an item in the cache (high level, obeys set/add/replace semantics),
 * under the key's bucket lock.
 */
enum store_item_type store_item(item *item, int comm, conn* c) {
    uint32_t bucket = hash(ITEM_key(item), item->nkey);
    enum store_item_type result;
    item_lock(bucket);
    result = do_store_item(item, comm, c, bucket);
    item_unlock(bucket);
    return result;
}
/******************************* GLOBAL STATS ******************************/
/* Acquire the global stats lock (guards the shared stats structures). */
void STATS_LOCK() {
    pthread_mutex_lock(&stats_lock);
}
/* Release the global stats lock. */
void STATS_UNLOCK() {
    pthread_mutex_unlock(&stats_lock);
}
/* Zero every worker thread's thread-local stats counters, one thread at a
 * time under that thread's stats mutex. */
void threadlocal_stats_reset(void) {
    int ii;
    for (ii = 0; ii < settings.num_threads; ++ii) {
        pthread_mutex_lock(&threads[ii].stats.mutex);
/* X-macro: expands to one zeroing assignment per stat field */
#define X(name) threads[ii].stats.name = 0;
        THREAD_STATS_FIELDS
#undef X
        memset(&threads[ii].stats.slab_stats, 0,
               sizeof(threads[ii].stats.slab_stats));
        pthread_mutex_unlock(&threads[ii].stats.mutex);
    }
}
/* Sum every worker's thread-local stats (including per-slab-class stats)
 * into *stats, taking each worker's stats mutex in turn. */
void threadlocal_stats_aggregate(struct thread_stats *stats) {
    int ii, sid;
    /* The struct has a mutex, but we can safely set the whole thing
     * to zero since it is unused when aggregating. */
    memset(stats, 0, sizeof(*stats));
    for (ii = 0; ii < settings.num_threads; ++ii) {
        pthread_mutex_lock(&threads[ii].stats.mutex);
/* X-macro: accumulate each scalar stat field */
#define X(name) stats->name += threads[ii].stats.name;
        THREAD_STATS_FIELDS
#undef X
        for (sid = 0; sid < MAX_NUMBER_OF_SLAB_CLASSES; sid++) {
#define X(name) stats->slab_stats[sid].name += \
                threads[ii].stats.slab_stats[sid].name;
            SLAB_STATS_FIELDS
#undef X
        }
        pthread_mutex_unlock(&threads[ii].stats.mutex);
    }
}
/* Collapse the per-slab-class stats in *stats into a single total in *out.
 * No locking: operates on an already-aggregated snapshot. */
void slab_stats_aggregate(struct thread_stats *stats, struct slab_stats *out) {
    int sid;
    memset(out, 0, sizeof(*out));
    for (sid = 0; sid < MAX_NUMBER_OF_SLAB_CLASSES; sid++) {
#define X(name) out->name += stats->slab_stats[sid].name;
        SLAB_STATS_FIELDS
#undef X
    }
}
/*
 * Initializes the thread subsystem, creating various worker threads.
 *
 * nthreads  Number of worker event handler threads to spawn
 * main_base Event base for main thread
 *
 * Order matters: locks and the item lock table are set up first, then every
 * worker's libevent state (setup_thread), then the threads themselves, and
 * finally we block until all workers have registered.
 */
void memcached_thread_init(int nthreads, struct event_base *main_base) {
    int i;
    int power;
    for (i = 0; i < POWER_LARGEST; i++) {
        pthread_mutex_init(&lru_locks[i], NULL);
    }
    pthread_mutex_init(&worker_hang_lock, NULL);
    pthread_mutex_init(&init_lock, NULL);
    pthread_cond_init(&init_cond, NULL);
    pthread_mutex_init(&cqi_freelist_lock, NULL);
    cqi_freelist = NULL;
    /* Want a wide lock table, but don't waste memory */
    if (nthreads < 3) {
        power = 10;
    } else if (nthreads < 4) {
        power = 11;
    } else if (nthreads < 5) {
        power = 12;
    } else {
        /* 8192 buckets, and central locks don't scale much past 5 threads */
        power = 13;
    }
    /* The bucket-lock table must be strictly smaller than the hash table,
     * otherwise the hv & hashmask() mapping breaks down. */
    if (power >= hashpower) {
        fprintf(stderr, "Hash table power size (%d) cannot be equal to or less than item lock table (%d)\n", hashpower, power);
        fprintf(stderr, "Item lock table grows with `-t N` (worker threadcount)\n");
        fprintf(stderr, "Hash table grows with `-o hashpower=N` \n");
        exit(1);
    }
    item_lock_count = hashsize(power);
    item_lock_hashpower = power;
    item_locks = calloc(item_lock_count, sizeof(pthread_mutex_t));
    if (! item_locks) {
        perror("Can't allocate item locks");
        exit(1);
    }
    for (i = 0; i < item_lock_count; i++) {
        pthread_mutex_init(&item_locks[i], NULL);
    }
    threads = calloc(nthreads, sizeof(LIBEVENT_THREAD));
    if (! threads) {
        perror("Can't allocate thread descriptors");
        exit(1);
    }
    dispatcher_thread.base = main_base;
    dispatcher_thread.thread_id = pthread_self();
    for (i = 0; i < nthreads; i++) {
        int fds[2];
        if (pipe(fds)) {
            perror("Can't create notify pipe");
            exit(1);
        }
        threads[i].notify_receive_fd = fds[0];
        threads[i].notify_send_fd = fds[1];
        setup_thread(&threads[i]);
        /* Reserve three fds for the libevent base, and two for the pipe */
        stats_state.reserved_fds += 5;
    }
    /* Create threads after we've done all the libevent setup. */
    for (i = 0; i < nthreads; i++) {
        create_worker(worker_libevent, &threads[i]);
    }
    /* Wait for all the threads to set themselves up before returning. */
    pthread_mutex_lock(&init_lock);
    wait_for_thread_registration(nthreads);
    pthread_mutex_unlock(&init_lock);
}
|
#
# PySNMP MIB module Nortel-Magellan-Passport-TextualConventionsMIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Nortel-Magellan-Passport-TextualConventionsMIB
# Produced by pysmi-0.3.4 at Wed May 1 14:26:10 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint")
Integer32, Unsigned32 = mibBuilder.importSymbols("Nortel-Magellan-Passport-StandardTextualConventionsMIB", "Integer32", "Unsigned32")
passportMIBs, = mibBuilder.importSymbols("Nortel-Magellan-Passport-UsefulDefinitionsMIB", "passportMIBs")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, TimeTicks, Unsigned32, IpAddress, Counter32, Bits, iso, ModuleIdentity, Gauge32, ObjectIdentity, Integer32, Counter64, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "TimeTicks", "Unsigned32", "IpAddress", "Counter32", "Bits", "iso", "ModuleIdentity", "Gauge32", "ObjectIdentity", "Integer32", "Counter64", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Generated PySNMP definitions: enterprise textual conventions for the
# Nortel Magellan Passport MIB.  Each class below is a type alias over a
# base SMI type, optionally refined with size/value constraints; the
# MibIdentifier assignments register OID subtrees under 1.3.6.1.4.1.562.
textualConventionsMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 2))
class Hex(Unsigned32):
    pass
# 8-octet fixed-length counters (64-bit values carried as OCTET STRING).
class Gauge64(OctetString):
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8)
    fixedLength = 8
class Unsigned64(OctetString):
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8)
    fixedLength = 8
# Variable-length display strings, all capped at 1024 octets.
class DigitString(OctetString):
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 1024)
class WildcardedDigitString(OctetString):
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 1024)
class HexString(OctetString):
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 1024)
class AsciiString(OctetString):
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 1024)
class ExtendedAsciiString(OctetString):
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 1024)
class DashedHexString(OctetString):
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 1024)
# Date/time value whose length encodes the precision actually present.
class EnterpriseDateAndTime(OctetString):
    subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(2, 2), ValueSizeConstraint(5, 5), ValueSizeConstraint(8, 8), ValueSizeConstraint(10, 10), ValueSizeConstraint(13, 13), ValueSizeConstraint(16, 16), ValueSizeConstraint(19, 19), )
class Link(ObjectIdentifier):
    pass
class IntegerSequence(OctetString):
    pass
# FixedPointN: unsigned value with N implied decimal places.
class FixedPoint1(Unsigned32):
    pass
class FixedPoint2(Unsigned32):
    pass
class FixedPoint3(Unsigned32):
    pass
class FixedPoint4(Unsigned32):
    pass
class FixedPoint5(Unsigned32):
    pass
class FixedPoint6(Unsigned32):
    pass
class FixedPoint7(Unsigned32):
    pass
class FixedPoint8(Unsigned32):
    pass
class FixedPoint9(Unsigned32):
    pass
class AsciiStringIndex(OctetString):
    pass
# Index type for non-replicated entries: the single value 1 ("present").
class NonReplicated(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1))
    namedValues = NamedValues(("present", 1))
# OID registrations for groups and capability statements.
textualConventionsGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 2, 1))
textualConventionsGroupBC = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 2, 1, 3))
textualConventionsGroupBC02 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 2, 1, 3, 2))
textualConventionsGroupBC02A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 2, 1, 3, 2, 2))
textualConventionsCapabilities = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 2, 3))
textualConventionsCapabilitiesBC = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 2, 3, 3))
textualConventionsCapabilitiesBC02 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 2, 3, 3, 2))
textualConventionsCapabilitiesBC02A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 2, 3, 3, 2, 2))
# Export every symbol so other generated MIB modules can import them.
mibBuilder.exportSymbols("Nortel-Magellan-Passport-TextualConventionsMIB", WildcardedDigitString=WildcardedDigitString, textualConventionsGroupBC=textualConventionsGroupBC, FixedPoint1=FixedPoint1, textualConventionsCapabilities=textualConventionsCapabilities, Gauge64=Gauge64, Hex=Hex, textualConventionsCapabilitiesBC02=textualConventionsCapabilitiesBC02, IntegerSequence=IntegerSequence, FixedPoint3=FixedPoint3, FixedPoint8=FixedPoint8, FixedPoint9=FixedPoint9, FixedPoint4=FixedPoint4, AsciiStringIndex=AsciiStringIndex, AsciiString=AsciiString, FixedPoint2=FixedPoint2, textualConventionsCapabilitiesBC02A=textualConventionsCapabilitiesBC02A, textualConventionsGroup=textualConventionsGroup, DashedHexString=DashedHexString, FixedPoint7=FixedPoint7, FixedPoint5=FixedPoint5, Link=Link, FixedPoint6=FixedPoint6, textualConventionsGroupBC02A=textualConventionsGroupBC02A, textualConventionsGroupBC02=textualConventionsGroupBC02, ExtendedAsciiString=ExtendedAsciiString, EnterpriseDateAndTime=EnterpriseDateAndTime, textualConventionsCapabilitiesBC=textualConventionsCapabilitiesBC, HexString=HexString, DigitString=DigitString, NonReplicated=NonReplicated, Unsigned64=Unsigned64, textualConventionsMIB=textualConventionsMIB)
|
# Best-effort dependency check: report which scientific-python packages are
# importable without aborting the script.
# Fix: the original used Python-2-only `print` statements, which are a
# SyntaxError under Python 3; print() calls work under both interpreters.
try:
    import numpy
except ImportError:
    print("Numpy is not installed")
try:
    import matplotlib
except ImportError:
    print("Matplotlib is not installed")
try:
    import scipy
except ImportError:
    print("Scipy is not installed")
|
// Canned response returned for every question.
const answer = "Now that's a good question!";

/**
 * Log the question to the console and return the canned answer.
 * @param {string} question - Question to echo.
 * @returns {string} The fixed answer string.
 */
module.exports.ask = function (question) {
  console.log(question);
  return answer;
};
|
from django.urls import path, re_path
from . import views
# DN (delivery note) API routes.  List endpoints map to ViewSet
# list/create; the re_path variants capture a numeric <pk> for
# per-object actions (retrieve/update/destroy etc.).
urlpatterns = [
    # Scanner and main DN list/detail endpoints.
    path(r'dnscanner/',views.SannerDnDetailView.as_view({"get":"list"}),name="dnscanner"),
    path(r'list/', views.DnListViewSet.as_view({"get": "list", "post": "create"}), name="dnlist"),
    re_path(r'^list/(?P<pk>\d+)/$', views.DnListViewSet.as_view({
        'get': 'retrieve',
        'put': 'update',
        'patch': 'partial_update',
        'delete': 'destroy'
    }), name="dnlist_1"),
    path(r'detail/', views.DnDetailViewSet.as_view({"get": "list", "post": "create", 'put': 'update'}), name="dndetail"),
    re_path(r'^detail/(?P<pk>\d+)/$', views.DnDetailViewSet.as_view({
        'get': 'retrieve',
        'delete': 'destroy'
    }), name="dndetail_1"),
    # Printing and order workflow endpoints.
    re_path(r'^viewprint/(?P<pk>\d+)/$', views.DnViewPrintViewSet.as_view({
        'get': 'retrieve',
    }), name="dnviewprint_1"),
    # NOTE(review): route name "preloadid_1" does not match the "neworder"
    # URL - looks copied from another module; confirm it is intentional.
    re_path(r'^neworder/(?P<pk>\d+)/$', views.DnNewOrderViewSet.as_view({
        'post': 'create',
    }), name="preloadid_1"),
    path(r'orderrelease/', views.DnOrderReleaseViewSet.as_view({"post": "create"}), name="orderrelease"),
    re_path(r'^orderrelease/(?P<pk>\d+)/$', views.DnOrderReleaseViewSet.as_view({
        'put': 'update',
    }), name="orderrelease_1"),
    # Picking / dispatch / proof-of-delivery endpoints.
    path(r'pickinglistfilter/', views.DnPickingListFilterViewSet.as_view({"get": "list"}), name="pickinglistfilter"),
    re_path(r'^pickinglist/(?P<pk>\d+)/$', views.DnPickingListViewSet.as_view({
        'get': 'retrieve',
    }), name="pickinglist_1"),
    path(r'picked/', views.DnPickedViewSet.as_view({'put': 'update'}), name="picked"),
    re_path(r'^picked/(?P<pk>\d+)/$', views.DnPickedViewSet.as_view({
        'post': 'create',
    }), name="picked_1"),
    re_path(r'^dispatch/(?P<pk>\d+)/$', views.DnDispatchViewSet.as_view({
        'post': 'create',
    }), name="dispatch_1"),
    re_path(r'^pod/(?P<pk>\d+)/$', views.DnPODViewSet.as_view({
        'post': 'create',
    }), name="pod_1"),
    # File download endpoints.
    path(r'filelist/', views.FileListDownloadView.as_view({"get": "list"}), name="dnfilelistdownload"),
    path(r'filedetail/', views.FileDetailDownloadView.as_view({"get": "list"}), name="dnfiledetaildownload"),
]
|
// AMD module registering the (empty) snippet set for the ACE Visualforce mode.
define("ace/snippets/visualforce",["require","exports","module"], function(require, exports, module) {
"use strict";

// No snippets are defined for Visualforce; only the scope is registered.
exports.snippetText =undefined;
exports.scope = "visualforce";

});
// Loader shim: when evaluated under CommonJS, eagerly resolve the AMD
// module above and re-export it as this file's module.exports.
(function() {
    window.require(["ace/snippets/visualforce"], function(m) {
        if (typeof module == "object" && typeof exports == "object" && module) {
            module.exports = m;
        }
    });
})();
|
import json
import csv
from tqdm import tqdm, trange
import argparse
import random
import math
import sys
import numpy as np
import os
import pdb
import ast
import copy
import codecs
import sys
csv.field_size_limit(sys.maxsize)
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BartTokenizer, AdamW, get_linear_schedule_with_warmup
from transformers import BartConfig
from doc_gen_bart_decoder_prefix import MultiHeadBartForConditionalGeneration
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
from metrics import evaluate_nq
def fill_with_neg_inf(t):
    """FP16-compatible helper that fills a tensor with -inf.

    Note: ``t.float()`` returns ``t`` itself when ``t`` is already float32,
    in which case ``fill_`` mutates ``t`` in place; for other dtypes a
    filled copy cast back to ``t``'s dtype is returned.
    """
    return t.float().fill_(float("-inf")).type_as(t)
class GenerationInputExample(object):
    """One generation example: a source text, its reference target and an
    optional grounding context."""

    def __init__(self, guid, source, target, context=None):
        self.guid = guid        # unique example identifier
        self.source = source    # input text
        self.target = target    # reference output text
        self.context = context  # optional grounding document(s)

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class InputFeatures:
    """Tensor-ready features for one example.

    Holds the padded token id / attention mask arrays (and true lengths)
    for the chat source, the teacher-forced target, and the grounding
    document, exactly as consumed by get_train_dataloader().
    """
    def __init__(self,
                 example_index,
                 source_ids,
                 source_mask,
                 source_len,
                 target_ids,
                 target_labels,
                 target_len,
                 doc_ids,
                 doc_mask,
                 doc_len):
        # Every constructor argument is kept as an attribute of the same name.
        self.example_index = example_index
        self.source_ids = source_ids
        self.source_mask = source_mask
        self.source_len = source_len
        self.target_ids = target_ids
        self.target_labels = target_labels
        self.target_len = target_len
        self.doc_ids = doc_ids
        self.doc_mask = doc_mask
        self.doc_len = doc_len
class MultiBartQA:
    def __init__(self, args=None):
        """Build the multi-head BART generator and its tokenizer.

        args: parsed argparse.Namespace; when omitted, arguments are parsed
        from the command line via parse_args().
        """
        if args:
            self.args = args
        else:
            self.args = self.parse_args()
        # Pin the model to the first GPU when available, otherwise CPU.
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.cpu = torch.device("cpu")
        self.generator = MultiHeadBartForConditionalGeneration.from_pretrained_multi(self.args, self.args.model_file_path)
        self.generator.to(self.device)
        self.tokenizer = BartTokenizer.from_pretrained(self.args.model_name) # Need to add base to "tokenization_bart.py" when using transformers==2.11.0
    def save(self, num_updates):
        """Write a checkpoint to <output_dir>/checkpoint-<num_updates>/model.pt.

        Unwraps a DataParallel/DDP `.module` wrapper if present before
        taking the state_dict.
        """
        model_to_save = (
            self.generator.module if hasattr(self.generator, "module") else self.generator
        )
        checkpoint = {
            'model': model_to_save.state_dict(),
            # NOTE(review): this stores a *freshly constructed* optimizer
            # object (get_optimizer builds a new AdamW), not the state of the
            # optimizer used during training - confirm resuming is expected
            # to restart optimizer state.
            'optimizer': self.get_optimizer(),
            'args': self.args
        }
        output_dir = os.path.join(self.args.output_dir, f"checkpoint-{num_updates}")
        os.makedirs(output_dir, exist_ok=True)
        torch.save(checkpoint, os.path.join(output_dir, 'model.pt'))
    def parse_args(self):
        """Define and parse the command-line flags for training, evaluation
        and generation; returns the parsed argparse.Namespace."""
        parser = argparse.ArgumentParser()
        parser.add_argument("--seed",
                            default=42,
                            type=int,
                            help="Random seed")
        parser.add_argument("--model_type",
                            default='doha',
                            type=str,
                            help="doc-attn/doha/doc-enc/doc-enc-fr")
        parser.add_argument("--model_name",
                            default='facebook/bart-large',
                            type=str,
                            help="BART model")
        # Paths for data, outputs, logging and the initial model weights.
        parser.add_argument('--data_dir',
                            type = str,
                            default = 'cmu_data/',
                            help = 'path to data_dir')
        parser.add_argument('--output_dir',
                            type = str,
                            default = 'trained_models/',
                            help = 'path to save the model')
        parser.add_argument('--log_file_path',
                            type = str,
                            default = './log.txt',
                            help = 'Log file')
        parser.add_argument('--model_file_path',
                            type = str,
                            default = './pytorch_model.bin',
                            help = 'Model file')
        # Sequence-length and batching hyperparameters.
        parser.add_argument("--source_max_len",
                            default=512,
                            type=int,
                            help="Max len of source")
        parser.add_argument("--target_max_len",
                            default=128,
                            type=int,
                            help="Max len of target")
        parser.add_argument("--train_batch_size",
                            default=2,
                            type=int,
                            help="Total batch size for training.")
        parser.add_argument("--validation_timing",
                            default=1000,
                            type=int,
                            help="Check dev score after every N updates")
        parser.add_argument("--eval_batch_size",
                            default=16,
                            type=int,
                            help="Total batch size for eval.")
        # Optimization hyperparameters.
        parser.add_argument("--learning_rate",
                            default=5e-5,
                            type=float,
                            help="The initial learning rate for Adam.")
        parser.add_argument("--num_train_epochs",
                            default=25.0,
                            type=float,
                            help="Total number of training epochs to perform.")
        parser.add_argument("--warmup_proportion",
                            default=0.1,
                            type=float,
                            help="Proportion of training to perform linear learning rate warmup for. "
                                 "E.g., 0.1 = 10%% of training.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument('--gradient_accumulation_steps',
                            type=int,
                            default=8,
                            help="Number of updates steps to accumulate before performing a backward/update pass.")
        parser.add_argument('--max_grad_norm', help='gradient clipping for Max gradient norm.', required=False, default=1.0,
                            type=float)
        # Mode switches: train / evaluate / generate.
        parser.add_argument("--do_train",
                            action='store_true',
                            help="Flag to indicate whether to train or not")
        parser.add_argument("--do_eval",
                            action='store_true',
                            help="Flag to indicate whether to train or not")
        parser.add_argument("--do_generate",
                            action='store_true',
                            help="Flag to indicate whether to train or not")
        parser.add_argument('--experiment_type',
                            type = str,
                            default = 'chat_context',
                            help = 'Type of input to be fed. Options are '
                                   '[doc_only | chat_document | chat_wizard]')
        return parser.parse_args()
def load_examples(self, data_dir, filename):
examples = []
with codecs.open(data_dir + filename, 'r', 'utf-8') as inp:
spam = csv.reader(inp, delimiter='\t')
for row in spam:
guid = row[0]
source = row[1]
target = row[2]
context = row[3]
examples.append(GenerationInputExample(
guid=guid,
source=source,
target=target,
context=context
))
return examples
def convert_examples_to_features(self, examples):
config = self.generator.model.config
features = []
index = 0
for e in tqdm(examples, desc='Examples'):
# Process source information
source = 'chat: ' + e.source
source_tokens = self.tokenizer.tokenize(source)[:self.args.source_max_len-2]
source_ids = [config.bos_token_id] + self.tokenizer.convert_tokens_to_ids(source_tokens) + [config.eos_token_id] # <s> ... </s>
source_len = len(source_ids)
source_mask = [1] * source_len
padding_len = self.args.source_max_len - source_len
source_ids += ([config.pad_token_id] * padding_len)
source_mask += ([0] * padding_len)
assert len(source_ids) == self.args.source_max_len
assert len(source_mask) == self.args.source_max_len
if self.args.experiment_type == 'doc_only':
document = 'document: ' + e.context
elif self.args.experiment_type == 'chat_document':
document = 'chat: ' + e.source + 'document: ' + e.context
elif self.args.experiment_type == 'chat_wizard':
context = e.context
context = ast.literal_eval(context)
all_docs = ''
for doc in context:
title = list(doc.keys())[0]
passage = ' '.join(doc[title])
all_docs = all_docs + ' title: ' + title + ' text: ' + passage
document = 'chat: ' + e.source + ' document: ' + all_docs
else:
print('Unrecongnized argument for experiment type')
doc_tokens = self.tokenizer.tokenize(document)[:self.args.source_max_len-2]
doc_ids = [config.bos_token_id] + self.tokenizer.convert_tokens_to_ids(doc_tokens) + [config.eos_token_id] # <s> ... </s>
doc_len = len(doc_ids)
doc_mask = [1] * doc_len
padding_len = self.args.source_max_len - doc_len
doc_ids += ([config.pad_token_id] * padding_len)
doc_mask += ([0] * padding_len)
assert len(doc_ids) == self.args.source_max_len
assert len(doc_mask) == self.args.source_max_len
# Process target information
answer = e.target
answer_tokens = self.tokenizer.tokenize(answer)[:self.args.target_max_len-1] # -1 for <s> or </s>
if len(answer_tokens) == 0:
print(e.source, e.context, e.target)
continue
target_ids = [config.bos_token_id] + self.tokenizer.convert_tokens_to_ids(answer_tokens) # <s> ...
target_labels = self.tokenizer.convert_tokens_to_ids(answer_tokens) + [config.eos_token_id] # ... </s>
target_len = len(target_ids)
padding_len = self.args.target_max_len - target_len
target_ids += ([config.pad_token_id] * padding_len)
target_labels += ([-100] * padding_len) # -100 is the default index to be ignored
assert len(target_ids) == self.args.target_max_len
assert len(target_labels) == self.args.target_max_len
f = InputFeatures(
index,
source_ids,
source_mask,
source_len,
target_ids,
target_labels,
target_len,
doc_ids,
doc_mask,
doc_len
)
features.append(f)
index += 1
return features
def init_seed(self):
random.seed(self.args.seed)
np.random.seed(self.args.seed)
torch.manual_seed(self.args.seed)
if torch.cuda.is_available:
torch.cuda.manual_seed(self.args.seed)
def get_optimizer(self):
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in self.generator.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": 0.01},
{"params": [p for n, p in self.generator.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0},
]
return AdamW(optimizer_grouped_parameters, lr=self.args.learning_rate, eps=self.args.adam_epsilon)
def get_train_dataloader(self,
train_features,
train_batch_size):
all_source_ids = torch.tensor([f.source_ids for f in train_features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask for f in train_features], dtype=torch.long)
all_source_len = torch.tensor([f.source_len for f in train_features], dtype=torch.long)
all_target_ids = torch.tensor([f.target_ids for f in train_features], dtype=torch.long)
all_target_labels = torch.tensor([f.target_labels for f in train_features], dtype=torch.long)
all_target_len = torch.tensor([f.target_len for f in train_features], dtype=torch.long)
all_doc_ids = torch.tensor([f.doc_ids for f in train_features], dtype=torch.long)
all_doc_mask = torch.tensor([f.doc_mask for f in train_features], dtype=torch.long)
all_doc_len = torch.tensor([f.doc_len for f in train_features], dtype=torch.long)
train_data = TensorDataset(
all_source_ids,
all_source_mask,
all_source_len,
all_target_ids,
all_target_labels,
all_target_len,
all_doc_ids,
all_doc_mask,
all_doc_len
)
train_sampler = RandomSampler(train_data)
return DataLoader(train_data, sampler=train_sampler, batch_size=train_batch_size)
def get_eval_dataloader(self, dev_features, dev_batch_size):
all_example_indices = torch.tensor([f.example_index for f in dev_features], dtype=torch.long)
all_source_ids = torch.tensor([f.source_ids for f in dev_features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask for f in dev_features], dtype=torch.long)
all_source_len = torch.tensor([f.source_len for f in dev_features], dtype=torch.long)
all_target_ids = torch.tensor([f.target_ids for f in dev_features], dtype=torch.long)
all_target_labels = torch.tensor([f.target_labels for f in dev_features], dtype=torch.long)
all_target_len = torch.tensor([f.target_len for f in dev_features], dtype=torch.long)
all_doc_ids = torch.tensor([f.doc_ids for f in dev_features], dtype=torch.long)
all_doc_mask = torch.tensor([f.doc_mask for f in dev_features], dtype=torch.long)
all_doc_len = torch.tensor([f.doc_len for f in dev_features], dtype=torch.long)
eval_data = TensorDataset(
all_example_indices,
all_source_ids,
all_source_mask,
all_source_len,
all_target_ids,
all_target_labels,
all_target_len,
all_doc_ids,
all_doc_mask,
all_doc_len
)
eval_sampler = SequentialSampler(eval_data)
return DataLoader(eval_data, sampler=eval_sampler, batch_size=dev_batch_size)
def get_train_batch_data(self, batch):
batch_source_max_len = batch[2].max().item()
batch_target_max_len = batch[5].max().item()
batch_doc_max_len = batch[8].max().item()
batch_total_tokens = batch[5].sum().item()
batch = tuple(t.to(self.device) for t in batch)
source_ids, source_mask, _, target_ids, target_labels, _, doc_ids, doc_mask, _ = batch
source_ids = source_ids[:, :batch_source_max_len]
source_mask = source_mask[:, :batch_source_max_len]
doc_ids = doc_ids[:, :batch_doc_max_len]
doc_mask = doc_mask[:, :batch_doc_max_len]
target_ids = target_ids[:, :batch_target_max_len]
target_labels = target_labels[:, :batch_target_max_len].contiguous()
return source_ids, source_mask, target_ids, target_labels, doc_ids, doc_mask, batch_total_tokens
def get_eval_batch_data(self, batch):
example_indices = batch[0].tolist()
batch_source_max_len = batch[3].max().item()
batch_target_max_len = batch[6].max().item()
batch_total_tokens = batch[6].sum().item()
batch_doc_max_len = batch[9].max().item()
batch = tuple(t.to(self.device) for t in batch)
_, source_ids, source_mask, __, target_ids, target_labels, _, doc_ids, doc_mask, _ = batch
source_ids = source_ids[:, :batch_source_max_len]
source_mask = source_mask[:, :batch_source_max_len]
doc_ids = doc_ids[:, :batch_doc_max_len]
doc_mask = doc_mask[:, :batch_doc_max_len]
target_ids = target_ids[:, :batch_target_max_len]
target_labels = target_labels[:, :batch_target_max_len].contiguous()
return example_indices, source_ids, source_mask, target_ids, target_labels, doc_ids, doc_mask, batch_total_tokens
    def encode(self, source_ids, source_mask, doc_ids, doc_mask):
        """Encode the dialogue history and the document into separate representations.

        Returns ``(source_reps, (doc_reps_prefix, doc_reps_suffix))`` where the
        document is encoded twice with complementary triangular token masks.

        NOTE(review): ``token_mask`` is a custom kwarg of this project's patched
        encoder (stock HF BART encoders do not accept it); ``fill_with_neg_inf``
        is a project helper — confirm both against the modeling code.
        """
        # (B, N, L) -> (B*N, L) -> (B*N, L, D) -> (B, N*L, D) --> (B, N, L, D)
        # [(B, L1), (B, L2)] --> [(B, L1, D), (B, L2, D)]
        # (B, N, L) -> (B*N, L) -> (B*N, L, D) -> (B, N*L, D) --> Aggregate[(B, N*L, V) + (B, L, V)] --> (B, L, V)
        # (B, N, L) -> (B*N, L) -> (B, N*L)
        source_reps = self.generator.model.encoder(
            input_ids=source_ids,
            attention_mask=source_mask
        )
        # Encoder returns a tuple; element 0 is the last hidden state.
        source_reps = source_reps[0]
        # 'doc-enc' model variants carry a dedicated document encoder; others
        # reuse the shared encoder for the document as well.
        if self.args.model_type.find('doc-enc') >= 0:
            doc_encoder = self.generator.model.doc_encoder
        else:
            doc_encoder = self.generator.model.encoder
        # Upper-triangular mask (offset 1): each token attends only to itself
        # and earlier document tokens (prefix view).
        doc_reps_prefix = doc_encoder(input_ids=doc_ids, attention_mask=doc_mask,
            token_mask = torch.triu(fill_with_neg_inf(torch.zeros(doc_mask.shape[-1], doc_mask.shape[-1])), 1).to(dtype=torch.int, device=doc_ids.device))
        # Lower-triangular mask (offset 0): the complementary suffix view.
        doc_reps_suffix = doc_encoder(input_ids=doc_ids, attention_mask=doc_mask,
            token_mask = torch.tril(fill_with_neg_inf(torch.zeros(doc_mask.shape[-1], doc_mask.shape[-1])), 0).to(dtype=torch.int, device=doc_ids.device))
        return source_reps, (doc_reps_prefix[0], doc_reps_suffix[0])
    def train(self):
        """Fine-tune the generator.

        Loads (or builds and caches) dev and train features, then runs the
        epoch/step loop with gradient accumulation, gradient clipping, a linear
        warmup schedule, periodic validation, and checkpointing via ``self.save``.
        """
        self.init_seed()
        # Dev features are cached on disk keyed by tokenizer, experiment type,
        # and sequence-length budgets so repeated runs skip re-tokenization.
        cached_features_devfile = os.path.join(
            self.args.data_dir,
            "cached_Bart_{}_DoHA_task_{}_dev_srcLen{}_tgtLen{}".format(
                self.tokenizer.__class__.__name__,
                self.args.experiment_type,
                str(self.args.source_max_len),
                str(self.args.target_max_len),
            ),
        )
        dev_examples = self.load_examples(self.args.data_dir, 'dev.tsv')
        if os.path.exists(cached_features_devfile):
            dev_features = torch.load(cached_features_devfile)
        else:
            dev_features = self.convert_examples_to_features(dev_examples)
            torch.save(dev_features, cached_features_devfile)
        dev_data = (dev_examples, dev_features)
        cached_features_trainfile = os.path.join(
            self.args.data_dir,
            "cached_Bart_{}_DoHA_task_{}_train_srcLen{}_tgtLen{}".format(
                self.tokenizer.__class__.__name__,
                self.args.experiment_type,
                str(self.args.source_max_len),
                str(self.args.target_max_len),
            ),
        )
        if os.path.exists(cached_features_trainfile):
            train_features = torch.load(cached_features_trainfile)
        else:
            train_examples = self.load_examples(self.args.data_dir, 'train.tsv')
            train_features = self.convert_examples_to_features(train_examples)
            torch.save(train_features, cached_features_trainfile)
        # Effective per-step batch size after dividing by accumulation steps.
        train_batch_size = int(self.args.train_batch_size / self.args.gradient_accumulation_steps)
        num_train_steps = int(len(train_features) / train_batch_size / self.args.gradient_accumulation_steps * self.args.num_train_epochs)
        optimizer = self.get_optimizer()
        t_total = num_train_steps
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(t_total * self.args.warmup_proportion), num_training_steps=t_total)
        logger.info("***** Running training *****")
        logger.info(" Num examples = %d", len(train_features))
        logger.info(" Batch size = %d", train_batch_size)
        logger.info(" Num steps = %d", num_train_steps)
        train_dataloader = self.get_train_dataloader(train_features, train_batch_size)
        self.generator.zero_grad()
        self.generator.train()
        num_updates = 0
        # Running token-weighted loss for the periodic perplexity printout.
        curr_loss, curr_total_words = 0, 0
        if self.args.log_file_path is not None:
            f_log = open(self.args.log_file_path, 'w')
        else:
            f_log = None
        for epoch in trange(int(self.args.num_train_epochs), desc="Epoch"):
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                source_ids, source_mask, target_ids, target_labels, doc_ids, doc_mask, batch_total_tokens = self.get_train_batch_data(batch)
                source_reps, doc_reps = self.encode(source_ids, source_mask, doc_ids, doc_mask)
                # Both lm_labels and labels are passed for compatibility with
                # the project's patched generator signature.
                outputs = self.generator(input_ids=None,
                                         attention_mask=(source_mask, doc_mask),
                                         encoder_outputs=(source_reps, doc_reps),
                                         decoder_input_ids=target_ids,
                                         lm_labels=target_labels,
                                         labels=target_labels)
                loss = outputs[0]
                if self.args.gradient_accumulation_steps > 1:
                    loss = loss / self.args.gradient_accumulation_steps
                # Weight by token count so perplexity is per-token, not per-batch.
                curr_loss += (loss.item()*batch_total_tokens)
                curr_total_words += batch_total_tokens
                loss.backward()
                # Optimizer step only once per accumulation window.
                if (step + 1) % self.args.gradient_accumulation_steps == 0:
                    torch.nn.utils.clip_grad_norm_(self.generator.parameters(), self.args.max_grad_norm)
                    optimizer.step()
                    scheduler.step()
                    self.generator.zero_grad()
                    num_updates += 1
                    # Print training stats every 10 updates.
                    if (num_updates+1) % 10 == 0:
                        train_stat_curr = {
                            'step': step,
                            'num_updates': num_updates,
                            'epoch': epoch,
                            'loss': loss.item(),
                            # min(..., 100) caps exp() to avoid overflow early in training.
                            'train_ppl': math.exp(min(curr_loss/curr_total_words, 100))
                        }
                        print(str(train_stat_curr))
                        sys.stdout.flush()
                        curr_loss, curr_total_words = 0, 0
                    # Periodic validation + checkpoint.
                    if num_updates % self.args.validation_timing == 0:
                        results = self.evaluate(dev_data)
                        results["steps"] = step
                        results["num_updates"] = num_updates
                        if f_log is not None:
                            f_log.write(str(results))
                            f_log.write('\n')
                            f_log.flush()
                        self.save(num_updates)
        if f_log is not None:
            f_log.close()
    def predict(self, dev_data):
        """Generate one prediction per example and accumulate evaluation loss.

        Returns ``(pred, total_eval_loss, total_words)`` where ``pred[i]`` is the
        decoded answer for example ``i``. The model is put in eval mode for the
        duration and restored to train mode before returning.
        """
        dev_examples, dev_features = dev_data
        eval_dataloader = self.get_eval_dataloader(dev_features, self.args.eval_batch_size)
        self.generator.eval()
        # Indexed by example position; SequentialSampler plus the index column
        # lets us place each decoded answer back at its original slot.
        pred = [None] * len(dev_examples)
        total_eval_loss, total_words = 0, 0
        for batch in tqdm(eval_dataloader, desc="Generating"):
            example_indices, source_ids, source_mask, target_ids, \
                target_labels, doc_ids, doc_mask, batch_total_tokens = self.get_eval_batch_data(batch)
            with torch.no_grad():
                source_reps, doc_reps = self.encode(source_ids, source_mask, doc_ids, doc_mask)
                outputs = self.generator(input_ids=None,
                                         attention_mask=(source_mask, doc_mask),
                                         encoder_outputs=(source_reps, doc_reps),
                                         decoder_input_ids=target_ids,
                                         lm_labels=target_labels,
                                         labels=target_labels)
                loss = outputs[0]
                total_eval_loss += (loss.item()*batch_total_tokens)
                total_words += batch_total_tokens
                # NOTE(review): input_ids is passed source_mask, not source_ids.
                # Since encoder_outputs is supplied, generate() may only use
                # input_ids for its shape — but confirm this is intentional.
                predicted_ids = self.generator.generate(
                    input_ids=source_mask,
                    attention_mask=(source_mask, doc_mask),
                    encoder_outputs=(source_reps, doc_reps),
                    num_beams=1,
                    max_length=self.args.target_max_len,
                    early_stopping=True,
                    do_sample=True,
                    temperature=1.0,
                    top_k=0,
                    top_p=0.9,
                )
            predicted_ids = predicted_ids.to(self.cpu)
            for i in range(len(example_indices)):
                # Keep the first decoded answer for an example; duplicates can
                # appear if the last batch is padded.
                if pred[example_indices[i]] is not None:
                    continue
                answer = self.tokenizer.decode(
                    predicted_ids[i].tolist(),
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=False
                )
                pred[example_indices[i]] = answer
        self.generator.train()
        return pred, total_eval_loss, total_words
def evaluate_file(self, eval_file='dev.tsv', save_file=True):
examples = self.load_examples(self.args.data_dir, eval_file)
features = self.convert_examples_to_features(examples)
pred, total_eval_loss, total_words = self.predict((examples, features))
results = evaluate_nq(examples, pred, total_eval_loss, total_words)
if save_file:
with codecs.open(self.args.output_dir + f'{eval_file}_predictions.txt', 'w', 'utf-8') as out:
for p in pred:
p = self.clean_text(p)
out.write(p + '\n')
with codecs.open(self.args.output_dir + f'{eval_file}_reference.txt', 'w', 'utf-8') as out:
for example in examples:
target = self.clean_text(example.target)
out.write(target + '\n')
return results
def evaluate(self, dev_data=None, save_file=False):
if dev_data is None:
cached_features_devfile = os.path.join(
self.args.data_dir,
"cached_Bart_{}_DoHA_task_{}_dev_srcLen{}_tgtLen{}".format(
self.tokenizer.__class__.__name__,
self.args.experiment_type,
str(self.args.source_max_len),
str(self.args.target_max_len),
),
)
dev_examples = self.load_examples(self.args.data_dir, 'dev.tsv')
if os.path.exists(cached_features_devfile):
dev_features = torch.load(cached_features_devfile)
else:
dev_features = self.convert_examples_to_features(dev_examples)
torch.save(dev_features, cached_features_devfile)
else:
dev_examples, dev_features = dev_data
pred, total_eval_loss, total_words = self.predict((dev_examples, dev_features))
results = evaluate_nq(dev_examples, pred, total_eval_loss, total_words)
if save_file:
with codecs.open(self.args.output_dir + 'dev_predictions.txt', 'w', 'utf-8') as out:
for p in pred:
p = self.clean_text(p)
out.write(p + '\n')
with codecs.open(self.args.output_dir + 'dev_reference.txt', 'w', 'utf-8') as out:
for example in dev_examples:
target = self.clean_text(example.target)
out.write(target + '\n')
return results
def clean_text(self, text):
text = ' '.join(text.split('\n'))
text = ' '.join(text.split('\t'))
text = ' '.join(text.split())
return text
def generate(self):
if self.args.experiment_type == 'chat_wizard':
self.generate_wizard()
else:
cached_features_testfile = os.path.join(
self.args.data_dir,
"cached_Bart_{}_DoHA_task_{}_test_srcLen{}_tgtLen{}".format(
self.tokenizer.__class__.__name__,
self.args.experiment_type,
str(self.args.source_max_len),
str(self.args.target_max_len),
),
)
test_examples = self.load_examples(self.args.data_dir, 'test.tsv')
if os.path.exists(cached_features_testfile):
test_features = torch.load(cached_features_testfile)
else:
test_features = self.convert_examples_to_features(test_examples)
torch.save(test_features, cached_features_testfile)
pred, total_eval_loss, total_words = self.predict((test_examples, test_features))
with codecs.open(self.args.output_dir + 'predictions.txt', 'w', 'utf-8') as out:
for p in pred:
p = self.clean_text(p)
out.write(p + '\n')
with codecs.open(self.args.output_dir + 'reference.txt', 'w', 'utf-8') as out:
for example in test_examples:
target = self.clean_text(example.target)
out.write(target + '\n')
results = evaluate_nq(test_examples, pred, total_eval_loss, total_words)
print(str(results))
def generate_wizard(self):
cached_features_testfile = os.path.join(
self.args.data_dir,
"cached_Bart_{}_DoHA_task_{}_test_seen_srcLen{}_tgtLen{}".format(
self.tokenizer.__class__.__name__,
self.args.experiment_type,
str(self.args.source_max_len),
str(self.args.target_max_len),
),
)
test_examples = self.load_examples(self.args.data_dir, 'test_seen.tsv')
if os.path.exists(cached_features_testfile):
test_features = torch.load(cached_features_testfile)
else:
test_features = self.convert_examples_to_features(test_examples)
torch.save(test_features, cached_features_testfile)
pred, total_eval_loss, total_words = self.predict((test_examples, test_features))
with codecs.open(self.args.output_dir + 'predictions_seen.txt', 'w', 'utf-8') as out:
for p in pred:
p = self.clean_text(p)
out.write(p + '\n')
with codecs.open(self.args.output_dir + 'reference_seen.txt', 'w', 'utf-8') as out:
for example in test_examples:
target = self.clean_text(example.target)
out.write(target + '\n')
with codecs.open(self.args.output_dir + 'all_results_seen.csv', 'w', 'utf-8') as out:
writer_ = csv.writer(out, delimiter=',')
for i in range(len(pred)):
writer_.writerow([i, test_examples[i].target, pred[i]])
results = evaluate_nq(test_examples, pred, total_eval_loss, total_words)
print(str(results))
cached_features_testfile = os.path.join(
self.args.data_dir,
"cached_Bart_{}_DoHA_task_{}_test_unseen_srcLen{}_tgtLen{}".format(
self.tokenizer.__class__.__name__,
self.args.experiment_type,
str(self.args.source_max_len),
str(self.args.target_max_len),
),
)
test_examples = self.load_examples(self.args.data_dir, 'test_unseen.tsv')
if os.path.exists(cached_features_testfile):
test_features = torch.load(cached_features_testfile)
else:
test_features = self.convert_examples_to_features(test_examples)
torch.save(test_features, cached_features_testfile)
pred, total_eval_loss, total_words = self.predict((test_examples, test_features))
with codecs.open(self.args.output_dir + 'predictions_unseen.txt', 'w', 'utf-8') as out:
for p in pred:
p = self.clean_text(p)
out.write(p + '\n')
with codecs.open(self.args.output_dir + 'reference_unseen.txt', 'w', 'utf-8') as out:
for example in test_examples:
target = self.clean_text(example.target)
out.write(target + '\n')
with codecs.open(self.args.output_dir + 'all_results_unseen.csv', 'w', 'utf-8') as out:
writer_ = csv.writer(out, delimiter=',')
for i in range(len(pred)):
writer_.writerow([i, test_examples[i].target, pred[i]])
results = evaluate_nq(test_examples, pred, total_eval_loss, total_words)
print(str(results))
def freeze_params(model):
    """Disable gradient tracking for every parameter of ``model``."""
    for parameter in model.parameters():
        parameter.requires_grad = False
# Model variants whose shared components are frozen during fine-tuning.
FREEZE_MODEL_TYPES = {'doc-attn', 'doc-enc-fr'}

def freeze_model(qa):
    """For frozen model variants, disable gradients everywhere except the
    decoder's document-attention modules, then log the trainable-tensor count.

    Freezes the shared embeddings, the encoder, the decoder embeddings and
    embedding layer norm, and every per-layer decoder sub-module listed below
    (the sub-modules NOT listed remain trainable).
    """
    if qa.args.model_type in FREEZE_MODEL_TYPES:
        decoder = qa.generator.model.decoder
        # Shared embeddings, encoder, and decoder embedding machinery.
        for module in (qa.generator.model.shared,
                       qa.generator.model.encoder,
                       decoder.embed_tokens,
                       decoder.embed_positions,
                       decoder.layernorm_embedding):
            freeze_params(module)
        # Per decoder layer: freeze everything except the document attention.
        for layer in decoder.layers:
            for sub in (layer.self_attn, layer.self_attn_layer_norm,
                        layer.encoder_attn, layer.encoder_attn_layer_norm,
                        layer.fc1, layer.fc2, layer.final_layer_norm):
                freeze_params(sub)
    train_params = [
        p
        for p in qa.generator.parameters()
        if p.requires_grad
    ]
    logging.info(f'Model type: {qa.args.model_type} #Trainable tensors: {len(train_params)}')
class ModelArgs:
    """Lightweight argument container mirroring the CLI options used at inference."""

    def __init__(self, model_type='doha', model_file_path='./model.bin',
                 experiment_type='chat_document', eval_batch_size=16):
        # Architecture variant and fixed reproducibility seed.
        self.model_type = model_type
        self.seed = 42
        # Base pretrained checkpoint and fine-tuned weights path.
        self.model_name = 'facebook/bart-large'
        self.model_file_path = model_file_path
        # Token-length budgets for the encoder input and decoder output.
        self.source_max_len = 1024
        self.target_max_len = 128
        self.experiment_type = experiment_type
        self.eval_batch_size = eval_batch_size
def main():
    """Entry point: build the DoHA QA model and dispatch on the requested mode.

    Exactly one of ``do_train``, ``do_eval``, ``do_generate`` is honored, in
    that priority order; anything else prints a usage hint.
    """
    qa = MultiBartQA()
    logging.info(qa.args)
    if qa.args.do_train:
        # Freeze shared components first (no-op for non-frozen model types).
        freeze_model(qa)
        qa.train()
    elif qa.args.do_eval:
        results = qa.evaluate_file('dev.tsv', save_file=True)
        print('dev.tsv', str(results))
        qa.evaluate_file('test.tsv', save_file=True)
        print('test.tsv Done')
    elif qa.args.do_generate:
        qa.generate()
    else:
        print("Specify whether to train, eval or generate")

if __name__ == '__main__':
    main()
|
import Vue from 'vue'
import Vuex from 'vuex'
Vue.use(Vuex)
export default new Vuex.Store({
  state: {
    // 1. list: the logged-in user's profile record (0 until login succeeds)
    list: 0,
    // 2. isLogin: whether a user is currently logged in
    isLogin: false,
    // 3. bookShelf: the user's bookshelf entries (0 until populated)
    bookShelf: 0,
    // 4. booki: number of books on the shelf
    booki: 0,
  },
  mutations: {
    // 1. Called with the server response after a successful login.
    data(state, res) {
      // 1.1 Store the returned user record.
      state.list = res.user[0];
      // 1.2 BUG FIX: the original read `res.book.length` unconditionally,
      // which throws a TypeError when the server response has no `book`
      // field (the comment said "if not undefined" but the code never
      // checked). Guard with Array.isArray before touching .length.
      if (Array.isArray(res.book) && res.book.length > 0) {
        state.bookShelf = res.book;
        state.booki = res.book.length;
      }
      // 1.3 Mark the session as logged in.
      state.isLogin = true;
    },
    // 2. Append a book to the shelf when the user adds one.
    addBook(state, res) {
      // NOTE(review): direct index assignment is not reactive in Vue 2, and
      // while bookShelf still holds its initial value 0 this silently no-ops.
      // Consider initializing bookShelf as [] and using push; kept as-is here
      // to preserve the existing behavior for array-valued shelves.
      state.bookShelf[state.booki] = res;
      state.booki++;
    },
  },
  actions: {
  },
  getters: {
    // 1. Logged-in user record.
    result(state) {
      return state.list;
    },
    // 2. Current login state.
    isLogin(state) {
      return state.isLogin;
    },
    // 3. Bookshelf contents.
    books(state) {
      return state.bookShelf;
    },
  },
})
|
#include "evas_common.h"
#include "evas_private.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <gif_lib.h>
static Eina_Bool evas_image_load_file_head_gif(Image_Entry *ie, const char *file, const char *key, int *error) EINA_ARG_NONNULL(1, 2, 4);
static Eina_Bool evas_image_load_file_data_gif(Image_Entry *ie, const char *file, const char *key, int *error) EINA_ARG_NONNULL(1, 2, 4);
/* Loader vtable registered with Evas: header probe + pixel decode entry points.
 * The first field is a flag consumed by the module core — presumably marks the
 * loader as thread-safe; confirm against Evas_Image_Load_Func's declaration. */
static Evas_Image_Load_Func evas_image_load_gif_func =
{
   EINA_TRUE,
   evas_image_load_file_head_gif,
   evas_image_load_file_data_gif
};
/* Probe a GIF file's header: record the image dimensions and whether a
 * transparency index exists (sets ie->flags.alpha), without decoding pixels.
 * Returns EINA_TRUE on success; on failure sets *error and returns EINA_FALSE. */
static Eina_Bool
evas_image_load_file_head_gif(Image_Entry *ie, const char *file, const char *key __UNUSED__, int *error)
{
   int fd;
   GifFileType *gif;
   GifRecordType rec;
   int done;
   int w;
   int h;
   int alpha;

   done = 0;
   w = 0;
   h = 0;
   alpha = -1; /* -1 means "no transparency extension seen yet" */
#ifndef __EMX__
   fd = open(file, O_RDONLY);
#else
   fd = open(file, O_RDONLY | O_BINARY);
#endif
   if (fd < 0)
     {
        *error = EVAS_LOAD_ERROR_DOES_NOT_EXIST;
        return EINA_FALSE;
     }
   /* giflib takes ownership of fd from here on. */
   gif = DGifOpenFileHandle(fd);
   if (!gif)
     {
        close(fd);
        *error = EVAS_LOAD_ERROR_UNKNOWN_FORMAT;
        return EINA_FALSE;
     }
   /* Walk the record stream until the trailer; we only need the first image
    * descriptor and any graphics-control extension preceding it. */
   do
     {
        if (DGifGetRecordType(gif, &rec) == GIF_ERROR)
          {
             /* PrintGifError(); */
             rec = TERMINATE_RECORD_TYPE;
          }
        if ((rec == IMAGE_DESC_RECORD_TYPE) && (!done))
          {
             if (DGifGetImageDesc(gif) == GIF_ERROR)
               {
                  /* PrintGifError(); */
                  /* NOTE(review): on failure rec is set to TERMINATE but
                   * w/h are still read from gif->Image below — confirm the
                   * fields are zeroed in that case so the size check trips. */
                  rec = TERMINATE_RECORD_TYPE;
               }
             w = gif->Image.Width;
             h = gif->Image.Height;
             /* Reject degenerate or absurdly large images up front. */
             if ((w < 1) || (h < 1) || (w > IMG_MAX_SIZE) || (h > IMG_MAX_SIZE) ||
                 IMG_TOO_BIG(w, h))
               {
                  DGifCloseFile(gif);
                  if (IMG_TOO_BIG(w, h))
                    *error = EVAS_LOAD_ERROR_RESOURCE_ALLOCATION_FAILED;
                  else
                    *error = EVAS_LOAD_ERROR_GENERIC;
                  return EINA_FALSE;
               }
             done = 1;
          }
        else if (rec == EXTENSION_RECORD_TYPE)
          {
             int ext_code;
             GifByteType *ext;

             ext = NULL;
             DGifGetExtension(gif, &ext_code, &ext);
             while (ext)
               {
                  /* 0xf9 = graphics control extension; bit 0 of ext[1] says a
                   * transparent color index follows in ext[4]. */
                  if ((ext_code == 0xf9) && (ext[1] & 1) && (alpha < 0))
                    {
                       alpha = (int)ext[4];
                    }
                  ext = NULL;
                  DGifGetExtensionNext(gif, &ext);
               }
          }
     } while (rec != TERMINATE_RECORD_TYPE);
   if (alpha >= 0) ie->flags.alpha = 1;
   ie->w = w;
   ie->h = h;
   DGifCloseFile(gif);
   *error = EVAS_LOAD_ERROR_NONE;
   return EINA_TRUE;
}
/* Decode the first image of a GIF into ie's ARGB surface. Handles interlaced
 * row order and maps the transparent palette index to the (premultiplied)
 * background color with zero alpha. Returns EINA_TRUE on success; on failure
 * sets *error and returns EINA_FALSE.
 *
 * Changes vs. the original:
 *  - the rows-allocation failure path now sets *error before returning
 *    (it previously returned 0 with *error untouched);
 *  - the dead progress-tracking locals (per, per_inc, last_y, last_per) and
 *    the unused "per += per_inc" accumulation were removed. */
static Eina_Bool
evas_image_load_file_data_gif(Image_Entry *ie, const char *file, const char *key __UNUSED__, int *error)
{
   /* Interlaced GIFs deliver rows in four passes; these tables give each
    * pass's first row and row stride. */
   int intoffset[] = { 0, 4, 2, 1 };
   int intjump[] = { 8, 8, 4, 2 };
   int fd;
   GifFileType *gif;
   GifRecordType rec;
   GifRowType *rows;
   ColorMapObject *cmap;
   DATA32 *ptr;
   int done;
   int w;
   int h;
   int alpha;
   int i;
   int j;
   int bg;
   int r;
   int g;
   int b;

   rows = NULL;
   done = 0;
   w = 0;
   h = 0;
   alpha = -1; /* transparent palette index; -1 = none seen */
#ifndef __EMX__
   fd = open(file, O_RDONLY);
#else
   fd = open(file, O_RDONLY | O_BINARY);
#endif
   if (fd < 0)
     {
        *error = EVAS_LOAD_ERROR_DOES_NOT_EXIST;
        return EINA_FALSE;
     }
   /* giflib takes ownership of fd from here on. */
   gif = DGifOpenFileHandle(fd);
   if (!gif)
     {
        close(fd);
        *error = EVAS_LOAD_ERROR_UNKNOWN_FORMAT;
        return EINA_FALSE;
     }
   do
     {
        if (DGifGetRecordType(gif, &rec) == GIF_ERROR)
          {
             /* PrintGifError(); */
             rec = TERMINATE_RECORD_TYPE;
          }
        if ((rec == IMAGE_DESC_RECORD_TYPE) && (!done))
          {
             if (DGifGetImageDesc(gif) == GIF_ERROR)
               {
                  /* PrintGifError(); */
                  rec = TERMINATE_RECORD_TYPE;
               }
             w = gif->Image.Width;
             h = gif->Image.Height;
             rows = malloc(h * sizeof(GifRowType *));
             if (!rows)
               {
                  DGifCloseFile(gif);
                  /* BUG FIX: the original returned 0 here without setting
                   * *error, unlike every other failure path in this loader. */
                  *error = EVAS_LOAD_ERROR_RESOURCE_ALLOCATION_FAILED;
                  return EINA_FALSE;
               }
             /* NULL-init so partial-allocation cleanup below is safe. */
             for (i = 0; i < h; i++)
               {
                  rows[i] = NULL;
               }
             for (i = 0; i < h; i++)
               {
                  rows[i] = malloc(w * sizeof(GifPixelType));
                  if (!rows[i])
                    {
                       DGifCloseFile(gif);
                       for (i = 0; i < h; i++)
                         {
                            if (rows[i])
                              {
                                 free(rows[i]);
                              }
                         }
                       free(rows);
                       *error = EVAS_LOAD_ERROR_RESOURCE_ALLOCATION_FAILED;
                       return EINA_FALSE;
                    }
               }
             if (gif->Image.Interlace)
               {
                  /* Four-pass interlaced row order. */
                  for (i = 0; i < 4; i++)
                    {
                       for (j = intoffset[i]; j < h; j += intjump[i])
                         {
                            DGifGetLine(gif, rows[j], w);
                         }
                    }
               }
             else
               {
                  for (i = 0; i < h; i++)
                    {
                       DGifGetLine(gif, rows[i], w);
                    }
               }
             done = 1;
          }
        else if (rec == EXTENSION_RECORD_TYPE)
          {
             int ext_code;
             GifByteType *ext;

             ext = NULL;
             DGifGetExtension(gif, &ext_code, &ext);
             while (ext)
               {
                  /* 0xf9 = graphics control extension; bit 0 of ext[1] says a
                   * transparent color index follows in ext[4]. */
                  if ((ext_code == 0xf9) && (ext[1] & 1) && (alpha < 0))
                    {
                       alpha = (int)ext[4];
                    }
                  ext = NULL;
                  DGifGetExtensionNext(gif, &ext);
               }
          }
     } while (rec != TERMINATE_RECORD_TYPE);
   if (alpha >= 0) ie->flags.alpha = 1;
   evas_cache_image_surface_alloc(ie, w, h);
   if (!evas_cache_image_pixels(ie))
     {
        DGifCloseFile(gif);
        for (i = 0; i < h; i++)
          {
             free(rows[i]);
          }
        free(rows);
        *error = EVAS_LOAD_ERROR_RESOURCE_ALLOCATION_FAILED;
        return EINA_FALSE;
     }
   bg = gif->SBackGroundColor;
   /* NOTE(review): cmap can be NULL for a malformed GIF that carries neither a
    * local nor a screen color map; the dereferences below would then crash —
    * confirm whether giflib guarantees a map after a successful decode. */
   cmap = (gif->Image.ColorMap ? gif->Image.ColorMap : gif->SColorMap);
   ptr = evas_cache_image_pixels(ie);
   for (i = 0; i < h; i++)
     {
        for (j = 0; j < w; j++)
          {
             if (rows[i][j] == alpha)
               {
                  /* Transparent pixel: background color with alpha 0. */
                  r = cmap->Colors[bg].Red;
                  g = cmap->Colors[bg].Green;
                  b = cmap->Colors[bg].Blue;
                  *ptr++ = 0x00ffffff & RGB_JOIN(r, g, b);
               }
             else
               {
                  r = cmap->Colors[rows[i][j]].Red;
                  g = cmap->Colors[rows[i][j]].Green;
                  b = cmap->Colors[rows[i][j]].Blue;
                  *ptr++ = ARGB_JOIN(0xff, r, g, b);
               }
          }
     }
   evas_common_image_premul(ie);
   DGifCloseFile(gif);
   for (i = 0; i < h; i++)
     {
        free(rows[i]);
     }
   free(rows);
   *error = EVAS_LOAD_ERROR_NONE;
   return EINA_TRUE;
}
/* Module entry point: publish the GIF loader vtable. Returns 1 on success. */
static int
module_open(Evas_Module *em)
{
   if (em)
     {
        em->functions = (void *)(&evas_image_load_gif_func);
        return 1;
     }
   return 0;
}
/* Module teardown hook: intentionally empty — this loader keeps no state. */
static void
module_close(Evas_Module *em __UNUSED__)
{
}
/* Module descriptor consumed by the Evas module loader: API version, module
 * name/author fields, and the open/close hooks above. */
static Evas_Module_Api evas_modapi =
{
   EVAS_MODULE_API_VERSION,
   "gif",
   "none",
   {
     module_open,
     module_close
   }
};

/* Register this module as an image loader named "gif". */
EVAS_MODULE_DEFINE(EVAS_MODULE_TYPE_IMAGE_LOADER, image_loader, gif);
#ifndef EVAS_STATIC_BUILD_GIF
EVAS_EINA_MODULE_DEFINE(image_loader, gif);
#endif
|
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#include <u8g2_port.h>
// You may reference Drivers/drv_gpio.c for pinout
// In u8x8.h #define U8X8_USE_PINS
#define ST7920_8080_PIN_D0 31 // PB15
#define ST7920_8080_PIN_D1 30 // PB14
#define ST7920_8080_PIN_D2 29 // PB13
#define ST7920_8080_PIN_D3 28 // PB12
#define ST7920_8080_PIN_D4 38 // PC6
#define ST7920_8080_PIN_D5 39 // PC7
#define ST7920_8080_PIN_D6 40 // PC8
#define ST7920_8080_PIN_D7 41 // PC9
#define ST7920_8080_PIN_EN 15 // PA15
#define ST7920_8080_PIN_CS U8X8_PIN_NONE
#define ST7920_8080_PIN_DC 11 // PA11
#define ST7920_8080_PIN_RST 12 // PA12
/* Assign all twelve pins of an 8-bit 8080-style bus (D0..D7, WR/E, CS, DC,
 * RESET) to the given u8x8 instance. Behavior-identical to twelve individual
 * u8x8_SetPin calls; the table just keeps the index/value pairing explicit. */
void u8x8_SetPin_8Bit_8080(u8x8_t *u8x8, uint8_t d0, uint8_t d1, uint8_t d2, uint8_t d3, uint8_t d4, uint8_t d5, uint8_t d6, uint8_t d7, uint8_t wr, uint8_t cs, uint8_t dc, uint8_t reset)
{
    const struct { uint8_t index; uint8_t value; } pin_map[] = {
        { U8X8_PIN_D0, d0 },    { U8X8_PIN_D1, d1 },
        { U8X8_PIN_D2, d2 },    { U8X8_PIN_D3, d3 },
        { U8X8_PIN_D4, d4 },    { U8X8_PIN_D5, d5 },
        { U8X8_PIN_D6, d6 },    { U8X8_PIN_D7, d7 },
        { U8X8_PIN_E, wr },     { U8X8_PIN_CS, cs },
        { U8X8_PIN_DC, dc },    { U8X8_PIN_RESET, reset },
    };
    size_t i;

    for (i = 0; i < sizeof(pin_map) / sizeof(pin_map[0]); i++)
        u8x8_SetPin(u8x8, pin_map[i].index, pin_map[i].value);
}
/* MSH shell command: drive an ST7920 128x64 LCD over an 8-bit 8080 bus and
 * draw a demo string plus a snowman glyph (U+2603). argc/argv are unused but
 * required by the MSH command signature. */
static void u8g2_st7920_12864_8080_example(int argc,char *argv[])
{
    u8g2_t u8g2;

    // Initialization
    u8g2_Setup_st7920_p_128x64_f(&u8g2, U8G2_R0, u8x8_byte_8bit_8080mode, u8x8_rt_gpio_and_delay);
    u8x8_SetPin_8Bit_8080(u8g2_GetU8x8(&u8g2),
                          ST7920_8080_PIN_D0, ST7920_8080_PIN_D1,
                          ST7920_8080_PIN_D2, ST7920_8080_PIN_D3,
                          ST7920_8080_PIN_D4, ST7920_8080_PIN_D5,
                          ST7920_8080_PIN_D6, ST7920_8080_PIN_D7,
                          ST7920_8080_PIN_EN, ST7920_8080_PIN_CS,
                          ST7920_8080_PIN_DC, ST7920_8080_PIN_RST);
    u8g2_InitDisplay(&u8g2);
    /* Wake the panel (0 = power-save off). */
    u8g2_SetPowerSave(&u8g2, 0);

    // Draw Graphics
    /* full buffer example, setup procedure ends in _f */
    u8g2_ClearBuffer(&u8g2);
    u8g2_SetFont(&u8g2, u8g2_font_baby_tf);
    u8g2_DrawStr(&u8g2, 1, 18, "U8g2 on RT-Thread");
    u8g2_SendBuffer(&u8g2);

    /* 0x2603 is the snowman glyph from the unifont symbol set. */
    u8g2_SetFont(&u8g2, u8g2_font_unifont_t_symbols);
    u8g2_DrawGlyph(&u8g2, 112, 56, 0x2603 );
    u8g2_SendBuffer(&u8g2);
}

/* Expose the demo as an RT-Thread MSH shell command. */
MSH_CMD_EXPORT(u8g2_st7920_12864_8080_example, st7920 12864 LCD sample);
|
#!/usr/local/bin/python3.6
"""
The first pass at importing data to Wikidata.
Journal articles are definitely eligible to be included on Wikidata, and there
is plenty of rich metadata to pick from, so we can just iterate through each
entry and do it totally automatic.
"""
import codeswitch
import json
import os
import requests
import URLtoIdentifier
from wikidataintegrator import wdi_core, wdi_login
from wikidata_credentials import *
try:
from libs.BiblioWikidata import JournalArticles
except ImportError:
raise ImportError('Did you remember to `git submodule init` '
'and `git submodule update`?')
WIKI_SESSION = wdi_login.WDLogin(user=wikidata_username, pwd=wikidata_password)
def append_identifiers(wikidata_id,
                       doi=None,
                       pmid=None,
                       pmcid=None,
                       nioshtic=None):
    """
    Adds identifiers such as DOI and NIOSHTIC to an existing Wikidata item.

    Reconciliation of identifiers across databases helps us root out duplicates.
    Each non-None identifier becomes a WDString statement on the item, written
    in append mode, and the result is printed as a pipe-delimited log line.

    @param wikidata_id: the Q-number of the Wikidata item to edit
    @param doi: string; defaults to None
    @param pmid: string; defaults to None
    @param pmcid: string; defaults to None
    @param nioshtic: string; defaults to None
    """
    # Identifier -> Wikidata property mapping (DOI, PubMed, PMC, NIOSHTIC).
    identifier_props = (
        (doi, 'P356'),
        (pmid, 'P698'),
        (pmcid, 'P932'),
        (nioshtic, 'P2880'),
    )
    data = [wdi_core.WDString(value=value, prop_nr=prop)
            for value, prop in identifier_props
            if value is not None]
    append_value = ['P356', 'P698', 'P932', 'P2880']
    wikidata_item = wdi_core.WDItemEngine(
        wd_item_id=wikidata_id, data=data, append_value=append_value)
    wikidata_item.write(WIKI_SESSION)
    # Log the edit; None identifiers print as empty fields.
    print('|'.join([wikidata_id,
                    doi or '',
                    pmid or '',
                    pmcid or '',
                    nioshtic or '']))
def process_data(nioshtic_data):
    """
    The main method that kicks off the Wikidata editing. Takes a big bunch of
    data and goes through it.

    This does not handle the data within the NIOSHTIC columns themselves. It
    just looks for identifiers and then uses those identifiers to make API calls
    to the appropriate databases. The integration of NIOSHTIC content itself is
    handled through a separate class.

    @param nioshtic_data: dictionary with "entries" and "headers" keys
    """
    for entry in nioshtic_data['entries']:
        # Entries without a NIOSHTIC number ('NN') cannot be reconciled.
        if 'NN' not in entry:
            continue
        # If these values are populated, they were populated via the Wikidata
        # item as identified via the NIOSHTIC ID, meaning the NIOSHTIC ID is
        # already there and there is already an item filled out.
        if 'DOI' in entry \
        or 'PubMed ID' in entry \
        or 'PMCID' in entry \
        or 'Wikidata' in entry \
        or 'LT' not in entry:
            continue
        wikidata_id = []
        # The "interesting" factor: When the Wikidata item is known but has none
        # of those other identifiers, yet Citoid turns out a result anyway.
        # Meaning that the item is missing a non-NIOSHTIC identifier.
        interesting = False
        # NOTE(review): the continue above already skips every entry that has
        # a 'Wikidata' key, so this branch looks unreachable and `interesting`
        # appears to stay False — confirm whether the skip list should exclude
        # 'Wikidata' instead.
        if 'Wikidata' in entry:
            if 'DT' in entry:
                if 'chapter' not in entry['DT'] and 'abstract' not in entry['DT']:
                    wikidata_id.append(entry['Wikidata'])
                    interesting = True
            else:
                wikidata_id.append(entry['Wikidata'])
                interesting = True
        # Extract DOI/PMID/PMCID from the entry's link ('LT') field.
        ident_block = URLtoIdentifier.convert(entry['LT'])
        doi = ident_block['doi']  # string or None
        pmid = ident_block['pmid']  # string or None
        pmcid = ident_block['pmcid']  # string or None
        # Collect any existing Wikidata items already carrying these IDs.
        if doi is not None and codeswitch.doi_to_wikidata(doi) is not None:
            wikidata_id.append(codeswitch.doi_to_wikidata(doi))
        if pmid is not None and codeswitch.pmid_to_wikidata(pmid) is not None:
            wikidata_id.append(codeswitch.pmid_to_wikidata(pmid))
        if pmcid is not None and codeswitch.pmcid_to_wikidata(pmcid) is not None:
            wikidata_id.append(codeswitch.pmcid_to_wikidata(pmcid))
        if interesting == True \
        and (doi is not None or pmid is not None or pmcid is not None):
            # wikidata_id must be defined as well
            for single_wikidata_id in wikidata_id:
                append_identifiers(
                    single_wikidata_id, doi=doi, pmid=pmid, pmcid=pmcid)
        else:
            if wikidata_id == []:
                # No Wikidata ID was found amongst the identifiers. This means
                # the item truly does not exist, best we can tell.
                if doi is not None or pmid is not None or pmcid is not None:
                    # P859 = sponsor/funder statement pointing at Q60346.
                    add_data = [
                        wdi_core.WDItemID(value='Q60346', prop_nr='P859')
                    ]
                    # NOTE(review): both branches below append the same P2880
                    # statement — the DT check currently has no effect.
                    if 'DT' in entry:
                        if 'abstract' in entry['DT'] or 'book' in entry['DT'] or 'chapter' in entry['DT']:
                            add_data.append(
                                wdi_core.WDString(
                                    value=entry['NN'], prop_nr='P2880'))
                        else:
                            add_data.append(
                                wdi_core.WDString(
                                    value=entry['NN'], prop_nr='P2880'))
                    JournalArticles.item_creator([{
                        'doi': doi,
                        'pmcid': pmcid,
                        'pmid': pmid,
                        'data': add_data
                    }])
                    # If entry['DT'] is Abstract or Chapter, the item on that
                    # thing will be created separately from its container.
            else:
                # Citoid found a DOI/PMID/PMCID that matched with an existing
                # Wikidata entry, which means the Wikidata entry exists but just
                # has no assigned NIOSHTIC-ID.
                # NOTE(review): both branches below do the same thing — the DT
                # check currently has no effect here either.
                if 'DT' in entry:
                    if 'journal article' in entry['DT'] or 'book' in entry['DT']:
                        for single_wikidata_id in wikidata_id:
                            append_identifiers(
                                single_wikidata_id, nioshtic=entry['NN'])
                    else:
                        for single_wikidata_id in wikidata_id:
                            append_identifiers(
                                single_wikidata_id, nioshtic=entry['NN'])
def process_file(filename):
    """
    Loads a JSON file and runs it through the item create/edit methods.

    @param filename: name of file to process (e.g. output.txt.json)
    """
    with open(filename) as handle:
        contents = json.load(handle)
    process_data(contents)
    print("Processed: " + filename)
def main():
    """
    If this file is invoked from the command line, autodiscover JSON
    blobs in the raw/ subdirectory and process them.
    """
    # Lazily filter the directory listing down to JSON files.
    json_names = (name for name in os.listdir('raw/')
                  if name.lower().endswith('.json'))
    for name in json_names:
        process_file('raw/' + name)


if __name__ == '__main__':
    main()
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for interacting with Google Cloud Platform APIs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import time
from apiclient import discovery
import apiclient.errors
# Cap exponential backoff for polling APIs: the sleep between successive
# operation polls doubles each iteration but never exceeds this many seconds
# (used by _wait_for_operation).
MAX_POLL_INTERVAL_SECS = 15
def empty_gcs_bucket(bucket_name, credentials):
    """Attempts to delete all objects in a bucket.

    If concurrent object creations occur while the bucket is being
    emptied, those objects may not be deleted and may cause bucket
    deletion to fail.

    Args:
        bucket_name: a string specifying the bucket to empty
        credentials: oauth2client.Credentials to be used for
            authentication
    """
    logging.info("Emptying GCS bucket: %s", bucket_name)
    service = discovery.build('storage', 'v1', credentials=credentials)
    # Walk every page of the object listing, deleting as we go.
    page_token = None
    while True:
        if page_token is None:
            list_request = service.objects().list(bucket=bucket_name)
        else:
            list_request = service.objects().list(
                bucket=bucket_name, pageToken=page_token)
        response = list_request.execute()
        _delete_resources(bucket_name, response.get('items', []), credentials)
        page_token = response.get('nextPageToken')
        if page_token is None:
            break
def _delete_resources(bucket_name, resources, credentials):
    """Deletes the specified resources from the given bucket.

    Resources are represented as described in
    https://cloud.google.com/storage/docs/json_api/v1/objects#resource

    Args:
        bucket_name: a string specifying the bucket from which to
            delete
        resources: a list of resources
        credentials: oauth2client.Credentials to be used for
            authentication
    """
    logging.info("Deleting %s resources.", len(resources))
    service = discovery.build('storage', 'v1', credentials=credentials)
    for resource in resources:
        delete_request = service.objects().delete(
            bucket=bucket_name, object=resource['name'])
        try:
            delete_request.execute()
        except apiclient.errors.HttpError as err:
            # Best effort: log and keep deleting the remaining objects.
            logging.warning('Error deleting %s: %s', resource, err)
def create_gcs_bucket(bucket_name, location, project, credentials):
    """Attempts to create a Google Cloud Storage bucket.

    Args:
        bucket_name: a string specifying the name of the bucket to
            create
        location: a string specifying the location where the bucket
            should be allocated. See
            https://cloud.google.com/storage/docs/bucket-locations
            for an authoritative list of values.
        project: a string specifying the GCP project in which to create
            the bucket
        credentials: oauth2client.Credentials to be used for
            authentication

    Returns:
        True if a bucket named bucket_name was successfully created, False
        otherwise. Note that False will be returned if there was already a
        bucket with the provided bucket_name.
    """
    service = discovery.build('storage', 'v1', credentials=credentials)
    body = {'name': bucket_name, 'location': location}
    try:
        service.buckets().insert(project=project, body=body).execute()
        logging.info('Created GCS bucket gs://%s', bucket_name)
        return True
    except apiclient.errors.HttpError as e:
        # logging.warn() is a deprecated alias; use logging.warning().
        logging.warning('Failed to create GCS bucket gs://%s. %s',
                        bucket_name, e)
        return False
def delete_gcs_bucket(bucket_name, credentials, force=False):
    """Attempts to delete a Google Cloud Storage bucket.

    The REST API doesn't allow for deletion of non-empty buckets;
    use force=True to attempt to empty the bucket prior to deletion.
    If concurrent object creations occur while the bucket is being
    emptied, those objects may not be deleted and may cause bucket
    deletion to fail.

    Args:
        bucket_name: a string specifying the name of the bucket to
            delete
        credentials: oauth2client.Credentials to be used for
            authentication
        force: a boolean specifying whether or not to attempt to empty
            the bucket prior to deletion.

    Returns:
        True if a bucket named bucket_name was successfully deleted, False
        otherwise.
    """
    if force:
        empty_gcs_bucket(bucket_name, credentials)
    service = discovery.build('storage', 'v1', credentials=credentials)
    try:
        resp = service.buckets().delete(bucket=bucket_name).execute()
        # An empty response indicates a successful deletion.
        # https://cloud.google.com/storage/docs/json_api/v1/buckets/delete
        return not bool(resp)
    except apiclient.errors.HttpError as e:
        # logging.warn() is a deprecated alias; use logging.warning().
        logging.warning('Error deleting GCS bucket %s: %s', bucket_name, e)
        return False
def create_gke_cluster(cluster_name, project, zone, credentials):
    """Tries to create a GKE cluster.

    TODO(wwlian): Expose more of the node pool's configuration as
    needed.

    Args:
        cluster_name: string specifying the desired cluster name
        project: a string specifying the GCP project in which to create
            the cluster
        zone: string specifying the GCE zone in which to create the
            cluster
        credentials: oauth2client.Credentials to be used for
            authentication

    Returns:
        True if a new cluster with the provided name has been created,
        False otherwise. Note that False will be returned if a cluster
        with the provided cluster_name already existed.
    """
    service = discovery.build('container', 'v1', credentials=credentials)
    cluster_body = {
        'name': cluster_name,
        'zone': zone,
        'network': 'default',
        'loggingService': 'logging.googleapis.com',
        'monitoringService': 'none',
        'subnetwork': 'default',
        'nodePools': [{
            'initialNodeCount': 3,
            'config': {
                'machineType': 'n1-standard-1',
                'imageType': 'GCI',
                'diskSizeGb': 100,
                'oauthScopes': [
                    'https://www.googleapis.com/auth/compute',
                    'https://www.googleapis.com/auth/devstorage.read_write',
                    'https://www.googleapis.com/auth/sqlservice.admin',
                    'https://www.googleapis.com/auth/logging.write',
                    'https://www.googleapis.com/auth/servicecontrol',
                    'https://www.googleapis.com/auth/service.management.'
                    'readonly',
                    'https://www.googleapis.com/auth/trace.append',
                    'https://www.googleapis.com/auth/source.read_only',
                    'https://www.googleapis.com/auth/cloud-platform'
                ]
            },
            'autoscaling': {
                'enabled': False
            },
            'management': {
                'autoUpgrade': False,
                'autoRepair': False,
                'upgradeOptions': {}
            },
            'name': 'default-pool'
        }],
        'masterAuth': {
            'username': 'admin'
        }
    }
    request = service.projects().zones().clusters().create(
        projectId=project, zone=zone, body={'cluster': cluster_body})
    logging.info('Waiting for GKE cluster creation: %s', cluster_name)
    if not _wait_for_operation(request,
                               _gke_op_poller_factory(service, project, zone)):
        # logging.warn() is a deprecated alias; use logging.warning().
        logging.warning('GKE cluster creation failed: %s', cluster_name)
        return False

    # Verify creation by trying to retrieve cluster info.
    request = service.projects().zones().clusters().get(
        projectId=project, zone=zone, clusterId=cluster_name)
    try:
        request.execute()
        logging.info('Created GKE cluster: %s', cluster_name)
        return True
    except apiclient.errors.HttpError as e:
        logging.warning(str(e))
        return False
def delete_gke_cluster(cluster_name, project, zone, credentials):
    """Attempts to delete a GKE cluster.

    Args:
        cluster_name: A string specifying the cluster to delete
        project: a string specifying the GCP project in which the
            cluster resides
        zone: The zone from which to delete the cluster
        credentials: oauth2client.Credentials to be used for
            authentication

    Returns:
        True if the specified cluster was successfully deleted from the
        specified zone; False otherwise.
    """
    service = discovery.build('container', 'v1', credentials=credentials)

    # If the cluster is in the process of being provisioned, we have to wait
    # until it is up and running before we can initiate deletion.
    request = service.projects().zones().clusters().get(
        projectId=project, zone=zone, clusterId=cluster_name)
    poll_interval_secs = 1
    while True:
        try:
            cluster = request.execute()
        except apiclient.errors.HttpError:
            # No such cluster; this will get caught when we try to delete
            # it.
            break
        if cluster['status'] == 'RUNNING':
            break
        # Bug fix: the original loop re-polled immediately, busy-waiting
        # against the API. Sleep with exponential backoff up to the cap.
        time.sleep(poll_interval_secs)
        poll_interval_secs = min(MAX_POLL_INTERVAL_SECS,
                                 2 * poll_interval_secs)

    request = service.projects().zones().clusters().delete(
        projectId=project, zone=zone, clusterId=cluster_name)
    if not _wait_for_operation(
            request, _gke_op_poller_factory(service, project, zone)):
        return False

    # Verify deletion by trying to retrieve cluster info.
    request = service.projects().zones().clusters().get(
        projectId=project, zone=zone, clusterId=cluster_name)
    try:
        request.execute()
        return False
    except apiclient.errors.HttpError as e:
        # Deletion succeeded iff the cluster is now gone (404).
        return e.resp['status'] == '404'
def create_sql_instance(instance_name, db_region, db_tier, project,
                        credentials):
    """Creates a Cloud SQL instance and sets its root password.

    If the instance already exists, the creation step is skipped, but
    the root password will still be reset.

    Args:
        instance_name: A string specifying the name for the new instance
        db_region: A string specifying the region in which the instance
            should be created
        db_tier: A string specifying the database tier to create. For a
            list of valid tiers and the regions in which they are
            available, use 'gcloud sql tiers list'.
        project: a string specifying the GCP project in which to create
            the instance credentials: oauth2client.Credentials to be
            used for authentication

    Returns:
        True if the Cloud SQL instance was successfully created, and its
        root password was successfully set; False otherwise.
    """
    service = discovery.build('sqladmin', 'v1beta4', credentials=credentials)
    instance_body = {
        'name': instance_name,
        'region': db_region,
        'settings': {
            'tier': db_tier,
            'activationPolicy': 'ALWAYS'
        }
    }
    insert_request = service.instances().insert(
        project=project, body=instance_body)
    logging.info('Waiting for Cloud SQL instance creation: %s', instance_name)
    poller = _cloud_sql_op_poller_factory(service, project)
    if not _wait_for_operation(insert_request, poller):
        return False

    # Verify creation by trying to retrieve instance info.
    get_request = service.instances().get(project=project,
                                          instance=instance_name)
    try:
        get_request.execute()
        return True
    except apiclient.errors.HttpError:
        return False
def set_sql_root_password(root_pw, instance_name, project, credentials):
    """Attempts to set the root SQL password in a Cloud SQL instance.

    Args:
        root_pw: A string specifying the root password to set in the
            Cloud SQL instance.
        instance_name: A string specifying the name of the Cloud SQL
            instance
        project: a string specifying the GCP project in which to create
            the instance
        credentials: oauth2client.Credentials to be used for
            authentication

    Returns:
        True if the instance's root password was successfully set; False
        otherwise.
    """
    service = discovery.build('sqladmin', 'v1beta4', credentials=credentials)
    update_request = service.users().update(
        project=project,
        instance=instance_name,
        host='%',
        name='root',
        body={'password': root_pw})
    logging.info('Waiting for Cloud SQL root password set: %s', instance_name)
    poller = _cloud_sql_op_poller_factory(service, project)
    return _wait_for_operation(update_request, poller)
def delete_sql_instance(instance_name, project, credentials):
    """Attempts to delete a Google Cloud SQL instance.

    Args:
        instance_name: A string specifying the name for the new instance
        project: a string specifying the GCP project in which the
            instance resides
        credentials: oauth2client.Credentials to be used for
            authentication

    Returns:
        True if this attempt to delete the instance succeeded, False
        otherwise. Note that this means that this function may return
        False if the instance did not exist in the first place or was
        deleted concurrently
    """
    service = discovery.build('sqladmin', 'v1beta4', credentials=credentials)

    # If the instance is in the process of being provisioned, we have to
    # wait until it is up and running before we can initiate deletion.
    request = service.instances().get(project=project, instance=instance_name)
    poll_interval_secs = 1
    while True:
        try:
            instance = request.execute()
        except apiclient.errors.HttpError:
            # No such instance; this will get caught when we try to delete
            # it.
            break
        if instance['state'] == 'RUNNABLE':
            break
        # Bug fix: the original loop re-polled immediately, busy-waiting
        # against the API. Sleep with exponential backoff up to the cap.
        time.sleep(poll_interval_secs)
        poll_interval_secs = min(MAX_POLL_INTERVAL_SECS,
                                 2 * poll_interval_secs)

    request = service.instances().delete(project=project,
                                         instance=instance_name)
    if not _wait_for_operation(
            request, _cloud_sql_op_poller_factory(service, project)):
        return False

    # Verify deletion by trying to retrieve instance info.
    request = service.instances().get(project=project,
                                      instance=instance_name)
    try:
        request.execute()
        return False
    except apiclient.errors.HttpError as e:
        # Deletion succeeded iff the instance is now gone (404).
        return e.resp['status'] == '404'
def _wait_for_operation(request, op_poller):
"""Executes a request and waits for its operation to finish.
Args:
request: A apiclient.http.HttpRequest whose response is expected
to be an Operation.
op_poller: A function whose first argument is expected to be an
Operation. When called on an operation, op_poller should
poll the API and return an updated version of the same
Operation.
Returns:
True if request executed without raising an HttpError, False
otherwise
"""
try:
logging.debug('Executing synchronous request: %s', request.to_json())
start_time = time.time()
op = request.execute()
except apiclient.errors.HttpError as e:
logging.warn(str(e))
return False
poll_interval_secs = 1
while op['status'] != 'DONE':
time.sleep(poll_interval_secs)
logging.debug('Polling Operation: %s', op)
op = op_poller(op)
# Exponential backoff up to maximum.
poll_interval_secs = min(MAX_POLL_INTERVAL_SECS,
2 * poll_interval_secs)
duration = time.time() - start_time
logging.debug('Operation completed in %s seconds: %s',
duration, request.to_json())
return True
def _cloud_sql_op_poller_factory(service, project):
"""Creates a function that polls a Cloud SQL operation.
The value returned by a call to this function can be provided as the
op_poller argument to _wait_for_operation.
Args:
service: a apiclient.discovery.Resource object for interacting
with the Cloud SQL API. This is usually the same object used
to create the request that spawned the operation that will
be waited on.
project: a string specifying the GCP project in which the
operation will be executing
Returns:
a function that can be used as the second argument to
_wait_for_operation.
"""
def op_poller(op):
return (service.operations()
.get(project=project, operation=op['name']).execute())
return op_poller
def _gke_op_poller_factory(service, project, zone):
"""Creates a function that polls a GKE operation.
The value returned by a call to this function can be provided as the
op_poller argument to _wait_for_operation.
Args:
service: a apiclient.discovery.Resource object for interacting
with the GKE API. This is usually the same object used to
create the request that spawned the operation that will be
waited on.
project: a string specifying the GCP project in which the
operation will be executing
zone: a string specifying the GCE zone in which the operation
will be running
Returns:
a function that can be used as the second argument to
_wait_for_operation.
"""
def op_poller(op):
return (service.projects().zones().operations()
.get(projectId=project, zone=zone, operationId=op['name'])
.execute())
return op_poller
|
/**
* Theme: Dastone - Responsive Bootstrap 4 Admin Dashboard
* Author: Mannatthemes
* Form Repeater
*/
// Initialize the jQuery form-repeater widgets once the DOM is ready.
$(document).ready(function () {
  'use strict';

  // Basic repeater with the plugin's default show/hide behavior.
  $('.repeater-default').repeater();

  // Repeater with custom slide animations; removal requires the user
  // to confirm via a browser confirm() dialog.
  $('.repeater-custom-show-hide').repeater({
    show: function () {
      $(this).slideDown();
    },
    hide: function (remove) {
      if (confirm('Are you sure you want to remove this item?')) {
        // `remove` is the plugin-supplied callback that deletes the row
        // once the slide-up animation completes.
        $(this).slideUp(remove);
      }
    }
  });
});
|
'use strict'
const _ = require('lodash')
const utils = require('./utils')
const Hoek = require('hoek')
const generator = require('./generator')
let schema = require('./schema')
const Joi = require('joi')
// Helpers that turn hapi/Joi route validation schemas into fragments of a
// swagger (OpenAPI 2.0) document. `definitions` is always a shared
// accumulator object that collects reusable schema definitions.
const internals = {
  /**
   * Converts a Joi schema into a list of swagger parameter objects for
   * the given scope ('path', 'query', 'header' or 'formData').
   * Note: the `schema` parameter shadows the module-level `schema` import.
   */
  prepareRequestParameters (definitions, scope, schema) {
    const swaggerProperties = generator.createProperties(schema, definitions)
    const paramsProperties = _.reduce(swaggerProperties, (memo, property, key) => {
      // TODO: deal with nested query and formData parameters -> for now strip it!
      if (property.$ref == null) {
        property.name = key
        property.in = scope
        memo.push(property)
      }
      return memo
    }, [])
    return paramsProperties
  },
  /**
   * Converts a Joi payload schema into a single swagger `body` parameter,
   * either referencing an extracted definition or inlining the type.
   */
  preparePayloadSchema (definitions, schema) {
    let swaggerSchema = generator.fromJoiSchema(schema, definitions)
    swaggerSchema = generator.extractAsDefinition(schema, definitions, swaggerSchema)
    swaggerSchema.in = 'body'
    if (swaggerSchema.$ref) {
      // 14 === '#/definitions/'.length: strip the prefix so the parameter
      // is named after the extracted definition.
      swaggerSchema.name = swaggerSchema.$ref.substr(14)
      swaggerSchema.schema = _.pick(swaggerSchema, ['$ref'])
      delete swaggerSchema.$ref
    } else {
      swaggerSchema.name = 'Payload'
      swaggerSchema.schema = _.pick(swaggerSchema, ['type', 'description'])
      delete swaggerSchema.type
    }
    return swaggerSchema
  },
  /**
   * Converts a Joi response schema into a swagger response object with a
   * nested `schema` key ($ref, array or inline type) and a description.
   */
  prepareResponseSchema (definitions, schema) {
    const swaggerSchema = generator.fromJoiSchema(schema, definitions)
    swaggerSchema.description = utils.getDescription(schema)
    if (swaggerSchema.$ref) {
      swaggerSchema.schema = {$ref: swaggerSchema.$ref}
    } else if (swaggerSchema.type === 'array') {
      const items = swaggerSchema.items
      swaggerSchema.schema = {
        type: 'array',
        items: items,
        description: swaggerSchema.description
      }
      delete swaggerSchema.type
      delete swaggerSchema.items
    } else {
      swaggerSchema.schema = _.pick(swaggerSchema, ['type', 'description'])
    }
    if (!swaggerSchema.description) {
      swaggerSchema.description = ''
    }
    // Only the nested `schema` object may carry these keys.
    delete swaggerSchema.$ref
    delete swaggerSchema.type
    delete swaggerSchema.required
    return swaggerSchema
  },
  /**
   * Builds the swagger `responses` map by merging (in increasing
   * precedence) a default empty response, the plugin-level response
   * options, the route's default response schema and its per-status
   * response schemas.
   */
  prepareResponses (definitions, pluginOptions, defaultResponseSchema, statusResponseSchema) {
    let responses = {default: {description: ''}}
    const pluginResponses = pluginOptions.responses || {}
    responses = _.reduce(pluginResponses, (memo, pluginResponseType, key) => {
      const responseType = utils.isSupportedSchema(pluginResponseType.schema) ? internals.prepareResponseSchema(definitions, pluginResponseType.schema) : {}
      if (pluginResponseType.type != null) {
        responseType.schema = { type: pluginResponseType.type }
      }
      responseType.description = pluginResponseType.description
      memo[key] = responseType
      return memo
    }, responses)
    const statusResponses = _.merge({default: defaultResponseSchema}, statusResponseSchema)
    responses = _.reduce(statusResponses, (memo, responseSchema, key) => {
      if (utils.isSupportedSchema(responseSchema)) {
        const memoType = memo[key]
        const responseType = internals.prepareResponseSchema(definitions, responseSchema)
        // Merge over any plugin-declared response for the same status code.
        memo[key] = memoType ? _.merge(memoType, responseType) : responseType
      }
      return memo
    }, responses)
    return responses
  },
  /**
   * Derives the operation's tag list: either from the path (path mode)
   * or from the route tags with required/additional tags stripped.
   */
  prepareTags (path, tags, settings) {
    const tagging = settings.tagging
    if (tagging.mode !== 'tags') {
      return utils.getPathTags(path, tagging.pathLevel)
    }
    if (tagging.stripRequiredTags === true) {
      tags = _.difference(tags, settings.requiredTags)
    }
    if (tagging.stripAdditionalTags && tagging.stripAdditionalTags.length) {
      tags = _.difference(tags, tagging.stripAdditionalTags)
    }
    return tags
  }
}
/**
 * Transforms a hapi route table into the swagger `paths` and
 * `definitions` sections.
 *
 * @param {Object} settings plugin settings (requiredTags, stripPrefix,
 *   tagging, consumes, produces, supportedMethods, ...)
 * @param {Array} routes hapi route entries
 * @param {String|Array} tags requested tag selection
 * @returns {{paths: Object, definitions: Object}}
 */
module.exports = function (settings, routes, tags) {
  routes = utils.filterRoutesByRequiredTags(routes, settings.requiredTags)
  if (settings.stripPrefix) {
    routes = utils.stripRoutesPrefix(routes, settings.stripPrefix)
  }
  const parsedTags = utils.parseTags(tags)
  if (parsedTags) {
    routes = utils.filterRoutesByTagSelection(routes, parsedTags.included, parsedTags.excluded)
  }
  routes = _.sortBy(routes, 'path')
  const routesByPath = utils.groupRoutesByPath(routes)
  // Shared accumulator: definitions referenced by $ref from the paths.
  const definitions = {}
  const paths = _.reduce(routesByPath, (pathsMemo, routes, path) => {
    const operations = _.reduce(routes, (operationsMemo, route) => {
      let parameters = []
      let operation = {}
      // Process request
      Hoek.assert(route.method, 'Really? No HTTP Method?')
      Hoek.assert(route.settings, 'Route settings missing')
      const routeSettings = route.settings
      Hoek.assert(
        routeSettings.validate,
        'Route settings incomplete (validate expected to be always present)'
      )
      const validations = routeSettings.validate
      Hoek.assert(
        routeSettings.plugins,
        'Route settings incomplete (plugins expected to be always present)'
      )
      const query = validations.query
      const params = validations.params
      const header = validations.headers
      const payload = validations.payload
      // Each validation scope becomes its own set of swagger parameters.
      if (params) {
        const paramsProperties = internals.prepareRequestParameters(definitions, 'path', params)
        parameters = parameters.concat(paramsProperties)
      }
      if (query) {
        const queryProperties = internals.prepareRequestParameters(definitions, 'query', query)
        parameters = parameters.concat(queryProperties)
      }
      if (header) {
        const headerProperties = internals.prepareRequestParameters(definitions, 'header', header)
        parameters = parameters.concat(headerProperties)
      }
      if (utils.isSupportedSchema(payload)) {
        // Form-encoded payloads become formData parameters; everything
        // else becomes a single body parameter.
        const allowedMimeType = _.get(routeSettings, 'payload.allow')
        if (Hoek.intersect(allowedMimeType, ['application/x-www-form-urlencoded', 'multipart/form-data']).length > 0) {
          const formProperties = internals.prepareRequestParameters(definitions, 'formData', payload)
          parameters = parameters.concat(formProperties)
          operation.consumes = allowedMimeType
        } else {
          const payloadSchema = internals.preparePayloadSchema(definitions, payload)
          parameters = parameters.concat(payloadSchema)
          operation.consumes = settings.consumes
        }
        utils.setNotEmpty(operation, 'consumes', allowedMimeType)
      }
      const routesPluginOptions = routeSettings.plugins['hapi-swaggered'] || {}
      Joi.assert(routesPluginOptions, schema.RoutesPluginOptions)
      const defaultResponseSchema = _.get(routeSettings, 'response.schema')
      const statusResponseSchema = _.get(routeSettings, 'response.status')
      // Process response
      let responses = internals.prepareResponses(definitions, routesPluginOptions, defaultResponseSchema, statusResponseSchema)
      operation.responses = responses
      operation.produces = routesPluginOptions.produces || settings.produces
      if (routesPluginOptions.operationId != null) {
        operation.operationId = routesPluginOptions.operationId
      }
      if (routesPluginOptions.security != null) {
        operation.security = routesPluginOptions.security
      }
      // The reserved 'deprecated' route tag marks the swagger operation.
      if (_.includes(routeSettings.tags, 'deprecated')) {
        operation.deprecated = true
      }
      if (routesPluginOptions.custom) {
        operation = _.merge(operation, routesPluginOptions.custom)
      }
      parameters = utils.adjustOptionalPathParams(path, parameters)
      utils.setNotEmpty(
        operation,
        'tags',
        internals.prepareTags(path, routeSettings.tags, settings)
      )
      utils.setNotEmpty(operation, 'parameters', parameters)
      utils.setNotEmpty(operation, 'summary', routeSettings.description)
      utils.setNotEmpty(operation, 'description', routeSettings.notes)
      // A wildcard route fans out to every supported HTTP method.
      if (route.method === '*') {
        _.each(settings.supportedMethods, (method) => {
          operationsMemo[method] = operation
        })
      } else {
        Hoek.assert(
          _.includes(settings.supportedMethods, route.method.toLowerCase()),
          `No supported http method: ${route.method}`
        )
        operationsMemo[route.method] = operation
      }
      return operationsMemo
    }, {})
    pathsMemo[path] = operations
    return pathsMemo
  }, {})
  return {
    paths: paths,
    definitions: definitions
  }
}
|
import logging
from typing import Any, AsyncIterator, Awaitable, Generator, Set
from async_generator import asynccontextmanager
import trio
from alexandria.abc import Endpoint, SessionAPI # noqa: F401
from alexandria.abc import (
Datagram,
EventAPI,
EventsAPI,
MessageAPI,
TAwaitable,
TEventPayload,
)
from alexandria.payloads import (
Advertise, Ack,
Retrieve, Chunk,
FindNodes, FoundNodes,
Locate, Locations,
Ping, Pong,
GraphGetIntroduction, GraphIntroduction,
GraphGetNode, GraphNode,
GraphInsert, GraphInserted,
GraphDelete, GraphDeleted,
)
class ReAwaitable(Awaitable[TAwaitable]):
    """An awaitable wrapper that caches its result so it can safely be
    awaited more than once.

    Bug fix: the original cached the *generator* returned by
    ``__await__`` and handed the same (already exhausted) generator back
    on subsequent awaits, which fails at runtime. We now drive the
    wrapped awaitable to completion exactly once and cache its final
    value, replaying it on every later await.
    """

    # Set only after the first await completes (see is_done).
    _result: TAwaitable

    def __init__(self, awaitable: Awaitable[TAwaitable]) -> None:
        self._awaitable = awaitable

    @property
    def is_done(self) -> bool:
        # The attribute only exists once the first await has finished.
        return hasattr(self, '_result')

    def __await__(self) -> Generator[Any, None, TAwaitable]:
        if not self.is_done:
            # Delegate to the wrapped awaitable once; its return value
            # becomes the cached result.
            self._result = yield from self._awaitable.__await__()
        return self._result
class Event(EventAPI[TEventPayload]):
    """A broadcast event: every active subscriber receives a copy of each
    triggered payload via its own trio memory channel.
    """
    logger = logging.getLogger('alexandria.events.Event')

    # One send channel per active subscriber; guarded by self._lock.
    _channels: Set[trio.abc.SendChannel[TEventPayload]]

    def __init__(self, name: str) -> None:
        self.name = name
        self._lock = trio.Lock()
        self._channels = set()

    async def trigger(self, payload: TEventPayload) -> None:
        """Deliver `payload` to every current subscriber.

        Holds the lock for the whole fan-out, so a slow subscriber with a
        full buffer will block both the trigger and new subscriptions.
        """
        self.logger.debug('Triggering event: %s(%s)', self.name, payload)
        async with self._lock:
            for send_channel in self._channels:
                await send_channel.send(payload)

    @asynccontextmanager
    async def subscribe(self) -> AsyncIterator[trio.abc.ReceiveChannel[TEventPayload]]:
        """Context manager yielding a receive channel for this event.

        The subscription is registered on entry and always removed on
        exit, even if the body raises.
        """
        # Buffer up to 256 pending payloads per subscriber before
        # trigger() blocks on this channel.
        send_channel, receive_channel = trio.open_memory_channel[TEventPayload](256)
        async with self._lock:
            self._channels.add(send_channel)
        try:
            async with receive_channel:
                yield receive_channel
        finally:
            async with self._lock:
                self._channels.remove(send_channel)
class Events(EventsAPI):
    """Concrete registry of every event the node can emit, one Event
    instance per lifecycle, datagram, and protocol-message occurrence.
    """
    def __init__(self) -> None:
        # Transport / session lifecycle events.
        self.listening: EventAPI[Endpoint] = Event('listening')
        self.new_external_ip: EventAPI[Endpoint] = Event('new-external-ip')
        self.session_created: EventAPI[SessionAPI] = Event('session-created')
        self.session_idle: EventAPI[SessionAPI] = Event('session-idle')
        self.handshake_complete: EventAPI[SessionAPI] = Event('handshake-complete')
        self.handshake_timeout: EventAPI[SessionAPI] = Event('handshake-timeout')
        # Raw datagram traffic events.
        self.datagram_sent: EventAPI[Datagram] = Event('datagram-sent')
        self.datagram_received: EventAPI[Datagram] = Event('datagram-received')
        # One "sent-<PayloadType>" event per outbound protocol message.
        self.sent_ping: EventAPI[MessageAPI[Ping]] = Event('sent-Ping')
        self.sent_pong: EventAPI[MessageAPI[Pong]] = Event('sent-Pong')
        self.sent_find_nodes: EventAPI[MessageAPI[FindNodes]] = Event('sent-FindNodes')
        self.sent_found_nodes: EventAPI[MessageAPI[FoundNodes]] = Event('sent-FoundNodes')
        self.sent_advertise: EventAPI[MessageAPI[Advertise]] = Event('sent-Advertise')
        self.sent_ack: EventAPI[MessageAPI[Ack]] = Event('sent-Ack')
        self.sent_locate: EventAPI[MessageAPI[Locate]] = Event('sent-Locate')
        self.sent_locations: EventAPI[MessageAPI[Locations]] = Event('sent-Locations')
        self.sent_retrieve: EventAPI[MessageAPI[Retrieve]] = Event('sent-Retrieve')
        self.sent_chunk: EventAPI[MessageAPI[Chunk]] = Event('sent-Chunk')
        # Graph protocol messages.
        self.sent_graph_get_introduction: EventAPI[MessageAPI[GraphGetIntroduction]] = Event('sent-GraphGetIntroduction')  # noqa: E501
        self.sent_graph_introduction: EventAPI[MessageAPI[GraphIntroduction]] = Event('sent-GraphIntroduction')  # noqa: E501
        self.sent_graph_get_node: EventAPI[MessageAPI[GraphGetNode]] = Event('sent-GraphGetNode')
        self.sent_graph_node: EventAPI[MessageAPI[GraphNode]] = Event('sent-GraphNode')
        self.sent_graph_insert: EventAPI[MessageAPI[GraphInsert]] = Event('sent-GraphInsert')  # noqa: E501
        self.sent_graph_inserted: EventAPI[MessageAPI[GraphInserted]] = Event('sent-GraphInserted')
        self.sent_graph_delete: EventAPI[MessageAPI[GraphDelete]] = Event('sent-GraphDelete')  # noqa: E501
        self.sent_graph_deleted: EventAPI[MessageAPI[GraphDeleted]] = Event('sent-GraphDeleted')
|
# SPDX-License-Identifier: Apache-2.0
#
# http://nexb.com and https://github.com/nexB/scancode.io
# The ScanCode.io software is licensed under the Apache License version 2.0.
# Data generated with ScanCode.io is provided as-is without warranties.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# Data Generated with ScanCode.io is provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode.io should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
#
# ScanCode.io is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode.io for support and download.
from pathlib import Path
from unittest import mock
from django.apps import apps
from django.test import TestCase
from django.urls import reverse
from scanpipe.models import CodebaseResource
from scanpipe.models import Project
from scanpipe.tests import license_policies_index
# AppConfig of the "scanpipe" Django app; tests below toggle its
# license_policies_index attribute to exercise both configurations.
scanpipe_app = apps.get_app_config("scanpipe")
class ScanPipeViewsTest(TestCase):
    """View-layer tests for project listing and project detail pages."""
    data_location = Path(__file__).parent / "data"

    def setUp(self):
        # A fresh, non-archived project for every test.
        self.project1 = Project.objects.create(name="Analysis")

    def test_scanpipe_views_project_list_is_archived(self):
        """The list view hides archived projects unless filtered for them."""
        project2 = Project.objects.create(name="project2", is_archived=True)
        url = reverse("project_list")
        url_with_filter = url + "?is_archived=true"

        response = self.client.get(url)
        self.assertContains(response, self.project1.name)
        self.assertNotContains(response, project2.name)
        self.assertContains(response, url)
        self.assertContains(response, url_with_filter)

        # Filtered view inverts the selection: only archived projects.
        response = self.client.get(url_with_filter)
        self.assertNotContains(response, self.project1.name)
        self.assertContains(response, project2.name)

    def test_scanpipe_views_project_details_is_archived(self):
        """Archiving a project swaps the archive modal for a warning banner."""
        url = self.project1.get_absolute_url()
        expected1 = "WARNING: This project is archived and read-only."
        expected2 = 'id="modal-archive"'

        response = self.client.get(url)
        self.assertNotContains(response, expected1)
        self.assertContains(response, expected2)

        self.project1.archive()
        response = self.client.get(url)
        self.assertContains(response, expected1)
        self.assertNotContains(response, expected2)

    @mock.patch("requests.get")
    def test_scanpipe_views_project_details_add_inputs(self, mock_get):
        """Adding an input URL reports failure, then success, and records
        the download as an input source on the project."""
        url = self.project1.get_absolute_url()
        data = {
            "input_urls": "https://example.com/archive.zip",
            "add-inputs-submit": "",
        }

        # Any exception during download surfaces as an addition error.
        mock_get.side_effect = Exception
        response = self.client.post(url, data, follow=True)
        self.assertContains(response, "Input file addition error.")

        mock_get.side_effect = None
        mock_get.return_value = mock.Mock(
            content=b"\x00",
            headers={},
            status_code=200,
            url="url/archive.zip",
        )
        response = self.client.post(url, data, follow=True)
        self.assertContains(response, "Input file(s) added.")
        self.assertEqual(["archive.zip"], self.project1.input_files)
        expected = {"archive.zip": "https://example.com/archive.zip"}
        self.project1.refresh_from_db()
        self.assertEqual(expected, self.project1.input_sources)

    def test_scanpipe_views_project_details_missing_inputs(self):
        """A registered input that is absent on disk is flagged on the page."""
        self.project1.add_input_source(
            filename="missing.zip", source="uploaded", save=True
        )
        url = self.project1.get_absolute_url()
        response = self.client.get(url)
        expected = (
            '<div class="message-body">'
            "            The following input files are not available on disk anymore:<br>"
            "            - missing.zip"
            "        </div>"
        )
        self.assertContains(response, expected, html=True)

    def test_scanpipe_views_project_details_add_pipelines(self):
        """Posting a pipeline name attaches an un-started run to the project."""
        url = self.project1.get_absolute_url()
        data = {
            "pipeline": "docker",
        }
        response = self.client.post(url, data, follow=True)
        self.assertContains(response, "Pipeline added.")
        run = self.project1.runs.get()
        self.assertEqual("docker", run.pipeline_name)
        self.assertIsNone(run.task_start_date)

    def test_scanpipe_views_project_details_compliance_alert(self):
        """The compliance chart renders only when policies are loaded AND
        at least one resource carries a compliance alert."""
        url = self.project1.get_absolute_url()
        expected = 'id="compliance_alert_chart"'

        scanpipe_app.license_policies_index = None
        response = self.client.get(url)
        self.assertNotContains(response, expected)

        scanpipe_app.license_policies_index = license_policies_index
        response = self.client.get(url)
        self.assertNotContains(response, expected)

        CodebaseResource.objects.create(
            project=self.project1,
            compliance_alert="error",
            type=CodebaseResource.Type.FILE,
        )
        response = self.client.get(url)
        self.assertContains(response, expected)
|
from tensorflow.python.keras.layers import Conv2D
from tensorflow.python.keras.layers import AveragePooling2D
from tensorflow.python.keras.layers import Flatten
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.models import Sequential
def lenet():
    """Build a LeNet-style convolutional network for 224x224 RGB inputs.

    Returns:
        An uncompiled ``Sequential`` model: three tanh conv layers with
        average pooling after the first two, followed by two tanh dense
        layers and a 1000-way softmax classifier.
    """
    model = Sequential()
    # Fix: the first conv layer was the only one without an activation
    # (Keras defaults to linear); LeNet applies tanh after every conv layer,
    # as the later Conv2D/Dense layers here already do.
    model.add(Conv2D(filters=16, input_shape=(224, 224, 3), kernel_size=(5, 5),
                     strides=(1, 1), padding='valid', activation='tanh',
                     data_format='channels_last'))
    model.add(AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
                               data_format='channels_last'))
    model.add(Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1),
                     activation='tanh'))
    model.add(AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
                               data_format='channels_last'))
    model.add(Conv2D(filters=64, kernel_size=(5, 5), strides=(1, 1),
                     activation='tanh'))
    model.add(Flatten())
    model.add(Dense(120, activation='tanh'))
    model.add(Dense(84, activation='tanh'))
    model.add(Dense(1000, activation='softmax'))
    return model
|
import React from 'react';
const CurrentTemp = (props) => {
return (
<div>
<h2>
{Math.round(props.temperature)}˚F
</h2>
<div>
Feels like : {Math.round(props.apparentTemperature)}˚F
</div>
</div>
)
}
export default CurrentTemp;
|
/*!
* froala_editor v4.0.3 (https://www.froala.com/wysiwyg-editor)
* License https://froala.com/wysiwyg-editor/terms/
* Copyright 2014-2021 Froala Labs
*/
(function (global, factory) {
  // UMD wrapper: CommonJS, then AMD, then browser global.
  typeof exports === 'object' && typeof module !== 'undefined' ? factory(require('froala-editor')) :
  typeof define === 'function' && define.amd ? define(['froala-editor'], factory) :
  (factory(global.FroalaEditor));
}(this, (function (FE) { 'use strict';

  // Unwrap an ES-module-style default export if present.
  FE = FE && FE.hasOwnProperty('default') ? FE['default'] : FE;

  /**
   * Traditional Chinese spoken in Taiwan.
   *
   * Fix: the translation for 'Face with stuck out tongue and winking eye'
   * carried a stray trailing apostrophe ('眨眼吐舌\'') — removed.
   * NOTE(review): several entries are written in Simplified Chinese
   * (e.g. 'Font Family', 'Align', 'Unordered List', 'Code View') — confirm
   * with a native speaker whether they should be converted to Traditional.
   */
  FE.LANGUAGE['zh_tw'] = {
    translation: {
      // Place holder
      'Type something': "\u8F38\u5165\u4E00\u4E9B\u5167\u5BB9",
      // Basic formatting
      'Bold': '粗體',
      'Italic': '斜體',
      'Underline': '下劃線',
      'Strikethrough': '刪除線',
      // Main buttons
      'Insert': '插入',
      'Delete': '刪除',
      'Cancel': '取消',
      'OK': '好的',
      'Back': '後退',
      'Remove': '去掉',
      'More': '更多',
      'Update': '更新',
      'Style': '樣式',
      // Font
      'Font Family': '字体系列',
      'Font Size': "\u5B57\u578B\u5927\u5C0F",
      // Colors
      'Colors': '顏色',
      'Background': '背景',
      'Text': '文字',
      'HEX Color': '十六進制顏色',
      // Paragraphs
      'Paragraph Format': '段落格式',
      'Normal': '正常',
      'Code': '代碼',
      'Heading 1': '標題1',
      'Heading 2': '標題2',
      'Heading 3': '標題3',
      'Heading 4': '標題4',
      // Style
      'Paragraph Style': '段落樣式',
      'Inline Style': '內聯樣式',
      // Alignment
      'Align': '对齐',
      'Align Left': '左對齊',
      'Align Center': "\u7F6E\u4E2D\u5C0D\u9F4A",
      'Align Right': "\u7F6E\u53F3\u5C0D\u9F4A",
      'Align Justify': '兩端對齊',
      'None': '没有',
      // Lists
      'Ordered List': '有序列表',
      'Unordered List': '无序列表',
      // Indent
      'Decrease Indent': '減少縮進量',
      'Increase Indent': '增加縮進量',
      // Links
      'Insert Link': "\u63D2\u5165\u9023\u7D50",
      'Open in new tab': '在新標籤頁中打開',
      'Open Link': "\u958B\u555F\u9023\u7D50",
      'Edit Link': "\u7DE8\u8F2F\u9023\u7D50",
      'Unlink': "\u79FB\u9664\u9023\u7D50",
      'Choose Link': "\u9078\u64C7\u9023\u7D50",
      // Images
      'Insert Image': '插入圖片',
      'Upload Image': '上傳圖片',
      'By URL': "\u7DB2\u5740\u4E0A\u50B3",
      'Browse': '瀏覽',
      'Drop image': "\u5716\u7247\u62D6\u66F3",
      'or click': '或點擊',
      'Manage Images': '管理圖片',
      'Loading': '加載中',
      'Deleting': '刪除中',
      'Tags': '標籤',
      'Are you sure? Image will be deleted.': "\u78BA\u5B9A\u522A\u9664\u5716\u7247\uFF1F",
      'Replace': '替換',
      'Uploading': '上傳中',
      'Loading image': '圖片加載中',
      'Display': '顯示',
      'Inline': "\u5D4C\u5165",
      'Break Text': '打破文字',
      'Alternative Text': '替換文字',
      'Change Size': '改變大小',
      'Width': '寬度',
      'Height': '高度',
      'Something went wrong. Please try again.': '發生錯誤,請重試。',
      'Image Caption': '圖片標題',
      'Advanced Edit': '高級編輯',
      // Video
      'Insert Video': '插入影片',
      'Embedded Code': '嵌入式代码',
      'Paste in a video URL': '粘貼在視頻網址',
      'Drop video': '放下視頻',
      'Your browser does not support HTML5 video.': '您的瀏覽器不支持html5視頻。',
      'Upload Video': '上傳影片',
      // Tables
      'Insert Table': '插入表格',
      'Table Header': '表頭',
      'Remove Table': "\u522A\u9664\u8868",
      'Table Style': '表格樣式',
      'Horizontal Align': '水平對齊方式',
      'Row': '行',
      'Insert row above': '在上方插入',
      'Insert row below': '在下方插入',
      'Delete row': '刪除行',
      'Column': '列',
      'Insert column before': "\u5411\u5DE6\u63D2\u5165\u4E00\u5217",
      'Insert column after': "\u5411\u53F3\u63D2\u5165\u4E00\u5217",
      'Delete column': '刪除列',
      'Cell': '單元格',
      'Merge cells': '合併單元格',
      'Horizontal split': '水平分割',
      'Vertical split': '垂直分割',
      'Cell Background': '單元格背景',
      'Vertical Align': '垂直對齊方式',
      'Top': "\u4E0A",
      'Middle': "\u4E2D",
      'Bottom': '底部',
      'Align Top': '靠上對齊',
      'Align Middle': "\u4E2D\u9593\u5C0D\u9F4A",
      'Align Bottom': '靠下對齊',
      'Cell Style': '單元格樣式',
      // Files
      'Upload File': '上傳文件',
      'Drop file': '拖入文件',
      // Emoticons
      'Emoticons': '表情符號',
      'Grinning face': "\u81C9\u4E0A\u7B11\u563B\u563B",
      'Grinning face with smiling eyes': "\u7B11\u563B\u563B\u7684\u81C9\uFF0C\u542B\u7B11\u7684\u773C\u775B",
      'Face with tears of joy': "\u81C9\u4E0A\u5E36\u8457\u559C\u6085\u7684\u6DDA\u6C34",
      'Smiling face with open mouth': "\u7B11\u81C9\u5F35\u958B\u5634",
      'Smiling face with open mouth and smiling eyes': "\u7B11\u81C9\u5F35\u958B\u5634\u5FAE\u7B11\u7684\u773C\u775B",
      'Smiling face with open mouth and cold sweat': '帶冷汗的張嘴微笑',
      'Smiling face with open mouth and tightly-closed eyes': "\u7B11\u81C9\u5F35\u958B\u5634\uFF0C\u7DCA\u7DCA\u9589\u8457\u773C\u775B",
      'Smiling face with halo': '帶光環微笑',
      'Smiling face with horns': '帶牛角的微笑',
      'Winking face': "\u7728\u773C\u8868\u60C5",
      'Smiling face with smiling eyes': "\u9762\u5E36\u5FAE\u7B11\u7684\u773C\u775B",
      'Face savoring delicious food': "\u9762\u5C0D\u54C1\u5690\u7F8E\u5473\u7684\u98DF\u7269",
      'Relieved face': '如釋重負',
      'Smiling face with heart-shaped eyes': "\u5FAE\u7B11\u7684\u81C9\uFF0C\u5FC3\u81DF\u5F62\u7684\u773C\u775B",
      'Smiling face with sunglasses': "\u7B11\u81C9\u592A\u967D\u93E1",
      'Smirking face': "\u9762\u5C0D\u9762\u5E36\u7B11\u5BB9",
      'Neutral face': '中性臉',
      'Expressionless face': '无表情的脸',
      'Unamused face': "\u4E00\u81C9\u4E0D\u5FEB\u7684\u81C9",
      'Face with cold sweat': "\u9762\u5C0D\u51B7\u6C57",
      'Pensive face': "\u6C89\u601D\u7684\u81C9",
      'Confused face': "\u9762\u5C0D\u56F0\u60D1",
      'Confounded face': "\u8A72\u6B7B\u7684\u81C9",
      'Kissing face': '接吻的脸',
      'Face throwing a kiss': '扔一个吻',
      'Kissing face with smiling eyes': '带着微笑的眼睛接吻的脸',
      'Kissing face with closed eyes': '閉眼接吻',
      'Face with stuck out tongue': '舌头伸出来的脸',
      // Typo fix: removed stray trailing apostrophe from this translation.
      'Face with stuck out tongue and winking eye': '眨眼吐舌',
      'Face with stuck out tongue and tightly-closed eyes': '脸上伸出舌头和眨眼的眼睛',
      'Disappointed face': '失望',
      'Worried face': '担心的脸',
      'Angry face': '生氣的',
      'Pouting face': '撅嘴',
      'Crying face': "\u54ED\u6CE3\u7684\u81C9",
      'Persevering face': "\u600E\u5948\u81C9",
      'Face with look of triumph': '胜利的脸',
      'Disappointed but relieved face': '失望但释然的脸',
      'Frowning face with open mouth': '皺眉',
      'Anguished face': '痛苦的脸',
      'Fearful face': '害怕',
      'Weary face': "\u9762\u5C0D\u53AD\u5026",
      'Sleepy face': '困了',
      'Tired face': '累了',
      'Grimacing face': '鬼脸',
      'Loudly crying face': '大声哭泣的脸',
      'Face with open mouth': '張開嘴',
      'Hushed face': "\u5B89\u975C\u7684\u81C9",
      'Face with open mouth and cold sweat': "\u9762\u5C0D\u5F35\u958B\u5634\uFF0C\u4E00\u8EAB\u51B7\u6C57",
      'Face screaming in fear': "\u9762\u5C0D\u5C16\u53EB\u5728\u6050\u61FC\u4E2D",
      'Astonished face': "\u9762\u5C0D\u9A5A\u8A1D",
      'Flushed face': '臉紅',
      'Sleeping face': "\u719F\u7761\u7684\u81C9",
      'Dizzy face': "\u9762\u5C0D\u7729",
      'Face without mouth': '沒有嘴的臉',
      'Face with medical mask': "\u9762\u5C0D\u91AB\u7642\u53E3\u7F69",
      // Line breaker
      'Break': '換行',
      // Math
      'Subscript': '下標',
      'Superscript': '上標',
      // Full screen
      'Fullscreen': '全屏',
      // Horizontal line
      'Insert Horizontal Line': '插入水平線',
      // Clear formatting
      'Clear Formatting': '清除格式',
      // Save
      'Save': '保存',
      // Undo, redo
      'Undo': '撤消',
      'Redo': '重做',
      // Select all
      'Select All': '全選',
      // Code view
      'Code View': '代码视图',
      // Quote
      'Quote': '引用',
      'Increase': "\u7E2E\u6392",
      'Decrease': "\u53BB\u9664\u7E2E\u6392",
      // Quick Insert
      'Quick Insert': '快速插入',
      // Spcial Characters
      'Special Characters': '特殊字符',
      'Latin': '拉丁',
      'Greek': '希臘語',
      'Cyrillic': '西里爾',
      'Punctuation': '標點',
      'Currency': '貨幣',
      'Arrows': '箭頭',
      'Math': '數學',
      'Misc': '雜項',
      // Print.
      'Print': '列印',
      // Spell Checker.
      'Spell Checker': '拼寫檢查器',
      // Help
      'Help': '幫助',
      'Shortcuts': '快捷鍵',
      'Inline Editor': '內聯編輯器',
      'Show the editor': '顯示編輯器',
      'Common actions': '常用操作',
      'Copy': '複製',
      'Cut': '切',
      'Paste': '貼上',
      'Basic Formatting': '基本格式',
      'Increase quote level': '增加报价水平',
      'Decrease quote level': '降低报价水平',
      'Image / Video': '圖像/影片',
      'Resize larger': '調整大小更大',
      'Resize smaller': '調整大小更小',
      'Table': '表',
      'Select table cell': '選擇表單元格',
      'Extend selection one cell': '增加選中的單元格',
      'Extend selection one row': '增加選中的行',
      'Navigation': '導航',
      'Focus popup / toolbar': '焦點彈出/工具欄',
      'Return focus to previous position': '將焦點返回到上一個位置',
      // Embed.ly
      'Embed URL': '嵌入網址',
      'Paste in a URL to embed': '貼上要嵌入的網址',
      // Word Paste.
      'The pasted content is coming from a Microsoft Word document. Do you want to keep the format or clean it up?': '粘貼的內容來自微軟Word文檔。你想保留格式還是清理它?',
      'Keep': '保留',
      'Clean': '清潔',
      'Word Paste Detected': '檢測到貼上自 Word 的內容',
      // Character Counter
      'Characters': '人物',
      // More Buttons
      'More Text': '更多文字',
      'More Paragraph': '更多段落',
      'More Rich': '更多豐富',
      'More Misc': '更多雜項'
    },
    direction: 'ltr'
  };

})));
//# sourceMappingURL=zh_tw.js.map
|
var sdg=window.sdg||{};!function(i){"use strict";"function"==typeof define&&define.amd?define(["jquery"],i):"undefined"!=typeof exports?module.exports=i(require("jquery")):i(jQuery)}(function(i){"use strict";var e=window.Slick||{};e=function(){function e(e,o){var s,n,l,r=this;if(r.defaults={accessibility:!0,adaptiveHeight:!1,appendArrows:i(e),appendDots:i(e),arrows:!0,asNavFor:null,prevArrow:'<button type="button" data-role="none" class="slick-prev" aria-label="previous">Previous</button>',nextArrow:'<button type="button" data-role="none" class="slick-next" aria-label="next">Next</button>',autoplay:!1,autoplaySpeed:3e3,centerMode:!1,centerPadding:"50px",cssEase:"ease",customPaging:function(i,e){return'<button type="button" data-role="none">'+(e+1)+"</button>"},dots:!1,dotsClass:"slick-dots",draggable:!0,easing:"linear",edgeFriction:.35,fade:!1,focusOnSelect:!1,infinite:!0,initialSlide:0,lazyLoad:"ondemand",mobileFirst:!1,pauseOnHover:!0,pauseOnDotsHover:!1,respondTo:"window",responsive:null,rtl:!1,slide:"",slidesToShow:1,slidesToScroll:1,speed:500,swipe:!0,swipeToSlide:!1,touchMove:!0,touchThreshold:5,useCSS:!0,variableWidth:!1,vertical:!1,waitForAnimate:!0},r.initials={animating:!1,dragging:!1,autoPlayTimer:null,currentDirection:0,currentLeft:null,currentSlide:0,direction:1,$dots:null,listWidth:null,listHeight:null,loadIndex:0,$nextArrow:null,$prevArrow:null,slideCount:null,slideWidth:null,$slideTrack:null,$slides:null,sliding:!1,slideOffset:0,swipeLeft:null,$list:null,touchObject:{},transformsEnabled:!1},i.extend(r,r.initials),r.activeBreakpoint=null,r.animType=null,r.animProp=null,r.breakpoints=[],r.breakpointSettings=[],r.cssTransitions=!1,r.hidden="hidden",r.paused=!1,r.positionProp=null,r.respondTo=null,r.shouldClick=!0,r.$slider=i(e),r.$slidesCache=null,r.transformType=null,r.transitionType=null,r.visibilityChange="visibilitychange",r.windowWidth=0,r.windowTimer=null,s=i(e).data("slick")||{},r.options=i.extend({},r.defaults,s,o),r.currentSlide=r.options.initi
alSlide,r.originalSettings=r.options,n=r.options.responsive||null,n&&n.length>-1){r.respondTo=r.options.respondTo||"window";for(l in n)n.hasOwnProperty(l)&&(r.breakpoints.push(n[l].breakpoint),r.breakpointSettings[n[l].breakpoint]=n[l].settings);r.breakpoints.sort(function(i,e){return r.options.mobileFirst===!0?i-e:e-i})}"undefined"!=typeof document.mozHidden?(r.hidden="mozHidden",r.visibilityChange="mozvisibilitychange"):"undefined"!=typeof document.msHidden?(r.hidden="msHidden",r.visibilityChange="msvisibilitychange"):"undefined"!=typeof document.webkitHidden&&(r.hidden="webkitHidden",r.visibilityChange="webkitvisibilitychange"),r.autoPlay=i.proxy(r.autoPlay,r),r.autoPlayClear=i.proxy(r.autoPlayClear,r),r.changeSlide=i.proxy(r.changeSlide,r),r.clickHandler=i.proxy(r.clickHandler,r),r.selectHandler=i.proxy(r.selectHandler,r),r.setPosition=i.proxy(r.setPosition,r),r.swipeHandler=i.proxy(r.swipeHandler,r),r.dragHandler=i.proxy(r.dragHandler,r),r.keyHandler=i.proxy(r.keyHandler,r),r.autoPlayIterator=i.proxy(r.autoPlayIterator,r),r.instanceUid=t++,r.htmlExpr=/^(?:\s*(<[\w\W]+>)[^>]*)$/,r.init(),r.checkResponsive(!0)}var t=0;return e}(),e.prototype.addSlide=e.prototype.slickAdd=function(e,t,o){var s=this;if("boolean"==typeof t)o=t,t=null;else if(0>t||t>=s.slideCount)return!1;s.unload(),"number"==typeof t?0===t&&0===s.$slides.length?i(e).appendTo(s.$slideTrack):o?i(e).insertBefore(s.$slides.eq(t)):i(e).insertAfter(s.$slides.eq(t)):o===!0?i(e).prependTo(s.$slideTrack):i(e).appendTo(s.$slideTrack),s.$slides=s.$slideTrack.children(this.options.slide),s.$slideTrack.children(this.options.slide).detach(),s.$slideTrack.append(s.$slides),s.$slides.each(function(e,t){i(t).attr("data-slick-index",e)}),s.$slidesCache=s.$slides,s.reinit()},e.prototype.animateHeight=function(){var i=this;if(1===i.options.slidesToShow&&i.options.adaptiveHeight===!0&&i.options.vertical===!1){var 
e=i.$slides.eq(i.currentSlide).outerHeight(!0);i.$list.animate({height:e},i.options.speed)}},e.prototype.animateSlide=function(e,t){var o={},s=this;s.animateHeight(),s.options.rtl===!0&&s.options.vertical===!1&&(e=-e),s.transformsEnabled===!1?s.options.vertical===!1?s.$slideTrack.animate({left:e},s.options.speed,s.options.easing,t):s.$slideTrack.animate({top:e},s.options.speed,s.options.easing,t):s.cssTransitions===!1?(s.options.rtl===!0&&(s.currentLeft=-s.currentLeft),i({animStart:s.currentLeft}).animate({animStart:e},{duration:s.options.speed,easing:s.options.easing,step:function(i){i=Math.ceil(i),s.options.vertical===!1?(o[s.animType]="translate("+i+"px, 0px)",s.$slideTrack.css(o)):(o[s.animType]="translate(0px,"+i+"px)",s.$slideTrack.css(o))},complete:function(){t&&t.call()}})):(s.applyTransition(),e=Math.ceil(e),o[s.animType]=s.options.vertical===!1?"translate3d("+e+"px, 0px, 0px)":"translate3d(0px,"+e+"px, 0px)",s.$slideTrack.css(o),t&&setTimeout(function(){s.disableTransition(),t.call()},s.options.speed))},e.prototype.asNavFor=function(e){var t=this,o=null!==t.options.asNavFor?i(t.options.asNavFor).slick("getSlick"):null;null!==o&&o.slideHandler(e,!0)},e.prototype.applyTransition=function(i){var e=this,t={};t[e.transitionType]=e.options.fade===!1?e.transformType+" "+e.options.speed+"ms "+e.options.cssEase:"opacity "+e.options.speed+"ms "+e.options.cssEase,e.options.fade===!1?e.$slideTrack.css(t):e.$slides.eq(i).css(t)},e.prototype.autoPlay=function(){var i=this;i.autoPlayTimer&&clearInterval(i.autoPlayTimer),i.slideCount>i.options.slidesToShow&&i.paused!==!0&&(i.autoPlayTimer=setInterval(i.autoPlayIterator,i.options.autoplaySpeed))},e.prototype.autoPlayClear=function(){var i=this;i.autoPlayTimer&&clearInterval(i.autoPlayTimer)},e.prototype.autoPlayIterator=function(){var 
i=this;i.options.infinite===!1?1===i.direction?(i.currentSlide+1===i.slideCount-1&&(i.direction=0),i.slideHandler(i.currentSlide+i.options.slidesToScroll)):(i.currentSlide-1===0&&(i.direction=1),i.slideHandler(i.currentSlide-i.options.slidesToScroll)):i.slideHandler(i.currentSlide+i.options.slidesToScroll)},e.prototype.buildArrows=function(){var e=this;e.options.arrows===!0&&e.slideCount>e.options.slidesToShow&&(e.$prevArrow=i(e.options.prevArrow),e.$nextArrow=i(e.options.nextArrow),e.htmlExpr.test(e.options.prevArrow)&&e.$prevArrow.appendTo(e.options.appendArrows),e.htmlExpr.test(e.options.nextArrow)&&e.$nextArrow.appendTo(e.options.appendArrows),e.options.infinite!==!0&&e.$prevArrow.addClass("slick-disabled"))},e.prototype.buildDots=function(){var e,t,o=this;if(o.options.dots===!0&&o.slideCount>o.options.slidesToShow){for(t='<ul class="'+o.options.dotsClass+'">',e=0;e<=o.getDotCount();e+=1)t+="<li>"+o.options.customPaging.call(this,o,e)+"</li>";t+="</ul>",o.$dots=i(t).appendTo(o.options.appendDots),o.$dots.find("li").first().addClass("slick-active").attr("aria-hidden","false")}},e.prototype.buildOut=function(){var e=this;e.$slides=e.$slider.children(e.options.slide+":not(.slick-cloned)").addClass("slick-slide"),e.slideCount=e.$slides.length,e.$slides.each(function(e,t){i(t).attr("data-slick-index",e)}),e.$slidesCache=e.$slides,e.$slider.addClass("slick-slider"),e.$slideTrack=0===e.slideCount?i('<div class="slick-track"/>').appendTo(e.$slider):e.$slides.wrapAll('<div class="slick-track"/>').parent(),e.$list=e.$slideTrack.wrap('<div aria-live="polite" class="slick-list"/>').parent(),e.$slideTrack.css("opacity",0),(e.options.centerMode===!0||e.options.swipeToSlide===!0)&&(e.options.slidesToScroll=1),i("img[data-lazy]",e.$slider).not("[src]").addClass("slick-loading"),e.setupInfinite(),e.buildArrows(),e.buildDots(),e.updateDots(),e.options.accessibility===!0&&e.$list.prop("tabIndex",0),e.setSlideClasses("number"==typeof 
this.currentSlide?this.currentSlide:0),e.options.draggable===!0&&e.$list.addClass("draggable")},e.prototype.checkResponsive=function(e){var t,o,s,n=this,l=n.$slider.width(),r=window.innerWidth||i(window).width();if("window"===n.respondTo?s=r:"slider"===n.respondTo?s=l:"min"===n.respondTo&&(s=Math.min(r,l)),n.originalSettings.responsive&&n.originalSettings.responsive.length>-1&&null!==n.originalSettings.responsive){o=null;for(t in n.breakpoints)n.breakpoints.hasOwnProperty(t)&&(n.originalSettings.mobileFirst===!1?s<n.breakpoints[t]&&(o=n.breakpoints[t]):s>n.breakpoints[t]&&(o=n.breakpoints[t]));null!==o?null!==n.activeBreakpoint?o!==n.activeBreakpoint&&(n.activeBreakpoint=o,"unslick"===n.breakpointSettings[o]?n.unslick():(n.options=i.extend({},n.originalSettings,n.breakpointSettings[o]),e===!0&&(n.currentSlide=n.options.initialSlide),n.refresh())):(n.activeBreakpoint=o,"unslick"===n.breakpointSettings[o]?n.unslick():(n.options=i.extend({},n.originalSettings,n.breakpointSettings[o]),e===!0&&(n.currentSlide=n.options.initialSlide),n.refresh())):null!==n.activeBreakpoint&&(n.activeBreakpoint=null,n.options=n.originalSettings,e===!0&&(n.currentSlide=n.options.initialSlide),n.refresh())}},e.prototype.changeSlide=function(e,t){var o,s,n,l=this,r=i(e.target);switch(r.is("a")&&e.preventDefault(),n=l.slideCount%l.options.slidesToScroll!==0,o=n?0:(l.slideCount-l.currentSlide)%l.options.slidesToScroll,e.data.message){case"previous":s=0===o?l.options.slidesToScroll:l.options.slidesToShow-o,l.slideCount>l.options.slidesToShow&&l.slideHandler(l.currentSlide-s,!1,t);break;case"next":s=0===o?l.options.slidesToScroll:o,l.slideCount>l.options.slidesToShow&&l.slideHandler(l.currentSlide+s,!1,t);break;case"index":var d=0===e.data.index?0:e.data.index||i(e.target).parent().index()*l.options.slidesToScroll;l.slideHandler(l.checkNavigable(d),!1,t);break;default:return}},e.prototype.checkNavigable=function(i){var 
e,t,o=this;if(e=o.getNavigableIndexes(),t=0,i>e[e.length-1])i=e[e.length-1];else for(var s in e){if(i<e[s]){i=t;break}t=e[s]}return i},e.prototype.clickHandler=function(i){var e=this;e.shouldClick===!1&&(i.stopImmediatePropagation(),i.stopPropagation(),i.preventDefault())},e.prototype.destroy=function(){var e=this;e.autoPlayClear(),e.touchObject={},i(".slick-cloned",e.$slider).remove(),e.$dots&&e.$dots.remove(),e.$prevArrow&&"object"!=typeof e.options.prevArrow&&e.$prevArrow.remove(),e.$nextArrow&&"object"!=typeof e.options.nextArrow&&e.$nextArrow.remove(),e.$slides.removeClass("slick-slide slick-active slick-center slick-visible").attr("aria-hidden","true").removeAttr("data-slick-index").css({position:"",left:"",top:"",zIndex:"",opacity:"",width:""}),e.$slider.removeClass("slick-slider"),e.$slider.removeClass("slick-initialized"),e.$list.off(".slick"),i(window).off(".slick-"+e.instanceUid),i(document).off(".slick-"+e.instanceUid),e.$slider.html(e.$slides)},e.prototype.disableTransition=function(i){var e=this,t={};t[e.transitionType]="",e.options.fade===!1?e.$slideTrack.css(t):e.$slides.eq(i).css(t)},e.prototype.fadeSlide=function(i,e){var t=this;t.cssTransitions===!1?(t.$slides.eq(i).css({zIndex:1e3}),t.$slides.eq(i).animate({opacity:1},t.options.speed,t.options.easing,e)):(t.applyTransition(i),t.$slides.eq(i).css({opacity:1,zIndex:1e3}),e&&setTimeout(function(){t.disableTransition(i),e.call()},t.options.speed))},e.prototype.filterSlides=e.prototype.slickFilter=function(i){var e=this;null!==i&&(e.unload(),e.$slideTrack.children(this.options.slide).detach(),e.$slidesCache.filter(i).appendTo(e.$slideTrack),e.reinit())},e.prototype.getCurrent=e.prototype.slickCurrentSlide=function(){var i=this;return i.currentSlide},e.prototype.getDotCount=function(){var i=this,e=0,t=0,o=0;if(i.options.infinite===!0)o=Math.ceil(i.slideCount/i.options.slidesToScroll);else if(i.options.centerMode===!0)o=i.slideCount;else 
for(;e<i.slideCount;)++o,e=t+i.options.slidesToShow,t+=i.options.slidesToScroll<=i.options.slidesToShow?i.options.slidesToScroll:i.options.slidesToShow;return o-1},e.prototype.getLeft=function(i){var e,t,o,s=this,n=0;return s.slideOffset=0,t=s.$slides.first().outerHeight(),s.options.infinite===!0?(s.slideCount>s.options.slidesToShow&&(s.slideOffset=s.slideWidth*s.options.slidesToShow*-1,n=t*s.options.slidesToShow*-1),s.slideCount%s.options.slidesToScroll!==0&&i+s.options.slidesToScroll>s.slideCount&&s.slideCount>s.options.slidesToShow&&(i>s.slideCount?(s.slideOffset=(s.options.slidesToShow-(i-s.slideCount))*s.slideWidth*-1,n=(s.options.slidesToShow-(i-s.slideCount))*t*-1):(s.slideOffset=s.slideCount%s.options.slidesToScroll*s.slideWidth*-1,n=s.slideCount%s.options.slidesToScroll*t*-1))):i+s.options.slidesToShow>s.slideCount&&(s.slideOffset=(i+s.options.slidesToShow-s.slideCount)*s.slideWidth,n=(i+s.options.slidesToShow-s.slideCount)*t),s.slideCount<=s.options.slidesToShow&&(s.slideOffset=0,n=0),s.options.centerMode===!0&&s.options.infinite===!0?s.slideOffset+=s.slideWidth*Math.floor(s.options.slidesToShow/2)-s.slideWidth:s.options.centerMode===!0&&(s.slideOffset=0,s.slideOffset+=s.slideWidth*Math.floor(s.options.slidesToShow/2)),e=s.options.vertical===!1?i*s.slideWidth*-1+s.slideOffset:i*t*-1+n,s.options.variableWidth===!0&&(o=s.$slideTrack.children(".slick-slide").eq(s.slideCount<=s.options.slidesToShow||s.options.infinite===!1?i:i+s.options.slidesToShow),e=o[0]?-1*o[0].offsetLeft:0,s.options.centerMode===!0&&(o=s.$slideTrack.children(".slick-slide").eq(s.options.infinite===!1?i:i+s.options.slidesToShow+1),e=o[0]?-1*o[0].offsetLeft:0,e+=(s.$list.width()-o.outerWidth())/2)),e},e.prototype.getOption=e.prototype.slickGetOption=function(i){var e=this;return e.options[i]},e.prototype.getNavigableIndexes=function(){var 
i,e=this,t=0,o=0,s=[];for(e.options.infinite===!1?(i=e.slideCount-e.options.slidesToShow+1,e.options.centerMode===!0&&(i=e.slideCount)):(t=-1*e.slideCount,o=-1*e.slideCount,i=2*e.slideCount);i>t;)s.push(t),t=o+e.options.slidesToScroll,o+=e.options.slidesToScroll<=e.options.slidesToShow?e.options.slidesToScroll:e.options.slidesToShow;return s},e.prototype.getSlick=function(){return this},e.prototype.getSlideCount=function(){var e,t,o,s=this;return o=s.options.centerMode===!0?s.slideWidth*Math.floor(s.options.slidesToShow/2):0,s.options.swipeToSlide===!0?(s.$slideTrack.find(".slick-slide").each(function(e,n){return n.offsetLeft-o+i(n).outerWidth()/2>-1*s.swipeLeft?(t=n,!1):void 0}),e=Math.abs(i(t).attr("data-slick-index")-s.currentSlide)||1):s.options.slidesToScroll},e.prototype.goTo=e.prototype.slickGoTo=function(i,e){var t=this;t.changeSlide({data:{message:"index",index:parseInt(i)}},e)},e.prototype.init=function(){var e=this;i(e.$slider).hasClass("slick-initialized")||(i(e.$slider).addClass("slick-initialized"),e.buildOut(),e.setProps(),e.startLoad(),e.loadSlider(),e.initializeEvents(),e.updateArrows(),e.updateDots()),e.$slider.trigger("init",[e])},e.prototype.initArrowEvents=function(){var i=this;i.options.arrows===!0&&i.slideCount>i.options.slidesToShow&&(i.$prevArrow.on("click.slick",{message:"previous"},i.changeSlide),i.$nextArrow.on("click.slick",{message:"next"},i.changeSlide))},e.prototype.initDotEvents=function(){var e=this;e.options.dots===!0&&e.slideCount>e.options.slidesToShow&&i("li",e.$dots).on("click.slick",{message:"index"},e.changeSlide),e.options.dots===!0&&e.options.pauseOnDotsHover===!0&&e.options.autoplay===!0&&i("li",e.$dots).on("mouseenter.slick",function(){e.paused=!0,e.autoPlayClear()}).on("mouseleave.slick",function(){e.paused=!1,e.autoPlay()})},e.prototype.initializeEvents=function(){var e=this;e.initArrowEvents(),e.initDotEvents(),e.$list.on("touchstart.slick mousedown.slick",{action:"start"},e.swipeHandler),e.$list.on("touchmove.slick 
mousemove.slick",{action:"move"},e.swipeHandler),e.$list.on("touchend.slick mouseup.slick",{action:"end"},e.swipeHandler),e.$list.on("touchcancel.slick mouseleave.slick",{action:"end"},e.swipeHandler),e.$list.on("click.slick",e.clickHandler),e.options.autoplay===!0&&(i(document).on(e.visibilityChange,function(){e.visibility()}),e.options.pauseOnHover===!0&&(e.$list.on("mouseenter.slick",function(){e.paused=!0,e.autoPlayClear()}),e.$list.on("mouseleave.slick",function(){e.paused=!1,e.autoPlay()}))),e.options.accessibility===!0&&e.$list.on("keydown.slick",e.keyHandler),e.options.focusOnSelect===!0&&i(e.$slideTrack).children().on("click.slick",e.selectHandler),i(window).on("orientationchange.slick.slick-"+e.instanceUid,function(){e.checkResponsive(),e.setPosition()}),i(window).on("resize.slick.slick-"+e.instanceUid,function(){i(window).width()!==e.windowWidth&&(clearTimeout(e.windowDelay),e.windowDelay=window.setTimeout(function(){e.windowWidth=i(window).width(),e.checkResponsive(),e.setPosition()},50))}),i("*[draggable!=true]",e.$slideTrack).on("dragstart",function(i){i.preventDefault()}),i(window).on("load.slick.slick-"+e.instanceUid,e.setPosition),i(document).on("ready.slick.slick-"+e.instanceUid,e.setPosition)},e.prototype.initUI=function(){var i=this;i.options.arrows===!0&&i.slideCount>i.options.slidesToShow&&(i.$prevArrow.show(),i.$nextArrow.show()),i.options.dots===!0&&i.slideCount>i.options.slidesToShow&&i.$dots.show(),i.options.autoplay===!0&&i.autoPlay()},e.prototype.keyHandler=function(i){var e=this;37===i.keyCode&&e.options.accessibility===!0?e.changeSlide({data:{message:"previous"}}):39===i.keyCode&&e.options.accessibility===!0&&e.changeSlide({data:{message:"next"}})},e.prototype.lazyLoad=function(){function e(e){i("img[data-lazy]",e).each(function(){var e=i(this),t=i(this).attr("data-lazy");e.load(function(){e.animate({opacity:1},200)}).css({opacity:0}).attr("src",t).removeAttr("data-lazy").removeClass("slick-loading")})}var 
t,o,s,n,l=this;l.options.centerMode===!0?l.options.infinite===!0?(s=l.currentSlide+(l.options.slidesToShow/2+1),n=s+l.options.slidesToShow+2):(s=Math.max(0,l.currentSlide-(l.options.slidesToShow/2+1)),n=2+(l.options.slidesToShow/2+1)+l.currentSlide):(s=l.options.infinite?l.options.slidesToShow+l.currentSlide:l.currentSlide,n=s+l.options.slidesToShow,l.options.fade===!0&&(s>0&&s--,n<=l.slideCount&&n++)),t=l.$slider.find(".slick-slide").slice(s,n),e(t),l.slideCount<=l.options.slidesToShow?(o=l.$slider.find(".slick-slide"),e(o)):l.currentSlide>=l.slideCount-l.options.slidesToShow?(o=l.$slider.find(".slick-cloned").slice(0,l.options.slidesToShow),e(o)):0===l.currentSlide&&(o=l.$slider.find(".slick-cloned").slice(-1*l.options.slidesToShow),e(o))},e.prototype.loadSlider=function(){var i=this;i.setPosition(),i.$slideTrack.css({opacity:1}),i.$slider.removeClass("slick-loading"),i.initUI(),"progressive"===i.options.lazyLoad&&i.progressiveLazyLoad()},e.prototype.next=e.prototype.slickNext=function(){var i=this;i.changeSlide({data:{message:"next"}})},e.prototype.pause=e.prototype.slickPause=function(){var i=this;i.autoPlayClear(),i.paused=!0},e.prototype.play=e.prototype.slickPlay=function(){var i=this;i.paused=!1,i.autoPlay()},e.prototype.postSlide=function(i){var e=this;e.$slider.trigger("afterChange",[e,i]),e.animating=!1,e.setPosition(),e.swipeLeft=null,e.options.autoplay===!0&&e.paused===!1&&e.autoPlay()},e.prototype.prev=e.prototype.slickPrev=function(){var i=this;i.changeSlide({data:{message:"previous"}})},e.prototype.progressiveLazyLoad=function(){var e,t,o=this;e=i("img[data-lazy]",o.$slider).length,e>0&&(t=i("img[data-lazy]",o.$slider).first(),t.attr("src",t.attr("data-lazy")).removeClass("slick-loading").load(function(){t.removeAttr("data-lazy"),o.progressiveLazyLoad(),o.options.adaptiveHeight===!0&&o.setPosition()}).error(function(){t.removeAttr("data-lazy"),o.progressiveLazyLoad()}))},e.prototype.refresh=function(){var 
e=this,t=e.currentSlide;e.destroy(),i.extend(e,e.initials),e.init(),e.changeSlide({data:{message:"index",index:t}},!0)},e.prototype.reinit=function(){var e=this;e.$slides=e.$slideTrack.children(e.options.slide).addClass("slick-slide"),e.slideCount=e.$slides.length,e.currentSlide>=e.slideCount&&0!==e.currentSlide&&(e.currentSlide=e.currentSlide-e.options.slidesToScroll),e.slideCount<=e.options.slidesToShow&&(e.currentSlide=0),e.setProps(),e.setupInfinite(),e.buildArrows(),e.updateArrows(),e.initArrowEvents(),e.buildDots(),e.updateDots(),e.initDotEvents(),e.options.focusOnSelect===!0&&i(e.$slideTrack).children().on("click.slick",e.selectHandler),e.setSlideClasses(0),e.setPosition(),e.$slider.trigger("reInit",[e])},e.prototype.removeSlide=e.prototype.slickRemove=function(i,e,t){var o=this;return"boolean"==typeof i?(e=i,i=e===!0?0:o.slideCount-1):i=e===!0?--i:i,o.slideCount<1||0>i||i>o.slideCount-1?!1:(o.unload(),t===!0?o.$slideTrack.children().remove():o.$slideTrack.children(this.options.slide).eq(i).remove(),o.$slides=o.$slideTrack.children(this.options.slide),o.$slideTrack.children(this.options.slide).detach(),o.$slideTrack.append(o.$slides),o.$slidesCache=o.$slides,void o.reinit())},e.prototype.setCSS=function(i){var e,t,o=this,s={};o.options.rtl===!0&&(i=-i),e="left"==o.positionProp?Math.ceil(i)+"px":"0px",t="top"==o.positionProp?Math.ceil(i)+"px":"0px",s[o.positionProp]=i,o.transformsEnabled===!1?o.$slideTrack.css(s):(s={},o.cssTransitions===!1?(s[o.animType]="translate("+e+", "+t+")",o.$slideTrack.css(s)):(s[o.animType]="translate3d("+e+", "+t+", 0px)",o.$slideTrack.css(s)))},e.prototype.setDimensions=function(){var i=this;if(i.options.vertical===!1?i.options.centerMode===!0&&i.$list.css({padding:"0px "+i.options.centerPadding}):(i.$list.height(i.$slides.first().outerHeight(!0)*i.options.slidesToShow),i.options.centerMode===!0&&i.$list.css({padding:i.options.centerPadding+" 
0px"})),i.listWidth=i.$list.width(),i.listHeight=i.$list.height(),i.options.vertical===!1&&i.options.variableWidth===!1)i.slideWidth=Math.ceil(i.listWidth/i.options.slidesToShow),i.$slideTrack.width(Math.ceil(i.slideWidth*i.$slideTrack.children(".slick-slide").length));else if(i.options.variableWidth===!0){var e=0;i.slideWidth=Math.ceil(i.listWidth/i.options.slidesToShow),i.$slideTrack.children(".slick-slide").each(function(){e+=i.listWidth}),i.$slideTrack.width(Math.ceil(e)+1)}else i.slideWidth=Math.ceil(i.listWidth),i.$slideTrack.height(Math.ceil(i.$slides.first().outerHeight(!0)*i.$slideTrack.children(".slick-slide").length));var t=i.$slides.first().outerWidth(!0)-i.$slides.first().width();i.options.variableWidth===!1&&i.$slideTrack.children(".slick-slide").width(i.slideWidth-t)},e.prototype.setFade=function(){var e,t=this;t.$slides.each(function(o,s){e=t.slideWidth*o*-1,i(s).css(t.options.rtl===!0?{position:"relative",right:e,top:0,zIndex:800,opacity:0}:{position:"relative",left:e,top:0,zIndex:800,opacity:0})}),t.$slides.eq(t.currentSlide).css({zIndex:900,opacity:1})},e.prototype.setHeight=function(){var i=this;if(1===i.options.slidesToShow&&i.options.adaptiveHeight===!0&&i.options.vertical===!1){var e=i.$slides.eq(i.currentSlide).outerHeight(!0);i.$list.css("height",e)}},e.prototype.setOption=e.prototype.slickSetOption=function(i,e,t){var o=this;o.options[i]=e,t===!0&&(o.unload(),o.reinit())},e.prototype.setPosition=function(){var i=this;i.setDimensions(),i.setHeight(),i.options.fade===!1?i.setCSS(i.getLeft(i.currentSlide)):i.setFade(),i.$slider.trigger("setPosition",[i])},e.prototype.setProps=function(){var i=this,e=document.body.style;i.positionProp=i.options.vertical===!0?"top":"left","top"===i.positionProp?i.$slider.addClass("slick-vertical"):i.$slider.removeClass("slick-vertical"),(void 0!==e.WebkitTransition||void 0!==e.MozTransition||void 0!==e.msTransition)&&i.options.useCSS===!0&&(i.cssTransitions=!0),void 
0!==e.OTransform&&(i.animType="OTransform",i.transformType="-o-transform",i.transitionType="OTransition",void 0===e.perspectiveProperty&&void 0===e.webkitPerspective&&(i.animType=!1)),void 0!==e.MozTransform&&(i.animType="MozTransform",i.transformType="-moz-transform",i.transitionType="MozTransition",void 0===e.perspectiveProperty&&void 0===e.MozPerspective&&(i.animType=!1)),void 0!==e.webkitTransform&&(i.animType="webkitTransform",i.transformType="-webkit-transform",i.transitionType="webkitTransition",void 0===e.perspectiveProperty&&void 0===e.webkitPerspective&&(i.animType=!1)),void 0!==e.msTransform&&(i.animType="msTransform",i.transformType="-ms-transform",i.transitionType="msTransition",void 0===e.msTransform&&(i.animType=!1)),void 0!==e.transform&&i.animType!==!1&&(i.animType="transform",i.transformType="transform",i.transitionType="transition"),i.transformsEnabled=null!==i.animType&&i.animType!==!1},e.prototype.setSlideClasses=function(i){var e,t,o,s,n=this;n.$slider.find(".slick-slide").removeClass("slick-active").attr("aria-hidden","true").removeClass("slick-center"),t=n.$slider.find(".slick-slide"),n.options.centerMode===!0?(e=Math.floor(n.options.slidesToShow/2),n.options.infinite===!0&&(i>=e&&i<=n.slideCount-1-e?n.$slides.slice(i-e,i+e+1).addClass("slick-active").attr("aria-hidden","false"):(o=n.options.slidesToShow+i,t.slice(o-e+1,o+e+2).addClass("slick-active").attr("aria-hidden","false")),0===i?t.eq(t.length-1-n.options.slidesToShow).addClass("slick-center"):i===n.slideCount-1&&t.eq(n.options.slidesToShow).addClass("slick-center")),n.$slides.eq(i).addClass("slick-center")):i>=0&&i<=n.slideCount-n.options.slidesToShow?n.$slides.slice(i,i+n.options.slidesToShow).addClass("slick-active").attr("aria-hidden","false"):t.length<=n.options.slidesToShow?t.addClass("slick-active").attr("aria-hidden","false"):(s=n.slideCount%n.options.slidesToShow,o=n.options.infinite===!0?n.options.slidesToShow+i:i,n.options.slidesToShow==n.options.slidesToScroll&&n.slideCount-
i<n.options.slidesToShow?t.slice(o-(n.options.slidesToShow-s),o+s).addClass("slick-active").attr("aria-hidden","false"):t.slice(o,o+n.options.slidesToShow).addClass("slick-active").attr("aria-hidden","false")),"ondemand"===n.options.lazyLoad&&n.lazyLoad()},e.prototype.setupInfinite=function(){var e,t,o,s=this;if(s.options.fade===!0&&(s.options.centerMode=!1),s.options.infinite===!0&&s.options.fade===!1&&(t=null,s.slideCount>s.options.slidesToShow)){for(o=s.options.centerMode===!0?s.options.slidesToShow+1:s.options.slidesToShow,e=s.slideCount;e>s.slideCount-o;e-=1)t=e-1,i(s.$slides[t]).clone(!0).attr("id","").attr("data-slick-index",t-s.slideCount).prependTo(s.$slideTrack).addClass("slick-cloned");for(e=0;o>e;e+=1)t=e,i(s.$slides[t]).clone(!0).attr("id","").attr("data-slick-index",t+s.slideCount).appendTo(s.$slideTrack).addClass("slick-cloned");s.$slideTrack.find(".slick-cloned").find("[id]").each(function(){i(this).attr("id","")})}},e.prototype.selectHandler=function(e){var t=this,o=parseInt(i(e.target).parents(".slick-slide").attr("data-slick-index"));return o||(o=0),t.slideCount<=t.options.slidesToShow?(t.$slider.find(".slick-slide").removeClass("slick-active").attr("aria-hidden","true"),t.$slides.eq(o).addClass("slick-active").attr("aria-hidden","false"),t.options.centerMode===!0&&(t.$slider.find(".slick-slide").removeClass("slick-center"),t.$slides.eq(o).addClass("slick-center")),void t.asNavFor(o)):void t.slideHandler(o)},e.prototype.slideHandler=function(i,e,t){var o,s,n,l,r=null,d=this;return e=e||!1,d.animating===!0&&d.options.waitForAnimate===!0||d.options.fade===!0&&d.currentSlide===i||d.slideCount<=d.options.slidesToShow?void 
0:(e===!1&&d.asNavFor(i),o=i,r=d.getLeft(o),l=d.getLeft(d.currentSlide),d.currentLeft=null===d.swipeLeft?l:d.swipeLeft,d.options.infinite===!1&&d.options.centerMode===!1&&(0>i||i>d.getDotCount()*d.options.slidesToScroll)?void(d.options.fade===!1&&(o=d.currentSlide,t!==!0?d.animateSlide(l,function(){d.postSlide(o)}):d.postSlide(o))):d.options.infinite===!1&&d.options.centerMode===!0&&(0>i||i>d.slideCount-d.options.slidesToScroll)?void(d.options.fade===!1&&(o=d.currentSlide,t!==!0?d.animateSlide(l,function(){d.postSlide(o)}):d.postSlide(o))):(d.options.autoplay===!0&&clearInterval(d.autoPlayTimer),s=0>o?d.slideCount%d.options.slidesToScroll!==0?d.slideCount-d.slideCount%d.options.slidesToScroll:d.slideCount+o:o>=d.slideCount?d.slideCount%d.options.slidesToScroll!==0?0:o-d.slideCount:o,d.animating=!0,d.$slider.trigger("beforeChange",[d,d.currentSlide,s]),n=d.currentSlide,d.currentSlide=s,d.setSlideClasses(d.currentSlide),d.updateDots(),d.updateArrows(),d.options.fade===!0?(t!==!0?d.fadeSlide(s,function(){d.postSlide(s)}):d.postSlide(s),void d.animateHeight()):void(t!==!0?d.animateSlide(r,function(){d.postSlide(s)}):d.postSlide(s))))},e.prototype.startLoad=function(){var i=this;i.options.arrows===!0&&i.slideCount>i.options.slidesToShow&&(i.$prevArrow.hide(),i.$nextArrow.hide()),i.options.dots===!0&&i.slideCount>i.options.slidesToShow&&i.$dots.hide(),i.$slider.addClass("slick-loading")},e.prototype.swipeDirection=function(){var i,e,t,o,s=this;return i=s.touchObject.startX-s.touchObject.curX,e=s.touchObject.startY-s.touchObject.curY,t=Math.atan2(e,i),o=Math.round(180*t/Math.PI),0>o&&(o=360-Math.abs(o)),45>=o&&o>=0?s.options.rtl===!1?"left":"right":360>=o&&o>=315?s.options.rtl===!1?"left":"right":o>=135&&225>=o?s.options.rtl===!1?"right":"left":"vertical"},e.prototype.swipeEnd=function(){var i,e=this;if(e.dragging=!1,e.shouldClick=e.touchObject.swipeLength>10?!1:!0,void 
0===e.touchObject.curX)return!1;if(e.touchObject.edgeHit===!0&&e.$slider.trigger("edge",[e,e.swipeDirection()]),e.touchObject.swipeLength>=e.touchObject.minSwipe)switch(e.swipeDirection()){case"left":i=e.options.swipeToSlide?e.checkNavigable(e.currentSlide+e.getSlideCount()):e.currentSlide+e.getSlideCount(),e.slideHandler(i),e.currentDirection=0,e.touchObject={},e.$slider.trigger("swipe",[e,"left"]);break;case"right":i=e.options.swipeToSlide?e.checkNavigable(e.currentSlide-e.getSlideCount()):e.currentSlide-e.getSlideCount(),e.slideHandler(i),e.currentDirection=1,e.touchObject={},e.$slider.trigger("swipe",[e,"right"])}else e.touchObject.startX!==e.touchObject.curX&&(e.slideHandler(e.currentSlide),e.touchObject={})},e.prototype.swipeHandler=function(i){var e=this;if(!(e.options.swipe===!1||"ontouchend"in document&&e.options.swipe===!1||e.options.draggable===!1&&-1!==i.type.indexOf("mouse")))switch(e.touchObject.fingerCount=i.originalEvent&&void 0!==i.originalEvent.touches?i.originalEvent.touches.length:1,e.touchObject.minSwipe=e.listWidth/e.options.touchThreshold,i.data.action){case"start":e.swipeStart(i);break;case"move":e.swipeMove(i);break;case"end":e.swipeEnd(i)}},e.prototype.swipeMove=function(i){var e,t,o,s,n,l=this;return n=void 0!==i.originalEvent?i.originalEvent.touches:null,!l.dragging||n&&1!==n.length?!1:(e=l.getLeft(l.currentSlide),l.touchObject.curX=void 0!==n?n[0].pageX:i.clientX,l.touchObject.curY=void 0!==n?n[0].pageY:i.clientY,l.touchObject.swipeLength=Math.round(Math.sqrt(Math.pow(l.touchObject.curX-l.touchObject.startX,2))),t=l.swipeDirection(),"vertical"!==t?(void 
0!==i.originalEvent&&l.touchObject.swipeLength>4&&i.preventDefault(),s=(l.options.rtl===!1?1:-1)*(l.touchObject.curX>l.touchObject.startX?1:-1),o=l.touchObject.swipeLength,l.touchObject.edgeHit=!1,l.options.infinite===!1&&(0===l.currentSlide&&"right"===t||l.currentSlide>=l.getDotCount()&&"left"===t)&&(o=l.touchObject.swipeLength*l.options.edgeFriction,l.touchObject.edgeHit=!0),l.swipeLeft=l.options.vertical===!1?e+o*s:e+o*(l.$list.height()/l.listWidth)*s,l.options.fade===!0||l.options.touchMove===!1?!1:l.animating===!0?(l.swipeLeft=null,!1):void l.setCSS(l.swipeLeft)):void 0)},e.prototype.swipeStart=function(i){var e,t=this;return 1!==t.touchObject.fingerCount||t.slideCount<=t.options.slidesToShow?(t.touchObject={},!1):(void 0!==i.originalEvent&&void 0!==i.originalEvent.touches&&(e=i.originalEvent.touches[0]),t.touchObject.startX=t.touchObject.curX=void 0!==e?e.pageX:i.clientX,t.touchObject.startY=t.touchObject.curY=void 0!==e?e.pageY:i.clientY,void(t.dragging=!0))},e.prototype.unfilterSlides=e.prototype.slickUnfilter=function(){var i=this;null!==i.$slidesCache&&(i.unload(),i.$slideTrack.children(this.options.slide).detach(),i.$slidesCache.appendTo(i.$slideTrack),i.reinit())},e.prototype.unload=function(){var e=this;i(".slick-cloned",e.$slider).remove(),e.$dots&&e.$dots.remove(),e.$prevArrow&&"object"!=typeof e.options.prevArrow&&e.$prevArrow.remove(),e.$nextArrow&&"object"!=typeof e.options.nextArrow&&e.$nextArrow.remove(),e.$slides.removeClass("slick-slide slick-active slick-visible").attr("aria-hidden","true").css("width","")},e.prototype.unslick=function(){var i=this;i.destroy()},e.prototype.updateArrows=function(){var 
i,e=this;i=Math.floor(e.options.slidesToShow/2),e.options.arrows===!0&&e.options.infinite!==!0&&e.slideCount>e.options.slidesToShow&&(e.$prevArrow.removeClass("slick-disabled"),e.$nextArrow.removeClass("slick-disabled"),0===e.currentSlide?(e.$prevArrow.addClass("slick-disabled"),e.$nextArrow.removeClass("slick-disabled")):e.currentSlide>=e.slideCount-e.options.slidesToShow&&e.options.centerMode===!1?(e.$nextArrow.addClass("slick-disabled"),e.$prevArrow.removeClass("slick-disabled")):e.currentSlide>=e.slideCount-1&&e.options.centerMode===!0&&(e.$nextArrow.addClass("slick-disabled"),e.$prevArrow.removeClass("slick-disabled")))},e.prototype.updateDots=function(){var i=this;null!==i.$dots&&(i.$dots.find("li").removeClass("slick-active").attr("aria-hidden","true"),i.$dots.find("li").eq(Math.floor(i.currentSlide/i.options.slidesToScroll)).addClass("slick-active").attr("aria-hidden","false"))},e.prototype.visibility=function(){var i=this;document[i.hidden]?(i.paused=!0,i.autoPlayClear()):(i.paused=!1,i.autoPlay())},i.fn.slick=function(){var i,t=this,o=arguments[0],s=Array.prototype.slice.call(arguments,1),n=t.length,l=0;for(l;n>l;l++)if("object"==typeof o||"undefined"==typeof o?t[l].slick=new e(t[l],o):i=t[l].slick[o].apply(t[l].slick,s),"undefined"!=typeof i)return i;return t},i(function(){i("[data-slick]").slick()})}),function(i){"use strict";sdg.slider={defaults:{accessibility:!1,arrows:!0,autoplay:!0,autoplaySpeed:5e3,centerMode:!1,centerPadding:"",dots:!1,draggable:!1,fade:!1,infinite:!0,lazyLoad:"",onAfterChange:null,onBeforeChange:null,onInit:null,onReInit:null,nextArrow:'<button type="button" data-role="none" class="slick-next">Next</button>',pauseOnHover:!0,prevArrow:'<button type="button" data-role="none" 
class="slick-prev">Previous</button>',slide:"div",responsive:null,slidesToShow:1,slidesToScroll:1,speed:300,target:"sliderTarget",template:"sliderTemplate",variableWidth:!1,vertical:!1}},sdg.slider.init=function(i){this.optionConfig(i)},sdg.slider.optionConfig=function(e){var t=i.extend(this.defaults,e),o=t.target,s=i("#"+t.template).html(),n=i("#"+o);this.addTemplate(t,o,s,n)},sdg.slider.addTemplate=function(i,e,t,o){var s=o.html();""!==t&&void 0!==t&&null!==t&&(s+=t,o.html(s)),this.initSlider(10,i,e,o)},sdg.slider.initSlider=function(i,e,t,o){var s=this;o.find(e.slide).length>0?(this.lazyLoad(e,o),o.slick({accessibility:e.accessibility,arrows:e.arrows,autoplay:e.autoplay,autoplaySpeed:e.autoplaySpeed,centerMode:e.centerMode,centerPadding:e.centerPadding,dots:e.dots,draggable:e.draggable,fade:e.fade,infinite:e.infinite,lazyLoad:e.lazyLoad,onAfterChange:e.onAfterChange,onBeforeChange:e.onBeforeChange,onInit:e.onInit,onReInit:e.onReInit,nextArrow:e.nextArrow,pauseOnHover:e.pauseOnHover,prevArrow:e.prevArrow,responsive:e.responsive,slide:e.slide,slidesToShow:e.slidesToShow,slidesToScroll:e.slidesToScroll,speed:e.speed,variableWidth:e.variableWidth,vertical:e.vertical}).css("overflow","visible"),o.find("button").delay(200).animate({opacity:1},500),o.find("a").length>0&&this.analytics(10,o)):5e3>i&&setTimeout(function(){s.initSlider(2*i,e,t,o)},i)},sdg.slider.lazyLoad=function(e,t){var o,s,n,l,r=t.find("img");if("ondemand"===e.lazyLoad&&r.length>0)for(o=0;o<r.length;o+=1)s=r[o],n=i(s),l=i(s).attr("src"),n.removeAttr("src"),n.attr("data-lazy",l)},sdg.slider.analytics=function(i,e){var t,o=e.find("a"),s=this;if(window.sdg&&sdg.analytics&&sdg.analytics.realEstateEvent)for(t=0;t<o.length;t+=1)o.eq(t).on("click",sdg.analytics.realEstateEvent);else 5e3>i&&setTimeout(function(){s.analytics(2*i,e)},i)}}(jQuery);
|
from aiocloudflare.commons.auth import Auth
class References(Auth):
    """Endpoint definition for Cloudflare load balancer pool references.

    NOTE(review): based on the Auth base-class convention of chaining
    `_endpoint1`/`_endpoint2`/`_endpoint3`, requests presumably resolve to
    ``user/load_balancers/pools/<pool_id>/references`` -- confirm against
    the Auth implementation.
    """
    # First path segment: the load balancer pools collection.
    _endpoint1 = "user/load_balancers/pools"
    # Sub-resource name appended after the pool identifier.
    _endpoint2 = "references"
    # This endpoint uses no third path segment.
    _endpoint3 = None
|
import React, {Component} from 'react'
import Helmet from 'react-helmet';
import {Row, Col, Card, Button, message, Form, Input, Breadcrumb} from 'antd';
// Local aliases for the antd form building blocks used in the render below.
const FormItem = Form.Item;
const TextArea = Input.TextArea; // NOTE(review): not referenced in this file's visible code -- confirm before removing
import PageTitle from '../components/PageTitle';
class UpdateInformation extends Component {
constructor(props) {
super(props);
this.state = {
formLoading: false,
formDisabled: false
};
this.formSubmission = this.formSubmission.bind(this);
}
formSubmission(e) {
e.preventDefault();
this.setState({formLoading: true});
this.props.form.validateFields((err, values) => {
if (!err) {
console.log('Received values of form: ', values);
}
fetch(`/api/alumni/update`, {
credentials: 'same-origin',
method: 'POST',
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json'
},
body: JSON.stringify({
name: values.name,
inumber: values.inumber,
email: values.email,
phone: values.phone,
address: values.address,
city: values.city,
zip: values.zip
})
})
.then(response => response.json())
.then(json => {
this.setState({formDisabled: true, formLoading: false});
message.success(json.message);
})
.catch(err => {
message.error(json.error);
})
});
}
render() {
const formItemLayout = {
labelCol: {
xs: { span: 24 },
sm: { span: 8 },
md: { span: 6 },
lg: { span: 4 }
},
wrapperCol: {
xs: { span: 24 },
sm: { span: 16 },
md: { span: 18 },
lg: { span: 20 }
}
};
const { getFieldDecorator } = this.props.form;
const { formLoading, formDisabled } = this.state;
return (
<div>
<Helmet>
<title>Pi Kappa Phi | Update Information</title>
</Helmet>
<PageTitle name="Update Information"/>
<Row type="flex" justify="center" align="top">
<p></p>
<Col span={12}>
<Card style={{width: '100%'}}>
<Form layout="horizontal" onSubmit={this.formSubmission}>
<FormItem {...formItemLayout} label="Name">
{getFieldDecorator('name', {
rules: [{
required: true, message: 'Please enter your name.',
}],
})(
<Input disabled={formLoading || formDisabled} placeholder="George Burdell" />
)}
</FormItem>
<FormItem {...formItemLayout} label="I-Number">
{getFieldDecorator('inumber')(
<Input disabled={formLoading || formDisabled} placeholder="I-1654" />
)}
</FormItem>
<FormItem {...formItemLayout} label="Email">
{getFieldDecorator('email', {
rules: [{
type: 'email', message: 'The input is not valid E-mail!',
}, {
required: true, message: 'Please enter your email.',
}],
})(
<Input disabled={formLoading || formDisabled} placeholder="gburdell3@gatech.edu" />
)}
</FormItem>
<FormItem {...formItemLayout} label="Phone">
{getFieldDecorator('phone', {
rules: [{
required: true, message: 'Please enter your phone number.',
}],
})(
<Input disabled={formLoading || formDisabled} placeholder="516-123-1992" />
)}
</FormItem>
<FormItem {...formItemLayout} label="Address">
{getFieldDecorator('address', {
rules: [{
required: true, message: 'Please enter your street address.',
}],
})(
<Input disabled={formLoading || formDisabled} placeholder="220 Ferst Drive NW" />
)}
</FormItem>
<FormItem {...formItemLayout} label="City">
{getFieldDecorator('city', {
rules: [{
required: true, message: 'Please enter your city.',
}],
})(
<Input disabled={formLoading || formDisabled} placeholder="Atlanta" />
)}
</FormItem>
<FormItem {...formItemLayout} label="Zip Code">
{getFieldDecorator('zip', {
rules: [{
required: true, message: 'Please enter your zip code.',
}],
})(
<Input disabled={formLoading || formDisabled} placeholder="30318" />
)}
</FormItem>
<Button type="primary" loading={formLoading} disabled={formLoading || formDisabled} onClick={this.formSubmission} htmlType="submit" style={{float: 'right'}}>Update Information</Button>
</Form>
</Card>
</Col>
</Row>
</div>
);
}
}
export default Form.create()(UpdateInformation);
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the avoid_reuse and setwalletflag features."""
from test_framework.test_framework import BaddcoinTestFramework
from test_framework.util import (
assert_approx,
assert_equal,
assert_raises_rpc_error,
connect_nodes,
)
def reset_balance(node, discardaddr):
    '''Throw away all owned coins by the node so it gets a balance of 0.'''
    spendable = node.getbalance(avoid_reuse=False)
    # Nothing worth discarding below the dust-ish threshold of 0.5.
    if spendable <= 0.5:
        return
    node.sendtoaddress(address=discardaddr, amount=spendable, subtractfeefromamount=True, avoid_reuse=False)
def count_unspent(node):
    '''Count the unspent outputs for the given node and return various statistics'''
    totals = {"count": 0, "sum": 0}
    reused = {"count": 0, "sum": 0}
    supports_reused = True
    for utxo in node.listunspent(minconf=0):
        totals["count"] += 1
        totals["sum"] += utxo["amount"]
        # A utxo without a "reused" key means the wallet does not report
        # reuse information; once observed, stop tallying reused outputs.
        if not (supports_reused and "reused" in utxo):
            supports_reused = False
            continue
        if utxo["reused"]:
            reused["count"] += 1
            reused["sum"] += utxo["amount"]
    reused["supported"] = supports_reused
    return {"total": totals, "reused": reused}
def assert_unspent(node, total_count=None, total_sum=None, reused_supported=None, reused_count=None, reused_sum=None):
    '''Make assertions about a node's unspent output statistics'''
    stats = count_unspent(node)
    total, reused = stats["total"], stats["reused"]
    # Each expectation is optional; only the supplied ones are checked.
    if total_count is not None:
        assert_equal(total["count"], total_count)
    if total_sum is not None:
        assert_approx(total["sum"], total_sum, 0.001)
    if reused_supported is not None:
        assert_equal(reused["supported"], reused_supported)
    if reused_count is not None:
        assert_equal(reused["count"], reused_count)
    if reused_sum is not None:
        assert_approx(reused["sum"], reused_sum, 0.001)
def assert_balances(node, mine):
    '''Make assertions about a node's getbalances output'''
    actual = node.getbalances()["mine"]
    for key, expected in mine.items():
        assert_approx(actual[key], expected, 0.001)
class AvoidReuseTest(BaddcoinTestFramework):
    """Functional test of the wallet's avoid_reuse flag and the setwalletflag RPC.

    Node 0 runs with default wallet flags; node 1 enables avoid_reuse in
    test_persistence and the remaining sub-tests exercise its effects.
    """
    def set_test_params(self):
        self.setup_clean_chain = False
        self.num_nodes = 2
        # This test isn't testing txn relay/timing, so set whitelist on the
        # peers for instant txn relay. This speeds up the test run time 2-3x.
        self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        '''Set up initial chain and run tests defined below'''
        self.test_persistence()
        self.test_immutable()
        self.nodes[0].generate(110)
        self.sync_all()
        self.test_change_remains_change(self.nodes[1])
        # Each sub-test below starts from a zero balance on node 1.
        reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
        self.test_sending_from_reused_address_without_avoid_reuse()
        reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
        self.test_sending_from_reused_address_fails("legacy")
        reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
        self.test_sending_from_reused_address_fails("p2sh-segwit")
        reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
        self.test_sending_from_reused_address_fails("bech32")
        reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
        self.test_getbalances_used()
        reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
        self.test_full_destination_group_is_preferred()
        reset_balance(self.nodes[1], self.nodes[0].getnewaddress())
        self.test_all_destination_groups_are_used()
    def test_persistence(self):
        '''Test that wallet files persist the avoid_reuse flag.'''
        self.log.info("Test wallet files persist avoid_reuse flag")
        # Configure node 1 to use avoid_reuse
        self.nodes[1].setwalletflag('avoid_reuse')
        # Flags should be node1.avoid_reuse=false, node2.avoid_reuse=true
        assert_equal(self.nodes[0].getwalletinfo()["avoid_reuse"], False)
        assert_equal(self.nodes[1].getwalletinfo()["avoid_reuse"], True)
        self.restart_node(1)
        connect_nodes(self.nodes[0], 1)
        # Flags should still be node1.avoid_reuse=false, node2.avoid_reuse=true
        assert_equal(self.nodes[0].getwalletinfo()["avoid_reuse"], False)
        assert_equal(self.nodes[1].getwalletinfo()["avoid_reuse"], True)
        # Attempting to set flag to its current state should throw
        assert_raises_rpc_error(-8, "Wallet flag is already set to false", self.nodes[0].setwalletflag, 'avoid_reuse', False)
        assert_raises_rpc_error(-8, "Wallet flag is already set to true", self.nodes[1].setwalletflag, 'avoid_reuse', True)
    def test_immutable(self):
        '''Test immutable wallet flags'''
        self.log.info("Test immutable wallet flags")
        # Attempt to set the disable_private_keys flag; this should not work
        assert_raises_rpc_error(-8, "Wallet flag is immutable", self.nodes[1].setwalletflag, 'disable_private_keys')
        tempwallet = ".wallet_avoidreuse.py_test_immutable_wallet.dat"
        # Create a wallet with disable_private_keys set; this should work
        self.nodes[1].createwallet(wallet_name=tempwallet, disable_private_keys=True)
        w = self.nodes[1].get_wallet_rpc(tempwallet)
        # Attempt to unset the disable_private_keys flag; this should not work
        assert_raises_rpc_error(-8, "Wallet flag is immutable", w.setwalletflag, 'disable_private_keys', False)
        # Unload temp wallet
        self.nodes[1].unloadwallet(tempwallet)
    def test_change_remains_change(self, node):
        '''Verify that a change output keeps its ischange status after being spent.'''
        self.log.info("Test that change doesn't turn into non-change when spent")
        reset_balance(node, node.getnewaddress())
        addr = node.getnewaddress()
        txid = node.sendtoaddress(addr, 1)
        out = node.listunspent(minconf=0, query_options={'minimumAmount': 2})
        assert_equal(len(out), 1)
        assert_equal(out[0]['txid'], txid)
        changeaddr = out[0]['address']
        # Make sure it's starting out as change as expected
        assert node.getaddressinfo(changeaddr)['ischange']
        for logical_tx in node.listtransactions():
            assert logical_tx.get('address') != changeaddr
        # Spend it
        reset_balance(node, node.getnewaddress())
        # It should still be change
        assert node.getaddressinfo(changeaddr)['ischange']
        for logical_tx in node.listtransactions():
            assert logical_tx.get('address') != changeaddr
    def test_sending_from_reused_address_without_avoid_reuse(self):
        '''
        Test the same as test_sending_from_reused_address_fails, except send the 10 BADD with
        the avoid_reuse flag set to false. This means the 10 BADD send should succeed,
        where it fails in test_sending_from_reused_address_fails.
        '''
        self.log.info("Test sending from reused address with avoid_reuse=false")
        fundaddr = self.nodes[1].getnewaddress()
        retaddr = self.nodes[0].getnewaddress()
        self.nodes[0].sendtoaddress(fundaddr, 10)
        self.nodes[0].generate(1)
        self.sync_all()
        # listunspent should show 1 single, unused 10 badd output
        assert_unspent(self.nodes[1], total_count=1, total_sum=10, reused_supported=True, reused_count=0)
        # getbalances should show no used, 10 badd trusted
        assert_balances(self.nodes[1], mine={"used": 0, "trusted": 10})
        # node 0 should not show a used entry, as it does not enable avoid_reuse
        assert("used" not in self.nodes[0].getbalances()["mine"])
        self.nodes[1].sendtoaddress(retaddr, 5)
        self.nodes[0].generate(1)
        self.sync_all()
        # listunspent should show 1 single, unused 5 badd output
        assert_unspent(self.nodes[1], total_count=1, total_sum=5, reused_supported=True, reused_count=0)
        # getbalances should show no used, 5 badd trusted
        assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5})
        self.nodes[0].sendtoaddress(fundaddr, 10)
        self.nodes[0].generate(1)
        self.sync_all()
        # listunspent should show 2 total outputs (5, 10 badd), one unused (5), one reused (10)
        assert_unspent(self.nodes[1], total_count=2, total_sum=15, reused_count=1, reused_sum=10)
        # getbalances should show 10 used, 5 badd trusted
        assert_balances(self.nodes[1], mine={"used": 10, "trusted": 5})
        self.nodes[1].sendtoaddress(address=retaddr, amount=10, avoid_reuse=False)
        # listunspent should show 1 total outputs (5 badd), unused
        assert_unspent(self.nodes[1], total_count=1, total_sum=5, reused_count=0)
        # getbalances should show no used, 5 badd trusted
        assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5})
        # node 1 should now have about 5 badd left (for both cases)
        assert_approx(self.nodes[1].getbalance(), 5, 0.001)
        assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 5, 0.001)
    def test_sending_from_reused_address_fails(self, second_addr_type):
        '''
        Test the simple case where [1] generates a new address A, then
        [0] sends 10 BADD to A.
        [1] spends 5 BADD from A. (leaving roughly 5 BADD useable)
        [0] sends 10 BADD to A again.
        [1] tries to spend 10 BADD (fails; dirty).
        [1] tries to spend 4 BADD (succeeds; change address sufficient)
        '''
        self.log.info("Test sending from reused {} address fails".format(second_addr_type))
        fundaddr = self.nodes[1].getnewaddress(label="", address_type="legacy")
        retaddr = self.nodes[0].getnewaddress()
        self.nodes[0].sendtoaddress(fundaddr, 10)
        self.nodes[0].generate(1)
        self.sync_all()
        # listunspent should show 1 single, unused 10 badd output
        assert_unspent(self.nodes[1], total_count=1, total_sum=10, reused_supported=True, reused_count=0)
        # getbalances should show no used, 10 badd trusted
        assert_balances(self.nodes[1], mine={"used": 0, "trusted": 10})
        self.nodes[1].sendtoaddress(retaddr, 5)
        self.nodes[0].generate(1)
        self.sync_all()
        # listunspent should show 1 single, unused 5 badd output
        assert_unspent(self.nodes[1], total_count=1, total_sum=5, reused_supported=True, reused_count=0)
        # getbalances should show no used, 5 badd trusted
        assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5})
        if not self.options.descriptors:
            # For the second send, we transmute it to a related single-key address
            # to make sure it's also detected as re-use
            fund_spk = self.nodes[0].getaddressinfo(fundaddr)["scriptPubKey"]
            fund_decoded = self.nodes[0].decodescript(fund_spk)
            if second_addr_type == "p2sh-segwit":
                new_fundaddr = fund_decoded["segwit"]["p2sh-segwit"]
            elif second_addr_type == "bech32":
                new_fundaddr = fund_decoded["segwit"]["addresses"][0]
            else:
                new_fundaddr = fundaddr
                assert_equal(second_addr_type, "legacy")
        self.nodes[0].sendtoaddress(new_fundaddr, 10)
        self.nodes[0].generate(1)
        self.sync_all()
        # listunspent should show 2 total outputs (5, 10 badd), one unused (5), one reused (10)
        assert_unspent(self.nodes[1], total_count=2, total_sum=15, reused_count=1, reused_sum=10)
        # getbalances should show 10 used, 5 badd trusted
        assert_balances(self.nodes[1], mine={"used": 10, "trusted": 5})
        # node 1 should now have a balance of 5 (no dirty) or 15 (including dirty)
        assert_approx(self.nodes[1].getbalance(), 5, 0.001)
        assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 15, 0.001)
        assert_raises_rpc_error(-6, "Insufficient funds", self.nodes[1].sendtoaddress, retaddr, 10)
        self.nodes[1].sendtoaddress(retaddr, 4)
        # listunspent should show 2 total outputs (1, 10 badd), one unused (1), one reused (10)
        assert_unspent(self.nodes[1], total_count=2, total_sum=11, reused_count=1, reused_sum=10)
        # getbalances should show 10 used, 1 badd trusted
        assert_balances(self.nodes[1], mine={"used": 10, "trusted": 1})
        # node 1 should now have about 1 badd left (no dirty) and 11 (including dirty)
        assert_approx(self.nodes[1].getbalance(), 1, 0.001)
        assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 11, 0.001)
    def test_getbalances_used(self):
        '''
        getbalances and listunspent should pick up on reused addresses
        immediately, even for address reusing outputs created before the first
        transaction was spending from that address
        '''
        self.log.info("Test getbalances used category")
        # node under test should be completely empty
        assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0)
        new_addr = self.nodes[1].getnewaddress()
        ret_addr = self.nodes[0].getnewaddress()
        # send multiple transactions, reusing one address
        for _ in range(11):
            self.nodes[0].sendtoaddress(new_addr, 1)
        self.nodes[0].generate(1)
        self.sync_all()
        # send transaction that should not use all the available outputs
        # per the current coin selection algorithm
        self.nodes[1].sendtoaddress(ret_addr, 5)
        # getbalances and listunspent should show the remaining outputs
        # in the reused address as used/reused
        assert_unspent(self.nodes[1], total_count=2, total_sum=6, reused_count=1, reused_sum=1)
        assert_balances(self.nodes[1], mine={"used": 1, "trusted": 5})
    def test_full_destination_group_is_preferred(self):
        '''
        Test the case where [1] only has 11 outputs of 1 BADD in the same reused
        address and tries to send a small payment of 0.5 BADD. The wallet
        should use 10 outputs from the reused address as inputs and not a
        single 1 BADD input, in order to join several outputs from the reused
        address.
        '''
        self.log.info("Test that full destination groups are preferred in coin selection")
        # Node under test should be empty
        assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0)
        new_addr = self.nodes[1].getnewaddress()
        ret_addr = self.nodes[0].getnewaddress()
        # Send 11 outputs of 1 BADD to the same, reused address in the wallet
        for _ in range(11):
            self.nodes[0].sendtoaddress(new_addr, 1)
        self.nodes[0].generate(1)
        self.sync_all()
        # Sending a transaction that is smaller than each one of the
        # available outputs
        txid = self.nodes[1].sendtoaddress(address=ret_addr, amount=0.5)
        inputs = self.nodes[1].getrawtransaction(txid, 1)["vin"]
        # The transaction should use 10 inputs exactly
        assert_equal(len(inputs), 10)
    def test_all_destination_groups_are_used(self):
        '''
        Test the case where [1] only has 22 outputs of 1 BADD in the same reused
        address and tries to send a payment of 20.5 BADD. The wallet
        should use all 22 outputs from the reused address as inputs.
        '''
        self.log.info("Test that all destination groups are used")
        # Node under test should be empty
        assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0)
        new_addr = self.nodes[1].getnewaddress()
        ret_addr = self.nodes[0].getnewaddress()
        # Send 22 outputs of 1 BADD to the same, reused address in the wallet
        for _ in range(22):
            self.nodes[0].sendtoaddress(new_addr, 1)
        self.nodes[0].generate(1)
        self.sync_all()
        # Sending a transaction that needs to use the full groups
        # of 10 inputs but also the incomplete group of 2 inputs.
        txid = self.nodes[1].sendtoaddress(address=ret_addr, amount=20.5)
        inputs = self.nodes[1].getrawtransaction(txid, 1)["vin"]
        # The transaction should use 22 inputs exactly
        assert_equal(len(inputs), 22)
if __name__ == '__main__':
    # Entry point: run the functional test when this file is executed directly.
    AvoidReuseTest().main()
|
/**
 * Source globs and output destinations for the scripts and styles bundles.
 * Both bundles land in the same AEM design directory.
 */
const paths = (() => {
    const designDir = '../talis-ui/src/main/content/jcr_root/etc/designs/talis';
    // Build one bundle descriptor: entry file, watch glob, and destination.
    const bundle = (src, all, fileName) => ({
        src,
        all,
        build: `${designDir}/${fileName}`,
        dest: {
            path: designDir,
            fileName,
        },
    });
    return {
        scripts: bundle('index.js', './elements/*.js', 'build.js'),
        styles: bundle('main.css', './*.css', 'build.css'),
    };
})();
/**
 * Webpack configuration: bundles the script entry point into the AEM design
 * directory described by `paths`.
 */
const webpackConfig = {
    entry: `./${paths.scripts.src}`,
    output: {
        path: `${__dirname}/${paths.scripts.dest.path}`,
        // Use the configured destination file name rather than repeating the
        // literal "build.js", so `paths` stays the single source of truth.
        filename: paths.scripts.dest.fileName
    },
    /*module: {
        rules: [{
            test: /\.jsx?$/,
            exclude: [/node_modules/],
            use: [{
                loader: 'babel-loader',
                options: { presets: ['babel-preset-env'].map(require.resolve) }
            }]
        }]
    },*/
    resolve: {
        modules: ['./node_modules/']
    },
    mode: 'development',
    devtool: 'source-map'
};
module.exports = { paths: paths, webpack: webpackConfig };
|
import React, { useState } from "react";
import EducationCard from "./EducationCard";
import EducationEditForm from "./EducationEditForm";
// Import the EducationCard and EducationEditForm components
function Education({ isEditable, education, setEducations}) {
const [edit, setEdit] = useState(false);
return (
<>
{edit ?
<EducationEditForm setEdit={setEdit} education={education} setEducations={setEducations}/>
: <EducationCard isEditable={isEditable} setEdit={setEdit} education={education}/>}
</>
)
}
export default Education;
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from SimPEG import Utils
from SimPEG.EM.Base import BaseEMProblem
from .SurveyDC import Survey_ky
from .FieldsDC_2D import Fields_ky, Fields_ky_CC, Fields_ky_N
from .FieldsDC import FieldsDC, Fields_CC, Fields_N
import numpy as np
from SimPEG.Utils import Zero
from .BoundaryUtils import getxBCyBC_CC
from scipy.special import kn
class BaseDCProblem_2D(BaseEMProblem):
    """
    Base 2.5D DC problem

    Solves the DC resistivity problem in the wavenumber (ky) domain: one
    linear system per wavenumber, then the spatial-domain potential is
    recovered by a trapezoidal-rule cosine transform over ky (evaluated at
    y=0 throughout the sensitivity code).
    """
    surveyPair = Survey_ky
    fieldsPair = Fields_ky  # SimPEG.EM.Static.Fields_2D
    fieldsPair_fwd = FieldsDC
    # Number of wavenumbers used for the ky -> y inverse cosine transform.
    nky = 15
    kys = np.logspace(-4, 1, nky)
    # One factorized solver per wavenumber, populated in fields().
    Ainv = [None for i in range(nky)]
    nT = nky  # Only for using TimeFields
    # If True, Jvec/Jtvec form and cache the full J matrix (see getJ).
    storeJ = False
    _Jmatrix = None
    # If True, the cached J matrix survives model updates.
    fix_Jmatrix = False

    def fields(self, m):
        # type: ignore comment intentionally absent; m is a model vector.
        """Factorize and solve A(ky) u = q for every wavenumber.

        Returns a Fields_ky object indexed by (src, solutionType, iky).
        """
        print ("Compute fields")
        if m is not None:
            self.model = m
        # Release previously factorized systems before re-solving.
        if self.Ainv[0] is not None:
            for i in range(self.nky):
                self.Ainv[i].clean()
        f = self.fieldsPair(self.mesh, self.survey)
        Srcs = self.survey.srcList
        for iky in range(self.nky):
            ky = self.kys[iky]
            A = self.getA(ky)
            # Keep the factorization: it is reused by Jvec/Jtvec.
            self.Ainv[iky] = self.Solver(A, **self.solverOpts)
            RHS = self.getRHS(ky)
            u = self.Ainv[iky] * RHS
            f[Srcs, self._solutionType, iky] = u
        return f

    def fields_to_space(self, f, y=0.):
        """Inverse-transform ky-domain fields back to space at offset y.

        Uses the trapezoidal rule with cos(ky*y) weights; the first interval
        reuses dky[0] (the kys are log-spaced, so this is an approximation).
        """
        f_fwd = self.fieldsPair_fwd(self.mesh, self.survey)
        # Evaluating Integration using Trapezoidal rules
        nky = self.kys.size
        dky = np.diff(self.kys)
        dky = np.r_[dky[0], dky]
        phi0 = 1./np.pi*f[:, self._solutionType, 0]
        phi = np.zeros_like(phi0)
        for iky in range(nky):
            phi1 = 1./np.pi*f[:, self._solutionType, iky]
            # Trapezoid: average of consecutive samples times interval width.
            phi += phi1*dky[iky]/2.*np.cos(self.kys[iky]*y)
            phi += phi0*dky[iky]/2.*np.cos(self.kys[iky]*y)
            phi0 = phi1.copy()
        f_fwd[:, self._solutionType] = phi
        return f_fwd

    def getJ(self, m, f=None):
        """
        Generate Full sensitivity matrix

        Computed once via the adjoint (_Jtvec with v=None) and cached in
        self._Jmatrix until the model changes (see deleteTheseOnModelUpdate).
        """
        if self._Jmatrix is not None:
            return self._Jmatrix
        else:
            if self.verbose:
                print("Calculating J and storing")
            self.model = m
            if f is None:
                f = self.fields(m)
            self._Jmatrix = (self._Jtvec(m, v=None, f=f)).T
        return self._Jmatrix

    def Jvec(self, m, v, f=None):
        """
        Compute sensitivity matrix (J) and vector (v) product.
        """
        # Fast path: use the cached/explicit J when storeJ is enabled.
        if self.storeJ:
            J = self.getJ(m, f=f)
            Jv = Utils.mkvc(np.dot(J, v))
            return Jv
        self.model = m
        if f is None:
            f = self.fields(m)
        # TODO: This is not a good idea !! should change that as a list
        Jv = self.dataPair(self.survey)  # same size as the data
        Jv0 = self.dataPair(self.survey)
        # Assume y=0.
        # This needs some thoughts to implement in general when src is dipole
        dky = np.diff(self.kys)
        dky = np.r_[dky[0], dky]
        y = 0.
        # TODO: this loop is pretty slow .. (Parellize)
        for iky in range(self.nky):
            ky = self.kys[iky]
            for src in self.survey.srcList:
                u_src = f[src, self._solutionType, iky]  # solution vector
                # Chain rule: du/dm = A^-1 (-dA/dm v + dRHS/dm v).
                dA_dm_v = self.getADeriv(ky, u_src, v, adjoint=False)
                dRHS_dm_v = self.getRHSDeriv(ky, src, v)
                du_dm_v = self.Ainv[iky] * (- dA_dm_v + dRHS_dm_v)
                for rx in src.rxList:
                    df_dmFun = getattr(f, '_{0!s}Deriv'.format(rx.projField),
                                       None)
                    df_dm_v = df_dmFun(iky, src, du_dm_v, v, adjoint=False)
                    # Trapezoidal intergration
                    Jv1_temp = 1./np.pi*rx.evalDeriv(ky, src, self.mesh, f,
                                                     df_dm_v)
                    if iky == 0:
                        # First assigment
                        Jv[src, rx] = Jv1_temp*dky[iky]*np.cos(ky*y)
                    else:
                        Jv[src, rx] += Jv1_temp*dky[iky]/2.*np.cos(ky*y)
                        Jv[src, rx] += Jv0[src, rx]*dky[iky]/2.*np.cos(ky*y)
                    Jv0[src, rx] = Jv1_temp.copy()
        return Utils.mkvc(Jv)

    def Jtvec(self, m, v, f=None):
        """
        Compute adjoint sensitivity matrix (J^T) and vector (v) product.
        """
        # Fast path: use the cached/explicit J when storeJ is enabled.
        if self.storeJ:
            J = self.getJ(m, f=f)
            Jtv = Utils.mkvc(np.dot(J.T, v))
            return Jtv
        self.model = m
        if f is None:
            f = self.fields(m)
        return self._Jtvec(m, v=v, f=f)

    def _Jtvec(self, m, v=None, f=None):
        """
        Compute adjoint sensitivity matrix (J^T) and vector (v) product.
        Full J matrix can be computed by inputing v=None
        """
        if v is not None:
            # Ensure v is a data object.
            if not isinstance(v, self.dataPair):
                v = self.dataPair(self.survey, v)
            Jtv = np.zeros(m.size, dtype=float)
            # Assume y=0.
            dky = np.diff(self.kys)
            dky = np.r_[dky[0], dky]
            y = 0.
            for src in self.survey.srcList:
                for rx in src.rxList:
                    Jtv_temp1 = np.zeros(m.size, dtype=float)
                    Jtv_temp0 = np.zeros(m.size, dtype=float)
                    # TODO: this loop is pretty slow .. (Parellize)
                    for iky in range(self.nky):
                        u_src = f[src, self._solutionType, iky]
                        ky = self.kys[iky]
                        # wrt f, need possibility wrt m
                        PTv = rx.evalDeriv(ky, src, self.mesh, f, v[src, rx],
                                           adjoint=True)
                        df_duTFun = getattr(
                            f, '_{0!s}Deriv'.format(rx.projField), None
                        )
                        df_duT, df_dmT = df_duTFun(iky, src, None, PTv,
                                                   adjoint=True)
                        # Adjoint solve against the stored factorization.
                        ATinvdf_duT = self.Ainv[iky] * df_duT
                        dA_dmT = self.getADeriv(ky, u_src, ATinvdf_duT,
                                                adjoint=True)
                        dRHS_dmT = self.getRHSDeriv(ky, src, ATinvdf_duT,
                                                    adjoint=True)
                        du_dmT = -dA_dmT + dRHS_dmT
                        Jtv_temp1 = 1./np.pi*(df_dmT + du_dmT).astype(float)
                        # Trapezoidal intergration
                        if iky == 0:
                            # First assigment
                            Jtv += Jtv_temp1*dky[iky]*np.cos(ky*y)
                        else:
                            Jtv += Jtv_temp1*dky[iky]/2.*np.cos(ky*y)
                            Jtv += Jtv_temp0*dky[iky]/2.*np.cos(ky*y)
                        Jtv_temp0 = Jtv_temp1.copy()
            return Utils.mkvc(Jtv)
        # This is for forming full sensitivity
        else:
            # This is for forming full sensitivity matrix
            # Column-major (order='F') since columns are filled per receiver.
            Jt = np.zeros((self.model.size, self.survey.nD), order='F')
            istrt = int(0)
            iend = int(0)
            # Assume y=0.
            dky = np.diff(self.kys)
            dky = np.r_[dky[0], dky]
            y = 0.
            for src in self.survey.srcList:
                for rx in src.rxList:
                    iend = istrt + rx.nD
                    Jtv_temp1 = np.zeros((m.size, rx.nD), dtype=float)
                    Jtv_temp0 = np.zeros((m.size, rx.nD), dtype=float)
                    # TODO: this loop is pretty slow .. (Parellize)
                    for iky in range(self.nky):
                        u_src = f[src, self._solutionType, iky]
                        ky = self.kys[iky]
                        # wrt f, need possibility wrt m
                        # NOTE(review): unlike the v-path above, this branch
                        # ignores df_dmT and dRHS_dmT — confirm intended.
                        P = rx.getP(self.mesh, rx.projGLoc(f)).toarray()
                        ATinvdf_duT = self.Ainv[iky] * (P.T)
                        dA_dmT = self.getADeriv(ky, u_src, ATinvdf_duT,
                                                adjoint=True)
                        Jtv_temp1 = 1./np.pi*(-dA_dmT)
                        # Trapezoidal intergration
                        if iky == 0:
                            # First assigment
                            if rx.nD == 1:
                                Jt[:, istrt] += Jtv_temp1*dky[iky]*np.cos(ky*y)
                            else:
                                Jt[:, istrt:iend] += Jtv_temp1*dky[iky]*np.cos(ky*y)
                        else:
                            if rx.nD == 1:
                                Jt[:, istrt] += Jtv_temp1*dky[iky]/2.*np.cos(ky*y)
                                Jt[:, istrt] += Jtv_temp0*dky[iky]/2.*np.cos(ky*y)
                            else:
                                Jt[:, istrt:iend] += Jtv_temp1*dky[iky]/2.*np.cos(ky*y)
                                Jt[:, istrt:iend] += Jtv_temp0*dky[iky]/2.*np.cos(ky*y)
                        Jtv_temp0 = Jtv_temp1.copy()
                    istrt += rx.nD
            return Jt

    def getSourceTerm(self, ky):
        """
        takes concept of source and turns it into a matrix
        """
        """
        Evaluates the sources, and puts them in matrix form
        :rtype: (numpy.ndarray, numpy.ndarray)
        :return: q (nC or nN, nSrc)
        """
        Srcs = self.survey.srcList
        # EB formulation: sources live on nodes; HJ: on cell centers.
        if self._formulation == 'EB':
            n = self.mesh.nN
            # return NotImplementedError
        elif self._formulation == 'HJ':
            n = self.mesh.nC
        q = np.zeros((n, len(Srcs)))
        for i, src in enumerate(Srcs):
            q[:, i] = src.eval(self)
        return q

    @property
    def deleteTheseOnModelUpdate(self):
        # Invalidate cached mass matrices (and optionally J) when the model
        # changes.
        toDelete = super(BaseDCProblem_2D, self).deleteTheseOnModelUpdate
        if self.sigmaMap is not None:
            toDelete += [
                '_MnSigma', '_MnSigmaDerivMat',
                '_MccRhoi', '_MccRhoiDerivMat'
            ]
        # fix_Jmatrix=True keeps the stored sensitivity across model updates.
        if self.fix_Jmatrix:
            return toDelete
        if self._Jmatrix is not None:
            toDelete += ['_Jmatrix']
        return toDelete

    ####################################################
    # Mass Matrices
    ####################################################

    @property
    def MnSigma(self):
        """
        Node inner product matrix for \\(\\sigma\\). Used in the E-B
        formulation
        """
        # TODO: only works isotropic sigma
        if getattr(self, '_MnSigma', None) is None:
            sigma = self.sigma
            vol = self.mesh.vol
            # Average cell-centered sigma*vol onto nodes.
            self._MnSigma = Utils.sdiag(
                self.mesh.aveN2CC.T*(Utils.sdiag(vol)*sigma)
            )
        return self._MnSigma

    @property
    def MnSigmaDerivMat(self):
        """
        Derivative of MnSigma with respect to the model
        """
        if getattr(self, '_MnSigmaDerivMat', None) is None:
            sigma = self.sigma
            vol = self.mesh.vol
            self._MnSigmaDerivMat = (
                self.mesh.aveN2CC.T * Utils.sdiag(vol) * self.sigmaDeriv
            )
        return self._MnSigmaDerivMat

    def MnSigmaDeriv(self, u, v, adjoint=False):
        """
        Derivative of MnSigma with respect to the model times a vector (u)

        If storeInnerProduct, uses the cached derivative matrix; otherwise
        recomputes via sigmaDeriv on the fly.
        """
        if self.storeInnerProduct:
            if adjoint:
                return self.MnSigmaDerivMat.T * (
                    Utils.sdiag(u)*v
                )
            else:
                return u*(self.MnSigmaDerivMat * v)
        else:
            sigma = self.sigma
            vol = self.mesh.vol
            if adjoint:
                return self.sigmaDeriv.T * (
                    Utils.sdiag(vol) * (self.mesh.aveN2CC * (Utils.sdiag(u)*v))
                )
            else:
                dsig_dm_v = self.sigmaDeriv * v
                return (
                    u * (self.mesh.aveN2CC.T * (vol * dsig_dm_v))
                )

    @property
    def MccRhoi(self):
        """
        Cell inner product matrix for \\(\\rho^{-1}\\). Used in the H-J
        formulation
        """
        # TODO: only works isotropic rho
        if getattr(self, '_MccRhoi', None) is None:
            self._MccRhoi = Utils.sdiag(
                self.mesh.vol/self.rho
            )
        return self._MccRhoi

    @property
    def MccRhoiDerivMat(self):
        """
        Derivative of MccRho with respect to the model
        """
        if getattr(self, '_MccRhoiDerivMat', None) is None:
            rho = self.rho
            vol = self.mesh.vol
            # d(vol/rho)/d(rho) = -vol/rho^2, chained through rhoDeriv.
            self._MccRhoiDerivMat = (
                Utils.sdiag(vol*(-1./rho**2))*self.rhoDeriv
            )
        return self._MccRhoiDerivMat

    def MccRhoiDeriv(self, u, v, adjoint=False):
        """
        Derivative of :code:`MccRhoi` with respect to the model.
        """
        if self.rhoMap is None:
            # rho is not a function of the model: derivative is zero.
            return Utils.Zero()
        if len(self.rho.shape) > 1:
            if self.rho.shape[1] > self.mesh.dim:
                raise NotImplementedError(
                    "Full anisotropy is not implemented for MccRhoiDeriv."
                )
        if self.storeInnerProduct:
            if adjoint:
                return self.MccRhoiDerivMat.T * (Utils.sdiag(u) * v)
            else:
                return Utils.sdiag(u) * (self.MccRhoiDerivMat * v)
        else:
            vol = self.mesh.vol
            rho = self.rho
            if adjoint:
                return self.rhoDeriv.T * (Utils.sdiag(u*vol*(-1./rho**2)) * v)
            else:
                return (Utils.sdiag(u*vol*(-1./rho**2)))*(self.rhoDeriv * v)
class Problem2D_CC(BaseDCProblem_2D):
    """
    2.5D cell centered DC problem

    Potentials discretized at cell centers (H-J formulation); boundary
    conditions are applied through the modified Div/Grad operators built in
    setBC().
    """
    _solutionType = 'phiSolution'
    _formulation = 'HJ'  # CC potentials means J is on faces
    fieldsPair = Fields_ky_CC
    fieldsPair_fwd = Fields_CC
    # One of 'Neumann', 'Dirichlet', 'Mixed'.
    bc_type = 'Mixed'

    def __init__(self, mesh, **kwargs):
        BaseDCProblem_2D.__init__(self, mesh, **kwargs)

    def getA(self, ky):
        """
        Make the A matrix for the cell centered DC resistivity problem
        A = D MfRhoI G

        The ky**2 MccRhoi term is the wavenumber-domain contribution of the
        out-of-plane derivative.
        """
        # To handle Mixed boundary condition
        self.setBC(ky=ky)
        D = self.Div
        G = self.Grad
        vol = self.mesh.vol
        MfRhoI = self.MfRhoI
        # Get resistivity rho
        rho = self.rho
        A = D * MfRhoI * G + ky**2 * self.MccRhoi
        if self.bc_type == "Neumann":
            # Pin one entry to remove the constant-potential null space.
            A[0, 0] = A[0, 0] + 1.
        return A

    def getADeriv(self, ky, u, v, adjoint=False):
        # Derivative of A(ky) u with respect to the model, times v
        # (or its adjoint). Combines the face rho^-1 and cell rho^-1 terms.
        # To handle Mixed boundary condition
        # self.setBC(ky=ky)
        D = self.Div
        G = self.Grad
        vol = self.mesh.vol
        if adjoint:
            return (
                self.MfRhoIDeriv(G*u.flatten(), D.T*v, adjoint=adjoint) +
                ky**2 * self.MccRhoiDeriv(u.flatten(), v, adjoint=adjoint)
            )
        else:
            return (
                D * self.MfRhoIDeriv(G*u.flatten(), v, adjoint=adjoint) +
                ky**2 * self.MccRhoiDeriv(u.flatten(), v, adjoint=adjoint)
            )

    def getRHS(self, ky):
        """
        RHS for the DC problem
        q
        """
        RHS = self.getSourceTerm(ky)
        return RHS

    def getRHSDeriv(self, ky, src, v, adjoint=False):
        """
        Derivative of the right hand side with respect to the model

        The source term does not depend on the model, so this is Zero.
        """
        # TODO: add qDeriv for RHS depending on m
        # qDeriv = src.evalDeriv(self, ky, adjoint=adjoint)
        # return qDeriv
        return Zero()

    def setBC(self, ky=None):
        # Build the boundary-condition-aware Div/Grad operators for the
        # requested bc_type. For 'Mixed', a Robin condition based on
        # modified Bessel functions K1/K0 approximates radiation at the
        # x/y-minus boundaries (the y-plus boundary, the surface, stays
        # Neumann).
        fxm, fxp, fym, fyp = self.mesh.faceBoundaryInd
        gBFxm = self.mesh.gridFx[fxm, :]
        gBFxp = self.mesh.gridFx[fxp, :]
        gBFym = self.mesh.gridFy[fym, :]
        gBFyp = self.mesh.gridFy[fyp, :]

        # Setup Mixed B.C (alpha, beta, gamma)
        temp_xm = np.ones_like(gBFxm[:, 0])
        temp_xp = np.ones_like(gBFxp[:, 0])
        temp_ym = np.ones_like(gBFym[:, 1])
        temp_yp = np.ones_like(gBFyp[:, 1])

        if self.bc_type == "Neumann":
            alpha_xm, alpha_xp = temp_xm*0., temp_xp*0.
            alpha_ym, alpha_yp = temp_ym*0., temp_yp*0.

            beta_xm, beta_xp = temp_xm, temp_xp
            beta_ym, beta_yp = temp_ym, temp_yp

            gamma_xm, gamma_xp = temp_xm*0., temp_xp*0.
            gamma_ym, gamma_yp = temp_ym*0., temp_yp*0.

        elif self.bc_type == "Dirichlet":
            alpha_xm, alpha_xp = temp_xm, temp_xp
            alpha_ym, alpha_yp = temp_ym, temp_yp

            beta_xm, beta_xp = temp_xm*0., temp_xp*0.
            beta_ym, beta_yp = temp_ym*0., temp_yp*0.

            gamma_xm, gamma_xp = temp_xm*0., temp_xp*0.
            gamma_ym, gamma_yp = temp_ym*0., temp_yp*0.

        elif self.bc_type == "Mixed":
            # Reference "source" location used for the radiation condition.
            xs = np.median(self.mesh.vectorCCx)
            # NOTE(review): median of a single element equals that element;
            # possibly np.median(self.mesh.vectorCCy) was intended — confirm.
            ys = np.median(self.mesh.vectorCCy[-1])

            def r_boundary(x, y):
                # Inverse distance from the reference point (xs, ys).
                return 1./np.sqrt(
                    (x - xs)**2 + (y - ys)**2
                )

            rxm = r_boundary(gBFxm[:, 0], gBFxm[:, 1])
            rxp = r_boundary(gBFxp[:, 0], gBFxp[:, 1])
            rym = r_boundary(gBFym[:, 0], gBFym[:, 1])

            # Robin coefficient alpha = ky * K1(ky r)/K0(ky r) * (x - xs).
            alpha_xm = ky*(
                kn(1, ky*rxm) / kn(0, ky*rxm) * (gBFxm[:, 0]-xs)
            )
            alpha_xp = ky*(
                kn(1, ky*rxp) / kn(0, ky*rxp) * (gBFxp[:, 0]-xs)
            )
            # NOTE(review): uses gBFym[:, 0]-ys (x-coordinate minus ys);
            # gBFym[:, 1]-ys may have been intended — confirm.
            alpha_ym = ky*(
                kn(1, ky*rym) / kn(0, ky*rym) * (gBFym[:, 0]-ys)
            )
            # Surface (y-plus) boundary stays Neumann.
            alpha_yp = temp_yp*0.

            beta_xm, beta_xp = temp_xm, temp_xp
            beta_ym, beta_yp = temp_ym, temp_yp

            gamma_xm, gamma_xp = temp_xm*0., temp_xp*0.
            gamma_ym, gamma_yp = temp_ym*0., temp_yp*0.

        alpha = [alpha_xm, alpha_xp, alpha_ym, alpha_yp]
        beta = [beta_xm, beta_xp, beta_ym, beta_yp]
        gamma = [gamma_xm, gamma_xp, gamma_ym, gamma_yp]

        x_BC, y_BC = getxBCyBC_CC(self.mesh, alpha, beta, gamma)
        V = self.Vol
        self.Div = V * self.mesh.faceDiv
        P_BC, B = self.mesh.getBCProjWF_simple()
        M = B*self.mesh.aveCC2F
        # Gradient operator with the BC correction folded in.
        self.Grad = self.Div.T - P_BC*Utils.sdiag(y_BC)*M
class Problem2D_N(BaseDCProblem_2D):
    """
    2.5D nodal DC problem

    Potentials discretized on nodes (E-B formulation).
    """
    _solutionType = 'phiSolution'
    _formulation = 'EB'  # CC potentials means J is on faces
    fieldsPair = Fields_ky_N
    fieldsPair_fwd = Fields_N

    def __init__(self, mesh, **kwargs):
        BaseDCProblem_2D.__init__(self, mesh, **kwargs)
        # self.setBC()

    def getA(self, ky):
        """
        Make the A matrix for the cell centered DC resistivity problem
        A = D MfRhoI G

        Nodal discretization: A = G.T MeSigma G + ky**2 MnSigma.
        """
        MeSigma = self.MeSigma
        MnSigma = self.MnSigma
        Grad = self.mesh.nodalGrad
        # Get conductivity sigma
        sigma = self.sigma
        A = Grad.T * MeSigma * Grad + ky**2*MnSigma

        # This seems not required for 2.5D problem
        # Handling Null space of A
        # A[0, 0] = A[0, 0] + 1.
        # print (A.shape, 'N')
        return A

    def getADeriv(self, ky, u, v, adjoint=False):
        # Derivative of A(ky) u with respect to the model, times v
        # (or its adjoint).
        MeSigma = self.MeSigma
        Grad = self.mesh.nodalGrad
        sigma = self.sigma
        vol = self.mesh.vol
        if adjoint:
            return (
                self.MeSigmaDeriv(Grad*u.flatten(), Grad*v, adjoint=adjoint) +
                ky**2*self.MnSigmaDeriv(u.flatten(), v, adjoint=adjoint)
            )
        else:
            return (
                Grad.T*self.MeSigmaDeriv(Grad*u.flatten(), v, adjoint=adjoint) +
                ky**2*self.MnSigmaDeriv(u.flatten(), v, adjoint=adjoint)
            )
        # return (Grad.T*(self.MeSigmaDeriv(Grad*u.flatten(), v, adjoint)) +
        #         ky**2*self.MnSigmaDeriv(u.flatten())*v)

    def getRHS(self, ky):
        """
        RHS for the DC problem
        q
        """
        RHS = self.getSourceTerm(ky)
        return RHS

    def getRHSDeriv(self, ky, src, v, adjoint=False):
        """
        Derivative of the right hand side with respect to the model

        The source term does not depend on the model, so this is Zero.
        """
        # TODO: add qDeriv for RHS depending on m
        # qDeriv = src.evalDeriv(self, ky, adjoint=adjoint)
        # return qDeriv
        return Zero()
|
import unittest
import numpy as np
import torch
from torch import nn
import torch_testing as tt
from gym.spaces import Box
from all.core import State
from all.policies import GaussianPolicy
STATE_DIM = 2   # dimensionality of the synthetic observation vectors
ACTION_DIM = 3  # dimensionality of the Box action space
class TestGaussian(unittest.TestCase):
    """Unit tests for GaussianPolicy.

    NOTE(review): the expected tensors in test_eval are pinned to the exact
    RNG stream produced by torch.manual_seed(2) in setUp; any change to the
    order of torch calls will change them.
    """

    def setUp(self):
        # Fixed seed so sampled actions and expected values are reproducible.
        torch.manual_seed(2)
        self.space = Box(np.array([-1, -1, -1]), np.array([1, 1, 1]))
        # The model emits 2 * ACTION_DIM outputs (means and spreads).
        self.model = nn.Sequential(
            nn.Linear(STATE_DIM, ACTION_DIM * 2)
        )
        optimizer = torch.optim.RMSprop(self.model.parameters(), lr=0.01)
        self.policy = GaussianPolicy(self.model, optimizer, self.space)

    def test_output_shape(self):
        # Single state -> one action of ACTION_DIM components.
        state = State(torch.randn(1, STATE_DIM))
        action = self.policy(state).sample()
        self.assertEqual(action.shape, (1, ACTION_DIM))
        # Batch of 5 states -> batch of 5 actions.
        state = State(torch.randn(5, STATE_DIM))
        action = self.policy(state).sample()
        self.assertEqual(action.shape, (5, ACTION_DIM))

    def test_reinforce_one(self):
        # One REINFORCE step on -log_prob should increase the log-probability
        # of the previously sampled action.
        state = State(torch.randn(1, STATE_DIM))
        dist = self.policy(state)
        action = dist.sample()
        log_prob1 = dist.log_prob(action)
        loss = -log_prob1.mean()
        self.policy.reinforce(loss)
        dist = self.policy(state)
        log_prob2 = dist.log_prob(action)
        self.assertGreater(log_prob2.item(), log_prob1.item())

    def test_converge(self):
        # Repeated log_prob-weighted squared-error updates should drive the
        # sampled actions toward the fixed target.
        state = State(torch.randn(1, STATE_DIM))
        target = torch.tensor([1., 2., -1.])
        for _ in range(0, 1000):
            dist = self.policy(state)
            action = dist.sample()
            log_prob = dist.log_prob(action)
            error = ((target - action) ** 2).mean()
            loss = (error * log_prob).mean()
            self.policy.reinforce(loss)
        # `error` holds the value from the final iteration.
        self.assertTrue(error < 1)

    def test_eval(self):
        # Exercise the no_grad and eval paths; expected tensors are pinned
        # to the seeded RNG stream (see class NOTE).
        state = State(torch.randn(1, STATE_DIM))
        dist = self.policy.no_grad(state)
        tt.assert_almost_equal(dist.mean, torch.tensor([[-0.233, 0.459, -0.058]]), decimal=3)
        tt.assert_almost_equal(dist.entropy(), torch.tensor([4.251]), decimal=3)
        best = self.policy.eval(state)
        tt.assert_almost_equal(best, torch.tensor([[-0.233, 0.459, -0.058]]), decimal=3)
# Allow running this test module directly (python <file>.py).
if __name__ == '__main__':
    unittest.main()
|
import {expect} from 'chai';
import {spec} from 'modules/pubmaticBidAdapter';
import * as utils from 'src/utils';
const constants = require('src/constants.json');
// Test suite for the PubMatic Prebid.js bid adapter: bid validation,
// OpenRTB request formation (including GDPR), and response interpretation.
describe('PubMatic adapter', () => {
  let bidRequests;
  let bidResponses;

  beforeEach(() => {
    // A single representative bid request with all optional params set.
    bidRequests = [
      {
        bidder: 'pubmatic',
        params: {
          publisherId: '301',
          adSlot: '/15671365/DMDemo@300x250:0',
          kadfloor: '1.2',
          pmzoneid: 'aabc, ddef',
          kadpageurl: 'www.publisher.com',
          yob: '1986',
          gender: 'M',
          lat: '12.3',
          lon: '23.7',
          wiid: '1234567890',
          profId: '100',
          verId: '200'
        },
        placementCode: '/19968336/header-bid-tag-1',
        sizes: [[300, 250], [300, 600]],
        bidId: '23acc48ad47af5',
        requestId: '0fb4905b-9456-4152-86be-c6f6d259ba99',
        bidderRequestId: '1c56ad30b9b8ca8',
        transactionId: '92489f71-1bf2-49a0-adf9-000cea934729'
      }
    ];
    // An OpenRTB response with two seatbids; deal_channel 6/5 map to
    // PMPG/PREF in the dealChannel tests below.
    bidResponses = {
      'body': {
        'id': '93D3BAD6-E2E2-49FB-9D89-920B1761C865',
        'seatbid': [{
          'bid': [{
            'id': '74858439-49D7-4169-BA5D-44A046315B2F',
            'impid': '22bddb28db77d',
            'price': 1.3,
            'adm': 'image3.pubmatic.com Layer based creative',
            'h': 250,
            'w': 300,
            'ext': {
              'deal_channel': 6
            }
          }]
        }, {
          'bid': [{
            'id': '74858439-49D7-4169-BA5D-44A046315BEF',
            'impid': '22bddb28db77e',
            'price': 1.7,
            'adm': 'image3.pubmatic.com Layer based creative',
            'h': 250,
            'w': 300,
            'ext': {
              'deal_channel': 5
            }
          }]
        }]
      }
    };
  });

  describe('implementation', () => {
    describe('Bid validations', () => {
      it('valid bid case', () => {
        let validBid = {
            bidder: 'pubmatic',
            params: {
              publisherId: '301',
              adSlot: '/15671365/DMDemo@300x250:0'
            }
          },
          isValid = spec.isBidRequestValid(validBid);
        expect(isValid).to.equal(true);
      });

      it('invalid bid case: publisherId not passed', () => {
        let validBid = {
            bidder: 'pubmatic',
            params: {
              adSlot: '/15671365/DMDemo@300x250:0'
            }
          },
          isValid = spec.isBidRequestValid(validBid);
        expect(isValid).to.equal(false);
      });

      // publisherId must be a string, not a number.
      it('invalid bid case: publisherId is not string', () => {
        let validBid = {
            bidder: 'pubmatic',
            params: {
              publisherId: 301,
              adSlot: '/15671365/DMDemo@300x250:0'
            }
          },
          isValid = spec.isBidRequestValid(validBid);
        expect(isValid).to.equal(false);
      });

      it('invalid bid case: adSlot not passed', () => {
        let validBid = {
            bidder: 'pubmatic',
            params: {
              publisherId: '301'
            }
          },
          isValid = spec.isBidRequestValid(validBid);
        expect(isValid).to.equal(false);
      });

      // adSlot must be a string, not a number.
      it('invalid bid case: adSlot is not string', () => {
        let validBid = {
            bidder: 'pubmatic',
            params: {
              publisherId: '301',
              adSlot: 15671365
            }
          },
          isValid = spec.isBidRequestValid(validBid);
        expect(isValid).to.equal(false);
      });
    });

    describe('Request formation', () => {
      it('Endpoint checking', () => {
        let request = spec.buildRequests(bidRequests);
        expect(request.url).to.equal('//hbopenbid.pubmatic.com/translator?source=prebid-client');
        expect(request.method).to.equal('POST');
      });

      // Verifies every OpenRTB field derived from the bid params.
      it('Request params check', () => {
        let request = spec.buildRequests(bidRequests);
        let data = JSON.parse(request.data);
        expect(data.at).to.equal(1); // auction type
        expect(data.cur[0]).to.equal('USD'); // currency
        expect(data.site.domain).to.be.a('string'); // domain should be set
        expect(data.site.page).to.equal(bidRequests[0].params.kadpageurl); // forced pageURL
        expect(data.site.publisher.id).to.equal(bidRequests[0].params.publisherId); // publisher Id
        expect(data.user.yob).to.equal(parseInt(bidRequests[0].params.yob)); // YOB
        expect(data.user.gender).to.equal(bidRequests[0].params.gender); // Gender
        expect(data.device.geo.lat).to.equal(parseFloat(bidRequests[0].params.lat)); // Latitude
        expect(data.device.geo.lon).to.equal(parseFloat(bidRequests[0].params.lon)); // Longitude
        expect(data.user.geo.lat).to.equal(parseFloat(bidRequests[0].params.lat)); // Latitude
        expect(data.user.geo.lon).to.equal(parseFloat(bidRequests[0].params.lon)); // Longitude
        expect(data.ext.wrapper.wv).to.equal(constants.REPO_AND_VERSION); // Wrapper Version
        expect(data.ext.wrapper.transactionId).to.equal(bidRequests[0].transactionId); // Prebid TransactionId
        expect(data.ext.wrapper.wiid).to.equal(bidRequests[0].params.wiid); // OpenWrap: Wrapper Impression ID
        expect(data.ext.wrapper.profile).to.equal(parseInt(bidRequests[0].params.profId)); // OpenWrap: Wrapper Profile ID
        expect(data.ext.wrapper.version).to.equal(parseInt(bidRequests[0].params.verId)); // OpenWrap: Wrapper Profile Version ID
        expect(data.imp[0].id).to.equal(bidRequests[0].bidId); // Prebid bid id is passed as id
        expect(data.imp[0].bidfloor).to.equal(parseFloat(bidRequests[0].params.kadfloor)); // kadfloor
        expect(data.imp[0].tagid).to.equal('/15671365/DMDemo'); // tagid
        expect(data.imp[0].banner.w).to.equal(300); // width
        expect(data.imp[0].banner.h).to.equal(250); // height
        expect(data.imp[0].ext.pmZoneId).to.equal(bidRequests[0].params.pmzoneid.split(',').slice(0, 50).map(id => id.trim()).join()); // pmzoneid
      });

      // Same as above, plus the GDPR consent string / applies flag.
      it('Request params check with GDPR Consent', () => {
        let bidRequest = {
          gdprConsent: {
            consentString: 'kjfdniwjnifwenrif3',
            gdprApplies: true
          }
        };
        let request = spec.buildRequests(bidRequests, bidRequest);
        let data = JSON.parse(request.data);
        expect(data.user.ext.consent).to.equal('kjfdniwjnifwenrif3');
        expect(data.regs.ext.gdpr).to.equal(1);
        expect(data.at).to.equal(1); // auction type
        expect(data.cur[0]).to.equal('USD'); // currency
        expect(data.site.domain).to.be.a('string'); // domain should be set
        expect(data.site.page).to.equal(bidRequests[0].params.kadpageurl); // forced pageURL
        expect(data.site.publisher.id).to.equal(bidRequests[0].params.publisherId); // publisher Id
        expect(data.user.yob).to.equal(parseInt(bidRequests[0].params.yob)); // YOB
        expect(data.user.gender).to.equal(bidRequests[0].params.gender); // Gender
        expect(data.device.geo.lat).to.equal(parseFloat(bidRequests[0].params.lat)); // Latitude
        expect(data.device.geo.lon).to.equal(parseFloat(bidRequests[0].params.lon)); // Longitude
        expect(data.user.geo.lat).to.equal(parseFloat(bidRequests[0].params.lat)); // Latitude
        expect(data.user.geo.lon).to.equal(parseFloat(bidRequests[0].params.lon)); // Longitude
        expect(data.ext.wrapper.wv).to.equal(constants.REPO_AND_VERSION); // Wrapper Version
        expect(data.ext.wrapper.transactionId).to.equal(bidRequests[0].transactionId); // Prebid TransactionId
        expect(data.ext.wrapper.wiid).to.equal(bidRequests[0].params.wiid); // OpenWrap: Wrapper Impression ID
        expect(data.ext.wrapper.profile).to.equal(parseInt(bidRequests[0].params.profId)); // OpenWrap: Wrapper Profile ID
        expect(data.ext.wrapper.version).to.equal(parseInt(bidRequests[0].params.verId)); // OpenWrap: Wrapper Profile Version ID
        expect(data.imp[0].id).to.equal(bidRequests[0].bidId); // Prebid bid id is passed as id
        expect(data.imp[0].bidfloor).to.equal(parseFloat(bidRequests[0].params.kadfloor)); // kadfloor
        expect(data.imp[0].tagid).to.equal('/15671365/DMDemo'); // tagid
        expect(data.imp[0].banner.w).to.equal(300); // width
        expect(data.imp[0].banner.h).to.equal(250); // height
        expect(data.imp[0].ext.pmZoneId).to.equal(bidRequests[0].params.pmzoneid.split(',').slice(0, 50).map(id => id.trim()).join()); // pmzoneid
      });

      // An adSlot without a size suffix (@WxH) makes the request invalid.
      it('invalid adslot', () => {
        bidRequests[0].params.adSlot = '/15671365/DMDemo';
        let request = spec.buildRequests(bidRequests);
        expect(request).to.equal(undefined);
      });
    });

    describe('Response checking', () => {
      it('should check for valid response values', () => {
        let request = spec.buildRequests(bidRequests);
        let response = spec.interpretResponse(bidResponses, request);
        expect(response).to.be.an('array').with.length.above(0);
        expect(response[0].requestId).to.equal(bidResponses.body.seatbid[0].bid[0].impid);
        expect(response[0].cpm).to.equal((bidResponses.body.seatbid[0].bid[0].price).toFixed(2));
        expect(response[0].width).to.equal(bidResponses.body.seatbid[0].bid[0].w);
        expect(response[0].height).to.equal(bidResponses.body.seatbid[0].bid[0].h);
        // creativeId falls back to the bid id when crid is absent.
        if (bidResponses.body.seatbid[0].bid[0].crid) {
          expect(response[0].creativeId).to.equal(bidResponses.body.seatbid[0].bid[0].crid);
        } else {
          expect(response[0].creativeId).to.equal(bidResponses.body.seatbid[0].bid[0].id);
        }
        expect(response[0].dealId).to.equal(bidResponses.body.seatbid[0].bid[0].dealid);
        expect(response[0].currency).to.equal('USD');
        expect(response[0].netRevenue).to.equal(false);
        expect(response[0].ttl).to.equal(300);
        expect(response[0].referrer).to.include(utils.getTopWindowUrl());
        expect(response[0].ad).to.equal(bidResponses.body.seatbid[0].bid[0].adm);

        expect(response[1].requestId).to.equal(bidResponses.body.seatbid[1].bid[0].impid);
        expect(response[1].cpm).to.equal((bidResponses.body.seatbid[1].bid[0].price).toFixed(2));
        expect(response[1].width).to.equal(bidResponses.body.seatbid[1].bid[0].w);
        expect(response[1].height).to.equal(bidResponses.body.seatbid[1].bid[0].h);
        if (bidResponses.body.seatbid[1].bid[0].crid) {
          expect(response[1].creativeId).to.equal(bidResponses.body.seatbid[1].bid[0].crid);
        } else {
          expect(response[1].creativeId).to.equal(bidResponses.body.seatbid[1].bid[0].id);
        }
        expect(response[1].dealId).to.equal(bidResponses.body.seatbid[1].bid[0].dealid);
        expect(response[1].currency).to.equal('USD');
        expect(response[1].netRevenue).to.equal(false);
        expect(response[1].ttl).to.equal(300);
        expect(response[1].referrer).to.include(utils.getTopWindowUrl());
        expect(response[1].ad).to.equal(bidResponses.body.seatbid[1].bid[0].adm);
      });

      // deal_channel 6 -> 'PMPG', 5 -> 'PREF'.
      it('should check for dealChannel value selection', () => {
        let request = spec.buildRequests(bidRequests);
        let response = spec.interpretResponse(bidResponses, request);
        expect(response).to.be.an('array').with.length.above(0);
        expect(response[0].dealChannel).to.equal('PMPG');
        expect(response[1].dealChannel).to.equal('PREF');
      });

      // Unknown deal_channel values map to null.
      it('should check for unexpected dealChannel value selection', () => {
        let request = spec.buildRequests(bidRequests);
        let updateBiResponse = bidResponses;
        updateBiResponse.body.seatbid[0].bid[0].ext.deal_channel = 11;
        let response = spec.interpretResponse(updateBiResponse, request);
        expect(response).to.be.an('array').with.length.above(0);
        expect(response[0].dealChannel).to.equal(null);
      });
    });
  });
});
|
define(["jquery","core/ajax","core/config","block_navigation/ajax_response_renderer"],function(a,b,c,d){function e(a){return a.closest("[data-block]").attr("data-instanceid")}function f(a){return a.closest("[data-block]").attr("data-block")}var g=c.wwwroot+"/lib/ajax/getnavbranch.php";return{load:function(b){b=a(b);var h=a.Deferred(),i={elementid:b.attr("data-node-id"),id:b.attr("data-node-key"),type:b.attr("data-node-type"),sesskey:c.sesskey,instance:e(b),blocktype:f(b)},j={type:"POST",dataType:"json",data:i};return a.ajax(g,j).done(function(a){d.render(b,a),h.resolve()}),h}}});
|
#!/usr/bin/env python2
"""
builtin_def.py
Metadata:
- Is used for lookup in cmd_eval.py
- Should be used for completion
- complete names of builtins
- complete flags they take
- handle aliases : . and source, [ and test
- Should be reflected in the contents of the 'help' builtin
NOTE: bash has help -d -m -s. Default is -s, like a man page.
Links on special builtins:
http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_14
"""
from __future__ import print_function
from typing import Dict, List, Optional, Any
# Special builtins can't be redefined by functions. On the other hand, 'cd'
# CAN be redefined.
#
# http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_14
# https://www.gnu.org/software/bash/manual/html_node/Special-Builtins.html
# Builtins registered with kind='normal'. NOTE: registration order determines
# each builtin's numeric index, so do not reorder entries.
_NORMAL_BUILTINS = [
    'read', 'echo', 'printf', 'mapfile', 'readarray',

    'cd', 'pushd', 'popd', 'dirs', 'pwd',

    'source',  # note that . alias is special

    'umask', 'wait', 'jobs', 'fg', 'bg',

    'shopt',
    'complete', 'compgen', 'compopt', 'compadjust',

    'getopts',

    # introspection
    'command', 'type', 'hash', 'help', 'history',

    'alias', 'unalias',
    'bind',

    # Oil only
    'append',
    'shvar',
    # push-registers added below

    'write', 'json', 'pp',
    'module', 'use',
    'argparse',  # not implemented
    'fopen',

    'fork', 'forkwait',
    'runproc',
]
class _Builtin(object):
def __init__(self, index, name, enum_name=None, kind='normal'):
# type: (int, str, Optional[str], str) -> None
"""
kind: normal, special, assign
"""
self.index = index
self.name = name # e.g. : or [
self.enum_name = enum_name or name # e.g. builtin_num::colon
self.kind = kind
class _BuiltinDef(object):
    """Ordered registry of builtins, assigning 1-based indices.

    NOTE: This isn't used anywhere! We're registering nothing.

    We want to complete the flags to builtins. So this is a mapping from name
    to arg spec. There might not be any flags.
    """

    def __init__(self):
        # type: () -> None
        self.builtins = []  # type: List[_Builtin]
        self.index = 1  # indices are 1-based; 0 is reserved

    def Add(self, *posargs, **kwargs):
        # type: (Any, Any) -> None
        """Register one builtin and advance the running index."""
        # NOTE: *posargs works around flake8/pyflakes bug!
        self.builtins.append(_Builtin(self.index, *posargs, **kwargs))
        self.index += 1
def _Init(b):
    # type: (_BuiltinDef) -> None
    """Register every builtin. Registration order fixes numeric indices."""
    #
    # Special builtins
    #

    b.Add(':', enum_name='colon', kind='special')
    b.Add('.', enum_name='dot', kind='special')
    # Python keyword
    b.Add('exec', enum_name='exec_', kind='special')
    for name in [
            'eval', 'set', 'shift', 'times', 'trap', 'unset', 'builtin']:
        b.Add(name, kind='special')

    #
    # Assignment builtins.
    # Note: control flow aren't builtins in OSH: break continue return
    #

    for name in ["readonly", "local", "declare", "typeset"]:
        b.Add(name, kind='assign')
    b.Add('export', enum_name='export_', kind='assign')

    # Normal builtins

    # Slight variants
    b.Add('test')
    b.Add('[', enum_name='bracket')

    # enum_name suffixes avoid clashing with Python keywords/constants.
    b.Add('true', enum_name='true_')
    b.Add('false', enum_name='false_')
    b.Add('try', enum_name='try_')

    for name in _NORMAL_BUILTINS:
        b.Add(name)

    # '-' is not valid in an identifier, hence the underscore enum name.
    b.Add('push-registers', enum_name='push_registers')

    # Implementation detail of $(<file)
    b.Add('__cat', enum_name='cat')
# Module-level singleton: the registry is built once at import time.
_BUILTIN_DEF = _BuiltinDef()

_Init(_BUILTIN_DEF)

# Exposed in consts.py for completion
BUILTIN_NAMES = [b.name for b in _BUILTIN_DEF.builtins]
def All():
    # type: () -> List[_Builtin]
    """Return every registered builtin, in registration order."""
    return _BUILTIN_DEF.builtins
def BuiltinDict():
    # type: () -> Dict[str, _Builtin]
    """For the slow path in frontend/match.py: map builtin name -> record."""
    return {b.name: b for b in _BUILTIN_DEF.builtins}
|
// Expand the side navigation to fill the viewport and hide the opener button.
function openNav() {
  var sidenav = document.getElementById('sidenav');
  var menuButton = document.getElementById('menu-button');
  sidenav.style.width = '100%';
  menuButton.style.display = 'none';
}
// Collapse the side navigation and restore the opener button.
function closeNav() {
  var sidenav = document.getElementById('sidenav');
  var menuButton = document.getElementById('menu-button');
  sidenav.style.width = '0';
  menuButton.style.display = 'block';
}
|
import Vue from 'vue'
import Router from 'vue-router'
Vue.use(Router)
/* Layout */
import Layout from '@/layout'
/* Router Modules */
import chartsRouter from './modules/charts'
import tableRouter from './modules/table'
/**
 * Note: sub-menus only appear when route children.length >= 1
 * Detail see: https://panjiachen.github.io/vue-element-admin-site/guide/essentials/router-and-nav.html
 *
 * hidden: true                   if set true, the item will not show in the sidebar (default is false)
 * alwaysShow: true               if set true, the root menu will always be shown;
 *                                if not set, when the item has more than one child route
 *                                it becomes nested mode, otherwise the root menu is not shown
 * redirect: noRedirect           if set to noRedirect, the route will not redirect in the breadcrumb
 * name: 'router-name'            must be set (used by <keep-alive>)
 * meta: {
    roles: ['admin', 'editor']    controls the page roles (you can set multiple roles)
    title: 'title'                the name shown in the sidebar and breadcrumb (recommended)
    icon: 'svg-name'/'el-icon-x'  the icon shown in the sidebar
    noCache: true                 if set true, the page will not be cached (default is false)
    affix: true                   if set true, the tag will be affixed in the tags-view
    breadcrumb: false             if set false, the item will be hidden in the breadcrumb (default is true)
    activeMenu: '/example/list'   if set, the sidebar will highlight the path you set
  }
 */
/**
 * constantRoutes
 * Base routes that require no permissions:
 * accessible to all roles.
 */
export const constantRoutes = [
  // Generic redirect helper: /redirect/<path> re-navigates to <path>.
  {
    path: '/redirect',
    component: Layout,
    hidden: true,
    children: [
      {
        path: '/redirect/:path(.*)',
        component: () => import('@/views/redirect/index')
      }
    ]
  },
  // Login page (outside the main layout).
  {
    path: '/login',
    component: () => import('@/views/login/index'),
    hidden: true
  },
  // Landing page for third-party auth redirects.
  {
    path: '/auth-redirect',
    component: () => import('@/views/login/auth-redirect'),
    hidden: true
  },
  // Error pages.
  {
    path: '/404',
    component: () => import('@/views/error-page/404'),
    hidden: true
  },
  {
    path: '/401',
    component: () => import('@/views/error-page/401'),
    hidden: true
  },
  // Root: redirects to the dashboard, which is pinned in the tags-view.
  {
    path: '/',
    component: Layout,
    redirect: '/dashboard',
    children: [
      {
        path: 'dashboard',
        component: () => import('@/views/dashboard/index'),
        name: 'Dashboard',
        meta: { title: '首页', icon: 'dashboard', affix: true }
      }
    ]
  },
  // User profile page (reachable from the navbar, hidden from the sidebar).
  {
    path: '/profile',
    component: Layout,
    redirect: '/profile/index',
    hidden: true,
    children: [
      {
        path: 'index',
        component: () => import('@/views/profile/index'),
        name: 'Profile',
        meta: { title: 'Profile', icon: 'user', noCache: true }
      }
    ]
  }
]
/**
 * asyncRoutes
 * Routes that are loaded dynamically based on user roles.
 */
export const asyncRoutes = [
  // Permission demo pages; visibility is driven by meta.roles.
  {
    path: '/permission',
    component: Layout,
    hidden: true,
    redirect: '/permission/page',
    alwaysShow: true, // will always show the root menu
    name: 'Permission',
    meta: {
      title: 'Permission',
      icon: 'lock',
      roles: ['admin', 'editor'] // you can set roles in root nav
    },
    children: [
      {
        path: 'page',
        component: () => import('@/views/permission/page'),
        name: 'PagePermission',
        meta: {
          title: 'Page Permission',
          roles: ['admin'] // or you can only set roles in sub nav
        }
      },
      {
        path: 'directive',
        component: () => import('@/views/permission/directive'),
        name: 'DirectivePermission',
        meta: {
          title: 'Directive Permission'
          // if do not set roles, means: this page does not require permission
        }
      },
      {
        path: 'role',
        component: () => import('@/views/permission/role'),
        name: 'RolePermission',
        meta: {
          title: 'Role Permission',
          roles: ['admin']
        }
      }
    ]
  },
  // Icon gallery (hidden from the sidebar).
  {
    path: '/icon',
    component: Layout,
    hidden: true,
    children: [
      {
        path: 'index',
        component: () => import('@/views/icons/index'),
        name: 'Icons',
        meta: { title: 'Icons', icon: 'icon', noCache: true }
      }
    ]
  },
  /** When the route map grows too long, it can be split into small modules. **/
  chartsRouter,
  // hidden
  tableRouter,
  // hidden
  {
    path: '/tab',
    hidden: true,
    component: Layout,
    children: [
      {
        path: 'index',
        component: () => import('@/views/tab/index'),
        name: 'Tab',
        meta: { title: 'Tab', icon: 'tab' }
      }
    ]
  },
  // hidden
  {
    path: '/error',
    component: Layout,
    redirect: 'noRedirect',
    name: 'ErrorPages',
    hidden: true,
    meta: {
      title: 'Error Pages',
      icon: '404'
    },
    children: [
      {
        path: '401',
        component: () => import('@/views/error-page/401'),
        name: 'Page401',
        meta: { title: '401', noCache: true }
      },
      {
        path: '404',
        component: () => import('@/views/error-page/404'),
        name: 'Page404',
        meta: { title: '404', noCache: true }
      }
    ]
  },
  // hidden
  {
    path: '/excel',
    component: Layout,
    redirect: '/excel/export-excel',
    name: 'Excel',
    hidden: true,
    meta: {
      title: 'Excel',
      icon: 'excel'
    },
    children: [
      {
        path: 'export-excel',
        component: () => import('@/views/excel/export-excel'),
        name: 'ExportExcel',
        meta: { title: 'Export Excel' }
      },
      {
        path: 'export-selected-excel',
        component: () => import('@/views/excel/select-excel'),
        name: 'SelectExcel',
        meta: { title: 'Export Selected' }
      },
      {
        path: 'export-merge-header',
        component: () => import('@/views/excel/merge-header'),
        name: 'MergeHeader',
        meta: { title: 'Merge Header' }
      },
      {
        path: 'upload-excel',
        component: () => import('@/views/excel/upload-excel'),
        name: 'UploadExcel',
        meta: { title: 'Upload Excel' }
      }
    ]
  },
  // Data management: imports, batches, cases.
  {
    path: '/dataManagement',
    component: Layout,
    redirect: '/dataManagement/data',
    alwaysShow: true,
    name: 'DataManagement',
    meta: { title: '数据管理', icon: 'chart' },
    children: [
      {
        path: 'data',
        component: () => import('@/views/dataManagement/data'),
        name: 'Data',
        meta: { title: '导入管理' }
      },
      {
        path: 'batch',
        component: () => import('@/views/dataManagement/batch'),
        name: 'Batch',
        meta: { title: '批次管理' }
      },
      {
        path: 'case',
        component: () => import('@/views/dataManagement/case'),
        name: 'Case',
        meta: { title: '案件管理' }
      }
    ]
  },
  // Telephone-collection management.
  {
    path: '/electricityManagement',
    component: Layout,
    redirect: '/electricityManagement/case',
    alwaysShow: true,
    name: 'ElectricityManagement',
    meta: { title: '电催管理', icon: 'international' },
    children: [
      {
        path: 'case',
        component: () => import('@/views/theCase/index'),
        // NOTE(review): route name 'Case' duplicates the '/dataManagement/case'
        // route above; Vue Router route names must be unique — navigating by
        // name will resolve to only one of them. Confirm intended target
        // before renaming either route.
        name: 'Case',
        meta: { title: '我的案件' }
      }
    ]
  },
  // System settings: accounts, logs, permission groups, positions, departments.
  {
    path: '/systemSettings',
    component: Layout,
    redirect: '/systemSettings/accountSettings',
    alwaysShow: true,
    name: 'SystemSettings',
    meta: { title: '系统设置', icon: 'tree' },
    children: [
      {
        path: 'accountSettings',
        component: () => import('@/views/systemSettings/accountSettings'),
        name: 'AccountSettings',
        meta: { title: '账号设置' }
      },
      {
        path: 'accountLog',
        component: () => import('@/views/systemSettings/accountLog'),
        name: 'AccountLog',
        meta: { title: '账号日志' }
      },
      {
        path: 'permissionGroupSetting',
        component: () => import('@/views/systemSettings/permissionGroupSetting'),
        name: 'PermissionGroupSetting',
        meta: { title: '权限组设置' }
      },
      {
        path: 'positionSetting',
        component: () => import('@/views/systemSettings/positionSetting'),
        name: 'PositionSetting',
        meta: { title: '职位设置' }
      },
      {
        path: 'departmentSetUp',
        component: () => import('@/views/systemSettings/departmentSetUp'),
        name: 'DepartmentSetUp',
        meta: { title: '部门设置' }
      }
    ]
  },
  // 404 page must be placed at the end !!!
  { path: '*', redirect: '/404', hidden: true }
]
/**
 * Build a fresh VueRouter instance preloaded with the constant routes.
 * Role-specific routes are added to it dynamically elsewhere.
 */
function createRouter() {
  return new Router({
    // mode: 'history', // requires server-side support
    scrollBehavior: () => ({ y: 0 }),
    routes: constantRoutes
  })
}

const router = createRouter()

/**
 * Drop every dynamically added route by swapping in a pristine matcher.
 * Detail see: https://github.com/vuejs/vue-router/issues/1234#issuecomment-357941465
 */
export function resetRouter() {
  router.matcher = createRouter().matcher // reset router
}

export default router
|
import React, {Component} from 'react'
import {AppRegistry, View, Text, Animated} from 'react-native'
class FadeInView extends Component {
constructor(props) {
super(props)
this.state = {
fadeAnim: new Animated.Value(0)
};
}
componentDidMount(){
Animated.timing(
this.state.fadeAnim,
{
toValue:1,
}
).start();
}
render(){
return(
<Animated.View style={{
...this.props.style,
opacity: this.state.fadeAnim
}}>
{this.props.children}
</Animated.View>
)
}
}
class Tutorial extends Component {
render(){
return(
<FadeInView style={{
width:250,
height:50,
backgroundColor:'powderblue',
justifyContent: 'center',
alignItems: 'center',
}}>
<Text>Fade In View</Text>
</FadeInView>
)
};
}
// Register the root component under the key the native host app looks up.
AppRegistry.registerComponent('Tutorial', ()=>Tutorial);
|
import React from 'react'
import { Preloader, Placeholder } from 'react-preloading-screen'
import Header from '../components/hashcraft/Header'
import Footer from '../components/hashcraft/Footer'
import GoTop from '../components/hashcraft/GoTop'
import PageTitle from '../components/hashcraft/PageTitle'
import ProjectContent from '../components/hashcraft/solutions/ProjectContent'
import CtaArea from "../components/hashcraft/AgencyCtaArea";
class Project extends React.Component {
render() {
return (
<Preloader>
<Placeholder>
<div className="preloader">
<div className="spinner"></div>
</div>
</Placeholder>
<Header />
<PageTitle title='Our Solutions'/>
<ProjectContent />
<CtaArea />
<Footer />
<GoTop scrollStepInPx="50" delayInMs="16.66" />
</Preloader>
)
}
}
export default Project
|
import { startOfDay, endOfDay, parseISO } from 'date-fns';
import { Op } from 'sequelize';
import Appointment from '../models/Appointment';
import User from '../models/User';
class ScheduleController {
  /**
   * GET handler: list the authenticated provider's non-canceled
   * appointments for the day given by the `date` query parameter
   * (ISO-8601 string).
   *
   * Responses: 200 with the appointment list, 401 if the caller is not
   * a provider, 400 if `date` is missing or not a parsable ISO date.
   */
  async index(req, res) {
    const userIsProvider = await User.findOne({
      where: {
        id: req.userId,
        provider: true,
      },
    });
    if (!userIsProvider) {
      return res.status(401).json({ error: 'User is not a provider.' });
    }
    const { date } = req.query;
    // Fix: previously a missing/garbage `date` flowed straight into
    // parseISO -> startOfDay/endOfDay as an Invalid Date, producing a
    // broken Sequelize BETWEEN query instead of a client error.
    if (!date) {
      return res.status(400).json({ error: 'Date parameter is required.' });
    }
    const parsedDate = parseISO(date);
    if (Number.isNaN(parsedDate.getTime())) {
      return res.status(400).json({ error: 'Invalid date.' });
    }
    const appointments = await Appointment.findAll({
      where: {
        provider_id: req.userId,
        canceled_at: null,
        date: { [Op.between]: [startOfDay(parsedDate), endOfDay(parsedDate)] },
      },
    });
    return res.json(appointments);
  }
}
export default new ScheduleController();
|
import { observable, action } from 'mobx';
import pinyin from '../han';
import contacts from './contacts';
import wfc from '../wfc/wfc'
import UserInfo from '../wfc/model/userInfo';
import GroupInfo from '../wfc/model/groupInfo';
import Conversation from '../wfc/model/conversation';
import ConversationType from '../wfc/model/conversationType';
import Message from '../wfc/messages/message'
class Forward {
@observable show = false;
@observable message = {};
@observable list = [];
@observable query = '';
@action async toggle(show = self.show, message = {}) {
self.show = show;
if (show) {
self.message = message;
}
if (show === false) {
self.query = '';
self.list.replace([]);
}
}
@action search(text = '') {
var list;
self.query = text;
if (text) {
list = contacts.memberList.filter(e => {
let displayName = contacts.contactItemName(e);
if (e.uid === wfc.getUserId()) {
return false;
}
return pinyin.letter(displayName, '', null).toLowerCase().indexOf(pinyin.letter(text.toLocaleLowerCase(), '', null)) > -1;
});
self.list.replace(list);
return;
}
self.list.replace([]);
}
@action async send(userid) {
var contact = await contacts.getUser(userid);
let msg = new Message();
msg.messageContent = self.message.messageContent;
if (contact instanceof UserInfo) {
msg.conversation = new Conversation(ConversationType.Single, userid, 0);
} else if (contact instanceof GroupInfo) {
msg.conversation = new Conversation(ConversationType.Group, userid, 0);
}
wfc.sendMessage(msg)
}
}
// Module-level singleton; the class methods above close over this binding.
const self = new Forward();
export default self;
|
/*
* sophia database
* sphia.org
*
* Copyright (c) Dmitry Simonenko
* BSD License
*/
/*
* Copyright (c) 2008-2010 Massachusetts Institute of Technology
* Copyright (c) 2004-2006 Intel Corporation
*
* This software program is licensed subject to the BSD License,
* available at http://www.opensource.org/licenses/bsd-license.html
*/
#include <libss.h>
/*
 * Precomputed lookup tables for the "slicing-by-8" CRC32C algorithm
 * (Castagnoli polynomial; table o32 matches the standard reflected
 * CRC-32C byte table, e.g. table[1] == 0xF26B8303).  Table oNN handles
 * the input byte processed (NN-32)/8 positions ahead of the current
 * 32-bit CRC word.  Generated data — do not edit by hand.
 */
/* Slice table for byte 0 (the classic one-byte-at-a-time CRC-32C table). */
static const uint32_t crc_tableil8_o32[256] =
{
	0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
	0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
	0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
	0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
	0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
	0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
	0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
	0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
	0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
	0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
	0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
	0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
	0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
	0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
	0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
	0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
	0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
	0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
	0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
	0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
	0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
	0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
	0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
	0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
	0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
	0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
	0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
	0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
	0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
	0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
	0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
	0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351
};
/* Slice table for byte 1. */
static const uint32_t crc_tableil8_o40[256] =
{
	0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945,
	0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD,
	0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4,
	0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C,
	0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47,
	0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF,
	0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6,
	0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E,
	0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41,
	0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9,
	0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C, 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0,
	0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4, 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78,
	0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43,
	0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB,
	0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2,
	0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A,
	0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC,
	0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004,
	0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D,
	0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185,
	0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE,
	0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306,
	0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F,
	0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287,
	0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8,
	0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600,
	0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439,
	0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781,
	0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA,
	0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502,
	0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B,
	0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483
};
/* Slice table for byte 2. */
static const uint32_t crc_tableil8_o48[256] =
{
	0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469,
	0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC,
	0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3,
	0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726,
	0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D,
	0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8,
	0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED, 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7,
	0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32,
	0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0,
	0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75,
	0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A,
	0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF,
	0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4,
	0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161,
	0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E,
	0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB,
	0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A,
	0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF,
	0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0,
	0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065,
	0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E,
	0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB,
	0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4,
	0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71,
	0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3,
	0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36,
	0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79,
	0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC,
	0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7,
	0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622,
	0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D,
	0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8
};
/* Slice table for byte 3. */
static const uint32_t crc_tableil8_o56[256] =
{
	0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA,
	0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C,
	0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7,
	0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11,
	0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41,
	0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7,
	0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C,
	0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A,
	0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D,
	0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, 0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB,
	0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610,
	0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6,
	0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6,
	0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040,
	0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B,
	0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D,
	0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5,
	0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213,
	0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8,
	0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E,
	0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E,
	0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698,
	0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443,
	0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5,
	0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12,
	0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07, 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4,
	0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F,
	0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9,
	0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99,
	0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F,
	0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4,
	0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842
};
/* Slice table for byte 4. */
static const uint32_t crc_tableil8_o64[256] =
{
	0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44,
	0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5,
	0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97,
	0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406,
	0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13,
	0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082,
	0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0,
	0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151,
	0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA,
	0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B,
	0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539,
	0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8,
	0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD,
	0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C,
	0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E,
	0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF,
	0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18,
	0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089,
	0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB,
	0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A,
	0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F,
	0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE,
	0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C,
	0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D,
	0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6,
	0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27,
	0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065,
	0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4,
	0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1,
	0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70,
	0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532,
	0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3
};
/* Slice table for byte 5. */
static const uint32_t crc_tableil8_o72[256] =
{
	0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD,
	0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2,
	0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93,
	0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C,
	0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20,
	0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F,
	0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E,
	0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201,
	0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746,
	0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59,
	0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778,
	0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67,
	0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB,
	0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4,
	0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5,
	0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA,
	0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C, 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B,
	0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364,
	0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45,
	0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A,
	0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81, 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6,
	0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9,
	0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8,
	0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7,
	0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090,
	0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F,
	0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE,
	0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1,
	0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D,
	0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02,
	0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623,
	0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C
};
/* Slice table for byte 6. */
static const uint32_t crc_tableil8_o80[256] =
{
	0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089,
	0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA,
	0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F,
	0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C,
	0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334,
	0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67,
	0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992,
	0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1,
	0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3,
	0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0,
	0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55,
	0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006,
	0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E,
	0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D,
	0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8,
	0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB,
	0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC, 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D,
	0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E,
	0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB,
	0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, 0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988,
	0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0,
	0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093,
	0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766,
	0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35,
	0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907,
	0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454,
	0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1,
	0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2,
	0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA,
	0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9,
	0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C,
	0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F
};
/*
 * Slicing-by-8 CRC32C lookup table "o88": per-byte CRC contribution for a
 * byte that sits 88 bits ahead of the end of the 8-byte chunk being folded.
 * Consumed by ss_crc32c_sw() together with the companion o32..o80 tables.
 * 256 precomputed 32-bit entries; do not edit by hand.
 */
static const uint32_t crc_tableil8_o88[256] =
{
    0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504,
    0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE,
    0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0,
    0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A,
    0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D,
    0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447,
    0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929,
    0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3,
    0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36,
    0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC,
    0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782,
    0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358,
    0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF,
    0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75,
    0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B,
    0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1,
    0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360,
    0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA,
    0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4,
    0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E,
    0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9,
    0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223,
    0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D,
    0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA, 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97,
    0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852,
    0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5, 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88,
    0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6,
    0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C,
    0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB,
    0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911,
    0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F,
    0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5
};
/*
 * Software CRC32C over buf[0..len) using the slicing-by-8 algorithm.
 *
 * crc: running CRC value to continue from (caller supplies the initial/
 *      previous value); buf/len: data to fold in. Returns the updated CRC.
 *
 * Structure: (1) consume leading bytes one at a time until the pointer is
 * 4-byte aligned, (2) fold 8 bytes per iteration via two 32-bit loads and
 * the eight crc_tableil8_oNN lookup tables, (3) finish the trailing bytes
 * byte-wise.
 */
static uint32_t
ss_crc32c_sw(uint32_t crc, const void *buf, int len)
{
    const char *p_buf = (const char*)buf;
    /* Bytes needed to reach 4-byte alignment (0..3), capped at len. */
    int initial_bytes = (sizeof(int32_t) - (intptr_t)p_buf) & (sizeof(int32_t) - 1);
    if (len < initial_bytes)
        initial_bytes = len;
    int li;
    /* Byte-at-a-time CRC until aligned. */
    for (li = 0; li < initial_bytes; li++)
        crc = crc_tableil8_o32[(crc ^ *p_buf++) & 0x000000FF] ^ (crc >> 8);
    len -= initial_bytes;
    /* running_len: largest multiple of 8 bytes; end_bytes: remainder. */
    int running_len = len & ~(sizeof(uint64_t) - 1);
    int end_bytes = len - running_len;
    for (li = 0; li < running_len / 8; li++) {
        /* First 4 bytes: XOR into crc, then split into table lookups. */
        crc ^= *(uint32_t*)p_buf;
        p_buf += 4;
        uint32_t term1 = crc_tableil8_o88[(crc) & 0x000000FF] ^
            crc_tableil8_o80[(crc >> 8) & 0x000000FF];
        uint32_t term2 = crc >> 16;
        crc = term1 ^
            crc_tableil8_o72[term2 & 0x000000FF] ^
            crc_tableil8_o64[(term2 >> 8) & 0x000000FF];
        /* Second 4 bytes looked up directly (not XORed into crc first). */
        term1 = crc_tableil8_o56[(*(uint32_t*)p_buf) & 0x000000FF] ^
            crc_tableil8_o48[((*(uint32_t*)p_buf) >> 8) & 0x000000FF];
        term2 = (*(uint32_t*)p_buf) >> 16;
        crc = crc ^ term1 ^
            crc_tableil8_o40[term2 & 0x000000FF] ^
            crc_tableil8_o32[(term2 >> 8) & 0x000000FF];
        p_buf += 4;
    }
    /* Trailing 0..7 bytes, byte-at-a-time. */
    for (li = 0; li < end_bytes; li++)
        crc = crc_tableil8_o32[(crc ^ *p_buf++) & 0x000000FF] ^ (crc >> 8);
    return crc;
}
#if defined (__x86_64__) || defined (__i386__)
/*
 * Using the hardware-provided CRC32 instruction to accelerate CRC32 computation.
*
* CRC32 is a new instruction in Intel SSE4.2, the reference can be found at:
* http://www.intel.com/products/processor/manuals/
* Intel(R) 64 and IA-32 Architectures Software Developer's Manual
* Volume 2A: Instruction Set Reference, A-M
*/
#if defined (__x86_64__)
#define REX_PRE "0x48, "
#elif defined (__i386__)
#define REX_PRE
#endif
/*
 * Hardware CRC32C over `length` single bytes using the SSE4.2 CRC32
 * instruction, emitted as raw opcode bytes (0xF2 0x0F 0x38 0xF0 /r is
 * CRC32 r32, r/m8) so no assembler SSE4.2 support is required.
 * Used by ss_crc32c_hw() for the tail that doesn't fill a full word.
 */
static uint32_t
ss_crc32c_hw_byte(uint32_t crc, unsigned char const *data, unsigned int length)
{
    while (length--) {
        /* crc lives in ESI ("=S"); the input byte in CL ("c"). */
        __asm__ __volatile__(
            ".byte 0xf2, 0xf, 0x38, 0xf0, 0xf1"
            :"=S"(crc)
            :"0"(crc), "c"(*data)
        );
        data++;
    }
    return crc;
}
/*
 * Hardware CRC32C over buf[0..len): process whole machine words with the
 * SSE4.2 CRC32 instruction (REX_PRE adds the 0x48 REX.W prefix on x86-64
 * so 8 bytes are consumed per instruction; 4 on i386), then hand the
 * remaining bytes to ss_crc32c_hw_byte().
 */
static uint32_t
ss_crc32c_hw(uint32_t crc, const void *buf, int len)
{
    unsigned int iquotient = len / sizeof(unsigned long);
    unsigned int iremainder = len % sizeof(unsigned long);
    unsigned long *ptmp = (unsigned long *)buf;
    while (iquotient--) {
        /* 0xF2 [REX.W] 0x0F 0x38 0xF1 /r == CRC32 r, r/m(32|64). */
        __asm__ __volatile__(
            ".byte 0xf2, " REX_PRE "0xf, 0x38, 0xf1, 0xf1;"
            :"=S"(crc)
            :"0"(crc), "c"(*ptmp)
        );
        ptmp++;
    }
    if (iremainder) {
        crc = ss_crc32c_hw_byte(crc, (unsigned char const*)ptmp, iremainder);
    }
    return crc;
}
#undef REX_PRE
/*
 * Return non-zero iff the CPU advertises SSE4.2 (which provides the
 * CRC32 instruction). CPUID leaf 1 reports SSE4.2 in ECX bit 20.
 */
static int
ss_crc32c_hw_enabled(void)
{
    unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

    /* __get_cpuid() returns 0 when the requested leaf is unsupported. */
    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        return 0;
    return (ecx >> 20) & 1;
}
#endif
/*
 * Select the best available CRC32C implementation at runtime: the
 * hardware (SSE4.2) routine when the CPU supports it on x86, otherwise
 * the portable slicing-by-8 software routine.
 */
sscrcf ss_crc32c_function(void)
{
#if defined (__x86_64__) || defined (__i386__)
    return ss_crc32c_hw_enabled() ? ss_crc32c_hw : ss_crc32c_sw;
#else
    return ss_crc32c_sw;
#endif
}
|
module.exports = {
parser: '@typescript-eslint/parser',
extends: [
'typescript',
'typescript/react',
'typescript/prettier',
'typescript/prettier-react',
'plugin:react/recommended',
'plugin:@typescript-eslint/recommended',
'prettier/@typescript-eslint',
'plugin:prettier/recommended',
],
parserOptions: {
exmaVersion: 2018,
sourceType: 'module',
ecmaFeatures: {
jsx: true,
},
},
rules: {
"@typescript-eslint/explicit-function-return-type": "off"
},
settings: {
react: {
version: 'detect',
},
},
env: {
browser: true,
node: true
}
};
|
import asyncio
import re
from unittest import mock
from kafka.protocol.group import (
JoinGroupRequest_v0 as JoinGroupRequest,
SyncGroupResponse_v0 as SyncGroupResponse,
LeaveGroupRequest_v0 as LeaveGroupRequest,
HeartbeatRequest_v0 as HeartbeatRequest,
)
from kafka.protocol.commit import (
OffsetCommitRequest, OffsetCommitResponse_v2,
OffsetFetchRequest_v1 as OffsetFetchRequest
)
import kafka.errors as Errors
from ._testutil import KafkaIntegrationTestCase, run_until_complete
from aiokafka import ConsumerRebalanceListener
from aiokafka.client import AIOKafkaClient
from aiokafka.structs import OffsetAndMetadata, TopicPartition
from aiokafka.consumer.group_coordinator import (
GroupCoordinator, CoordinatorGroupRebalance, NoGroupCoordinator)
from aiokafka.consumer.subscription_state import SubscriptionState
from aiokafka.util import create_future, create_task, get_running_loop
UNKNOWN_MEMBER_ID = JoinGroupRequest.UNKNOWN_MEMBER_ID
class RebalanceListenerForTest(ConsumerRebalanceListener):
    """Rebalance listener that records every callback and then raises.

    The coordinator is expected to swallow listener exceptions, so each
    hook appends its argument to a list and deliberately raises to prove
    the coordinator ignores the error.
    """

    def __init__(self):
        self.revoked = []
        self.assigned = []

    def on_partitions_revoked(self, revoked):
        # Record the revoked set, then raise on purpose.
        self.revoked.append(revoked)
        raise Exception("coordinator should ignore this exception")

    def on_partitions_assigned(self, assigned):
        # Record the assigned set, then raise on purpose.
        self.assigned.append(assigned)
        raise Exception("coordinator should ignore this exception")
class TestKafkaCoordinatorIntegration(KafkaIntegrationTestCase):
@run_until_complete
async def test_coordinator_workflow(self):
    """Full rebalance lifecycle: a single member gets all partitions, a
    second member joining splits them 2/2, and the first member leaving
    hands everything back to the second."""
    # Check if 2 coordinators will coordinate rebalances correctly
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    await client.bootstrap()
    await self.wait_topic(client, 'topic1')
    await self.wait_topic(client, 'topic2')
    # Check if the initial group join is performed correctly with minimal
    # setup
    subscription = SubscriptionState()
    subscription.subscribe(topics={'topic1', 'topic2'})
    coordinator = GroupCoordinator(
        client, subscription,
        session_timeout_ms=10000,
        heartbeat_interval_ms=500,
        retry_backoff_ms=100)
    self.assertEqual(coordinator.coordinator_id, None)
    self.assertTrue(coordinator.need_rejoin(subscription.subscription))
    await coordinator.ensure_coordinator_known()
    self.assertNotEqual(coordinator.coordinator_id, None)
    if subscription.subscription.assignment is None:
        await subscription.wait_for_assignment()
    self.assertNotEqual(coordinator.coordinator_id, None)
    self.assertFalse(coordinator.need_rejoin(subscription.subscription))
    # Sole member: owns both partitions of both topics.
    tp_list = subscription.assigned_partitions()
    self.assertEqual(
        tp_list,
        {
            ('topic1', 0),
            ('topic1', 1),
            ('topic2', 0),
            ('topic2', 1)
        }
    )
    # Check if adding an additional coordinator will rebalance correctly
    client2 = AIOKafkaClient(bootstrap_servers=self.hosts)
    await client2.bootstrap()
    subscription2 = SubscriptionState()
    subscription2.subscribe(topics={'topic1', 'topic2'})
    coordinator2 = GroupCoordinator(
        client2, subscription2,
        session_timeout_ms=10000,
        heartbeat_interval_ms=500,
        retry_backoff_ms=100)
    await asyncio.gather(
        subscription.wait_for_assignment(),
        subscription2.wait_for_assignment()
    )
    # Each member gets 2 partitions; together they cover all 4.
    tp_list = subscription.assigned_partitions()
    self.assertEqual(len(tp_list), 2)
    tp_list2 = subscription2.assigned_partitions()
    self.assertEqual(len(tp_list2), 2)
    tp_list |= tp_list2
    self.assertEqual(
        tp_list,
        {
            ('topic1', 0),
            ('topic1', 1),
            ('topic2', 0),
            ('topic2', 1)
        }
    )
    # Check if closing the first coordinator will rebalance the second
    await coordinator.close()
    await client.close()
    await subscription2.wait_for_assignment()
    tp_list = subscription2.assigned_partitions()
    self.assertEqual(
        tp_list,
        {
            ('topic1', 0),
            ('topic1', 1),
            ('topic2', 0),
            ('topic2', 1)
        }
    )
    await coordinator2.close()
    await client2.close()
@run_until_complete
async def test_failed_group_join(self):
    """Drive perform_group_join() through every JoinGroup error code the
    coordinator must handle: success, retriable waits, fatal errors,
    member-id reset, coordinator reset, failed sync, and a subscription
    change mid-rebalance. The mocked `send` reads `error_type` from the
    enclosing scope, so each scenario just reassigns it."""
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    await client.bootstrap()
    await self.wait_topic(client, 'topic1')
    self.add_cleanup(client.close)
    subscription = SubscriptionState()
    subscription.subscribe(topics={'topic1'})
    coordinator = GroupCoordinator(
        client, subscription,
        retry_backoff_ms=10)
    # Stop the background coordination loop so the test controls joins.
    coordinator._coordination_task.cancel()  # disable for test
    try:
        await coordinator._coordination_task
    except asyncio.CancelledError:
        pass
    coordinator._coordination_task = create_task(
        asyncio.sleep(0.1)
    )
    coordinator.coordinator_id = 15
    self.add_cleanup(coordinator.close)

    async def _on_join_leader(resp):
        return b"123"
    _on_join_leader_mock = mock.Mock()
    _on_join_leader_mock.side_effect = _on_join_leader

    async def do_rebalance():
        # Fresh rebalance object per attempt, mirroring coordinator use.
        rebalance = CoordinatorGroupRebalance(
            coordinator, coordinator.group_id, coordinator.coordinator_id,
            subscription.subscription, coordinator._assignors,
            coordinator._session_timeout_ms,
            coordinator._retry_backoff_ms)
        rebalance._on_join_leader = _on_join_leader_mock
        return (await rebalance.perform_group_join())
    mocked = mock.MagicMock()
    coordinator._client = mocked
    coordinator._client.api_version = (0, 10, 1)
    error_type = Errors.NoError

    async def send(*agrs, **kw):
        # Fabricate a JoinGroup response carrying the current error_type.
        resp = JoinGroupRequest.RESPONSE_TYPE(
            error_code=error_type.errno,
            generation_id=-1,  # generation_id
            group_protocol="roundrobin",
            leader_id="111",  # leader_id
            member_id="111",  # member_id
            members=[]
        )
        return resp
    mocked.send.side_effect = send
    subsc = subscription.subscription
    # Success case, joined successfully
    resp = await do_rebalance()
    self.assertEqual(resp, ("roundrobin", b"123"))
    self.assertEqual(_on_join_leader_mock.call_count, 1)
    # no exception expected, just wait
    error_type = Errors.GroupLoadInProgressError
    resp = await do_rebalance()
    self.assertIsNone(resp)
    self.assertEqual(coordinator.need_rejoin(subsc), True)
    error_type = Errors.InvalidGroupIdError
    with self.assertRaises(Errors.InvalidGroupIdError):
        await do_rebalance()
    self.assertEqual(coordinator.need_rejoin(subsc), True)
    # no exception expected, member_id should be reset
    coordinator.member_id = 'some_invalid_member_id'
    error_type = Errors.UnknownMemberIdError
    resp = await do_rebalance()
    self.assertIsNone(resp)
    self.assertEqual(coordinator.need_rejoin(subsc), True)
    self.assertEqual(
        coordinator.member_id, JoinGroupRequest.UNKNOWN_MEMBER_ID)
    error_type = Errors.UnknownError()
    with self.assertRaises(Errors.KafkaError):  # Masked as unknown error
        await do_rebalance()
    # no exception expected, coordinator_id should be reset
    error_type = Errors.GroupCoordinatorNotAvailableError
    resp = await do_rebalance()
    self.assertIsNone(resp)
    self.assertEqual(coordinator.need_rejoin(subsc), True)
    self.assertEqual(coordinator.coordinator_id, None)
    coordinator.coordinator_id = 15
    coordinator._coordinator_dead_fut = create_future()

    async def _on_join_leader(resp):
        return None
    # Sync group fails case
    error_type = Errors.NoError
    _on_join_leader_mock.side_effect = _on_join_leader
    resp = await do_rebalance()
    self.assertEqual(coordinator.coordinator_id, 15)
    self.assertIsNone(resp)
    self.assertEqual(_on_join_leader_mock.call_count, 2)
    # Subscription changes before rebalance finishes
    async def send_change_sub(*args, **kw):
        subscription.subscribe(topics={'topic2'})
        return (await send(*args, **kw))
    mocked.send.side_effect = send_change_sub
    resp = await do_rebalance()
    self.assertEqual(resp, None)
    self.assertEqual(_on_join_leader_mock.call_count, 2)
    # `_send_req` itself raises an error
    mocked.send.side_effect = Errors.GroupCoordinatorNotAvailableError()
    resp = await do_rebalance()
    self.assertIsNone(resp)
    self.assertEqual(coordinator.need_rejoin(subsc), True)
    self.assertEqual(coordinator.coordinator_id, None)
@run_until_complete
async def test_failed_sync_group(self):
    """Drive _on_join_follower() (SyncGroup) through each error code:
    retriable rebalance-in-progress, member-id reset, coordinator reset,
    masked unknown errors, authorization failure, and a transport-level
    send failure. `error_type` is rebound per scenario and read by the
    mocked `send` closure."""
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    subscription = SubscriptionState()
    subscription.subscribe(topics={'topic1'})
    coordinator = GroupCoordinator(
        client, subscription,
        heartbeat_interval_ms=20000)
    # Stop the background coordination loop so the test controls syncs.
    coordinator._coordination_task.cancel()  # disable for test
    try:
        await coordinator._coordination_task
    except asyncio.CancelledError:
        pass
    coordinator._coordination_task = create_task(
        asyncio.sleep(0.1)
    )
    coordinator.coordinator_id = 15
    self.add_cleanup(coordinator.close)

    async def do_sync_group():
        rebalance = CoordinatorGroupRebalance(
            coordinator, coordinator.group_id, coordinator.coordinator_id,
            subscription.subscription, coordinator._assignors,
            coordinator._session_timeout_ms,
            coordinator._retry_backoff_ms)
        await rebalance._on_join_follower()
    mocked = mock.MagicMock()
    coordinator._client = mocked
    coordinator._client.api_version = (0, 10, 1)
    subsc = subscription.subscription
    error_type = None

    async def send(*agrs, **kw):
        # Fabricate a SyncGroup response carrying the current error_type.
        resp = SyncGroupResponse(
            error_code=error_type.errno,
            member_assignment=b"123"
        )
        return resp
    mocked.send.side_effect = send
    coordinator.member_id = 'some_invalid_member_id'
    error_type = Errors.RebalanceInProgressError
    await do_sync_group()
    # Rebalance-in-progress keeps the member_id, just forces a rejoin.
    self.assertEqual(coordinator.member_id, 'some_invalid_member_id')
    self.assertEqual(coordinator.need_rejoin(subsc), True)
    error_type = Errors.UnknownMemberIdError
    await do_sync_group()
    self.assertEqual(coordinator.member_id, UNKNOWN_MEMBER_ID)
    self.assertEqual(coordinator.need_rejoin(subsc), True)
    error_type = Errors.NotCoordinatorForGroupError
    await do_sync_group()
    self.assertEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.need_rejoin(subsc), True)
    coordinator.coordinator_id = 15
    coordinator._coordinator_dead_fut = create_future()
    error_type = Errors.UnknownError()
    with self.assertRaises(Errors.KafkaError):  # Masked as some KafkaError
        await do_sync_group()
    self.assertEqual(coordinator.need_rejoin(subsc), True)
    error_type = Errors.GroupAuthorizationFailedError()
    with self.assertRaises(Errors.GroupAuthorizationFailedError) as cm:
        await do_sync_group()
    self.assertEqual(coordinator.need_rejoin(subsc), True)
    self.assertEqual(cm.exception.args[0], coordinator.group_id)
    # If ``send()`` itself raises an error
    mocked.send.side_effect = Errors.GroupCoordinatorNotAvailableError()
    await do_sync_group()
    self.assertEqual(coordinator.coordinator_id, None)
    self.assertEqual(coordinator.need_rejoin(subsc), True)
@run_until_complete
async def test_generation_change_during_rejoin_sync(self):
    """If generation/member_id change while a SyncGroup request is in
    flight, the coordinator's values are restored to those the request
    was sent with once it completes successfully."""
    coordinator = mock.MagicMock()
    subscription = mock.MagicMock()
    assignors = mock.MagicMock()
    member_assignment = mock.Mock()
    rebalance = CoordinatorGroupRebalance(
        coordinator, "group_id", "coordinator_id", subscription,
        assignors, 1000, 1000)

    async def send_req(request):
        # Delay so the test can mutate generation/member_id mid-flight.
        await asyncio.sleep(0.1)
        resp = mock.MagicMock()
        resp.member_assignment = member_assignment
        resp.error_code = 0
        return resp
    coordinator._send_req.side_effect = send_req
    request = mock.MagicMock()
    coordinator.generation = 1
    coordinator.member_id = "member_id"
    sync_req = asyncio.ensure_future(rebalance._send_sync_group_request(request))
    await asyncio.sleep(0.05)
    # Change values while the request is still awaiting its response.
    coordinator.generation = -1
    coordinator.member_id = "member_id-changed"
    assert await sync_req == member_assignment
    # make sure values are set correctly
    assert coordinator.generation == 1
    assert coordinator.member_id == "member_id"
@run_until_complete
async def test_subscribe_pattern(self):
    """Pattern subscription matches existing topics, triggers assignment,
    and fires the rebalance listener (whose exceptions must be ignored by
    the coordinator — see RebalanceListenerForTest)."""
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    await client.bootstrap()
    test_listener = RebalanceListenerForTest()
    subscription = SubscriptionState()
    coordinator = GroupCoordinator(
        client, subscription,
        group_id='subs-pattern-group')
    await self.wait_topic(client, 'st-topic1')
    await self.wait_topic(client, 'st-topic2')
    subscription.subscribe_pattern(
        re.compile('st-topic*'), listener=test_listener)
    # Force a metadata refresh so the pattern is evaluated.
    client.set_topics([])
    await subscription.wait_for_assignment()
    self.assertNotEqual(coordinator.coordinator_id, None)
    self.assertFalse(coordinator.need_rejoin(subscription.subscription))
    tp_list = subscription.assigned_partitions()
    assigned = {
        ('st-topic1', 0),
        ('st-topic1', 1),
        ('st-topic2', 0),
        ('st-topic2', 1),
    }
    self.assertEqual(tp_list, assigned)
    # Listener was called exactly once per hook, despite raising.
    self.assertEqual(test_listener.revoked, [set()])
    self.assertEqual(test_listener.assigned, [assigned])
    await coordinator.close()
    await client.close()
@run_until_complete
async def test_commit_failed_scenarios(self):
    """OffsetCommit error handling: non-retriable errors propagate,
    retriable ones are retried, rebalance/member errors surface as
    CommitFailedError, and coordinator errors reset coordinator_id.
    `commit_error` (class or list of classes popped per call) is read by
    the mocked `_send_req` closure; non-commit requests pass through to
    the real broker."""
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    await client.bootstrap()
    await self.wait_topic(client, 'topic1')
    subscription = SubscriptionState()
    subscription.subscribe(topics={'topic1'})
    coordinator = GroupCoordinator(
        client, subscription,
        group_id='test-offsets-group')
    await subscription.wait_for_assignment()
    assignment = subscription.subscription.assignment
    offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
    # Sanity: an unmocked commit works.
    await coordinator.commit_offsets(assignment, offsets)
    _orig_send_req = coordinator._send_req
    with mock.patch.object(coordinator, "_send_req") as mocked:
        commit_error = None

        async def mock_send_req(request):
            if request.API_KEY == OffsetCommitRequest[0].API_KEY:
                if isinstance(commit_error, list):
                    error_code = commit_error.pop(0).errno
                else:
                    error_code = commit_error.errno
                resp_topics = [("topic1", [(0, error_code)])]
                return OffsetCommitResponse_v2(resp_topics)
            return (await _orig_send_req(request))
        mocked.side_effect = mock_send_req
        # Not retriable errors are propagated
        commit_error = Errors.GroupAuthorizationFailedError
        with self.assertRaises(Errors.GroupAuthorizationFailedError):
            await coordinator.commit_offsets(assignment, offsets)
        commit_error = Errors.TopicAuthorizationFailedError
        with self.assertRaises(Errors.TopicAuthorizationFailedError):
            await coordinator.commit_offsets(assignment, offsets)
        commit_error = Errors.InvalidCommitOffsetSizeError
        with self.assertRaises(Errors.InvalidCommitOffsetSizeError):
            await coordinator.commit_offsets(assignment, offsets)
        commit_error = Errors.OffsetMetadataTooLargeError
        with self.assertRaises(Errors.OffsetMetadataTooLargeError):
            await coordinator.commit_offsets(assignment, offsets)
        # retriable errors should be retried
        commit_error = [
            Errors.GroupLoadInProgressError,
            Errors.GroupLoadInProgressError,
            Errors.NoError,
        ]
        await coordinator.commit_offsets(assignment, offsets)
        # If rebalance is needed we can't commit offset
        commit_error = Errors.RebalanceInProgressError
        with self.assertRaises(Errors.CommitFailedError):
            await coordinator.commit_offsets(assignment, offsets)
        self.assertTrue(coordinator.need_rejoin(subscription.subscription))
        self.assertNotEqual(coordinator.member_id, UNKNOWN_MEMBER_ID)
        await subscription.wait_for_assignment()
        assignment = subscription.subscription.assignment
        commit_error = Errors.UnknownMemberIdError
        was_member_id = coordinator.member_id
        with self.assertRaises(Errors.CommitFailedError):
            await coordinator.commit_offsets(assignment, offsets)
        self.assertTrue(coordinator.need_rejoin(subscription.subscription))
        self.assertEqual(coordinator.member_id, UNKNOWN_MEMBER_ID)
        # NOTE: Reconnecting with unknown ID will force a
        # session_timeout_ms wait on broker, so we leave group to avoid
        # that. Hack for test purposes)
        request = LeaveGroupRequest(coordinator.group_id, was_member_id)
        await coordinator._send_req(request)
        await subscription.wait_for_assignment()
        assignment = subscription.subscription.assignment
        # Coordinator errors should be retried after it was found again
        commit_error = [
            Errors.GroupCoordinatorNotAvailableError,
            Errors.NoError
        ]
        await coordinator.commit_offsets(assignment, offsets)
        commit_error = [
            Errors.NotCoordinatorForGroupError,
            Errors.NoError
        ]
        await coordinator.commit_offsets(assignment, offsets)
        commit_error = [
            Errors.RequestTimedOutError,
            Errors.NoError
        ]
        await coordinator.commit_offsets(assignment, offsets)
        # Make sure coordinator_id is reset properly each retry
        for retriable_error in (
            Errors.GroupCoordinatorNotAvailableError,
            Errors.NotCoordinatorForGroupError,
            Errors.RequestTimedOutError,
        ):
            self.assertIsNotNone(coordinator.coordinator_id)
            commit_error = retriable_error
            with self.assertRaises(retriable_error):
                await coordinator._do_commit_offsets(assignment, offsets)
            self.assertIsNone(coordinator.coordinator_id)
            # ask coordinator to refresh coordinator_id value
            await coordinator.ensure_coordinator_known()
        # Unknown errors are just propagated too
        commit_error = Errors.UnknownError
        with self.assertRaises(Errors.UnknownError):
            await coordinator.commit_offsets(assignment, offsets)
    await coordinator.close()
    await client.close()
@run_until_complete
async def test_fetchoffsets_failed_scenarios(self):
    """OffsetFetch error handling: empty-partition fast path, retriable
    errors retried, unknown topic silently omitted, authorization and
    unknown errors raised. `fetch_error` (class or list popped per call)
    is read by the mocked `_send_req` closure."""
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    await client.bootstrap()
    await self.wait_topic(client, 'topic1')
    subscription = SubscriptionState()
    subscription.subscribe(topics={'topic1'})
    coordinator = GroupCoordinator(
        client, subscription,
        group_id='fetch-offsets-group')
    await subscription.wait_for_assignment()
    tp = TopicPartition('topic1', 0)
    partitions = {tp}
    _orig_send_req = coordinator._send_req
    with mock.patch.object(coordinator, "_send_req") as mocked:
        fetch_error = None

        async def mock_send_req(request):
            if request.API_KEY == OffsetFetchRequest.API_KEY:
                if isinstance(fetch_error, list):
                    error_code = fetch_error.pop(0).errno
                else:
                    error_code = fetch_error.errno
                # Only successful responses carry a real offset.
                if error_code == Errors.NoError.errno:
                    offset = 10
                else:
                    offset = -1
                resp_topics = [("topic1", [(0, offset, "", error_code)])]
                return request.RESPONSE_TYPE(resp_topics)
            return (await _orig_send_req(request))
        mocked.side_effect = mock_send_req
        # 0 partitions call should just fast return
        res = await coordinator.fetch_committed_offsets({})
        self.assertEqual(res, {})
        self.assertEqual(mocked.call_count, 0)
        fetch_error = [
            Errors.GroupLoadInProgressError,
            Errors.GroupLoadInProgressError,
            Errors.NoError,
            Errors.NoError,
            Errors.NoError
        ]
        res = await coordinator.fetch_committed_offsets(partitions)
        self.assertEqual(res, {tp: OffsetAndMetadata(10, "")})
        # Just omit the topic with a warning
        fetch_error = Errors.UnknownTopicOrPartitionError
        res = await coordinator.fetch_committed_offsets(partitions)
        self.assertEqual(res, {})
        fetch_error = [
            Errors.NotCoordinatorForGroupError,
            Errors.NotCoordinatorForGroupError,
            Errors.NoError,
            Errors.NoError,
            Errors.NoError
        ]
        r = await coordinator.fetch_committed_offsets(partitions)
        self.assertEqual(r, {tp: OffsetAndMetadata(10, "")})
        fetch_error = Errors.GroupAuthorizationFailedError
        with self.assertRaises(Errors.GroupAuthorizationFailedError) as cm:
            await coordinator.fetch_committed_offsets(partitions)
        self.assertEqual(cm.exception.args[0], coordinator.group_id)
        fetch_error = Errors.UnknownError
        with self.assertRaises(Errors.KafkaError):
            await coordinator.fetch_committed_offsets(partitions)
    await coordinator.close()
    await client.close()
@run_until_complete
async def test_coordinator_subscription_replace_on_rebalance(self):
    """Replacing the subscription while a rebalance is in flight must
    converge on the new topic in exactly one extra rebalance (issue #88)."""
    # See issue #88
    client = AIOKafkaClient(
        metadata_max_age_ms=2000,
        bootstrap_servers=self.hosts)
    await client.bootstrap()
    await self.wait_topic(client, 'topic1')
    await self.wait_topic(client, 'topic2')
    subscription = SubscriptionState()
    subscription.subscribe(topics={'topic1'})
    client.set_topics(('topic1', ))
    coordinator = GroupCoordinator(
        client, subscription,
        group_id='race-rebalance-subscribe-replace',
        heartbeat_interval_ms=1000)
    _perform_assignment = coordinator._perform_assignment
    with mock.patch.object(coordinator, '_perform_assignment') as mocked:
        async def _new(*args, **kw):
            # Change the subscription to different topic before we finish
            # rebalance
            res = await _perform_assignment(*args, **kw)
            if subscription.subscription.topics == {"topic1"}:
                subscription.subscribe(topics={'topic2'})
                client.set_topics(('topic2', ))
            return res
        mocked.side_effect = _new
        await subscription.wait_for_assignment()
        topics = {
            tp.topic for tp in subscription.assigned_partitions()}
        self.assertEqual(topics, {'topic2'})
        # There should only be 2 rebalances to finish the task
        self.assertEqual(mocked.call_count, 2)
    await coordinator.close()
    await client.close()
@run_until_complete
async def test_coordinator_subscription_append_on_rebalance(self):
    """Same race as the replace test, but the mid-rebalance change adds a
    topic instead of replacing it; still only 2 rebalances expected."""
    # same as above, but with adding topics instead of replacing them
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    await client.bootstrap()
    await self.wait_topic(client, 'topic1')
    await self.wait_topic(client, 'topic2')
    subscription = SubscriptionState()
    subscription.subscribe(topics={'topic1'})
    coordinator = GroupCoordinator(
        client, subscription,
        group_id='race-rebalance-subscribe-append',
        heartbeat_interval_ms=20000000)
    _perform_assignment = coordinator._perform_assignment
    with mock.patch.object(coordinator, '_perform_assignment') as mocked:
        async def _new(*args, **kw):
            # Change the subscription to different topic before we finish
            # rebalance
            res = await _perform_assignment(*args, **kw)
            if subscription.subscription.topics == {"topic1"}:
                subscription.subscribe(topics={'topic1', 'topic2'})
                client.set_topics(('topic1', 'topic2', ))
            return res
        mocked.side_effect = _new
        await subscription.wait_for_assignment()
        topics = {
            tp.topic for tp in subscription.assigned_partitions()}
        self.assertEqual(topics, {'topic1', 'topic2'})
        # There should only be 2 rebalances to finish the task
        self.assertEqual(mocked.call_count, 2)
    await coordinator.close()
    await client.close()
@run_until_complete
async def test_coordinator_metadata_update_during_rebalance(self):
    """A metadata update started by set_topics() must be awaited before
    the leader performs assignment, even when the update is slow."""
    # Race condition where client.set_topics start MetadataUpdate, but it
    # fails to arrive before leader performed assignment
    # Just ensure topics are created
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    await client.bootstrap()
    await self.wait_topic(client, 'topic1')
    await self.wait_topic(client, 'topic2')
    await client.close()
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    await client.bootstrap()
    self.add_cleanup(client.close)
    subscription = SubscriptionState()
    client.set_topics(("topic1", ))
    subscription.subscribe(topics={'topic1'})
    coordinator = GroupCoordinator(
        client, subscription,
        group_id='race-rebalance-metadata-update',
        heartbeat_interval_ms=20000000)
    self.add_cleanup(coordinator.close)
    await subscription.wait_for_assignment()
    # Check that topic's partitions are properly assigned
    self.assertEqual(
        subscription.assigned_partitions(),
        {TopicPartition("topic1", 0), TopicPartition("topic1", 1)})
    _metadata_update = client._metadata_update
    with mock.patch.object(client, '_metadata_update') as mocked:
        async def _new(*args, **kw):
            # Just make metadata updates a bit more slow for test
            # robustness
            await asyncio.sleep(0.5)
            res = await _metadata_update(*args, **kw)
            return res
        mocked.side_effect = _new
        # This case will assure, that the started metadata update will be
        # waited for before assigning partitions. ``set_topics`` will start
        # the metadata update
        subscription.subscribe(topics={'topic2'})
        client.set_topics(('topic2', ))
        await subscription.wait_for_assignment()
        self.assertEqual(
            subscription.assigned_partitions(),
            {TopicPartition("topic2", 0), TopicPartition("topic2", 1)})
@run_until_complete
async def test_coordinator_metadata_change_by_broker(self):
    """A broker-side metadata change that merely transits through a
    superset snapshot must not trigger an extra rebalance (issue #108)."""
    # Issue #108. We can have a misleading metadata change, that will
    # trigger additional rebalance
    client = AIOKafkaClient(
        bootstrap_servers=self.hosts)
    await client.bootstrap()
    await self.wait_topic(client, 'topic1')
    await self.wait_topic(client, 'topic2')
    client.set_topics(['other_topic'])
    await client.force_metadata_update()
    subscription = SubscriptionState()
    coordinator = GroupCoordinator(
        client, subscription,
        group_id='race-rebalance-subscribe-append',
        heartbeat_interval_ms=2000000)
    subscription.subscribe(topics={'topic1'})
    await client.set_topics(('topic1', ))
    await subscription.wait_for_assignment()
    _perform_assignment = coordinator._perform_assignment
    with mock.patch.object(coordinator, '_perform_assignment') as mocked:
        mocked.side_effect = _perform_assignment
        subscription.subscribe(topics={'topic2'})
        await client.set_topics(('topic2', ))
        # Should only trigger 1 rebalance, but will trigger 2 with bug:
        # Metadata snapshot will change:
        # {'topic1': {0, 1}} -> {'topic1': {0, 1}, 'topic2': {0, 1}}
        # And then again:
        # {'topic1': {0, 1}, 'topic2': {0, 1}} -> {'topic2': {0, 1}}
        await subscription.wait_for_assignment()
        await client.force_metadata_update()
        self.assertFalse(
            coordinator.need_rejoin(subscription.subscription))
        self.assertEqual(mocked.call_count, 1)
    await coordinator.close()
    await client.close()
@run_until_complete
async def test_coordinator_ensure_active_group_on_expired_membership(self):
    """A failing autocommit during join-prepare (UnknownMemberIdError on
    OffsetCommit) must not prevent the subsequent rejoin from succeeding."""
    # Do not fail group join if group membership has expired (ie autocommit
    # fails on join prepare)
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    await client.bootstrap()
    await self.wait_topic(client, 'topic1')
    subscription = SubscriptionState()
    subscription.subscribe(topics={'topic1'})
    coordinator = GroupCoordinator(
        client, subscription,
        group_id='test-offsets-group', session_timeout_ms=6000,
        heartbeat_interval_ms=1000)
    await subscription.wait_for_assignment()
    assignment = subscription.subscription.assignment
    # Make sure we have something to commit before rejoining
    tp = TopicPartition('topic1', 0)
    subscription.seek(tp, 0)
    offsets = assignment.all_consumed_offsets()
    self.assertTrue(offsets)  # Not empty
    # during OffsetCommit, UnknownMemberIdError is raised
    _orig_send_req = coordinator._send_req
    resp_topics = [("topic1", [(0, Errors.UnknownMemberIdError.errno)])]
    with mock.patch.object(coordinator, "_send_req") as mocked:
        async def mock_send_req(request):
            # Fail only commits; everything else goes to the real broker.
            if request.API_KEY == OffsetCommitRequest[0].API_KEY:
                return OffsetCommitResponse_v2(resp_topics)
            return (await _orig_send_req(request))
        mocked.side_effect = mock_send_req
        with self.assertRaises(Errors.CommitFailedError):
            await coordinator.commit_offsets(assignment, offsets)
        self.assertTrue(coordinator.need_rejoin(subscription.subscription))
        # Waiting will assure we could rebalance even if commit fails
        await subscription.wait_for_assignment()
    await coordinator.close()
    await client.close()
@run_until_complete
async def test_coordinator__send_req(self):
    """_send_req raises GroupCoordinatorNotAvailableError when no
    coordinator is known, and marks the coordinator dead (coordinator_id
    reset to None) when the underlying client.send fails."""
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    await client.bootstrap()
    self.add_cleanup(client.close)
    subscription = SubscriptionState()
    subscription.subscribe(topics={'topic1'})
    coordinator = GroupCoordinator(
        client, subscription,
        group_id='test-my-group', session_timeout_ms=6000,
        heartbeat_interval_ms=1000)
    self.add_cleanup(coordinator.close)
    request = OffsetCommitRequest[2](topics=[])
    # We did not call ensure_coordinator_known yet
    with self.assertRaises(Errors.GroupCoordinatorNotAvailableError):
        await coordinator._send_req(request)
    await coordinator.ensure_coordinator_known()
    self.assertIsNotNone(coordinator.coordinator_id)
    with mock.patch.object(client, "send") as mocked:
        async def mock_send(*args, **kw):
            raise Errors.KafkaError("Some unexpected error")
        mocked.side_effect = mock_send
        # _send_req should mark coordinator dead on errors
        with self.assertRaises(Errors.KafkaError):
            await coordinator._send_req(request)
        self.assertIsNone(coordinator.coordinator_id)
@run_until_complete
async def test_coordinator_close(self):
    """``close()`` must wait for an in-flight rebalance to finish and be
    safe to call more than once."""
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    await client.bootstrap()
    self.add_cleanup(client.close)
    subscription = SubscriptionState()
    waiter = create_future()

    class WaitingListener(ConsumerRebalanceListener):
        # Blocks the assignment callback until ``waiter`` resolves, which
        # keeps the rebalance "in progress" for the duration of the test.
        def on_partitions_revoked(self, revoked):
            pass

        async def on_partitions_assigned(self, assigned, waiter=waiter):
            await waiter

    coordinator = GroupCoordinator(
        client, subscription,
        group_id='test-my-group', session_timeout_ms=6000,
        heartbeat_interval_ms=1000)
    subscription.subscribe(
        topics={'topic1'}, listener=WaitingListener())

    # Close task should be loyal to rebalance and wait for it to finish
    close_task = create_task(coordinator.close())
    await asyncio.sleep(0.1)
    self.assertFalse(close_task.done())

    # Releasing the waiter on listener will allow close task to finish
    waiter.set_result(True)
    await close_task
    # You can close again with no effect
    await coordinator.close()
@run_until_complete
async def test_coordinator_close_autocommit(self):
    """``close()`` performs a final autocommit and must still finish even
    if that commit raises (the error is only logged)."""
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    await client.bootstrap()
    self.add_cleanup(client.close)
    subscription = SubscriptionState()
    coordinator = GroupCoordinator(
        client, subscription,
        group_id='test-my-group', session_timeout_ms=6000,
        heartbeat_interval_ms=1000)
    subscription.subscribe(topics={'topic1'})
    await subscription.wait_for_assignment()

    waiter = create_future()

    async def commit_offsets(*args, **kw):
        await waiter
    coordinator.commit_offsets = mocked = mock.Mock()
    mocked.side_effect = commit_offsets

    # Close task should call autocommit last time
    close_task = create_task(coordinator.close())
    await asyncio.sleep(0.1)
    # self.assertFalse(close_task.done())

    # Raising an error should not prevent from closing. Error should be
    # just logged
    waiter.set_exception(Errors.UnknownError())
    await close_task
@run_until_complete
async def test_coordinator_ensure_coordinator_known(self):
    """Unit-test ``ensure_coordinator_known`` against scripted lookup
    results: broken nodes, retriable errors, and fatal errors."""
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    subscription = SubscriptionState()
    subscription.subscribe(topics={'topic1'})
    coordinator = GroupCoordinator(
        client, subscription,
        heartbeat_interval_ms=20000)
    coordinator._coordination_task.cancel()  # disable for test
    try:
        await coordinator._coordination_task
    except asyncio.CancelledError:
        pass
    # Replace the coordination task with a dummy so close() still works.
    coordinator._coordination_task = create_task(
        asyncio.sleep(0.1,)
    )
    self.add_cleanup(coordinator.close)

    def force_metadata_update():
        fut = create_future()
        fut.set_result(True)
        return fut

    client.ready = mock.Mock()
    client.force_metadata_update = mock.Mock()
    client.force_metadata_update.side_effect = force_metadata_update

    # Only node 0 is considered connectable.
    async def ready(node_id, group=None):
        if node_id == 0:
            return True
        return False
    client.ready.side_effect = ready

    client.coordinator_lookup = mock.Mock()
    # Lookup results are popped from the END of this list; an Exception
    # entry is raised instead of returned.
    coordinator_lookup = None

    async def _do_coordinator_lookup(type_, key):
        node_id = coordinator_lookup.pop()
        if isinstance(node_id, Exception):
            raise node_id
        return node_id
    client.coordinator_lookup.side_effect = _do_coordinator_lookup

    # CASE: the lookup returns a broken node, that can't be connected
    # to. Ensure should wait until coordinator lookup finds the correct
    # node.
    coordinator.coordinator_dead()
    coordinator_lookup = [0, 1, 1]
    await coordinator.ensure_coordinator_known()
    self.assertEqual(coordinator.coordinator_id, 0)
    self.assertEqual(client.force_metadata_update.call_count, 0)

    # CASE: lookup fails with error first time. We update metadata and try
    # again
    coordinator.coordinator_dead()
    coordinator_lookup = [0, Errors.UnknownTopicOrPartitionError()]
    await coordinator.ensure_coordinator_known()
    self.assertEqual(client.force_metadata_update.call_count, 1)

    # CASE: Special case for group authorization
    coordinator.coordinator_dead()
    coordinator_lookup = [0, Errors.GroupAuthorizationFailedError()]
    with self.assertRaises(Errors.GroupAuthorizationFailedError) as cm:
        await coordinator.ensure_coordinator_known()
    self.assertEqual(cm.exception.args[0], coordinator.group_id)

    # CASE: unretriable errors should be reraised to higher level
    coordinator.coordinator_dead()
    coordinator_lookup = [0, Errors.UnknownError()]
    with self.assertRaises(Errors.UnknownError):
        await coordinator.ensure_coordinator_known()
@run_until_complete
async def test_coordinator__do_heartbeat(self):
    """Unit-test ``_do_heartbeat`` error handling: each broker error code
    maps to a specific local state change (dead coordinator, rejoin,
    member id reset) or a raised exception."""
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    subscription = SubscriptionState()
    subscription.subscribe(topics={'topic1'})
    coordinator = GroupCoordinator(
        client, subscription,
        heartbeat_interval_ms=20000)
    coordinator._coordination_task.cancel()  # disable for test
    try:
        await coordinator._coordination_task
    except asyncio.CancelledError:
        pass
    coordinator._coordination_task = create_task(
        asyncio.sleep(0.1)
    )
    self.add_cleanup(coordinator.close)

    _orig_send_req = coordinator._send_req
    coordinator._send_req = mocked = mock.Mock()
    # ``heartbeat_error`` scripts the error code returned by the broker;
    # ``send_req_error`` simulates a transport-level failure instead.
    heartbeat_error = None
    send_req_error = None

    async def mock_send_req(request):
        if send_req_error is not None:
            raise send_req_error
        if request.API_KEY == HeartbeatRequest.API_KEY:
            if isinstance(heartbeat_error, list):
                error_code = heartbeat_error.pop(0).errno
            else:
                error_code = heartbeat_error.errno
            return HeartbeatRequest.RESPONSE_TYPE(error_code)
        return (await _orig_send_req(request))
    mocked.side_effect = mock_send_req

    coordinator.coordinator_id = 15
    # Coordinator not available -> heartbeat fails, coordinator marked dead
    heartbeat_error = Errors.GroupCoordinatorNotAvailableError()
    success = await coordinator._do_heartbeat()
    self.assertFalse(success)
    self.assertIsNone(coordinator.coordinator_id)

    # Rebalance in progress -> heartbeat "succeeds" but requests a rejoin
    coordinator._rejoin_needed_fut = create_future()
    heartbeat_error = Errors.RebalanceInProgressError()
    success = await coordinator._do_heartbeat()
    self.assertTrue(success)
    self.assertTrue(coordinator._rejoin_needed_fut.done())

    # Stale generation -> rejoin with member id reset
    coordinator.member_id = "some_member"
    coordinator._rejoin_needed_fut = create_future()
    heartbeat_error = Errors.IllegalGenerationError()
    success = await coordinator._do_heartbeat()
    self.assertFalse(success)
    self.assertTrue(coordinator._rejoin_needed_fut.done())
    self.assertEqual(coordinator.member_id, UNKNOWN_MEMBER_ID)

    # Unknown member -> same treatment as stale generation
    coordinator.member_id = "some_member"
    coordinator._rejoin_needed_fut = create_future()
    heartbeat_error = Errors.UnknownMemberIdError()
    success = await coordinator._do_heartbeat()
    self.assertFalse(success)
    self.assertTrue(coordinator._rejoin_needed_fut.done())
    self.assertEqual(coordinator.member_id, UNKNOWN_MEMBER_ID)

    # Authorization failure is fatal and carries the group id
    heartbeat_error = Errors.GroupAuthorizationFailedError()
    with self.assertRaises(Errors.GroupAuthorizationFailedError) as cm:
        await coordinator._do_heartbeat()
    self.assertEqual(cm.exception.args[0], coordinator.group_id)

    # Any other error is reraised as KafkaError
    heartbeat_error = Errors.UnknownError()
    with self.assertRaises(Errors.KafkaError):
        await coordinator._do_heartbeat()

    # Transport errors are swallowed -> just an unsuccessful heartbeat
    heartbeat_error = None
    send_req_error = Errors.RequestTimedOutError()
    success = await coordinator._do_heartbeat()
    self.assertFalse(success)

    # Clean response -> success
    heartbeat_error = Errors.NoError()
    send_req_error = None
    success = await coordinator._do_heartbeat()
    self.assertTrue(success)
@run_until_complete
async def test_coordinator__heartbeat_routine(self):
    """The heartbeat routine should retry failed heartbeats with the
    retry backoff and mark the coordinator dead once session_timeout_ms
    passes without a successful heartbeat."""
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    subscription = SubscriptionState()
    subscription.subscribe(topics={'topic1'})
    coordinator = GroupCoordinator(
        client, subscription,
        heartbeat_interval_ms=100,
        session_timeout_ms=300,
        retry_backoff_ms=50)
    coordinator._coordination_task.cancel()  # disable for test
    try:
        await coordinator._coordination_task
    except asyncio.CancelledError:
        pass
    coordinator._coordination_task = create_task(
        asyncio.sleep(0.1)
    )
    self.add_cleanup(coordinator.close)

    coordinator._do_heartbeat = mocked = mock.Mock()
    coordinator.coordinator_id = 15
    coordinator.member_id = 17
    coordinator.generation = 0
    # ``success`` scripts _do_heartbeat results; a list is consumed
    # front-to-back, a scalar repeats forever.
    success = None

    async def _do_heartbeat(*args, **kw):
        if isinstance(success, list):
            return success.pop(0)
        return success
    mocked.side_effect = _do_heartbeat

    async def ensure_coordinator_known():
        return None
    coordinator.ensure_coordinator_known = mock.Mock()
    coordinator.ensure_coordinator_known.side_effect = (
        ensure_coordinator_known
    )

    routine = create_task(
        coordinator._heartbeat_routine())

    def cleanup():
        routine.cancel()
        return routine
    self.add_cleanup(cleanup)

    # CASE: simple heartbeat
    success = True
    await asyncio.sleep(0.13)
    self.assertFalse(routine.done())
    self.assertEqual(mocked.call_count, 1)

    # CASE: 2 heartbeat fail
    success = False
    await asyncio.sleep(0.15)
    self.assertFalse(routine.done())
    # We did 2 heartbeats as we waited only retry_backoff_ms between them
    self.assertEqual(mocked.call_count, 3)

    # CASE: session_timeout_ms elapsed without heartbeat
    await asyncio.sleep(0.10)
    self.assertEqual(mocked.call_count, 5)
    self.assertEqual(coordinator.coordinator_id, 15)

    # last heartbeat try
    await asyncio.sleep(0.05)
    self.assertEqual(mocked.call_count, 6)
    self.assertIsNone(coordinator.coordinator_id)
@run_until_complete
async def test_coordinator__maybe_refresh_commit_offsets(self):
    """``_maybe_refresh_commit_offsets`` should fetch committed offsets
    once per assignment, fall back to -1 when none are found, skip on
    retriable errors and reraise fatal ones."""
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    subscription = SubscriptionState()
    tp = TopicPartition("topic1", 0)
    coordinator = GroupCoordinator(
        client, subscription,
        heartbeat_interval_ms=20000)
    coordinator._coordination_task.cancel()  # disable for test
    try:
        await coordinator._coordination_task
    except asyncio.CancelledError:
        pass
    coordinator._coordination_task = create_task(
        asyncio.sleep(0.1)
    )
    self.add_cleanup(coordinator.close)

    coordinator._do_fetch_commit_offsets = mocked = mock.Mock()
    fetched_offsets = {tp: OffsetAndMetadata(12, "")}
    test_self = self

    async def do_fetch(need_update):
        test_self.assertEqual(need_update, [tp])
        return fetched_offsets
    mocked.side_effect = do_fetch

    def reset_assignment():
        # Fresh user assignment -> committed point is unknown again.
        subscription.assign_from_user({tp})
        assignment = subscription.subscription.assignment
        tp_state = assignment.state_value(tp)
        fut = tp_state.fetch_committed()
        return assignment, tp_state, fut
    assignment, tp_state, fut = reset_assignment()

    # Success case
    resp = await coordinator._maybe_refresh_commit_offsets(assignment)
    self.assertEqual(resp, True)
    self.assertEqual(fut.result(), OffsetAndMetadata(12, ""))
    # Calling again will fast return without a request
    resp = await coordinator._maybe_refresh_commit_offsets(assignment)
    self.assertEqual(resp, True)
    self.assertEqual(mocked.call_count, 1)

    # Commit not found case
    fetched_offsets = {}
    assignment, tp_state, fut = reset_assignment()
    resp = await coordinator._maybe_refresh_commit_offsets(assignment)
    self.assertEqual(resp, True)
    self.assertEqual(fut.result(), OffsetAndMetadata(-1, ""))

    # Retriable error will be skipped
    assignment, tp_state, fut = reset_assignment()
    mocked.side_effect = Errors.GroupCoordinatorNotAvailableError()
    resp = await coordinator._maybe_refresh_commit_offsets(assignment)
    self.assertEqual(resp, False)

    # Not retriable error will not be skipped
    mocked.side_effect = Errors.UnknownError()
    with self.assertRaises(Errors.UnknownError):
        await coordinator._maybe_refresh_commit_offsets(assignment)
@run_until_complete
async def test_coordinator__maybe_do_autocommit(self):
    """``_maybe_do_autocommit`` returns the delay until the next commit
    attempt; retriable errors push the deadline forward by the retry
    backoff, fatal errors are raised."""
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    subscription = SubscriptionState()
    tp = TopicPartition("topic1", 0)
    coordinator = GroupCoordinator(
        client, subscription,
        heartbeat_interval_ms=20000, auto_commit_interval_ms=1000,
        retry_backoff_ms=50)
    coordinator._coordination_task.cancel()  # disable for test
    try:
        await coordinator._coordination_task
    except asyncio.CancelledError:
        pass
    coordinator._coordination_task = create_task(
        asyncio.sleep(0.1)
    )
    self.add_cleanup(coordinator.close)

    coordinator._do_commit_offsets = mocked = mock.Mock()

    async def do_commit(*args, **kw):
        # Simulate a commit that takes 100ms of wall time.
        await asyncio.sleep(0.1)
        return
    mocked.side_effect = do_commit

    def reset_assignment():
        subscription.assign_from_user({tp})
        assignment = subscription.subscription.assignment
        tp_state = assignment.state_value(tp)
        return assignment, tp_state
    assignment, tp_state = reset_assignment()

    # Fast return if autocommit disabled
    coordinator._enable_auto_commit = False
    timeout = await coordinator._maybe_do_autocommit(assignment)
    self.assertIsNone(timeout)  # Infinite timeout in this case
    self.assertEqual(mocked.call_count, 0)

    coordinator._enable_auto_commit = True
    # Successful case should count time to next autocommit
    loop = get_running_loop()
    now = loop.time()
    interval = 1
    coordinator._next_autocommit_deadline = 0
    timeout = await coordinator._maybe_do_autocommit(assignment)
    # 1000ms interval minus 100 sleep
    self.assertAlmostEqual(timeout, 0.9, places=1)
    self.assertAlmostEqual(
        coordinator._next_autocommit_deadline, now + interval, places=1)
    self.assertEqual(mocked.call_count, 1)

    # Retriable errors should backoff and retry, no skip autocommit
    coordinator._next_autocommit_deadline = 0
    mocked.side_effect = Errors.NotCoordinatorForGroupError()
    now = loop.time()
    timeout = await coordinator._maybe_do_autocommit(assignment)
    self.assertEqual(timeout, 0.05)
    # Dealine should be set into future, not depending on commit time, to
    # avoid busy loops
    self.assertAlmostEqual(
        coordinator._next_autocommit_deadline, now + timeout,
        places=1)

    # UnknownMemberId should also retry
    coordinator._next_autocommit_deadline = 0
    mocked.side_effect = Errors.UnknownMemberIdError()
    now = loop.time()
    timeout = await coordinator._maybe_do_autocommit(assignment)
    self.assertEqual(timeout, 0.05)

    # Not retriable errors should skip autocommit and log
    mocked.side_effect = Errors.UnknownError()
    now = loop.time()
    coordinator._next_autocommit_deadline = 0
    with self.assertRaises(Errors.KafkaError):
        await coordinator._maybe_do_autocommit(assignment)
@run_until_complete
async def test_coordinator__coordination_routine(self):
    """End-to-end unit test of ``_coordination_routine`` with all of its
    collaborators mocked: verifies which steps (coordinator lookup, join
    prepare, rejoin, autocommit, metadata refresh) run in each scenario
    by checking mock call counts."""
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    subscription = SubscriptionState()
    tp = TopicPartition("topic1", 0)
    coordinator = GroupCoordinator(
        client, subscription,
        heartbeat_interval_ms=20000, auto_commit_interval_ms=1000,
        retry_backoff_ms=50)
    self.add_cleanup(coordinator.close)

    def start_coordination():
        if coordinator._coordination_task:
            coordinator._coordination_task.cancel()
        coordinator._coordination_task = task = create_task(
            coordinator._coordination_routine())
        return task

    async def stop_coordination():
        coordinator._coordination_task.cancel()  # disable for test
        try:
            await coordinator._coordination_task
        except asyncio.CancelledError:
            pass
        coordinator._coordination_task = create_task(
            asyncio.sleep(0.1))

    await stop_coordination()

    async def ensure_coordinator_known():
        return None
    coordinator.ensure_coordinator_known = coord_mock = mock.Mock()
    coord_mock.side_effect = ensure_coordinator_known

    async def _on_join_prepare(assign):
        return None
    coordinator._on_join_prepare = prepare_mock = mock.Mock()
    prepare_mock.side_effect = _on_join_prepare

    coordinator._do_rejoin_group = rejoin_mock = mock.Mock()
    # ``rejoin_ok`` scripts whether a rejoin attempt succeeds (read by
    # the closure below at call time).
    rejoin_ok = True

    async def do_rejoin(subsc):
        if rejoin_ok:
            subscription.assign_from_subscribed({tp})
            coordinator._rejoin_needed_fut = create_future()
            return True
        else:
            await asyncio.sleep(0.1)
            return False
    rejoin_mock.side_effect = do_rejoin

    async def _maybe_do_autocommit(assign):
        return None
    coordinator._maybe_do_autocommit = autocommit_mock = mock.Mock()
    autocommit_mock.side_effect = _maybe_do_autocommit
    coordinator._start_heartbeat_task = mock.Mock()

    client.force_metadata_update = metadata_mock = mock.Mock()
    done_fut = create_future()
    done_fut.set_result(None)
    metadata_mock.side_effect = lambda: done_fut

    # CASE: coordination should stop and wait if subscription is not
    # present
    task = start_coordination()
    await asyncio.sleep(0.01)
    self.assertFalse(task.done())
    self.assertEqual(coord_mock.call_count, 0)

    # CASE: user assignment should skip rebalance calls
    subscription.assign_from_user({tp})
    await asyncio.sleep(0.01)
    self.assertFalse(task.done())
    self.assertEqual(coord_mock.call_count, 1)
    self.assertEqual(prepare_mock.call_count, 0)
    self.assertEqual(rejoin_mock.call_count, 0)
    self.assertEqual(autocommit_mock.call_count, 1)

    # CASE: with user assignment routine should not react to request_rejoin
    coordinator.request_rejoin()
    await asyncio.sleep(0.01)
    self.assertFalse(task.done())
    self.assertEqual(coord_mock.call_count, 1)
    self.assertEqual(prepare_mock.call_count, 0)
    self.assertEqual(rejoin_mock.call_count, 0)
    self.assertEqual(autocommit_mock.call_count, 1)
    coordinator._rejoin_needed_fut = create_future()

    # CASE: Changing subscription should propagete a rebalance
    subscription.unsubscribe()
    subscription.subscribe({"topic1"})
    await asyncio.sleep(0.01)
    self.assertFalse(task.done())
    self.assertEqual(coord_mock.call_count, 2)
    self.assertEqual(prepare_mock.call_count, 1)
    self.assertEqual(rejoin_mock.call_count, 1)
    self.assertEqual(autocommit_mock.call_count, 2)

    # CASE: If rejoin fails, we do it again without autocommit
    rejoin_ok = False
    coordinator.request_rejoin()
    await asyncio.sleep(0.01)
    self.assertFalse(task.done())
    self.assertEqual(coord_mock.call_count, 3)
    self.assertEqual(prepare_mock.call_count, 2)
    self.assertEqual(rejoin_mock.call_count, 2)
    self.assertEqual(autocommit_mock.call_count, 2)

    # CASE: After we retry we should not call _on_join_prepare again
    rejoin_ok = True
    await subscription.wait_for_assignment()
    self.assertFalse(task.done())
    self.assertEqual(coord_mock.call_count, 4)
    self.assertEqual(prepare_mock.call_count, 2)
    self.assertEqual(rejoin_mock.call_count, 3)
    self.assertEqual(autocommit_mock.call_count, 3)

    # CASE: If pattern subscription present we should update metadata
    # before joining.
    subscription.unsubscribe()
    subscription.subscribe_pattern(re.compile("^topic1&"))
    subscription.subscribe_from_pattern({"topic1"})
    self.assertEqual(metadata_mock.call_count, 0)
    await asyncio.sleep(0.01)
    self.assertFalse(task.done())
    self.assertEqual(coord_mock.call_count, 5)
    self.assertEqual(prepare_mock.call_count, 3)
    self.assertEqual(rejoin_mock.call_count, 4)
    self.assertEqual(autocommit_mock.call_count, 4)
    self.assertEqual(metadata_mock.call_count, 1)

    # CASE: on unsubscribe we should stop and wait for new subscription
    subscription.unsubscribe()
    await asyncio.sleep(0.01)
    self.assertFalse(task.done())
    self.assertEqual(coord_mock.call_count, 5)
    self.assertEqual(prepare_mock.call_count, 3)
    self.assertEqual(rejoin_mock.call_count, 4)
    self.assertEqual(autocommit_mock.call_count, 4)
    self.assertEqual(metadata_mock.call_count, 1)

    # CASE: on close we should perform finalizer and ignore it's error
    coordinator._maybe_do_last_autocommit = last_commit_mock = mock.Mock()
    last_commit_mock.side_effect = Errors.UnknownError()
    await coordinator.close()
    self.assertTrue(task.done())

    # As we continued from a subscription wait it should fast exit
    self.assertEqual(coord_mock.call_count, 5)
    self.assertEqual(prepare_mock.call_count, 3)
    self.assertEqual(rejoin_mock.call_count, 4)
    self.assertEqual(autocommit_mock.call_count, 4)
    self.assertEqual(metadata_mock.call_count, 1)
    self.assertEqual(last_commit_mock.call_count, 1)
@run_until_complete
async def test_no_group_subscribe_during_metadata_update(self):
    """Regression test: a subscription change racing with an in-flight
    metadata update must not break the metadata sync task."""
    # Issue #536. During metadata update we can't assume the subscription
    # did not change. We should handle the case by refreshing meta again.
    client = AIOKafkaClient(bootstrap_servers=self.hosts)
    await client.bootstrap()
    await self.wait_topic(client, 'topic1')
    await self.wait_topic(client, 'topic2')
    await client.set_topics(('other_topic', ))
    subscription = SubscriptionState()
    coordinator = NoGroupCoordinator(
        client, subscription)
    subscription.subscribe(topics={'topic1'})
    client.set_topics(('topic1', ))
    await asyncio.sleep(0.0001)

    # Change subscription before metadata update is received
    subscription.subscribe(topics={'topic2'})
    metadata_fut = client.set_topics(('topic2', ))

    try:
        await asyncio.wait_for(
            metadata_fut,
            timeout=0.2
        )
    except asyncio.TimeoutError:
        pass

    # The sync task must survive the race.
    self.assertFalse(client._sync_task.done())

    await coordinator.close()
    await client.close()
|
/**
 * Lower-cases the first character of `name`, leaving the rest untouched.
 * An empty string is returned unchanged.
 */
function camelCase(name) {
  const first = name.slice(0, 1);
  const rest = name.slice(1);
  return first.toLowerCase() + rest;
}
/**
 * Describes a value-converter resource: how it is discovered by naming
 * convention, instantiated from the DI container, and registered with a
 * view resource registry.
 */
export class ValueConverterResource {
  constructor(name) {
    this.name = name;
  }

  /**
   * Recognizes classes named `<Something>ValueConverter` and derives the
   * converter's registration name from the prefix. Returns undefined for
   * non-matching names.
   */
  static convention(name) {
    const suffix = 'ValueConverter';
    if (!name.endsWith(suffix)) {
      return undefined;
    }
    const shortName = name.slice(0, name.length - suffix.length);
    return new ValueConverterResource(camelCase(shortName));
  }

  /** Resolves the converter instance from the DI container. */
  analyze(container, target) {
    this.instance = container.get(target);
  }

  /** Registers the instance, preferring an explicitly supplied name. */
  register(registry, name) {
    const key = name || this.name;
    registry.registerValueConverter(key, this.instance);
  }

  /** Nothing to load asynchronously; resolves to this resource. */
  load(container, target) {
    return Promise.resolve(this);
  }
}
|
/*
 AngularJS v1.3.15
 (c) 2010-2014 Google, Inc. http://angularjs.org
 License: MIT
*/
// NOTE(review): vendored, minified build artifact of the AngularJS ngRoute
// module (angular-route.min.js). Do not hand-edit -- replace the whole file
// with the matching upstream release artifact when upgrading.
(function(q,d,C){'use strict';function v(r,k,h){return{restrict:"ECA",terminal:!0,priority:400,transclude:"element",link:function(a,f,b,c,y){function z(){l&&(h.cancel(l),l=null);m&&(m.$destroy(),m=null);n&&(l=h.leave(n),l.then(function(){l=null}),n=null)}function x(){var b=r.current&&r.current.locals;if(d.isDefined(b&&b.$template)){var b=a.$new(),c=r.current;n=y(b,function(b){h.enter(b,null,n||f).then(function(){!d.isDefined(t)||t&&!a.$eval(t)||k()});z()});m=c.scope=b;m.$emit("$viewContentLoaded");
m.$eval(w)}else z()}var m,n,l,t=b.autoscroll,w=b.onload||"";a.$on("$routeChangeSuccess",x);x()}}}function A(d,k,h){return{restrict:"ECA",priority:-400,link:function(a,f){var b=h.current,c=b.locals;f.html(c.$template);var y=d(f.contents());b.controller&&(c.$scope=a,c=k(b.controller,c),b.controllerAs&&(a[b.controllerAs]=c),f.data("$ngControllerController",c),f.children().data("$ngControllerController",c));y(a)}}}q=d.module("ngRoute",["ng"]).provider("$route",function(){function r(a,f){return d.extend(Object.create(a),
f)}function k(a,d){var b=d.caseInsensitiveMatch,c={originalPath:a,regexp:a},h=c.keys=[];a=a.replace(/([().])/g,"\\$1").replace(/(\/)?:(\w+)([\?\*])?/g,function(a,d,b,c){a="?"===c?c:null;c="*"===c?c:null;h.push({name:b,optional:!!a});d=d||"";return""+(a?"":d)+"(?:"+(a?d:"")+(c&&"(.+?)"||"([^/]+)")+(a||"")+")"+(a||"")}).replace(/([\/$\*])/g,"\\$1");c.regexp=new RegExp("^"+a+"$",b?"i":"");return c}var h={};this.when=function(a,f){var b=d.copy(f);d.isUndefined(b.reloadOnSearch)&&(b.reloadOnSearch=!0);
d.isUndefined(b.caseInsensitiveMatch)&&(b.caseInsensitiveMatch=this.caseInsensitiveMatch);h[a]=d.extend(b,a&&k(a,b));if(a){var c="/"==a[a.length-1]?a.substr(0,a.length-1):a+"/";h[c]=d.extend({redirectTo:a},k(c,b))}return this};this.caseInsensitiveMatch=!1;this.otherwise=function(a){"string"===typeof a&&(a={redirectTo:a});this.when(null,a);return this};this.$get=["$rootScope","$location","$routeParams","$q","$injector","$templateRequest","$sce",function(a,f,b,c,k,q,x){function m(b){var e=s.current;
(v=(p=l())&&e&&p.$$route===e.$$route&&d.equals(p.pathParams,e.pathParams)&&!p.reloadOnSearch&&!w)||!e&&!p||a.$broadcast("$routeChangeStart",p,e).defaultPrevented&&b&&b.preventDefault()}function n(){var u=s.current,e=p;if(v)u.params=e.params,d.copy(u.params,b),a.$broadcast("$routeUpdate",u);else if(e||u)w=!1,(s.current=e)&&e.redirectTo&&(d.isString(e.redirectTo)?f.path(t(e.redirectTo,e.params)).search(e.params).replace():f.url(e.redirectTo(e.pathParams,f.path(),f.search())).replace()),c.when(e).then(function(){if(e){var a=
d.extend({},e.resolve),b,g;d.forEach(a,function(b,e){a[e]=d.isString(b)?k.get(b):k.invoke(b,null,null,e)});d.isDefined(b=e.template)?d.isFunction(b)&&(b=b(e.params)):d.isDefined(g=e.templateUrl)&&(d.isFunction(g)&&(g=g(e.params)),g=x.getTrustedResourceUrl(g),d.isDefined(g)&&(e.loadedTemplateUrl=g,b=q(g)));d.isDefined(b)&&(a.$template=b);return c.all(a)}}).then(function(c){e==s.current&&(e&&(e.locals=c,d.copy(e.params,b)),a.$broadcast("$routeChangeSuccess",e,u))},function(b){e==s.current&&a.$broadcast("$routeChangeError",
e,u,b)})}function l(){var a,b;d.forEach(h,function(c,h){var g;if(g=!b){var k=f.path();g=c.keys;var m={};if(c.regexp)if(k=c.regexp.exec(k)){for(var l=1,n=k.length;l<n;++l){var p=g[l-1],q=k[l];p&&q&&(m[p.name]=q)}g=m}else g=null;else g=null;g=a=g}g&&(b=r(c,{params:d.extend({},f.search(),a),pathParams:a}),b.$$route=c)});return b||h[null]&&r(h[null],{params:{},pathParams:{}})}function t(a,b){var c=[];d.forEach((a||"").split(":"),function(a,d){if(0===d)c.push(a);else{var f=a.match(/(\w+)(?:[?*])?(.*)/),
h=f[1];c.push(b[h]);c.push(f[2]||"");delete b[h]}});return c.join("")}var w=!1,p,v,s={routes:h,reload:function(){w=!0;a.$evalAsync(function(){m();n()})},updateParams:function(a){if(this.current&&this.current.$$route)a=d.extend({},this.current.params,a),f.path(t(this.current.$$route.originalPath,a)),f.search(a);else throw B("norout");}};a.$on("$locationChangeStart",m);a.$on("$locationChangeSuccess",n);return s}]});var B=d.$$minErr("ngRoute");q.provider("$routeParams",function(){this.$get=function(){return{}}});
q.directive("ngView",v);q.directive("ngView",A);v.$inject=["$route","$anchorScroll","$animate"];A.$inject=["$compile","$controller","$route"]})(window,window.angular);
//# sourceMappingURL=angular-route.min.js.map
|
#!/usr/bin/env python
"""Run a single test module and emit JUnit-style XML results.

Usage: <this script> <filename to test>

Results are written by ``xmlrunner`` into a ``logs`` directory created in
the test file's own directory (we chdir there before running).
"""
from __future__ import print_function

import os
import sys
import xmlrunner
import unittest
import imp
from os.path import abspath, dirname, split as splitpath, join as joinpath

import logging
logger = logging.getLogger(__name__)
if not logger.root.handlers:
    import logging.config
    # Configure logging from the ini next to this script, keeping loggers
    # created before this point alive.
    LOGGER_CONFIG_FILE = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'logging.ini')
    logging.config.fileConfig(LOGGER_CONFIG_FILE, disable_existing_loggers=False)

if len(sys.argv) < 2:
    logger.error("Use %s <filename to test>",sys.argv[0])
    sys.exit(-1)

# Load run.py from the parent directory; run.prepare() sets up sys.path so
# the package under test is importable.
run_py = joinpath(dirname(dirname(abspath(__file__))), 'run.py')
run = imp.load_source('sasview_run', run_py)
run.prepare()
#print("\n".join(sys.path))

test_path,test_file = splitpath(abspath(sys.argv[1]))
print("=== testing:",sys.argv[1])
#print(test_path, test_file)
# Strip our CLI argument so unittest.main() doesn't try to parse it.
sys.argv = [sys.argv[0]]
os.chdir(test_path)
sys.path.insert(0, test_path)
# NOTE(review): ``imp`` is deprecated on Python 3 -- presumably kept for
# Python 2 compatibility (see the __future__ import); confirm before
# porting to importlib.
test = imp.load_source('tests',test_file)
unittest.main(test, testRunner=xmlrunner.XMLTestRunner(output='logs'))
|
import React from 'react';
import { FormattedMessage } from 'react-intl';
import { Link } from 'react-router-dom';
import { Container, Row, Col, ListGroup, ListGroupItem, Button } from 'reactstrap';
import FontAwesome from 'react-fontawesome';
import styled from 'styled-components';
// imported components
class SurveyTypeButton extends React.Component { // eslint-disable-line react/prefer-stateless-function
render() {
let {title, selected} = this.props;
if (selected) {
return <Button color="info" block onClick={this.props.onClick}>{title}</Button>
} else {
return <Button color="secondary" block onClick={this.props.onClick}>{title}</Button>
}
}
}
export default SurveyTypeButton;
|
from types import ModuleType, MethodType
from src.all_exceptions import MutatorException
def unbind(f):
    """Return the plain function underlying the bound method ``f``.

    :param f: function to unbind from an object
    :type f: function
    :raises TypeError: if ``f`` is not actually bound to an instance
        (plain functions, module-level callables and classmethod-style
        bindings to a class are all rejected).
    """
    owner = getattr(f, '__self__', None)
    is_bound = (
        owner is not None
        and not isinstance(owner, (ModuleType, type))
    )
    if not is_bound:
        raise TypeError('not a bound method')
    if hasattr(f, '__func__'):
        return f.__func__
    # Fall back to looking the attribute up on the owner's class.
    return getattr(type(f.__self__), f.__name__)
class Context(dict):
    """Name -> object mapping used as a compilation namespace."""

    def get_selector(self, selector_name):
        # Missing selectors resolve to None (same contract as dict.get).
        return self[selector_name] if selector_name in self else None
class Mutator(object):
    """Small runtime code generator.

    Builds function and class objects from strings of source code and keeps
    the compiled results in a ``Context`` namespace that can be reused
    between compilations or reset on demand.
    """

    def __init__(self):
        self.context = Context()

    def get_context(self, new_context=False):
        """Return the active context, replacing it with a fresh one first
        when ``new_context`` is true."""
        if new_context:
            self.context = Context()
        return self.context

    def _build_arg_list(self, arg_list, instance_bound=False):
        """Render the positional part of a generated signature, with a
        leading ``self,`` for instance-bound methods.

        NOTE(review): entries are joined with no separator, so each item in
        ``arg_list`` presumably carries its own trailing comma -- confirm
        against callers before changing.
        """
        preffix = ''
        if instance_bound:
            preffix = 'self,'
        if arg_list:
            # The debug ``print preffix`` statement that used to live here
            # was removed: it spammed stdout on every call and, being a
            # py2-only print statement, broke the module on Python 3.
            return preffix + ''.join(str(i) for i in arg_list)
        return preffix

    def _build_var_args(self, arg_list, variable_args):
        """Return the ``*args, **kwargs`` tail of a generated signature,
        comma-prefixed only when positional arguments precede it."""
        preffix = ','
        if not arg_list:
            preffix = ''
        if variable_args:
            return preffix + '*args, **kwargs'
        return ''

    def define_new_method(self, method_name, args_list, block_code, variable_args=False, instance_bound=False):
        """Compile ``def method_name(...)`` with ``block_code`` as its body
        and return the context holding the new function."""
        method_code = '''def {0}({1}{2}):
    {3}
'''.format(method_name, self._build_arg_list(args_list, instance_bound),
           self._build_var_args(args_list, variable_args),
           block_code)
        return self.compile_code(method_code)

    def compile_code(self, code, new_context=False):
        """Exec a source string into the context and return the context.

        :raises MutatorException: if ``code`` is not a string or fails to
            compile.
        """
        if not isinstance(code, str):
            raise MutatorException(
                'Invalid type for code: {0}. Type should be str. Found: {1}.'.format(code, type(code)))
        context = self.get_context(new_context)
        try:
            exec (code.strip(), globals(), context)
        # ``except SyntaxError as ...`` replaces the py2-only comma form;
        # the bound name was unused, so it is dropped entirely.
        except SyntaxError:
            raise MutatorException('Failed compiling code: {0}'.format(code))
        return context

    def get_signature_string(self, code_function):
        """Extract the function name from a ``def`` source string."""
        return code_function[code_function.find("def") + 3:code_function.find("(")].strip()

    def create_class(self, class_name, bases=(object,), bound_context=False):
        """Create a new class whose namespace is either this mutator's
        context (``bound_context``) or a fresh empty one."""
        context = self.get_context() if bound_context else Context()
        new_class = type(class_name, bases, context)
        return new_class

    def define_instance_method(self, instance, method_name, args_list, block_code, variable_args=False,
                               instance_bound=False):
        """Compile a new method and attach it to ``instance`` under
        ``method_name``; returns the attached function object."""
        self.define_new_method(method_name, args_list, block_code, variable_args, instance_bound)
        setattr(instance, method_name, self.context[method_name])
        return instance.__dict__[method_name]
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ... import opcodes
from ...serialization.serializables import FieldTypes, StringField, ListField
from .base import Operand
from .core import TileableOperandMixin
class Fetch(Operand):
    # Operand that retrieves an already-computed result by its storage key
    # instead of computing it from inputs.
    _op_type_ = opcodes.FETCH

    # Storage key of the result to fetch.
    source_key = StringField("source_key", default=None)
class FetchMixin(TileableOperandMixin):
    """Tileable behaviour shared by fetch operands: they never take
    upstream inputs, are never tiled, and execute as a no-op."""

    def check_inputs(self, inputs):
        """Reject any inputs -- fetch operands read stored data only.

        :raises ValueError: if any inputs are supplied.
        """
        # ``inputs and len(inputs) > 0`` collapsed to the idiomatic
        # truthiness check; None and empty sequences both pass.
        if inputs:
            raise ValueError(f"{type(self).__name__} has no inputs")

    @classmethod
    def tile(cls, op):
        """Fetch operands are materialized by the scheduler, not tiled."""
        raise NotImplementedError("Fetch tile cannot be handled by operand itself")

    @classmethod
    def execute(cls, ctx, op):
        """
        Fetch operand needs nothing to do.
        """
class FetchShuffle(Operand):
    # Operand that pulls shuffle (mapper) outputs produced elsewhere.
    _op_type_ = opcodes.FETCH_SHUFFLE

    # Data keys of the mapper outputs to fetch.
    source_keys = ListField("source_keys", FieldTypes.string)
    # Chunk indexes corresponding to each source key.
    source_idxes = ListField("source_idxes", FieldTypes.tuple(FieldTypes.uint64))
    # NOTE(review): presumably the mapper ordinal each key came from (the
    # uint16 element type suggests small ids) -- confirm with callers.
    source_mappers = ListField("source_mappers", FieldTypes.uint16)
|
'use strict';
const md5File = require('md5-file/promise');
module.exports = function hashAssetFiles(asset) {
return Promise.all(asset.files.map(md5File)).then(hashes => {
asset.fileHashes = hashes;
return asset;
});
};
|
(window["webpackJsonp"] = window["webpackJsonp"] || []).push([[12],{
/***/ "./node_modules/immutable/dist/immutable.js":
/*!**************************************************!*\
!*** ./node_modules/immutable/dist/immutable.js ***!
\**************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
eval("/**\n * Copyright (c) 2014-present, Facebook, Inc.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n(function (global, factory) {\n true ? module.exports = factory() :\n undefined;\n}(this, function () { 'use strict';var SLICE$0 = Array.prototype.slice;\n\n function createClass(ctor, superClass) {\n if (superClass) {\n ctor.prototype = Object.create(superClass.prototype);\n }\n ctor.prototype.constructor = ctor;\n }\n\n function Iterable(value) {\n return isIterable(value) ? value : Seq(value);\n }\n\n\n createClass(KeyedIterable, Iterable);\n function KeyedIterable(value) {\n return isKeyed(value) ? value : KeyedSeq(value);\n }\n\n\n createClass(IndexedIterable, Iterable);\n function IndexedIterable(value) {\n return isIndexed(value) ? value : IndexedSeq(value);\n }\n\n\n createClass(SetIterable, Iterable);\n function SetIterable(value) {\n return isIterable(value) && !isAssociative(value) ? 
value : SetSeq(value);\n }\n\n\n\n function isIterable(maybeIterable) {\n return !!(maybeIterable && maybeIterable[IS_ITERABLE_SENTINEL]);\n }\n\n function isKeyed(maybeKeyed) {\n return !!(maybeKeyed && maybeKeyed[IS_KEYED_SENTINEL]);\n }\n\n function isIndexed(maybeIndexed) {\n return !!(maybeIndexed && maybeIndexed[IS_INDEXED_SENTINEL]);\n }\n\n function isAssociative(maybeAssociative) {\n return isKeyed(maybeAssociative) || isIndexed(maybeAssociative);\n }\n\n function isOrdered(maybeOrdered) {\n return !!(maybeOrdered && maybeOrdered[IS_ORDERED_SENTINEL]);\n }\n\n Iterable.isIterable = isIterable;\n Iterable.isKeyed = isKeyed;\n Iterable.isIndexed = isIndexed;\n Iterable.isAssociative = isAssociative;\n Iterable.isOrdered = isOrdered;\n\n Iterable.Keyed = KeyedIterable;\n Iterable.Indexed = IndexedIterable;\n Iterable.Set = SetIterable;\n\n\n var IS_ITERABLE_SENTINEL = '@@__IMMUTABLE_ITERABLE__@@';\n var IS_KEYED_SENTINEL = '@@__IMMUTABLE_KEYED__@@';\n var IS_INDEXED_SENTINEL = '@@__IMMUTABLE_INDEXED__@@';\n var IS_ORDERED_SENTINEL = '@@__IMMUTABLE_ORDERED__@@';\n\n // Used for setting prototype methods that IE8 chokes on.\n var DELETE = 'delete';\n\n // Constants describing the size of trie nodes.\n var SHIFT = 5; // Resulted in best performance after ______?\n var SIZE = 1 << SHIFT;\n var MASK = SIZE - 1;\n\n // A consistent shared value representing \"not set\" which equals nothing other\n // than itself, and nothing that could be provided externally.\n var NOT_SET = {};\n\n // Boolean references, Rough equivalent of `bool &`.\n var CHANGE_LENGTH = { value: false };\n var DID_ALTER = { value: false };\n\n function MakeRef(ref) {\n ref.value = false;\n return ref;\n }\n\n function SetRef(ref) {\n ref && (ref.value = true);\n }\n\n // A function which returns a value representing an \"owner\" for transient writes\n // to tries. 
The return value will only ever equal itself, and will not equal\n // the return of any subsequent call of this function.\n function OwnerID() {}\n\n // http://jsperf.com/copy-array-inline\n function arrCopy(arr, offset) {\n offset = offset || 0;\n var len = Math.max(0, arr.length - offset);\n var newArr = new Array(len);\n for (var ii = 0; ii < len; ii++) {\n newArr[ii] = arr[ii + offset];\n }\n return newArr;\n }\n\n function ensureSize(iter) {\n if (iter.size === undefined) {\n iter.size = iter.__iterate(returnTrue);\n }\n return iter.size;\n }\n\n function wrapIndex(iter, index) {\n // This implements \"is array index\" which the ECMAString spec defines as:\n //\n // A String property name P is an array index if and only if\n // ToString(ToUint32(P)) is equal to P and ToUint32(P) is not equal\n // to 2^32−1.\n //\n // http://www.ecma-international.org/ecma-262/6.0/#sec-array-exotic-objects\n if (typeof index !== 'number') {\n var uint32Index = index >>> 0; // N >>> 0 is shorthand for ToUint32\n if ('' + uint32Index !== index || uint32Index === 4294967295) {\n return NaN;\n }\n index = uint32Index;\n }\n return index < 0 ? 
ensureSize(iter) + index : index;\n }\n\n function returnTrue() {\n return true;\n }\n\n function wholeSlice(begin, end, size) {\n return (begin === 0 || (size !== undefined && begin <= -size)) &&\n (end === undefined || (size !== undefined && end >= size));\n }\n\n function resolveBegin(begin, size) {\n return resolveIndex(begin, size, 0);\n }\n\n function resolveEnd(end, size) {\n return resolveIndex(end, size, size);\n }\n\n function resolveIndex(index, size, defaultIndex) {\n return index === undefined ?\n defaultIndex :\n index < 0 ?\n Math.max(0, size + index) :\n size === undefined ?\n index :\n Math.min(size, index);\n }\n\n /* global Symbol */\n\n var ITERATE_KEYS = 0;\n var ITERATE_VALUES = 1;\n var ITERATE_ENTRIES = 2;\n\n var REAL_ITERATOR_SYMBOL = typeof Symbol === 'function' && Symbol.iterator;\n var FAUX_ITERATOR_SYMBOL = '@@iterator';\n\n var ITERATOR_SYMBOL = REAL_ITERATOR_SYMBOL || FAUX_ITERATOR_SYMBOL;\n\n\n function Iterator(next) {\n this.next = next;\n }\n\n Iterator.prototype.toString = function() {\n return '[Iterator]';\n };\n\n\n Iterator.KEYS = ITERATE_KEYS;\n Iterator.VALUES = ITERATE_VALUES;\n Iterator.ENTRIES = ITERATE_ENTRIES;\n\n Iterator.prototype.inspect =\n Iterator.prototype.toSource = function () { return this.toString(); }\n Iterator.prototype[ITERATOR_SYMBOL] = function () {\n return this;\n };\n\n\n function iteratorValue(type, k, v, iteratorResult) {\n var value = type === 0 ? k : type === 1 ? v : [k, v];\n iteratorResult ? 
(iteratorResult.value = value) : (iteratorResult = {\n value: value, done: false\n });\n return iteratorResult;\n }\n\n function iteratorDone() {\n return { value: undefined, done: true };\n }\n\n function hasIterator(maybeIterable) {\n return !!getIteratorFn(maybeIterable);\n }\n\n function isIterator(maybeIterator) {\n return maybeIterator && typeof maybeIterator.next === 'function';\n }\n\n function getIterator(iterable) {\n var iteratorFn = getIteratorFn(iterable);\n return iteratorFn && iteratorFn.call(iterable);\n }\n\n function getIteratorFn(iterable) {\n var iteratorFn = iterable && (\n (REAL_ITERATOR_SYMBOL && iterable[REAL_ITERATOR_SYMBOL]) ||\n iterable[FAUX_ITERATOR_SYMBOL]\n );\n if (typeof iteratorFn === 'function') {\n return iteratorFn;\n }\n }\n\n function isArrayLike(value) {\n return value && typeof value.length === 'number';\n }\n\n createClass(Seq, Iterable);\n function Seq(value) {\n return value === null || value === undefined ? emptySequence() :\n isIterable(value) ? value.toSeq() : seqFromValue(value);\n }\n\n Seq.of = function(/*...values*/) {\n return Seq(arguments);\n };\n\n Seq.prototype.toSeq = function() {\n return this;\n };\n\n Seq.prototype.toString = function() {\n return this.__toString('Seq {', '}');\n };\n\n Seq.prototype.cacheResult = function() {\n if (!this._cache && this.__iterateUncached) {\n this._cache = this.entrySeq().toArray();\n this.size = this._cache.length;\n }\n return this;\n };\n\n // abstract __iterateUncached(fn, reverse)\n\n Seq.prototype.__iterate = function(fn, reverse) {\n return seqIterate(this, fn, reverse, true);\n };\n\n // abstract __iteratorUncached(type, reverse)\n\n Seq.prototype.__iterator = function(type, reverse) {\n return seqIterator(this, type, reverse, true);\n };\n\n\n\n createClass(KeyedSeq, Seq);\n function KeyedSeq(value) {\n return value === null || value === undefined ?\n emptySequence().toKeyedSeq() :\n isIterable(value) ?\n (isKeyed(value) ? 
value.toSeq() : value.fromEntrySeq()) :\n keyedSeqFromValue(value);\n }\n\n KeyedSeq.prototype.toKeyedSeq = function() {\n return this;\n };\n\n\n\n createClass(IndexedSeq, Seq);\n function IndexedSeq(value) {\n return value === null || value === undefined ? emptySequence() :\n !isIterable(value) ? indexedSeqFromValue(value) :\n isKeyed(value) ? value.entrySeq() : value.toIndexedSeq();\n }\n\n IndexedSeq.of = function(/*...values*/) {\n return IndexedSeq(arguments);\n };\n\n IndexedSeq.prototype.toIndexedSeq = function() {\n return this;\n };\n\n IndexedSeq.prototype.toString = function() {\n return this.__toString('Seq [', ']');\n };\n\n IndexedSeq.prototype.__iterate = function(fn, reverse) {\n return seqIterate(this, fn, reverse, false);\n };\n\n IndexedSeq.prototype.__iterator = function(type, reverse) {\n return seqIterator(this, type, reverse, false);\n };\n\n\n\n createClass(SetSeq, Seq);\n function SetSeq(value) {\n return (\n value === null || value === undefined ? emptySequence() :\n !isIterable(value) ? indexedSeqFromValue(value) :\n isKeyed(value) ? value.entrySeq() : value\n ).toSetSeq();\n }\n\n SetSeq.of = function(/*...values*/) {\n return SetSeq(arguments);\n };\n\n SetSeq.prototype.toSetSeq = function() {\n return this;\n };\n\n\n\n Seq.isSeq = isSeq;\n Seq.Keyed = KeyedSeq;\n Seq.Set = SetSeq;\n Seq.Indexed = IndexedSeq;\n\n var IS_SEQ_SENTINEL = '@@__IMMUTABLE_SEQ__@@';\n\n Seq.prototype[IS_SEQ_SENTINEL] = true;\n\n\n\n createClass(ArraySeq, IndexedSeq);\n function ArraySeq(array) {\n this._array = array;\n this.size = array.length;\n }\n\n ArraySeq.prototype.get = function(index, notSetValue) {\n return this.has(index) ? this._array[wrapIndex(this, index)] : notSetValue;\n };\n\n ArraySeq.prototype.__iterate = function(fn, reverse) {\n var array = this._array;\n var maxIndex = array.length - 1;\n for (var ii = 0; ii <= maxIndex; ii++) {\n if (fn(array[reverse ? 
maxIndex - ii : ii], ii, this) === false) {\n return ii + 1;\n }\n }\n return ii;\n };\n\n ArraySeq.prototype.__iterator = function(type, reverse) {\n var array = this._array;\n var maxIndex = array.length - 1;\n var ii = 0;\n return new Iterator(function() \n {return ii > maxIndex ?\n iteratorDone() :\n iteratorValue(type, ii, array[reverse ? maxIndex - ii++ : ii++])}\n );\n };\n\n\n\n createClass(ObjectSeq, KeyedSeq);\n function ObjectSeq(object) {\n var keys = Object.keys(object);\n this._object = object;\n this._keys = keys;\n this.size = keys.length;\n }\n\n ObjectSeq.prototype.get = function(key, notSetValue) {\n if (notSetValue !== undefined && !this.has(key)) {\n return notSetValue;\n }\n return this._object[key];\n };\n\n ObjectSeq.prototype.has = function(key) {\n return this._object.hasOwnProperty(key);\n };\n\n ObjectSeq.prototype.__iterate = function(fn, reverse) {\n var object = this._object;\n var keys = this._keys;\n var maxIndex = keys.length - 1;\n for (var ii = 0; ii <= maxIndex; ii++) {\n var key = keys[reverse ? maxIndex - ii : ii];\n if (fn(object[key], key, this) === false) {\n return ii + 1;\n }\n }\n return ii;\n };\n\n ObjectSeq.prototype.__iterator = function(type, reverse) {\n var object = this._object;\n var keys = this._keys;\n var maxIndex = keys.length - 1;\n var ii = 0;\n return new Iterator(function() {\n var key = keys[reverse ? 
maxIndex - ii : ii];\n return ii++ > maxIndex ?\n iteratorDone() :\n iteratorValue(type, key, object[key]);\n });\n };\n\n ObjectSeq.prototype[IS_ORDERED_SENTINEL] = true;\n\n\n createClass(IterableSeq, IndexedSeq);\n function IterableSeq(iterable) {\n this._iterable = iterable;\n this.size = iterable.length || iterable.size;\n }\n\n IterableSeq.prototype.__iterateUncached = function(fn, reverse) {\n if (reverse) {\n return this.cacheResult().__iterate(fn, reverse);\n }\n var iterable = this._iterable;\n var iterator = getIterator(iterable);\n var iterations = 0;\n if (isIterator(iterator)) {\n var step;\n while (!(step = iterator.next()).done) {\n if (fn(step.value, iterations++, this) === false) {\n break;\n }\n }\n }\n return iterations;\n };\n\n IterableSeq.prototype.__iteratorUncached = function(type, reverse) {\n if (reverse) {\n return this.cacheResult().__iterator(type, reverse);\n }\n var iterable = this._iterable;\n var iterator = getIterator(iterable);\n if (!isIterator(iterator)) {\n return new Iterator(iteratorDone);\n }\n var iterations = 0;\n return new Iterator(function() {\n var step = iterator.next();\n return step.done ? 
step : iteratorValue(type, iterations++, step.value);\n });\n };\n\n\n\n createClass(IteratorSeq, IndexedSeq);\n function IteratorSeq(iterator) {\n this._iterator = iterator;\n this._iteratorCache = [];\n }\n\n IteratorSeq.prototype.__iterateUncached = function(fn, reverse) {\n if (reverse) {\n return this.cacheResult().__iterate(fn, reverse);\n }\n var iterator = this._iterator;\n var cache = this._iteratorCache;\n var iterations = 0;\n while (iterations < cache.length) {\n if (fn(cache[iterations], iterations++, this) === false) {\n return iterations;\n }\n }\n var step;\n while (!(step = iterator.next()).done) {\n var val = step.value;\n cache[iterations] = val;\n if (fn(val, iterations++, this) === false) {\n break;\n }\n }\n return iterations;\n };\n\n IteratorSeq.prototype.__iteratorUncached = function(type, reverse) {\n if (reverse) {\n return this.cacheResult().__iterator(type, reverse);\n }\n var iterator = this._iterator;\n var cache = this._iteratorCache;\n var iterations = 0;\n return new Iterator(function() {\n if (iterations >= cache.length) {\n var step = iterator.next();\n if (step.done) {\n return step;\n }\n cache[iterations] = step.value;\n }\n return iteratorValue(type, iterations, cache[iterations++]);\n });\n };\n\n\n\n\n // # pragma Helper functions\n\n function isSeq(maybeSeq) {\n return !!(maybeSeq && maybeSeq[IS_SEQ_SENTINEL]);\n }\n\n var EMPTY_SEQ;\n\n function emptySequence() {\n return EMPTY_SEQ || (EMPTY_SEQ = new ArraySeq([]));\n }\n\n function keyedSeqFromValue(value) {\n var seq =\n Array.isArray(value) ? new ArraySeq(value).fromEntrySeq() :\n isIterator(value) ? new IteratorSeq(value).fromEntrySeq() :\n hasIterator(value) ? new IterableSeq(value).fromEntrySeq() :\n typeof value === 'object' ? 
new ObjectSeq(value) :\n undefined;\n if (!seq) {\n throw new TypeError(\n 'Expected Array or iterable object of [k, v] entries, '+\n 'or keyed object: ' + value\n );\n }\n return seq;\n }\n\n function indexedSeqFromValue(value) {\n var seq = maybeIndexedSeqFromValue(value);\n if (!seq) {\n throw new TypeError(\n 'Expected Array or iterable object of values: ' + value\n );\n }\n return seq;\n }\n\n function seqFromValue(value) {\n var seq = maybeIndexedSeqFromValue(value) ||\n (typeof value === 'object' && new ObjectSeq(value));\n if (!seq) {\n throw new TypeError(\n 'Expected Array or iterable object of values, or keyed object: ' + value\n );\n }\n return seq;\n }\n\n function maybeIndexedSeqFromValue(value) {\n return (\n isArrayLike(value) ? new ArraySeq(value) :\n isIterator(value) ? new IteratorSeq(value) :\n hasIterator(value) ? new IterableSeq(value) :\n undefined\n );\n }\n\n function seqIterate(seq, fn, reverse, useKeys) {\n var cache = seq._cache;\n if (cache) {\n var maxIndex = cache.length - 1;\n for (var ii = 0; ii <= maxIndex; ii++) {\n var entry = cache[reverse ? maxIndex - ii : ii];\n if (fn(entry[1], useKeys ? entry[0] : ii, seq) === false) {\n return ii + 1;\n }\n }\n return ii;\n }\n return seq.__iterateUncached(fn, reverse);\n }\n\n function seqIterator(seq, type, reverse, useKeys) {\n var cache = seq._cache;\n if (cache) {\n var maxIndex = cache.length - 1;\n var ii = 0;\n return new Iterator(function() {\n var entry = cache[reverse ? maxIndex - ii : ii];\n return ii++ > maxIndex ?\n iteratorDone() :\n iteratorValue(type, useKeys ? 
entry[0] : ii - 1, entry[1]);\n });\n }\n return seq.__iteratorUncached(type, reverse);\n }\n\n function fromJS(json, converter) {\n return converter ?\n fromJSWith(converter, json, '', {'': json}) :\n fromJSDefault(json);\n }\n\n function fromJSWith(converter, json, key, parentJSON) {\n if (Array.isArray(json)) {\n return converter.call(parentJSON, key, IndexedSeq(json).map(function(v, k) {return fromJSWith(converter, v, k, json)}));\n }\n if (isPlainObj(json)) {\n return converter.call(parentJSON, key, KeyedSeq(json).map(function(v, k) {return fromJSWith(converter, v, k, json)}));\n }\n return json;\n }\n\n function fromJSDefault(json) {\n if (Array.isArray(json)) {\n return IndexedSeq(json).map(fromJSDefault).toList();\n }\n if (isPlainObj(json)) {\n return KeyedSeq(json).map(fromJSDefault).toMap();\n }\n return json;\n }\n\n function isPlainObj(value) {\n return value && (value.constructor === Object || value.constructor === undefined);\n }\n\n /**\n * An extension of the \"same-value\" algorithm as [described for use by ES6 Map\n * and Set](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Map#Key_equality)\n *\n * NaN is considered the same as NaN, however -0 and 0 are considered the same\n * value, which is different from the algorithm described by\n * [`Object.is`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/is).\n *\n * This is extended further to allow Objects to describe the values they\n * represent, by way of `valueOf` or `equals` (and `hashCode`).\n *\n * Note: because of this extension, the key equality of Immutable.Map and the\n * value equality of Immutable.Set will differ from ES6 Map and Set.\n *\n * ### Defining custom values\n *\n * The easiest way to describe the value an object represents is by implementing\n * `valueOf`. 
For example, `Date` represents a value by returning a unix\n * timestamp for `valueOf`:\n *\n * var date1 = new Date(1234567890000); // Fri Feb 13 2009 ...\n * var date2 = new Date(1234567890000);\n * date1.valueOf(); // 1234567890000\n * assert( date1 !== date2 );\n * assert( Immutable.is( date1, date2 ) );\n *\n * Note: overriding `valueOf` may have other implications if you use this object\n * where JavaScript expects a primitive, such as implicit string coercion.\n *\n * For more complex types, especially collections, implementing `valueOf` may\n * not be performant. An alternative is to implement `equals` and `hashCode`.\n *\n * `equals` takes another object, presumably of similar type, and returns true\n * if the it is equal. Equality is symmetrical, so the same result should be\n * returned if this and the argument are flipped.\n *\n * assert( a.equals(b) === b.equals(a) );\n *\n * `hashCode` returns a 32bit integer number representing the object which will\n * be used to determine how to store the value object in a Map or Set. You must\n * provide both or neither methods, one must not exist without the other.\n *\n * Also, an important relationship between these methods must be upheld: if two\n * values are equal, they *must* return the same hashCode. 
If the values are not\n * equal, they might have the same hashCode; this is called a hash collision,\n * and while undesirable for performance reasons, it is acceptable.\n *\n * if (a.equals(b)) {\n * assert( a.hashCode() === b.hashCode() );\n * }\n *\n * All Immutable collections implement `equals` and `hashCode`.\n *\n */\n function is(valueA, valueB) {\n if (valueA === valueB || (valueA !== valueA && valueB !== valueB)) {\n return true;\n }\n if (!valueA || !valueB) {\n return false;\n }\n if (typeof valueA.valueOf === 'function' &&\n typeof valueB.valueOf === 'function') {\n valueA = valueA.valueOf();\n valueB = valueB.valueOf();\n if (valueA === valueB || (valueA !== valueA && valueB !== valueB)) {\n return true;\n }\n if (!valueA || !valueB) {\n return false;\n }\n }\n if (typeof valueA.equals === 'function' &&\n typeof valueB.equals === 'function' &&\n valueA.equals(valueB)) {\n return true;\n }\n return false;\n }\n\n function deepEqual(a, b) {\n if (a === b) {\n return true;\n }\n\n if (\n !isIterable(b) ||\n a.size !== undefined && b.size !== undefined && a.size !== b.size ||\n a.__hash !== undefined && b.__hash !== undefined && a.__hash !== b.__hash ||\n isKeyed(a) !== isKeyed(b) ||\n isIndexed(a) !== isIndexed(b) ||\n isOrdered(a) !== isOrdered(b)\n ) {\n return false;\n }\n\n if (a.size === 0 && b.size === 0) {\n return true;\n }\n\n var notAssociative = !isAssociative(a);\n\n if (isOrdered(a)) {\n var entries = a.entries();\n return b.every(function(v, k) {\n var entry = entries.next().value;\n return entry && is(entry[1], v) && (notAssociative || is(entry[0], k));\n }) && entries.next().done;\n }\n\n var flipped = false;\n\n if (a.size === undefined) {\n if (b.size === undefined) {\n if (typeof a.cacheResult === 'function') {\n a.cacheResult();\n }\n } else {\n flipped = true;\n var _ = a;\n a = b;\n b = _;\n }\n }\n\n var allEqual = true;\n var bSize = b.__iterate(function(v, k) {\n if (notAssociative ? !a.has(v) :\n flipped ? 
!is(v, a.get(k, NOT_SET)) : !is(a.get(k, NOT_SET), v)) {\n allEqual = false;\n return false;\n }\n });\n\n return allEqual && a.size === bSize;\n }\n\n createClass(Repeat, IndexedSeq);\n\n function Repeat(value, times) {\n if (!(this instanceof Repeat)) {\n return new Repeat(value, times);\n }\n this._value = value;\n this.size = times === undefined ? Infinity : Math.max(0, times);\n if (this.size === 0) {\n if (EMPTY_REPEAT) {\n return EMPTY_REPEAT;\n }\n EMPTY_REPEAT = this;\n }\n }\n\n Repeat.prototype.toString = function() {\n if (this.size === 0) {\n return 'Repeat []';\n }\n return 'Repeat [ ' + this._value + ' ' + this.size + ' times ]';\n };\n\n Repeat.prototype.get = function(index, notSetValue) {\n return this.has(index) ? this._value : notSetValue;\n };\n\n Repeat.prototype.includes = function(searchValue) {\n return is(this._value, searchValue);\n };\n\n Repeat.prototype.slice = function(begin, end) {\n var size = this.size;\n return wholeSlice(begin, end, size) ? this :\n new Repeat(this._value, resolveEnd(end, size) - resolveBegin(begin, size));\n };\n\n Repeat.prototype.reverse = function() {\n return this;\n };\n\n Repeat.prototype.indexOf = function(searchValue) {\n if (is(this._value, searchValue)) {\n return 0;\n }\n return -1;\n };\n\n Repeat.prototype.lastIndexOf = function(searchValue) {\n if (is(this._value, searchValue)) {\n return this.size;\n }\n return -1;\n };\n\n Repeat.prototype.__iterate = function(fn, reverse) {\n for (var ii = 0; ii < this.size; ii++) {\n if (fn(this._value, ii, this) === false) {\n return ii + 1;\n }\n }\n return ii;\n };\n\n Repeat.prototype.__iterator = function(type, reverse) {var this$0 = this;\n var ii = 0;\n return new Iterator(function() \n {return ii < this$0.size ? 
iteratorValue(type, ii++, this$0._value) : iteratorDone()}\n );\n };\n\n Repeat.prototype.equals = function(other) {\n return other instanceof Repeat ?\n is(this._value, other._value) :\n deepEqual(other);\n };\n\n\n var EMPTY_REPEAT;\n\n function invariant(condition, error) {\n if (!condition) throw new Error(error);\n }\n\n createClass(Range, IndexedSeq);\n\n function Range(start, end, step) {\n if (!(this instanceof Range)) {\n return new Range(start, end, step);\n }\n invariant(step !== 0, 'Cannot step a Range by 0');\n start = start || 0;\n if (end === undefined) {\n end = Infinity;\n }\n step = step === undefined ? 1 : Math.abs(step);\n if (end < start) {\n step = -step;\n }\n this._start = start;\n this._end = end;\n this._step = step;\n this.size = Math.max(0, Math.ceil((end - start) / step - 1) + 1);\n if (this.size === 0) {\n if (EMPTY_RANGE) {\n return EMPTY_RANGE;\n }\n EMPTY_RANGE = this;\n }\n }\n\n Range.prototype.toString = function() {\n if (this.size === 0) {\n return 'Range []';\n }\n return 'Range [ ' +\n this._start + '...' + this._end +\n (this._step !== 1 ? 
' by ' + this._step : '') +\n ' ]';\n };\n\n Range.prototype.get = function(index, notSetValue) {\n return this.has(index) ?\n this._start + wrapIndex(this, index) * this._step :\n notSetValue;\n };\n\n Range.prototype.includes = function(searchValue) {\n var possibleIndex = (searchValue - this._start) / this._step;\n return possibleIndex >= 0 &&\n possibleIndex < this.size &&\n possibleIndex === Math.floor(possibleIndex);\n };\n\n Range.prototype.slice = function(begin, end) {\n if (wholeSlice(begin, end, this.size)) {\n return this;\n }\n begin = resolveBegin(begin, this.size);\n end = resolveEnd(end, this.size);\n if (end <= begin) {\n return new Range(0, 0);\n }\n return new Range(this.get(begin, this._end), this.get(end, this._end), this._step);\n };\n\n Range.prototype.indexOf = function(searchValue) {\n var offsetValue = searchValue - this._start;\n if (offsetValue % this._step === 0) {\n var index = offsetValue / this._step;\n if (index >= 0 && index < this.size) {\n return index\n }\n }\n return -1;\n };\n\n Range.prototype.lastIndexOf = function(searchValue) {\n return this.indexOf(searchValue);\n };\n\n Range.prototype.__iterate = function(fn, reverse) {\n var maxIndex = this.size - 1;\n var step = this._step;\n var value = reverse ? this._start + maxIndex * step : this._start;\n for (var ii = 0; ii <= maxIndex; ii++) {\n if (fn(value, ii, this) === false) {\n return ii + 1;\n }\n value += reverse ? -step : step;\n }\n return ii;\n };\n\n Range.prototype.__iterator = function(type, reverse) {\n var maxIndex = this.size - 1;\n var step = this._step;\n var value = reverse ? this._start + maxIndex * step : this._start;\n var ii = 0;\n return new Iterator(function() {\n var v = value;\n value += reverse ? -step : step;\n return ii > maxIndex ? 
iteratorDone() : iteratorValue(type, ii++, v);\n });\n };\n\n Range.prototype.equals = function(other) {\n return other instanceof Range ?\n this._start === other._start &&\n this._end === other._end &&\n this._step === other._step :\n deepEqual(this, other);\n };\n\n\n var EMPTY_RANGE;\n\n createClass(Collection, Iterable);\n function Collection() {\n throw TypeError('Abstract');\n }\n\n\n createClass(KeyedCollection, Collection);function KeyedCollection() {}\n\n createClass(IndexedCollection, Collection);function IndexedCollection() {}\n\n createClass(SetCollection, Collection);function SetCollection() {}\n\n\n Collection.Keyed = KeyedCollection;\n Collection.Indexed = IndexedCollection;\n Collection.Set = SetCollection;\n\n var imul =\n typeof Math.imul === 'function' && Math.imul(0xffffffff, 2) === -2 ?\n Math.imul :\n function imul(a, b) {\n a = a | 0; // int\n b = b | 0; // int\n var c = a & 0xffff;\n var d = b & 0xffff;\n // Shift by 0 fixes the sign on the high part.\n return (c * d) + ((((a >>> 16) * d + c * (b >>> 16)) << 16) >>> 0) | 0; // int\n };\n\n // v8 has an optimization for storing 31-bit signed numbers.\n // Values which have either 00 or 11 as the high order bits qualify.\n // This function drops the highest order bit in a signed number, maintaining\n // the sign bit.\n function smi(i32) {\n return ((i32 >>> 1) & 0x40000000) | (i32 & 0xBFFFFFFF);\n }\n\n function hash(o) {\n if (o === false || o === null || o === undefined) {\n return 0;\n }\n if (typeof o.valueOf === 'function') {\n o = o.valueOf();\n if (o === false || o === null || o === undefined) {\n return 0;\n }\n }\n if (o === true) {\n return 1;\n }\n var type = typeof o;\n if (type === 'number') {\n if (o !== o || o === Infinity) {\n return 0;\n }\n var h = o | 0;\n if (h !== o) {\n h ^= o * 0xFFFFFFFF;\n }\n while (o > 0xFFFFFFFF) {\n o /= 0xFFFFFFFF;\n h ^= o;\n }\n return smi(h);\n }\n if (type === 'string') {\n return o.length > STRING_HASH_CACHE_MIN_STRLEN ? 
cachedHashString(o) : hashString(o);\n }\n if (typeof o.hashCode === 'function') {\n return o.hashCode();\n }\n if (type === 'object') {\n return hashJSObj(o);\n }\n if (typeof o.toString === 'function') {\n return hashString(o.toString());\n }\n throw new Error('Value type ' + type + ' cannot be hashed.');\n }\n\n function cachedHashString(string) {\n var hash = stringHashCache[string];\n if (hash === undefined) {\n hash = hashString(string);\n if (STRING_HASH_CACHE_SIZE === STRING_HASH_CACHE_MAX_SIZE) {\n STRING_HASH_CACHE_SIZE = 0;\n stringHashCache = {};\n }\n STRING_HASH_CACHE_SIZE++;\n stringHashCache[string] = hash;\n }\n return hash;\n }\n\n // http://jsperf.com/hashing-strings\n function hashString(string) {\n // This is the hash from JVM\n // The hash code for a string is computed as\n // s[0] * 31 ^ (n - 1) + s[1] * 31 ^ (n - 2) + ... + s[n - 1],\n // where s[i] is the ith character of the string and n is the length of\n // the string. We \"mod\" the result to make it between 0 (inclusive) and 2^31\n // (exclusive) by dropping high bits.\n var hash = 0;\n for (var ii = 0; ii < string.length; ii++) {\n hash = 31 * hash + string.charCodeAt(ii) | 0;\n }\n return smi(hash);\n }\n\n function hashJSObj(obj) {\n var hash;\n if (usingWeakMap) {\n hash = weakMap.get(obj);\n if (hash !== undefined) {\n return hash;\n }\n }\n\n hash = obj[UID_HASH_KEY];\n if (hash !== undefined) {\n return hash;\n }\n\n if (!canDefineProperty) {\n hash = obj.propertyIsEnumerable && obj.propertyIsEnumerable[UID_HASH_KEY];\n if (hash !== undefined) {\n return hash;\n }\n\n hash = getIENodeHash(obj);\n if (hash !== undefined) {\n return hash;\n }\n }\n\n hash = ++objHashUID;\n if (objHashUID & 0x40000000) {\n objHashUID = 0;\n }\n\n if (usingWeakMap) {\n weakMap.set(obj, hash);\n } else if (isExtensible !== undefined && isExtensible(obj) === false) {\n throw new Error('Non-extensible objects are not allowed as keys.');\n } else if (canDefineProperty) {\n Object.defineProperty(obj, 
UID_HASH_KEY, {\n 'enumerable': false,\n 'configurable': false,\n 'writable': false,\n 'value': hash\n });\n } else if (obj.propertyIsEnumerable !== undefined &&\n obj.propertyIsEnumerable === obj.constructor.prototype.propertyIsEnumerable) {\n // Since we can't define a non-enumerable property on the object\n // we'll hijack one of the less-used non-enumerable properties to\n // save our hash on it. Since this is a function it will not show up in\n // `JSON.stringify` which is what we want.\n obj.propertyIsEnumerable = function() {\n return this.constructor.prototype.propertyIsEnumerable.apply(this, arguments);\n };\n obj.propertyIsEnumerable[UID_HASH_KEY] = hash;\n } else if (obj.nodeType !== undefined) {\n // At this point we couldn't get the IE `uniqueID` to use as a hash\n // and we couldn't use a non-enumerable property to exploit the\n // dontEnum bug so we simply add the `UID_HASH_KEY` on the node\n // itself.\n obj[UID_HASH_KEY] = hash;\n } else {\n throw new Error('Unable to set a non-enumerable property on object.');\n }\n\n return hash;\n }\n\n // Get references to ES5 object methods.\n var isExtensible = Object.isExtensible;\n\n // True if Object.defineProperty works as expected. IE8 fails this test.\n var canDefineProperty = (function() {\n try {\n Object.defineProperty({}, '@', {});\n return true;\n } catch (e) {\n return false;\n }\n }());\n\n // IE has a `uniqueID` property on DOM nodes. 
We can construct the hash from it\n // and avoid memory leaks from the IE cloneNode bug.\n function getIENodeHash(node) {\n if (node && node.nodeType > 0) {\n switch (node.nodeType) {\n case 1: // Element\n return node.uniqueID;\n case 9: // Document\n return node.documentElement && node.documentElement.uniqueID;\n }\n }\n }\n\n // If possible, use a WeakMap.\n var usingWeakMap = typeof WeakMap === 'function';\n var weakMap;\n if (usingWeakMap) {\n weakMap = new WeakMap();\n }\n\n var objHashUID = 0;\n\n var UID_HASH_KEY = '__immutablehash__';\n if (typeof Symbol === 'function') {\n UID_HASH_KEY = Symbol(UID_HASH_KEY);\n }\n\n var STRING_HASH_CACHE_MIN_STRLEN = 16;\n var STRING_HASH_CACHE_MAX_SIZE = 255;\n var STRING_HASH_CACHE_SIZE = 0;\n var stringHashCache = {};\n\n function assertNotInfinite(size) {\n invariant(\n size !== Infinity,\n 'Cannot perform this action with an infinite size.'\n );\n }\n\n createClass(Map, KeyedCollection);\n\n // @pragma Construction\n\n function Map(value) {\n return value === null || value === undefined ? emptyMap() :\n isMap(value) && !isOrdered(value) ? 
value :\n emptyMap().withMutations(function(map ) {\n var iter = KeyedIterable(value);\n assertNotInfinite(iter.size);\n iter.forEach(function(v, k) {return map.set(k, v)});\n });\n }\n\n Map.of = function() {var keyValues = SLICE$0.call(arguments, 0);\n return emptyMap().withMutations(function(map ) {\n for (var i = 0; i < keyValues.length; i += 2) {\n if (i + 1 >= keyValues.length) {\n throw new Error('Missing value for key: ' + keyValues[i]);\n }\n map.set(keyValues[i], keyValues[i + 1]);\n }\n });\n };\n\n Map.prototype.toString = function() {\n return this.__toString('Map {', '}');\n };\n\n // @pragma Access\n\n Map.prototype.get = function(k, notSetValue) {\n return this._root ?\n this._root.get(0, undefined, k, notSetValue) :\n notSetValue;\n };\n\n // @pragma Modification\n\n Map.prototype.set = function(k, v) {\n return updateMap(this, k, v);\n };\n\n Map.prototype.setIn = function(keyPath, v) {\n return this.updateIn(keyPath, NOT_SET, function() {return v});\n };\n\n Map.prototype.remove = function(k) {\n return updateMap(this, k, NOT_SET);\n };\n\n Map.prototype.deleteIn = function(keyPath) {\n return this.updateIn(keyPath, function() {return NOT_SET});\n };\n\n Map.prototype.update = function(k, notSetValue, updater) {\n return arguments.length === 1 ?\n k(this) :\n this.updateIn([k], notSetValue, updater);\n };\n\n Map.prototype.updateIn = function(keyPath, notSetValue, updater) {\n if (!updater) {\n updater = notSetValue;\n notSetValue = undefined;\n }\n var updatedValue = updateInDeepMap(\n this,\n forceIterator(keyPath),\n notSetValue,\n updater\n );\n return updatedValue === NOT_SET ? 
undefined : updatedValue;\n };\n\n Map.prototype.clear = function() {\n if (this.size === 0) {\n return this;\n }\n if (this.__ownerID) {\n this.size = 0;\n this._root = null;\n this.__hash = undefined;\n this.__altered = true;\n return this;\n }\n return emptyMap();\n };\n\n // @pragma Composition\n\n Map.prototype.merge = function(/*...iters*/) {\n return mergeIntoMapWith(this, undefined, arguments);\n };\n\n Map.prototype.mergeWith = function(merger) {var iters = SLICE$0.call(arguments, 1);\n return mergeIntoMapWith(this, merger, iters);\n };\n\n Map.prototype.mergeIn = function(keyPath) {var iters = SLICE$0.call(arguments, 1);\n return this.updateIn(\n keyPath,\n emptyMap(),\n function(m ) {return typeof m.merge === 'function' ?\n m.merge.apply(m, iters) :\n iters[iters.length - 1]}\n );\n };\n\n Map.prototype.mergeDeep = function(/*...iters*/) {\n return mergeIntoMapWith(this, deepMerger, arguments);\n };\n\n Map.prototype.mergeDeepWith = function(merger) {var iters = SLICE$0.call(arguments, 1);\n return mergeIntoMapWith(this, deepMergerWith(merger), iters);\n };\n\n Map.prototype.mergeDeepIn = function(keyPath) {var iters = SLICE$0.call(arguments, 1);\n return this.updateIn(\n keyPath,\n emptyMap(),\n function(m ) {return typeof m.mergeDeep === 'function' ?\n m.mergeDeep.apply(m, iters) :\n iters[iters.length - 1]}\n );\n };\n\n Map.prototype.sort = function(comparator) {\n // Late binding\n return OrderedMap(sortFactory(this, comparator));\n };\n\n Map.prototype.sortBy = function(mapper, comparator) {\n // Late binding\n return OrderedMap(sortFactory(this, comparator, mapper));\n };\n\n // @pragma Mutability\n\n Map.prototype.withMutations = function(fn) {\n var mutable = this.asMutable();\n fn(mutable);\n return mutable.wasAltered() ? mutable.__ensureOwner(this.__ownerID) : this;\n };\n\n Map.prototype.asMutable = function() {\n return this.__ownerID ? 
this : this.__ensureOwner(new OwnerID());\n };\n\n Map.prototype.asImmutable = function() {\n return this.__ensureOwner();\n };\n\n Map.prototype.wasAltered = function() {\n return this.__altered;\n };\n\n Map.prototype.__iterator = function(type, reverse) {\n return new MapIterator(this, type, reverse);\n };\n\n Map.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n var iterations = 0;\n this._root && this._root.iterate(function(entry ) {\n iterations++;\n return fn(entry[1], entry[0], this$0);\n }, reverse);\n return iterations;\n };\n\n Map.prototype.__ensureOwner = function(ownerID) {\n if (ownerID === this.__ownerID) {\n return this;\n }\n if (!ownerID) {\n this.__ownerID = ownerID;\n this.__altered = false;\n return this;\n }\n return makeMap(this.size, this._root, ownerID, this.__hash);\n };\n\n\n function isMap(maybeMap) {\n return !!(maybeMap && maybeMap[IS_MAP_SENTINEL]);\n }\n\n Map.isMap = isMap;\n\n var IS_MAP_SENTINEL = '@@__IMMUTABLE_MAP__@@';\n\n var MapPrototype = Map.prototype;\n MapPrototype[IS_MAP_SENTINEL] = true;\n MapPrototype[DELETE] = MapPrototype.remove;\n MapPrototype.removeIn = MapPrototype.deleteIn;\n\n\n // #pragma Trie Nodes\n\n\n\n function ArrayMapNode(ownerID, entries) {\n this.ownerID = ownerID;\n this.entries = entries;\n }\n\n ArrayMapNode.prototype.get = function(shift, keyHash, key, notSetValue) {\n var entries = this.entries;\n for (var ii = 0, len = entries.length; ii < len; ii++) {\n if (is(key, entries[ii][0])) {\n return entries[ii][1];\n }\n }\n return notSetValue;\n };\n\n ArrayMapNode.prototype.update = function(ownerID, shift, keyHash, key, value, didChangeSize, didAlter) {\n var removed = value === NOT_SET;\n\n var entries = this.entries;\n var idx = 0;\n for (var len = entries.length; idx < len; idx++) {\n if (is(key, entries[idx][0])) {\n break;\n }\n }\n var exists = idx < len;\n\n if (exists ? 
entries[idx][1] === value : removed) {\n return this;\n }\n\n SetRef(didAlter);\n (removed || !exists) && SetRef(didChangeSize);\n\n if (removed && entries.length === 1) {\n return; // undefined\n }\n\n if (!exists && !removed && entries.length >= MAX_ARRAY_MAP_SIZE) {\n return createNodes(ownerID, entries, key, value);\n }\n\n var isEditable = ownerID && ownerID === this.ownerID;\n var newEntries = isEditable ? entries : arrCopy(entries);\n\n if (exists) {\n if (removed) {\n idx === len - 1 ? newEntries.pop() : (newEntries[idx] = newEntries.pop());\n } else {\n newEntries[idx] = [key, value];\n }\n } else {\n newEntries.push([key, value]);\n }\n\n if (isEditable) {\n this.entries = newEntries;\n return this;\n }\n\n return new ArrayMapNode(ownerID, newEntries);\n };\n\n\n\n\n function BitmapIndexedNode(ownerID, bitmap, nodes) {\n this.ownerID = ownerID;\n this.bitmap = bitmap;\n this.nodes = nodes;\n }\n\n BitmapIndexedNode.prototype.get = function(shift, keyHash, key, notSetValue) {\n if (keyHash === undefined) {\n keyHash = hash(key);\n }\n var bit = (1 << ((shift === 0 ? keyHash : keyHash >>> shift) & MASK));\n var bitmap = this.bitmap;\n return (bitmap & bit) === 0 ? notSetValue :\n this.nodes[popCount(bitmap & (bit - 1))].get(shift + SHIFT, keyHash, key, notSetValue);\n };\n\n BitmapIndexedNode.prototype.update = function(ownerID, shift, keyHash, key, value, didChangeSize, didAlter) {\n if (keyHash === undefined) {\n keyHash = hash(key);\n }\n var keyHashFrag = (shift === 0 ? keyHash : keyHash >>> shift) & MASK;\n var bit = 1 << keyHashFrag;\n var bitmap = this.bitmap;\n var exists = (bitmap & bit) !== 0;\n\n if (!exists && value === NOT_SET) {\n return this;\n }\n\n var idx = popCount(bitmap & (bit - 1));\n var nodes = this.nodes;\n var node = exists ? 
nodes[idx] : undefined;\n var newNode = updateNode(node, ownerID, shift + SHIFT, keyHash, key, value, didChangeSize, didAlter);\n\n if (newNode === node) {\n return this;\n }\n\n if (!exists && newNode && nodes.length >= MAX_BITMAP_INDEXED_SIZE) {\n return expandNodes(ownerID, nodes, bitmap, keyHashFrag, newNode);\n }\n\n if (exists && !newNode && nodes.length === 2 && isLeafNode(nodes[idx ^ 1])) {\n return nodes[idx ^ 1];\n }\n\n if (exists && newNode && nodes.length === 1 && isLeafNode(newNode)) {\n return newNode;\n }\n\n var isEditable = ownerID && ownerID === this.ownerID;\n var newBitmap = exists ? newNode ? bitmap : bitmap ^ bit : bitmap | bit;\n var newNodes = exists ? newNode ?\n setIn(nodes, idx, newNode, isEditable) :\n spliceOut(nodes, idx, isEditable) :\n spliceIn(nodes, idx, newNode, isEditable);\n\n if (isEditable) {\n this.bitmap = newBitmap;\n this.nodes = newNodes;\n return this;\n }\n\n return new BitmapIndexedNode(ownerID, newBitmap, newNodes);\n };\n\n\n\n\n function HashArrayMapNode(ownerID, count, nodes) {\n this.ownerID = ownerID;\n this.count = count;\n this.nodes = nodes;\n }\n\n HashArrayMapNode.prototype.get = function(shift, keyHash, key, notSetValue) {\n if (keyHash === undefined) {\n keyHash = hash(key);\n }\n var idx = (shift === 0 ? keyHash : keyHash >>> shift) & MASK;\n var node = this.nodes[idx];\n return node ? node.get(shift + SHIFT, keyHash, key, notSetValue) : notSetValue;\n };\n\n HashArrayMapNode.prototype.update = function(ownerID, shift, keyHash, key, value, didChangeSize, didAlter) {\n if (keyHash === undefined) {\n keyHash = hash(key);\n }\n var idx = (shift === 0 ? 
keyHash : keyHash >>> shift) & MASK;\n var removed = value === NOT_SET;\n var nodes = this.nodes;\n var node = nodes[idx];\n\n if (removed && !node) {\n return this;\n }\n\n var newNode = updateNode(node, ownerID, shift + SHIFT, keyHash, key, value, didChangeSize, didAlter);\n if (newNode === node) {\n return this;\n }\n\n var newCount = this.count;\n if (!node) {\n newCount++;\n } else if (!newNode) {\n newCount--;\n if (newCount < MIN_HASH_ARRAY_MAP_SIZE) {\n return packNodes(ownerID, nodes, newCount, idx);\n }\n }\n\n var isEditable = ownerID && ownerID === this.ownerID;\n var newNodes = setIn(nodes, idx, newNode, isEditable);\n\n if (isEditable) {\n this.count = newCount;\n this.nodes = newNodes;\n return this;\n }\n\n return new HashArrayMapNode(ownerID, newCount, newNodes);\n };\n\n\n\n\n function HashCollisionNode(ownerID, keyHash, entries) {\n this.ownerID = ownerID;\n this.keyHash = keyHash;\n this.entries = entries;\n }\n\n HashCollisionNode.prototype.get = function(shift, keyHash, key, notSetValue) {\n var entries = this.entries;\n for (var ii = 0, len = entries.length; ii < len; ii++) {\n if (is(key, entries[ii][0])) {\n return entries[ii][1];\n }\n }\n return notSetValue;\n };\n\n HashCollisionNode.prototype.update = function(ownerID, shift, keyHash, key, value, didChangeSize, didAlter) {\n if (keyHash === undefined) {\n keyHash = hash(key);\n }\n\n var removed = value === NOT_SET;\n\n if (keyHash !== this.keyHash) {\n if (removed) {\n return this;\n }\n SetRef(didAlter);\n SetRef(didChangeSize);\n return mergeIntoNode(this, ownerID, shift, keyHash, [key, value]);\n }\n\n var entries = this.entries;\n var idx = 0;\n for (var len = entries.length; idx < len; idx++) {\n if (is(key, entries[idx][0])) {\n break;\n }\n }\n var exists = idx < len;\n\n if (exists ? 
entries[idx][1] === value : removed) {\n return this;\n }\n\n SetRef(didAlter);\n (removed || !exists) && SetRef(didChangeSize);\n\n if (removed && len === 2) {\n return new ValueNode(ownerID, this.keyHash, entries[idx ^ 1]);\n }\n\n var isEditable = ownerID && ownerID === this.ownerID;\n var newEntries = isEditable ? entries : arrCopy(entries);\n\n if (exists) {\n if (removed) {\n idx === len - 1 ? newEntries.pop() : (newEntries[idx] = newEntries.pop());\n } else {\n newEntries[idx] = [key, value];\n }\n } else {\n newEntries.push([key, value]);\n }\n\n if (isEditable) {\n this.entries = newEntries;\n return this;\n }\n\n return new HashCollisionNode(ownerID, this.keyHash, newEntries);\n };\n\n\n\n\n function ValueNode(ownerID, keyHash, entry) {\n this.ownerID = ownerID;\n this.keyHash = keyHash;\n this.entry = entry;\n }\n\n ValueNode.prototype.get = function(shift, keyHash, key, notSetValue) {\n return is(key, this.entry[0]) ? this.entry[1] : notSetValue;\n };\n\n ValueNode.prototype.update = function(ownerID, shift, keyHash, key, value, didChangeSize, didAlter) {\n var removed = value === NOT_SET;\n var keyMatch = is(key, this.entry[0]);\n if (keyMatch ? value === this.entry[1] : removed) {\n return this;\n }\n\n SetRef(didAlter);\n\n if (removed) {\n SetRef(didChangeSize);\n return; // undefined\n }\n\n if (keyMatch) {\n if (ownerID && ownerID === this.ownerID) {\n this.entry[1] = value;\n return this;\n }\n return new ValueNode(ownerID, this.keyHash, [key, value]);\n }\n\n SetRef(didChangeSize);\n return mergeIntoNode(this, ownerID, shift, hash(key), [key, value]);\n };\n\n\n\n // #pragma Iterators\n\n ArrayMapNode.prototype.iterate =\n HashCollisionNode.prototype.iterate = function (fn, reverse) {\n var entries = this.entries;\n for (var ii = 0, maxIndex = entries.length - 1; ii <= maxIndex; ii++) {\n if (fn(entries[reverse ? 
maxIndex - ii : ii]) === false) {\n return false;\n }\n }\n }\n\n BitmapIndexedNode.prototype.iterate =\n HashArrayMapNode.prototype.iterate = function (fn, reverse) {\n var nodes = this.nodes;\n for (var ii = 0, maxIndex = nodes.length - 1; ii <= maxIndex; ii++) {\n var node = nodes[reverse ? maxIndex - ii : ii];\n if (node && node.iterate(fn, reverse) === false) {\n return false;\n }\n }\n }\n\n ValueNode.prototype.iterate = function (fn, reverse) {\n return fn(this.entry);\n }\n\n createClass(MapIterator, Iterator);\n\n function MapIterator(map, type, reverse) {\n this._type = type;\n this._reverse = reverse;\n this._stack = map._root && mapIteratorFrame(map._root);\n }\n\n MapIterator.prototype.next = function() {\n var type = this._type;\n var stack = this._stack;\n while (stack) {\n var node = stack.node;\n var index = stack.index++;\n var maxIndex;\n if (node.entry) {\n if (index === 0) {\n return mapIteratorValue(type, node.entry);\n }\n } else if (node.entries) {\n maxIndex = node.entries.length - 1;\n if (index <= maxIndex) {\n return mapIteratorValue(type, node.entries[this._reverse ? maxIndex - index : index]);\n }\n } else {\n maxIndex = node.nodes.length - 1;\n if (index <= maxIndex) {\n var subNode = node.nodes[this._reverse ? 
maxIndex - index : index];\n if (subNode) {\n if (subNode.entry) {\n return mapIteratorValue(type, subNode.entry);\n }\n stack = this._stack = mapIteratorFrame(subNode, stack);\n }\n continue;\n }\n }\n stack = this._stack = this._stack.__prev;\n }\n return iteratorDone();\n };\n\n\n function mapIteratorValue(type, entry) {\n return iteratorValue(type, entry[0], entry[1]);\n }\n\n function mapIteratorFrame(node, prev) {\n return {\n node: node,\n index: 0,\n __prev: prev\n };\n }\n\n function makeMap(size, root, ownerID, hash) {\n var map = Object.create(MapPrototype);\n map.size = size;\n map._root = root;\n map.__ownerID = ownerID;\n map.__hash = hash;\n map.__altered = false;\n return map;\n }\n\n var EMPTY_MAP;\n function emptyMap() {\n return EMPTY_MAP || (EMPTY_MAP = makeMap(0));\n }\n\n function updateMap(map, k, v) {\n var newRoot;\n var newSize;\n if (!map._root) {\n if (v === NOT_SET) {\n return map;\n }\n newSize = 1;\n newRoot = new ArrayMapNode(map.__ownerID, [[k, v]]);\n } else {\n var didChangeSize = MakeRef(CHANGE_LENGTH);\n var didAlter = MakeRef(DID_ALTER);\n newRoot = updateNode(map._root, map.__ownerID, 0, undefined, k, v, didChangeSize, didAlter);\n if (!didAlter.value) {\n return map;\n }\n newSize = map.size + (didChangeSize.value ? v === NOT_SET ? -1 : 1 : 0);\n }\n if (map.__ownerID) {\n map.size = newSize;\n map._root = newRoot;\n map.__hash = undefined;\n map.__altered = true;\n return map;\n }\n return newRoot ? 
makeMap(newSize, newRoot) : emptyMap();\n }\n\n function updateNode(node, ownerID, shift, keyHash, key, value, didChangeSize, didAlter) {\n if (!node) {\n if (value === NOT_SET) {\n return node;\n }\n SetRef(didAlter);\n SetRef(didChangeSize);\n return new ValueNode(ownerID, keyHash, [key, value]);\n }\n return node.update(ownerID, shift, keyHash, key, value, didChangeSize, didAlter);\n }\n\n function isLeafNode(node) {\n return node.constructor === ValueNode || node.constructor === HashCollisionNode;\n }\n\n function mergeIntoNode(node, ownerID, shift, keyHash, entry) {\n if (node.keyHash === keyHash) {\n return new HashCollisionNode(ownerID, keyHash, [node.entry, entry]);\n }\n\n var idx1 = (shift === 0 ? node.keyHash : node.keyHash >>> shift) & MASK;\n var idx2 = (shift === 0 ? keyHash : keyHash >>> shift) & MASK;\n\n var newNode;\n var nodes = idx1 === idx2 ?\n [mergeIntoNode(node, ownerID, shift + SHIFT, keyHash, entry)] :\n ((newNode = new ValueNode(ownerID, keyHash, entry)), idx1 < idx2 ? 
[node, newNode] : [newNode, node]);\n\n return new BitmapIndexedNode(ownerID, (1 << idx1) | (1 << idx2), nodes);\n }\n\n function createNodes(ownerID, entries, key, value) {\n if (!ownerID) {\n ownerID = new OwnerID();\n }\n var node = new ValueNode(ownerID, hash(key), [key, value]);\n for (var ii = 0; ii < entries.length; ii++) {\n var entry = entries[ii];\n node = node.update(ownerID, 0, undefined, entry[0], entry[1]);\n }\n return node;\n }\n\n function packNodes(ownerID, nodes, count, excluding) {\n var bitmap = 0;\n var packedII = 0;\n var packedNodes = new Array(count);\n for (var ii = 0, bit = 1, len = nodes.length; ii < len; ii++, bit <<= 1) {\n var node = nodes[ii];\n if (node !== undefined && ii !== excluding) {\n bitmap |= bit;\n packedNodes[packedII++] = node;\n }\n }\n return new BitmapIndexedNode(ownerID, bitmap, packedNodes);\n }\n\n function expandNodes(ownerID, nodes, bitmap, including, node) {\n var count = 0;\n var expandedNodes = new Array(SIZE);\n for (var ii = 0; bitmap !== 0; ii++, bitmap >>>= 1) {\n expandedNodes[ii] = bitmap & 1 ? nodes[count++] : undefined;\n }\n expandedNodes[including] = node;\n return new HashArrayMapNode(ownerID, count + 1, expandedNodes);\n }\n\n function mergeIntoMapWith(map, merger, iterables) {\n var iters = [];\n for (var ii = 0; ii < iterables.length; ii++) {\n var value = iterables[ii];\n var iter = KeyedIterable(value);\n if (!isIterable(value)) {\n iter = iter.map(function(v ) {return fromJS(v)});\n }\n iters.push(iter);\n }\n return mergeIntoCollectionWith(map, merger, iters);\n }\n\n function deepMerger(existing, value, key) {\n return existing && existing.mergeDeep && isIterable(value) ?\n existing.mergeDeep(value) :\n is(existing, value) ? 
existing : value;\n }\n\n function deepMergerWith(merger) {\n return function(existing, value, key) {\n if (existing && existing.mergeDeepWith && isIterable(value)) {\n return existing.mergeDeepWith(merger, value);\n }\n var nextValue = merger(existing, value, key);\n return is(existing, nextValue) ? existing : nextValue;\n };\n }\n\n function mergeIntoCollectionWith(collection, merger, iters) {\n iters = iters.filter(function(x ) {return x.size !== 0});\n if (iters.length === 0) {\n return collection;\n }\n if (collection.size === 0 && !collection.__ownerID && iters.length === 1) {\n return collection.constructor(iters[0]);\n }\n return collection.withMutations(function(collection ) {\n var mergeIntoMap = merger ?\n function(value, key) {\n collection.update(key, NOT_SET, function(existing )\n {return existing === NOT_SET ? value : merger(existing, value, key)}\n );\n } :\n function(value, key) {\n collection.set(key, value);\n }\n for (var ii = 0; ii < iters.length; ii++) {\n iters[ii].forEach(mergeIntoMap);\n }\n });\n }\n\n function updateInDeepMap(existing, keyPathIter, notSetValue, updater) {\n var isNotSet = existing === NOT_SET;\n var step = keyPathIter.next();\n if (step.done) {\n var existingValue = isNotSet ? notSetValue : existing;\n var newValue = updater(existingValue);\n return newValue === existingValue ? existing : newValue;\n }\n invariant(\n isNotSet || (existing && existing.set),\n 'invalid keyPath'\n );\n var key = step.value;\n var nextExisting = isNotSet ? NOT_SET : existing.get(key, NOT_SET);\n var nextUpdated = updateInDeepMap(\n nextExisting,\n keyPathIter,\n notSetValue,\n updater\n );\n return nextUpdated === nextExisting ? existing :\n nextUpdated === NOT_SET ? existing.remove(key) :\n (isNotSet ? 
emptyMap() : existing).set(key, nextUpdated);\n }\n\n function popCount(x) {\n x = x - ((x >> 1) & 0x55555555);\n x = (x & 0x33333333) + ((x >> 2) & 0x33333333);\n x = (x + (x >> 4)) & 0x0f0f0f0f;\n x = x + (x >> 8);\n x = x + (x >> 16);\n return x & 0x7f;\n }\n\n function setIn(array, idx, val, canEdit) {\n var newArray = canEdit ? array : arrCopy(array);\n newArray[idx] = val;\n return newArray;\n }\n\n function spliceIn(array, idx, val, canEdit) {\n var newLen = array.length + 1;\n if (canEdit && idx + 1 === newLen) {\n array[idx] = val;\n return array;\n }\n var newArray = new Array(newLen);\n var after = 0;\n for (var ii = 0; ii < newLen; ii++) {\n if (ii === idx) {\n newArray[ii] = val;\n after = -1;\n } else {\n newArray[ii] = array[ii + after];\n }\n }\n return newArray;\n }\n\n function spliceOut(array, idx, canEdit) {\n var newLen = array.length - 1;\n if (canEdit && idx === newLen) {\n array.pop();\n return array;\n }\n var newArray = new Array(newLen);\n var after = 0;\n for (var ii = 0; ii < newLen; ii++) {\n if (ii === idx) {\n after = 1;\n }\n newArray[ii] = array[ii + after];\n }\n return newArray;\n }\n\n var MAX_ARRAY_MAP_SIZE = SIZE / 4;\n var MAX_BITMAP_INDEXED_SIZE = SIZE / 2;\n var MIN_HASH_ARRAY_MAP_SIZE = SIZE / 4;\n\n createClass(List, IndexedCollection);\n\n // @pragma Construction\n\n function List(value) {\n var empty = emptyList();\n if (value === null || value === undefined) {\n return empty;\n }\n if (isList(value)) {\n return value;\n }\n var iter = IndexedIterable(value);\n var size = iter.size;\n if (size === 0) {\n return empty;\n }\n assertNotInfinite(size);\n if (size > 0 && size < SIZE) {\n return makeList(0, size, SHIFT, null, new VNode(iter.toArray()));\n }\n return empty.withMutations(function(list ) {\n list.setSize(size);\n iter.forEach(function(v, i) {return list.set(i, v)});\n });\n }\n\n List.of = function(/*...values*/) {\n return this(arguments);\n };\n\n List.prototype.toString = function() {\n return 
this.__toString('List [', ']');\n };\n\n // @pragma Access\n\n List.prototype.get = function(index, notSetValue) {\n index = wrapIndex(this, index);\n if (index >= 0 && index < this.size) {\n index += this._origin;\n var node = listNodeFor(this, index);\n return node && node.array[index & MASK];\n }\n return notSetValue;\n };\n\n // @pragma Modification\n\n List.prototype.set = function(index, value) {\n return updateList(this, index, value);\n };\n\n List.prototype.remove = function(index) {\n return !this.has(index) ? this :\n index === 0 ? this.shift() :\n index === this.size - 1 ? this.pop() :\n this.splice(index, 1);\n };\n\n List.prototype.insert = function(index, value) {\n return this.splice(index, 0, value);\n };\n\n List.prototype.clear = function() {\n if (this.size === 0) {\n return this;\n }\n if (this.__ownerID) {\n this.size = this._origin = this._capacity = 0;\n this._level = SHIFT;\n this._root = this._tail = null;\n this.__hash = undefined;\n this.__altered = true;\n return this;\n }\n return emptyList();\n };\n\n List.prototype.push = function(/*...values*/) {\n var values = arguments;\n var oldSize = this.size;\n return this.withMutations(function(list ) {\n setListBounds(list, 0, oldSize + values.length);\n for (var ii = 0; ii < values.length; ii++) {\n list.set(oldSize + ii, values[ii]);\n }\n });\n };\n\n List.prototype.pop = function() {\n return setListBounds(this, 0, -1);\n };\n\n List.prototype.unshift = function(/*...values*/) {\n var values = arguments;\n return this.withMutations(function(list ) {\n setListBounds(list, -values.length);\n for (var ii = 0; ii < values.length; ii++) {\n list.set(ii, values[ii]);\n }\n });\n };\n\n List.prototype.shift = function() {\n return setListBounds(this, 1);\n };\n\n // @pragma Composition\n\n List.prototype.merge = function(/*...iters*/) {\n return mergeIntoListWith(this, undefined, arguments);\n };\n\n List.prototype.mergeWith = function(merger) {var iters = SLICE$0.call(arguments, 1);\n return 
mergeIntoListWith(this, merger, iters);\n };\n\n List.prototype.mergeDeep = function(/*...iters*/) {\n return mergeIntoListWith(this, deepMerger, arguments);\n };\n\n List.prototype.mergeDeepWith = function(merger) {var iters = SLICE$0.call(arguments, 1);\n return mergeIntoListWith(this, deepMergerWith(merger), iters);\n };\n\n List.prototype.setSize = function(size) {\n return setListBounds(this, 0, size);\n };\n\n // @pragma Iteration\n\n List.prototype.slice = function(begin, end) {\n var size = this.size;\n if (wholeSlice(begin, end, size)) {\n return this;\n }\n return setListBounds(\n this,\n resolveBegin(begin, size),\n resolveEnd(end, size)\n );\n };\n\n List.prototype.__iterator = function(type, reverse) {\n var index = 0;\n var values = iterateList(this, reverse);\n return new Iterator(function() {\n var value = values();\n return value === DONE ?\n iteratorDone() :\n iteratorValue(type, index++, value);\n });\n };\n\n List.prototype.__iterate = function(fn, reverse) {\n var index = 0;\n var values = iterateList(this, reverse);\n var value;\n while ((value = values()) !== DONE) {\n if (fn(value, index++, this) === false) {\n break;\n }\n }\n return index;\n };\n\n List.prototype.__ensureOwner = function(ownerID) {\n if (ownerID === this.__ownerID) {\n return this;\n }\n if (!ownerID) {\n this.__ownerID = ownerID;\n return this;\n }\n return makeList(this._origin, this._capacity, this._level, this._root, this._tail, ownerID, this.__hash);\n };\n\n\n function isList(maybeList) {\n return !!(maybeList && maybeList[IS_LIST_SENTINEL]);\n }\n\n List.isList = isList;\n\n var IS_LIST_SENTINEL = '@@__IMMUTABLE_LIST__@@';\n\n var ListPrototype = List.prototype;\n ListPrototype[IS_LIST_SENTINEL] = true;\n ListPrototype[DELETE] = ListPrototype.remove;\n ListPrototype.setIn = MapPrototype.setIn;\n ListPrototype.deleteIn =\n ListPrototype.removeIn = MapPrototype.removeIn;\n ListPrototype.update = MapPrototype.update;\n ListPrototype.updateIn = MapPrototype.updateIn;\n 
ListPrototype.mergeIn = MapPrototype.mergeIn;\n ListPrototype.mergeDeepIn = MapPrototype.mergeDeepIn;\n ListPrototype.withMutations = MapPrototype.withMutations;\n ListPrototype.asMutable = MapPrototype.asMutable;\n ListPrototype.asImmutable = MapPrototype.asImmutable;\n ListPrototype.wasAltered = MapPrototype.wasAltered;\n\n\n\n function VNode(array, ownerID) {\n this.array = array;\n this.ownerID = ownerID;\n }\n\n // TODO: seems like these methods are very similar\n\n VNode.prototype.removeBefore = function(ownerID, level, index) {\n if (index === level ? 1 << level : false || this.array.length === 0) {\n return this;\n }\n var originIndex = (index >>> level) & MASK;\n if (originIndex >= this.array.length) {\n return new VNode([], ownerID);\n }\n var removingFirst = originIndex === 0;\n var newChild;\n if (level > 0) {\n var oldChild = this.array[originIndex];\n newChild = oldChild && oldChild.removeBefore(ownerID, level - SHIFT, index);\n if (newChild === oldChild && removingFirst) {\n return this;\n }\n }\n if (removingFirst && !newChild) {\n return this;\n }\n var editable = editableVNode(this, ownerID);\n if (!removingFirst) {\n for (var ii = 0; ii < originIndex; ii++) {\n editable.array[ii] = undefined;\n }\n }\n if (newChild) {\n editable.array[originIndex] = newChild;\n }\n return editable;\n };\n\n VNode.prototype.removeAfter = function(ownerID, level, index) {\n if (index === (level ? 
1 << level : 0) || this.array.length === 0) {\n return this;\n }\n var sizeIndex = ((index - 1) >>> level) & MASK;\n if (sizeIndex >= this.array.length) {\n return this;\n }\n\n var newChild;\n if (level > 0) {\n var oldChild = this.array[sizeIndex];\n newChild = oldChild && oldChild.removeAfter(ownerID, level - SHIFT, index);\n if (newChild === oldChild && sizeIndex === this.array.length - 1) {\n return this;\n }\n }\n\n var editable = editableVNode(this, ownerID);\n editable.array.splice(sizeIndex + 1);\n if (newChild) {\n editable.array[sizeIndex] = newChild;\n }\n return editable;\n };\n\n\n\n var DONE = {};\n\n function iterateList(list, reverse) {\n var left = list._origin;\n var right = list._capacity;\n var tailPos = getTailOffset(right);\n var tail = list._tail;\n\n return iterateNodeOrLeaf(list._root, list._level, 0);\n\n function iterateNodeOrLeaf(node, level, offset) {\n return level === 0 ?\n iterateLeaf(node, offset) :\n iterateNode(node, level, offset);\n }\n\n function iterateLeaf(node, offset) {\n var array = offset === tailPos ? tail && tail.array : node && node.array;\n var from = offset > left ? 0 : left - offset;\n var to = right - offset;\n if (to > SIZE) {\n to = SIZE;\n }\n return function() {\n if (from === to) {\n return DONE;\n }\n var idx = reverse ? --to : from++;\n return array && array[idx];\n };\n }\n\n function iterateNode(node, level, offset) {\n var values;\n var array = node && node.array;\n var from = offset > left ? 0 : (left - offset) >> level;\n var to = ((right - offset) >> level) + 1;\n if (to > SIZE) {\n to = SIZE;\n }\n return function() {\n do {\n if (values) {\n var value = values();\n if (value !== DONE) {\n return value;\n }\n values = null;\n }\n if (from === to) {\n return DONE;\n }\n var idx = reverse ? 
--to : from++;\n values = iterateNodeOrLeaf(\n array && array[idx], level - SHIFT, offset + (idx << level)\n );\n } while (true);\n };\n }\n }\n\n function makeList(origin, capacity, level, root, tail, ownerID, hash) {\n var list = Object.create(ListPrototype);\n list.size = capacity - origin;\n list._origin = origin;\n list._capacity = capacity;\n list._level = level;\n list._root = root;\n list._tail = tail;\n list.__ownerID = ownerID;\n list.__hash = hash;\n list.__altered = false;\n return list;\n }\n\n var EMPTY_LIST;\n function emptyList() {\n return EMPTY_LIST || (EMPTY_LIST = makeList(0, 0, SHIFT));\n }\n\n function updateList(list, index, value) {\n index = wrapIndex(list, index);\n\n if (index !== index) {\n return list;\n }\n\n if (index >= list.size || index < 0) {\n return list.withMutations(function(list ) {\n index < 0 ?\n setListBounds(list, index).set(0, value) :\n setListBounds(list, 0, index + 1).set(index, value)\n });\n }\n\n index += list._origin;\n\n var newTail = list._tail;\n var newRoot = list._root;\n var didAlter = MakeRef(DID_ALTER);\n if (index >= getTailOffset(list._capacity)) {\n newTail = updateVNode(newTail, list.__ownerID, 0, index, value, didAlter);\n } else {\n newRoot = updateVNode(newRoot, list.__ownerID, list._level, index, value, didAlter);\n }\n\n if (!didAlter.value) {\n return list;\n }\n\n if (list.__ownerID) {\n list._root = newRoot;\n list._tail = newTail;\n list.__hash = undefined;\n list.__altered = true;\n return list;\n }\n return makeList(list._origin, list._capacity, list._level, newRoot, newTail);\n }\n\n function updateVNode(node, ownerID, level, index, value, didAlter) {\n var idx = (index >>> level) & MASK;\n var nodeHas = node && idx < node.array.length;\n if (!nodeHas && value === undefined) {\n return node;\n }\n\n var newNode;\n\n if (level > 0) {\n var lowerNode = node && node.array[idx];\n var newLowerNode = updateVNode(lowerNode, ownerID, level - SHIFT, index, value, didAlter);\n if (newLowerNode === 
lowerNode) {\n return node;\n }\n newNode = editableVNode(node, ownerID);\n newNode.array[idx] = newLowerNode;\n return newNode;\n }\n\n if (nodeHas && node.array[idx] === value) {\n return node;\n }\n\n SetRef(didAlter);\n\n newNode = editableVNode(node, ownerID);\n if (value === undefined && idx === newNode.array.length - 1) {\n newNode.array.pop();\n } else {\n newNode.array[idx] = value;\n }\n return newNode;\n }\n\n function editableVNode(node, ownerID) {\n if (ownerID && node && ownerID === node.ownerID) {\n return node;\n }\n return new VNode(node ? node.array.slice() : [], ownerID);\n }\n\n function listNodeFor(list, rawIndex) {\n if (rawIndex >= getTailOffset(list._capacity)) {\n return list._tail;\n }\n if (rawIndex < 1 << (list._level + SHIFT)) {\n var node = list._root;\n var level = list._level;\n while (node && level > 0) {\n node = node.array[(rawIndex >>> level) & MASK];\n level -= SHIFT;\n }\n return node;\n }\n }\n\n function setListBounds(list, begin, end) {\n // Sanitize begin & end using this shorthand for ToInt32(argument)\n // http://www.ecma-international.org/ecma-262/6.0/#sec-toint32\n if (begin !== undefined) {\n begin = begin | 0;\n }\n if (end !== undefined) {\n end = end | 0;\n }\n var owner = list.__ownerID || new OwnerID();\n var oldOrigin = list._origin;\n var oldCapacity = list._capacity;\n var newOrigin = oldOrigin + begin;\n var newCapacity = end === undefined ? oldCapacity : end < 0 ? oldCapacity + end : oldOrigin + end;\n if (newOrigin === oldOrigin && newCapacity === oldCapacity) {\n return list;\n }\n\n // If it's going to end after it starts, it's empty.\n if (newOrigin >= newCapacity) {\n return list.clear();\n }\n\n var newLevel = list._level;\n var newRoot = list._root;\n\n // New origin might need creating a higher root.\n var offsetShift = 0;\n while (newOrigin + offsetShift < 0) {\n newRoot = new VNode(newRoot && newRoot.array.length ? 
[undefined, newRoot] : [], owner);\n newLevel += SHIFT;\n offsetShift += 1 << newLevel;\n }\n if (offsetShift) {\n newOrigin += offsetShift;\n oldOrigin += offsetShift;\n newCapacity += offsetShift;\n oldCapacity += offsetShift;\n }\n\n var oldTailOffset = getTailOffset(oldCapacity);\n var newTailOffset = getTailOffset(newCapacity);\n\n // New size might need creating a higher root.\n while (newTailOffset >= 1 << (newLevel + SHIFT)) {\n newRoot = new VNode(newRoot && newRoot.array.length ? [newRoot] : [], owner);\n newLevel += SHIFT;\n }\n\n // Locate or create the new tail.\n var oldTail = list._tail;\n var newTail = newTailOffset < oldTailOffset ?\n listNodeFor(list, newCapacity - 1) :\n newTailOffset > oldTailOffset ? new VNode([], owner) : oldTail;\n\n // Merge Tail into tree.\n if (oldTail && newTailOffset > oldTailOffset && newOrigin < oldCapacity && oldTail.array.length) {\n newRoot = editableVNode(newRoot, owner);\n var node = newRoot;\n for (var level = newLevel; level > SHIFT; level -= SHIFT) {\n var idx = (oldTailOffset >>> level) & MASK;\n node = node.array[idx] = editableVNode(node.array[idx], owner);\n }\n node.array[(oldTailOffset >>> SHIFT) & MASK] = oldTail;\n }\n\n // If the size has been reduced, there's a chance the tail needs to be trimmed.\n if (newCapacity < oldCapacity) {\n newTail = newTail && newTail.removeAfter(owner, 0, newCapacity);\n }\n\n // If the new origin is within the tail, then we do not need a root.\n if (newOrigin >= newTailOffset) {\n newOrigin -= newTailOffset;\n newCapacity -= newTailOffset;\n newLevel = SHIFT;\n newRoot = null;\n newTail = newTail && newTail.removeBefore(owner, 0, newOrigin);\n\n // Otherwise, if the root has been trimmed, garbage collect.\n } else if (newOrigin > oldOrigin || newTailOffset < oldTailOffset) {\n offsetShift = 0;\n\n // Identify the new top root node of the subtree of the old root.\n while (newRoot) {\n var beginIndex = (newOrigin >>> newLevel) & MASK;\n if (beginIndex !== (newTailOffset >>> 
newLevel) & MASK) {\n break;\n }\n if (beginIndex) {\n offsetShift += (1 << newLevel) * beginIndex;\n }\n newLevel -= SHIFT;\n newRoot = newRoot.array[beginIndex];\n }\n\n // Trim the new sides of the new root.\n if (newRoot && newOrigin > oldOrigin) {\n newRoot = newRoot.removeBefore(owner, newLevel, newOrigin - offsetShift);\n }\n if (newRoot && newTailOffset < oldTailOffset) {\n newRoot = newRoot.removeAfter(owner, newLevel, newTailOffset - offsetShift);\n }\n if (offsetShift) {\n newOrigin -= offsetShift;\n newCapacity -= offsetShift;\n }\n }\n\n if (list.__ownerID) {\n list.size = newCapacity - newOrigin;\n list._origin = newOrigin;\n list._capacity = newCapacity;\n list._level = newLevel;\n list._root = newRoot;\n list._tail = newTail;\n list.__hash = undefined;\n list.__altered = true;\n return list;\n }\n return makeList(newOrigin, newCapacity, newLevel, newRoot, newTail);\n }\n\n function mergeIntoListWith(list, merger, iterables) {\n var iters = [];\n var maxSize = 0;\n for (var ii = 0; ii < iterables.length; ii++) {\n var value = iterables[ii];\n var iter = IndexedIterable(value);\n if (iter.size > maxSize) {\n maxSize = iter.size;\n }\n if (!isIterable(value)) {\n iter = iter.map(function(v ) {return fromJS(v)});\n }\n iters.push(iter);\n }\n if (maxSize > list.size) {\n list = list.setSize(maxSize);\n }\n return mergeIntoCollectionWith(list, merger, iters);\n }\n\n function getTailOffset(size) {\n return size < SIZE ? 0 : (((size - 1) >>> SHIFT) << SHIFT);\n }\n\n createClass(OrderedMap, Map);\n\n // @pragma Construction\n\n function OrderedMap(value) {\n return value === null || value === undefined ? emptyOrderedMap() :\n isOrderedMap(value) ? 
value :\n emptyOrderedMap().withMutations(function(map ) {\n var iter = KeyedIterable(value);\n assertNotInfinite(iter.size);\n iter.forEach(function(v, k) {return map.set(k, v)});\n });\n }\n\n OrderedMap.of = function(/*...values*/) {\n return this(arguments);\n };\n\n OrderedMap.prototype.toString = function() {\n return this.__toString('OrderedMap {', '}');\n };\n\n // @pragma Access\n\n OrderedMap.prototype.get = function(k, notSetValue) {\n var index = this._map.get(k);\n return index !== undefined ? this._list.get(index)[1] : notSetValue;\n };\n\n // @pragma Modification\n\n OrderedMap.prototype.clear = function() {\n if (this.size === 0) {\n return this;\n }\n if (this.__ownerID) {\n this.size = 0;\n this._map.clear();\n this._list.clear();\n return this;\n }\n return emptyOrderedMap();\n };\n\n OrderedMap.prototype.set = function(k, v) {\n return updateOrderedMap(this, k, v);\n };\n\n OrderedMap.prototype.remove = function(k) {\n return updateOrderedMap(this, k, NOT_SET);\n };\n\n OrderedMap.prototype.wasAltered = function() {\n return this._map.wasAltered() || this._list.wasAltered();\n };\n\n OrderedMap.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n return this._list.__iterate(\n function(entry ) {return entry && fn(entry[1], entry[0], this$0)},\n reverse\n );\n };\n\n OrderedMap.prototype.__iterator = function(type, reverse) {\n return this._list.fromEntrySeq().__iterator(type, reverse);\n };\n\n OrderedMap.prototype.__ensureOwner = function(ownerID) {\n if (ownerID === this.__ownerID) {\n return this;\n }\n var newMap = this._map.__ensureOwner(ownerID);\n var newList = this._list.__ensureOwner(ownerID);\n if (!ownerID) {\n this.__ownerID = ownerID;\n this._map = newMap;\n this._list = newList;\n return this;\n }\n return makeOrderedMap(newMap, newList, ownerID, this.__hash);\n };\n\n\n function isOrderedMap(maybeOrderedMap) {\n return isMap(maybeOrderedMap) && isOrdered(maybeOrderedMap);\n }\n\n OrderedMap.isOrderedMap = 
isOrderedMap;\n\n OrderedMap.prototype[IS_ORDERED_SENTINEL] = true;\n OrderedMap.prototype[DELETE] = OrderedMap.prototype.remove;\n\n\n\n function makeOrderedMap(map, list, ownerID, hash) {\n var omap = Object.create(OrderedMap.prototype);\n omap.size = map ? map.size : 0;\n omap._map = map;\n omap._list = list;\n omap.__ownerID = ownerID;\n omap.__hash = hash;\n return omap;\n }\n\n var EMPTY_ORDERED_MAP;\n function emptyOrderedMap() {\n return EMPTY_ORDERED_MAP || (EMPTY_ORDERED_MAP = makeOrderedMap(emptyMap(), emptyList()));\n }\n\n function updateOrderedMap(omap, k, v) {\n var map = omap._map;\n var list = omap._list;\n var i = map.get(k);\n var has = i !== undefined;\n var newMap;\n var newList;\n if (v === NOT_SET) { // removed\n if (!has) {\n return omap;\n }\n if (list.size >= SIZE && list.size >= map.size * 2) {\n newList = list.filter(function(entry, idx) {return entry !== undefined && i !== idx});\n newMap = newList.toKeyedSeq().map(function(entry ) {return entry[0]}).flip().toMap();\n if (omap.__ownerID) {\n newMap.__ownerID = newList.__ownerID = omap.__ownerID;\n }\n } else {\n newMap = map.remove(k);\n newList = i === list.size - 1 ? 
list.pop() : list.set(i, undefined);\n }\n } else {\n if (has) {\n if (v === list.get(i)[1]) {\n return omap;\n }\n newMap = map;\n newList = list.set(i, [k, v]);\n } else {\n newMap = map.set(k, list.size);\n newList = list.set(list.size, [k, v]);\n }\n }\n if (omap.__ownerID) {\n omap.size = newMap.size;\n omap._map = newMap;\n omap._list = newList;\n omap.__hash = undefined;\n return omap;\n }\n return makeOrderedMap(newMap, newList);\n }\n\n createClass(ToKeyedSequence, KeyedSeq);\n function ToKeyedSequence(indexed, useKeys) {\n this._iter = indexed;\n this._useKeys = useKeys;\n this.size = indexed.size;\n }\n\n ToKeyedSequence.prototype.get = function(key, notSetValue) {\n return this._iter.get(key, notSetValue);\n };\n\n ToKeyedSequence.prototype.has = function(key) {\n return this._iter.has(key);\n };\n\n ToKeyedSequence.prototype.valueSeq = function() {\n return this._iter.valueSeq();\n };\n\n ToKeyedSequence.prototype.reverse = function() {var this$0 = this;\n var reversedSequence = reverseFactory(this, true);\n if (!this._useKeys) {\n reversedSequence.valueSeq = function() {return this$0._iter.toSeq().reverse()};\n }\n return reversedSequence;\n };\n\n ToKeyedSequence.prototype.map = function(mapper, context) {var this$0 = this;\n var mappedSequence = mapFactory(this, mapper, context);\n if (!this._useKeys) {\n mappedSequence.valueSeq = function() {return this$0._iter.toSeq().map(mapper, context)};\n }\n return mappedSequence;\n };\n\n ToKeyedSequence.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n var ii;\n return this._iter.__iterate(\n this._useKeys ?\n function(v, k) {return fn(v, k, this$0)} :\n ((ii = reverse ? resolveSize(this) : 0),\n function(v ) {return fn(v, reverse ? 
--ii : ii++, this$0)}),\n reverse\n );\n };\n\n ToKeyedSequence.prototype.__iterator = function(type, reverse) {\n if (this._useKeys) {\n return this._iter.__iterator(type, reverse);\n }\n var iterator = this._iter.__iterator(ITERATE_VALUES, reverse);\n var ii = reverse ? resolveSize(this) : 0;\n return new Iterator(function() {\n var step = iterator.next();\n return step.done ? step :\n iteratorValue(type, reverse ? --ii : ii++, step.value, step);\n });\n };\n\n ToKeyedSequence.prototype[IS_ORDERED_SENTINEL] = true;\n\n\n createClass(ToIndexedSequence, IndexedSeq);\n function ToIndexedSequence(iter) {\n this._iter = iter;\n this.size = iter.size;\n }\n\n ToIndexedSequence.prototype.includes = function(value) {\n return this._iter.includes(value);\n };\n\n ToIndexedSequence.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n var iterations = 0;\n return this._iter.__iterate(function(v ) {return fn(v, iterations++, this$0)}, reverse);\n };\n\n ToIndexedSequence.prototype.__iterator = function(type, reverse) {\n var iterator = this._iter.__iterator(ITERATE_VALUES, reverse);\n var iterations = 0;\n return new Iterator(function() {\n var step = iterator.next();\n return step.done ? step :\n iteratorValue(type, iterations++, step.value, step)\n });\n };\n\n\n\n createClass(ToSetSequence, SetSeq);\n function ToSetSequence(iter) {\n this._iter = iter;\n this.size = iter.size;\n }\n\n ToSetSequence.prototype.has = function(key) {\n return this._iter.includes(key);\n };\n\n ToSetSequence.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n return this._iter.__iterate(function(v ) {return fn(v, v, this$0)}, reverse);\n };\n\n ToSetSequence.prototype.__iterator = function(type, reverse) {\n var iterator = this._iter.__iterator(ITERATE_VALUES, reverse);\n return new Iterator(function() {\n var step = iterator.next();\n return step.done ? 
step :\n iteratorValue(type, step.value, step.value, step);\n });\n };\n\n\n\n createClass(FromEntriesSequence, KeyedSeq);\n function FromEntriesSequence(entries) {\n this._iter = entries;\n this.size = entries.size;\n }\n\n FromEntriesSequence.prototype.entrySeq = function() {\n return this._iter.toSeq();\n };\n\n FromEntriesSequence.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n return this._iter.__iterate(function(entry ) {\n // Check if entry exists first so array access doesn't throw for holes\n // in the parent iteration.\n if (entry) {\n validateEntry(entry);\n var indexedIterable = isIterable(entry);\n return fn(\n indexedIterable ? entry.get(1) : entry[1],\n indexedIterable ? entry.get(0) : entry[0],\n this$0\n );\n }\n }, reverse);\n };\n\n FromEntriesSequence.prototype.__iterator = function(type, reverse) {\n var iterator = this._iter.__iterator(ITERATE_VALUES, reverse);\n return new Iterator(function() {\n while (true) {\n var step = iterator.next();\n if (step.done) {\n return step;\n }\n var entry = step.value;\n // Check if entry exists first so array access doesn't throw for holes\n // in the parent iteration.\n if (entry) {\n validateEntry(entry);\n var indexedIterable = isIterable(entry);\n return iteratorValue(\n type,\n indexedIterable ? entry.get(0) : entry[0],\n indexedIterable ? 
entry.get(1) : entry[1],\n step\n );\n }\n }\n });\n };\n\n\n ToIndexedSequence.prototype.cacheResult =\n ToKeyedSequence.prototype.cacheResult =\n ToSetSequence.prototype.cacheResult =\n FromEntriesSequence.prototype.cacheResult =\n cacheResultThrough;\n\n\n function flipFactory(iterable) {\n var flipSequence = makeSequence(iterable);\n flipSequence._iter = iterable;\n flipSequence.size = iterable.size;\n flipSequence.flip = function() {return iterable};\n flipSequence.reverse = function () {\n var reversedSequence = iterable.reverse.apply(this); // super.reverse()\n reversedSequence.flip = function() {return iterable.reverse()};\n return reversedSequence;\n };\n flipSequence.has = function(key ) {return iterable.includes(key)};\n flipSequence.includes = function(key ) {return iterable.has(key)};\n flipSequence.cacheResult = cacheResultThrough;\n flipSequence.__iterateUncached = function (fn, reverse) {var this$0 = this;\n return iterable.__iterate(function(v, k) {return fn(k, v, this$0) !== false}, reverse);\n }\n flipSequence.__iteratorUncached = function(type, reverse) {\n if (type === ITERATE_ENTRIES) {\n var iterator = iterable.__iterator(type, reverse);\n return new Iterator(function() {\n var step = iterator.next();\n if (!step.done) {\n var k = step.value[0];\n step.value[0] = step.value[1];\n step.value[1] = k;\n }\n return step;\n });\n }\n return iterable.__iterator(\n type === ITERATE_VALUES ? 
ITERATE_KEYS : ITERATE_VALUES,\n reverse\n );\n }\n return flipSequence;\n }\n\n\n function mapFactory(iterable, mapper, context) {\n var mappedSequence = makeSequence(iterable);\n mappedSequence.size = iterable.size;\n mappedSequence.has = function(key ) {return iterable.has(key)};\n mappedSequence.get = function(key, notSetValue) {\n var v = iterable.get(key, NOT_SET);\n return v === NOT_SET ?\n notSetValue :\n mapper.call(context, v, key, iterable);\n };\n mappedSequence.__iterateUncached = function (fn, reverse) {var this$0 = this;\n return iterable.__iterate(\n function(v, k, c) {return fn(mapper.call(context, v, k, c), k, this$0) !== false},\n reverse\n );\n }\n mappedSequence.__iteratorUncached = function (type, reverse) {\n var iterator = iterable.__iterator(ITERATE_ENTRIES, reverse);\n return new Iterator(function() {\n var step = iterator.next();\n if (step.done) {\n return step;\n }\n var entry = step.value;\n var key = entry[0];\n return iteratorValue(\n type,\n key,\n mapper.call(context, entry[1], key, iterable),\n step\n );\n });\n }\n return mappedSequence;\n }\n\n\n function reverseFactory(iterable, useKeys) {\n var reversedSequence = makeSequence(iterable);\n reversedSequence._iter = iterable;\n reversedSequence.size = iterable.size;\n reversedSequence.reverse = function() {return iterable};\n if (iterable.flip) {\n reversedSequence.flip = function () {\n var flipSequence = flipFactory(iterable);\n flipSequence.reverse = function() {return iterable.flip()};\n return flipSequence;\n };\n }\n reversedSequence.get = function(key, notSetValue) \n {return iterable.get(useKeys ? key : -1 - key, notSetValue)};\n reversedSequence.has = function(key )\n {return iterable.has(useKeys ? 
key : -1 - key)};\n reversedSequence.includes = function(value ) {return iterable.includes(value)};\n reversedSequence.cacheResult = cacheResultThrough;\n reversedSequence.__iterate = function (fn, reverse) {var this$0 = this;\n return iterable.__iterate(function(v, k) {return fn(v, k, this$0)}, !reverse);\n };\n reversedSequence.__iterator =\n function(type, reverse) {return iterable.__iterator(type, !reverse)};\n return reversedSequence;\n }\n\n\n function filterFactory(iterable, predicate, context, useKeys) {\n var filterSequence = makeSequence(iterable);\n if (useKeys) {\n filterSequence.has = function(key ) {\n var v = iterable.get(key, NOT_SET);\n return v !== NOT_SET && !!predicate.call(context, v, key, iterable);\n };\n filterSequence.get = function(key, notSetValue) {\n var v = iterable.get(key, NOT_SET);\n return v !== NOT_SET && predicate.call(context, v, key, iterable) ?\n v : notSetValue;\n };\n }\n filterSequence.__iterateUncached = function (fn, reverse) {var this$0 = this;\n var iterations = 0;\n iterable.__iterate(function(v, k, c) {\n if (predicate.call(context, v, k, c)) {\n iterations++;\n return fn(v, useKeys ? k : iterations - 1, this$0);\n }\n }, reverse);\n return iterations;\n };\n filterSequence.__iteratorUncached = function (type, reverse) {\n var iterator = iterable.__iterator(ITERATE_ENTRIES, reverse);\n var iterations = 0;\n return new Iterator(function() {\n while (true) {\n var step = iterator.next();\n if (step.done) {\n return step;\n }\n var entry = step.value;\n var key = entry[0];\n var value = entry[1];\n if (predicate.call(context, value, key, iterable)) {\n return iteratorValue(type, useKeys ? 
key : iterations++, value, step);\n }\n }\n });\n }\n return filterSequence;\n }\n\n\n function countByFactory(iterable, grouper, context) {\n var groups = Map().asMutable();\n iterable.__iterate(function(v, k) {\n groups.update(\n grouper.call(context, v, k, iterable),\n 0,\n function(a ) {return a + 1}\n );\n });\n return groups.asImmutable();\n }\n\n\n function groupByFactory(iterable, grouper, context) {\n var isKeyedIter = isKeyed(iterable);\n var groups = (isOrdered(iterable) ? OrderedMap() : Map()).asMutable();\n iterable.__iterate(function(v, k) {\n groups.update(\n grouper.call(context, v, k, iterable),\n function(a ) {return (a = a || [], a.push(isKeyedIter ? [k, v] : v), a)}\n );\n });\n var coerce = iterableClass(iterable);\n return groups.map(function(arr ) {return reify(iterable, coerce(arr))});\n }\n\n\n function sliceFactory(iterable, begin, end, useKeys) {\n var originalSize = iterable.size;\n\n // Sanitize begin & end using this shorthand for ToInt32(argument)\n // http://www.ecma-international.org/ecma-262/6.0/#sec-toint32\n if (begin !== undefined) {\n begin = begin | 0;\n }\n if (end !== undefined) {\n if (end === Infinity) {\n end = originalSize;\n } else {\n end = end | 0;\n }\n }\n\n if (wholeSlice(begin, end, originalSize)) {\n return iterable;\n }\n\n var resolvedBegin = resolveBegin(begin, originalSize);\n var resolvedEnd = resolveEnd(end, originalSize);\n\n // begin or end will be NaN if they were provided as negative numbers and\n // this iterable's size is unknown. 
In that case, cache first so there is\n // a known size and these do not resolve to NaN.\n if (resolvedBegin !== resolvedBegin || resolvedEnd !== resolvedEnd) {\n return sliceFactory(iterable.toSeq().cacheResult(), begin, end, useKeys);\n }\n\n // Note: resolvedEnd is undefined when the original sequence's length is\n // unknown and this slice did not supply an end and should contain all\n // elements after resolvedBegin.\n // In that case, resolvedSize will be NaN and sliceSize will remain undefined.\n var resolvedSize = resolvedEnd - resolvedBegin;\n var sliceSize;\n if (resolvedSize === resolvedSize) {\n sliceSize = resolvedSize < 0 ? 0 : resolvedSize;\n }\n\n var sliceSeq = makeSequence(iterable);\n\n // If iterable.size is undefined, the size of the realized sliceSeq is\n // unknown at this point unless the number of items to slice is 0\n sliceSeq.size = sliceSize === 0 ? sliceSize : iterable.size && sliceSize || undefined;\n\n if (!useKeys && isSeq(iterable) && sliceSize >= 0) {\n sliceSeq.get = function (index, notSetValue) {\n index = wrapIndex(this, index);\n return index >= 0 && index < sliceSize ?\n iterable.get(index + resolvedBegin, notSetValue) :\n notSetValue;\n }\n }\n\n sliceSeq.__iterateUncached = function(fn, reverse) {var this$0 = this;\n if (sliceSize === 0) {\n return 0;\n }\n if (reverse) {\n return this.cacheResult().__iterate(fn, reverse);\n }\n var skipped = 0;\n var isSkipping = true;\n var iterations = 0;\n iterable.__iterate(function(v, k) {\n if (!(isSkipping && (isSkipping = skipped++ < resolvedBegin))) {\n iterations++;\n return fn(v, useKeys ? 
k : iterations - 1, this$0) !== false &&\n iterations !== sliceSize;\n }\n });\n return iterations;\n };\n\n sliceSeq.__iteratorUncached = function(type, reverse) {\n if (sliceSize !== 0 && reverse) {\n return this.cacheResult().__iterator(type, reverse);\n }\n // Don't bother instantiating parent iterator if taking 0.\n var iterator = sliceSize !== 0 && iterable.__iterator(type, reverse);\n var skipped = 0;\n var iterations = 0;\n return new Iterator(function() {\n while (skipped++ < resolvedBegin) {\n iterator.next();\n }\n if (++iterations > sliceSize) {\n return iteratorDone();\n }\n var step = iterator.next();\n if (useKeys || type === ITERATE_VALUES) {\n return step;\n } else if (type === ITERATE_KEYS) {\n return iteratorValue(type, iterations - 1, undefined, step);\n } else {\n return iteratorValue(type, iterations - 1, step.value[1], step);\n }\n });\n }\n\n return sliceSeq;\n }\n\n\n function takeWhileFactory(iterable, predicate, context) {\n var takeSequence = makeSequence(iterable);\n takeSequence.__iterateUncached = function(fn, reverse) {var this$0 = this;\n if (reverse) {\n return this.cacheResult().__iterate(fn, reverse);\n }\n var iterations = 0;\n iterable.__iterate(function(v, k, c) \n {return predicate.call(context, v, k, c) && ++iterations && fn(v, k, this$0)}\n );\n return iterations;\n };\n takeSequence.__iteratorUncached = function(type, reverse) {var this$0 = this;\n if (reverse) {\n return this.cacheResult().__iterator(type, reverse);\n }\n var iterator = iterable.__iterator(ITERATE_ENTRIES, reverse);\n var iterating = true;\n return new Iterator(function() {\n if (!iterating) {\n return iteratorDone();\n }\n var step = iterator.next();\n if (step.done) {\n return step;\n }\n var entry = step.value;\n var k = entry[0];\n var v = entry[1];\n if (!predicate.call(context, v, k, this$0)) {\n iterating = false;\n return iteratorDone();\n }\n return type === ITERATE_ENTRIES ? 
step :\n iteratorValue(type, k, v, step);\n });\n };\n return takeSequence;\n }\n\n\n function skipWhileFactory(iterable, predicate, context, useKeys) {\n var skipSequence = makeSequence(iterable);\n skipSequence.__iterateUncached = function (fn, reverse) {var this$0 = this;\n if (reverse) {\n return this.cacheResult().__iterate(fn, reverse);\n }\n var isSkipping = true;\n var iterations = 0;\n iterable.__iterate(function(v, k, c) {\n if (!(isSkipping && (isSkipping = predicate.call(context, v, k, c)))) {\n iterations++;\n return fn(v, useKeys ? k : iterations - 1, this$0);\n }\n });\n return iterations;\n };\n skipSequence.__iteratorUncached = function(type, reverse) {var this$0 = this;\n if (reverse) {\n return this.cacheResult().__iterator(type, reverse);\n }\n var iterator = iterable.__iterator(ITERATE_ENTRIES, reverse);\n var skipping = true;\n var iterations = 0;\n return new Iterator(function() {\n var step, k, v;\n do {\n step = iterator.next();\n if (step.done) {\n if (useKeys || type === ITERATE_VALUES) {\n return step;\n } else if (type === ITERATE_KEYS) {\n return iteratorValue(type, iterations++, undefined, step);\n } else {\n return iteratorValue(type, iterations++, step.value[1], step);\n }\n }\n var entry = step.value;\n k = entry[0];\n v = entry[1];\n skipping && (skipping = predicate.call(context, v, k, this$0));\n } while (skipping);\n return type === ITERATE_ENTRIES ? step :\n iteratorValue(type, k, v, step);\n });\n };\n return skipSequence;\n }\n\n\n function concatFactory(iterable, values) {\n var isKeyedIterable = isKeyed(iterable);\n var iters = [iterable].concat(values).map(function(v ) {\n if (!isIterable(v)) {\n v = isKeyedIterable ?\n keyedSeqFromValue(v) :\n indexedSeqFromValue(Array.isArray(v) ? 
v : [v]);\n } else if (isKeyedIterable) {\n v = KeyedIterable(v);\n }\n return v;\n }).filter(function(v ) {return v.size !== 0});\n\n if (iters.length === 0) {\n return iterable;\n }\n\n if (iters.length === 1) {\n var singleton = iters[0];\n if (singleton === iterable ||\n isKeyedIterable && isKeyed(singleton) ||\n isIndexed(iterable) && isIndexed(singleton)) {\n return singleton;\n }\n }\n\n var concatSeq = new ArraySeq(iters);\n if (isKeyedIterable) {\n concatSeq = concatSeq.toKeyedSeq();\n } else if (!isIndexed(iterable)) {\n concatSeq = concatSeq.toSetSeq();\n }\n concatSeq = concatSeq.flatten(true);\n concatSeq.size = iters.reduce(\n function(sum, seq) {\n if (sum !== undefined) {\n var size = seq.size;\n if (size !== undefined) {\n return sum + size;\n }\n }\n },\n 0\n );\n return concatSeq;\n }\n\n\n function flattenFactory(iterable, depth, useKeys) {\n var flatSequence = makeSequence(iterable);\n flatSequence.__iterateUncached = function(fn, reverse) {\n var iterations = 0;\n var stopped = false;\n function flatDeep(iter, currentDepth) {var this$0 = this;\n iter.__iterate(function(v, k) {\n if ((!depth || currentDepth < depth) && isIterable(v)) {\n flatDeep(v, currentDepth + 1);\n } else if (fn(v, useKeys ? k : iterations++, this$0) === false) {\n stopped = true;\n }\n return !stopped;\n }, reverse);\n }\n flatDeep(iterable, 0);\n return iterations;\n }\n flatSequence.__iteratorUncached = function(type, reverse) {\n var iterator = iterable.__iterator(type, reverse);\n var stack = [];\n var iterations = 0;\n return new Iterator(function() {\n while (iterator) {\n var step = iterator.next();\n if (step.done !== false) {\n iterator = stack.pop();\n continue;\n }\n var v = step.value;\n if (type === ITERATE_ENTRIES) {\n v = v[1];\n }\n if ((!depth || stack.length < depth) && isIterable(v)) {\n stack.push(iterator);\n iterator = v.__iterator(type, reverse);\n } else {\n return useKeys ? 
step : iteratorValue(type, iterations++, v, step);\n }\n }\n return iteratorDone();\n });\n }\n return flatSequence;\n }\n\n\n function flatMapFactory(iterable, mapper, context) {\n var coerce = iterableClass(iterable);\n return iterable.toSeq().map(\n function(v, k) {return coerce(mapper.call(context, v, k, iterable))}\n ).flatten(true);\n }\n\n\n function interposeFactory(iterable, separator) {\n var interposedSequence = makeSequence(iterable);\n interposedSequence.size = iterable.size && iterable.size * 2 -1;\n interposedSequence.__iterateUncached = function(fn, reverse) {var this$0 = this;\n var iterations = 0;\n iterable.__iterate(function(v, k) \n {return (!iterations || fn(separator, iterations++, this$0) !== false) &&\n fn(v, iterations++, this$0) !== false},\n reverse\n );\n return iterations;\n };\n interposedSequence.__iteratorUncached = function(type, reverse) {\n var iterator = iterable.__iterator(ITERATE_VALUES, reverse);\n var iterations = 0;\n var step;\n return new Iterator(function() {\n if (!step || iterations % 2) {\n step = iterator.next();\n if (step.done) {\n return step;\n }\n }\n return iterations % 2 ?\n iteratorValue(type, iterations++, separator) :\n iteratorValue(type, iterations++, step.value, step);\n });\n };\n return interposedSequence;\n }\n\n\n function sortFactory(iterable, comparator, mapper) {\n if (!comparator) {\n comparator = defaultComparator;\n }\n var isKeyedIterable = isKeyed(iterable);\n var index = 0;\n var entries = iterable.toSeq().map(\n function(v, k) {return [k, v, index++, mapper ? mapper(v, k, iterable) : v]}\n ).toArray();\n entries.sort(function(a, b) {return comparator(a[3], b[3]) || a[2] - b[2]}).forEach(\n isKeyedIterable ?\n function(v, i) { entries[i].length = 2; } :\n function(v, i) { entries[i] = v[1]; }\n );\n return isKeyedIterable ? KeyedSeq(entries) :\n isIndexed(iterable) ? 
IndexedSeq(entries) :\n SetSeq(entries);\n }\n\n\n function maxFactory(iterable, comparator, mapper) {\n if (!comparator) {\n comparator = defaultComparator;\n }\n if (mapper) {\n var entry = iterable.toSeq()\n .map(function(v, k) {return [v, mapper(v, k, iterable)]})\n .reduce(function(a, b) {return maxCompare(comparator, a[1], b[1]) ? b : a});\n return entry && entry[0];\n } else {\n return iterable.reduce(function(a, b) {return maxCompare(comparator, a, b) ? b : a});\n }\n }\n\n function maxCompare(comparator, a, b) {\n var comp = comparator(b, a);\n // b is considered the new max if the comparator declares them equal, but\n // they are not equal and b is in fact a nullish value.\n return (comp === 0 && b !== a && (b === undefined || b === null || b !== b)) || comp > 0;\n }\n\n\n function zipWithFactory(keyIter, zipper, iters) {\n var zipSequence = makeSequence(keyIter);\n zipSequence.size = new ArraySeq(iters).map(function(i ) {return i.size}).min();\n // Note: this a generic base implementation of __iterate in terms of\n // __iterator which may be more generically useful in the future.\n zipSequence.__iterate = function(fn, reverse) {\n /* generic:\n var iterator = this.__iterator(ITERATE_ENTRIES, reverse);\n var step;\n var iterations = 0;\n while (!(step = iterator.next()).done) {\n iterations++;\n if (fn(step.value[1], step.value[0], this) === false) {\n break;\n }\n }\n return iterations;\n */\n // indexed:\n var iterator = this.__iterator(ITERATE_VALUES, reverse);\n var step;\n var iterations = 0;\n while (!(step = iterator.next()).done) {\n if (fn(step.value, iterations++, this) === false) {\n break;\n }\n }\n return iterations;\n };\n zipSequence.__iteratorUncached = function(type, reverse) {\n var iterators = iters.map(function(i )\n {return (i = Iterable(i), getIterator(reverse ? 
i.reverse() : i))}\n );\n var iterations = 0;\n var isDone = false;\n return new Iterator(function() {\n var steps;\n if (!isDone) {\n steps = iterators.map(function(i ) {return i.next()});\n isDone = steps.some(function(s ) {return s.done});\n }\n if (isDone) {\n return iteratorDone();\n }\n return iteratorValue(\n type,\n iterations++,\n zipper.apply(null, steps.map(function(s ) {return s.value}))\n );\n });\n };\n return zipSequence\n }\n\n\n // #pragma Helper Functions\n\n function reify(iter, seq) {\n return isSeq(iter) ? seq : iter.constructor(seq);\n }\n\n function validateEntry(entry) {\n if (entry !== Object(entry)) {\n throw new TypeError('Expected [K, V] tuple: ' + entry);\n }\n }\n\n function resolveSize(iter) {\n assertNotInfinite(iter.size);\n return ensureSize(iter);\n }\n\n function iterableClass(iterable) {\n return isKeyed(iterable) ? KeyedIterable :\n isIndexed(iterable) ? IndexedIterable :\n SetIterable;\n }\n\n function makeSequence(iterable) {\n return Object.create(\n (\n isKeyed(iterable) ? KeyedSeq :\n isIndexed(iterable) ? IndexedSeq :\n SetSeq\n ).prototype\n );\n }\n\n function cacheResultThrough() {\n if (this._iter.cacheResult) {\n this._iter.cacheResult();\n this.size = this._iter.size;\n return this;\n } else {\n return Seq.prototype.cacheResult.call(this);\n }\n }\n\n function defaultComparator(a, b) {\n return a > b ? 1 : a < b ? 
-1 : 0;\n }\n\n function forceIterator(keyPath) {\n var iter = getIterator(keyPath);\n if (!iter) {\n // Array might not be iterable in this environment, so we need a fallback\n // to our wrapped type.\n if (!isArrayLike(keyPath)) {\n throw new TypeError('Expected iterable or array-like: ' + keyPath);\n }\n iter = getIterator(Iterable(keyPath));\n }\n return iter;\n }\n\n createClass(Record, KeyedCollection);\n\n function Record(defaultValues, name) {\n var hasInitialized;\n\n var RecordType = function Record(values) {\n if (values instanceof RecordType) {\n return values;\n }\n if (!(this instanceof RecordType)) {\n return new RecordType(values);\n }\n if (!hasInitialized) {\n hasInitialized = true;\n var keys = Object.keys(defaultValues);\n setProps(RecordTypePrototype, keys);\n RecordTypePrototype.size = keys.length;\n RecordTypePrototype._name = name;\n RecordTypePrototype._keys = keys;\n RecordTypePrototype._defaultValues = defaultValues;\n }\n this._map = Map(values);\n };\n\n var RecordTypePrototype = RecordType.prototype = Object.create(RecordPrototype);\n RecordTypePrototype.constructor = RecordType;\n\n return RecordType;\n }\n\n Record.prototype.toString = function() {\n return this.__toString(recordName(this) + ' {', '}');\n };\n\n // @pragma Access\n\n Record.prototype.has = function(k) {\n return this._defaultValues.hasOwnProperty(k);\n };\n\n Record.prototype.get = function(k, notSetValue) {\n if (!this.has(k)) {\n return notSetValue;\n }\n var defaultVal = this._defaultValues[k];\n return this._map ? 
this._map.get(k, defaultVal) : defaultVal;\n };\n\n // @pragma Modification\n\n Record.prototype.clear = function() {\n if (this.__ownerID) {\n this._map && this._map.clear();\n return this;\n }\n var RecordType = this.constructor;\n return RecordType._empty || (RecordType._empty = makeRecord(this, emptyMap()));\n };\n\n Record.prototype.set = function(k, v) {\n if (!this.has(k)) {\n throw new Error('Cannot set unknown key \"' + k + '\" on ' + recordName(this));\n }\n if (this._map && !this._map.has(k)) {\n var defaultVal = this._defaultValues[k];\n if (v === defaultVal) {\n return this;\n }\n }\n var newMap = this._map && this._map.set(k, v);\n if (this.__ownerID || newMap === this._map) {\n return this;\n }\n return makeRecord(this, newMap);\n };\n\n Record.prototype.remove = function(k) {\n if (!this.has(k)) {\n return this;\n }\n var newMap = this._map && this._map.remove(k);\n if (this.__ownerID || newMap === this._map) {\n return this;\n }\n return makeRecord(this, newMap);\n };\n\n Record.prototype.wasAltered = function() {\n return this._map.wasAltered();\n };\n\n Record.prototype.__iterator = function(type, reverse) {var this$0 = this;\n return KeyedIterable(this._defaultValues).map(function(_, k) {return this$0.get(k)}).__iterator(type, reverse);\n };\n\n Record.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n return KeyedIterable(this._defaultValues).map(function(_, k) {return this$0.get(k)}).__iterate(fn, reverse);\n };\n\n Record.prototype.__ensureOwner = function(ownerID) {\n if (ownerID === this.__ownerID) {\n return this;\n }\n var newMap = this._map && this._map.__ensureOwner(ownerID);\n if (!ownerID) {\n this.__ownerID = ownerID;\n this._map = newMap;\n return this;\n }\n return makeRecord(this, newMap, ownerID);\n };\n\n\n var RecordPrototype = Record.prototype;\n RecordPrototype[DELETE] = RecordPrototype.remove;\n RecordPrototype.deleteIn =\n RecordPrototype.removeIn = MapPrototype.removeIn;\n RecordPrototype.merge = 
MapPrototype.merge;\n RecordPrototype.mergeWith = MapPrototype.mergeWith;\n RecordPrototype.mergeIn = MapPrototype.mergeIn;\n RecordPrototype.mergeDeep = MapPrototype.mergeDeep;\n RecordPrototype.mergeDeepWith = MapPrototype.mergeDeepWith;\n RecordPrototype.mergeDeepIn = MapPrototype.mergeDeepIn;\n RecordPrototype.setIn = MapPrototype.setIn;\n RecordPrototype.update = MapPrototype.update;\n RecordPrototype.updateIn = MapPrototype.updateIn;\n RecordPrototype.withMutations = MapPrototype.withMutations;\n RecordPrototype.asMutable = MapPrototype.asMutable;\n RecordPrototype.asImmutable = MapPrototype.asImmutable;\n\n\n function makeRecord(likeRecord, map, ownerID) {\n var record = Object.create(Object.getPrototypeOf(likeRecord));\n record._map = map;\n record.__ownerID = ownerID;\n return record;\n }\n\n function recordName(record) {\n return record._name || record.constructor.name || 'Record';\n }\n\n function setProps(prototype, names) {\n try {\n names.forEach(setProp.bind(undefined, prototype));\n } catch (error) {\n // Object.defineProperty failed. Probably IE8.\n }\n }\n\n function setProp(prototype, name) {\n Object.defineProperty(prototype, name, {\n get: function() {\n return this.get(name);\n },\n set: function(value) {\n invariant(this.__ownerID, 'Cannot set on an immutable record.');\n this.set(name, value);\n }\n });\n }\n\n createClass(Set, SetCollection);\n\n // @pragma Construction\n\n function Set(value) {\n return value === null || value === undefined ? emptySet() :\n isSet(value) && !isOrdered(value) ? 
value :\n emptySet().withMutations(function(set ) {\n var iter = SetIterable(value);\n assertNotInfinite(iter.size);\n iter.forEach(function(v ) {return set.add(v)});\n });\n }\n\n Set.of = function(/*...values*/) {\n return this(arguments);\n };\n\n Set.fromKeys = function(value) {\n return this(KeyedIterable(value).keySeq());\n };\n\n Set.prototype.toString = function() {\n return this.__toString('Set {', '}');\n };\n\n // @pragma Access\n\n Set.prototype.has = function(value) {\n return this._map.has(value);\n };\n\n // @pragma Modification\n\n Set.prototype.add = function(value) {\n return updateSet(this, this._map.set(value, true));\n };\n\n Set.prototype.remove = function(value) {\n return updateSet(this, this._map.remove(value));\n };\n\n Set.prototype.clear = function() {\n return updateSet(this, this._map.clear());\n };\n\n // @pragma Composition\n\n Set.prototype.union = function() {var iters = SLICE$0.call(arguments, 0);\n iters = iters.filter(function(x ) {return x.size !== 0});\n if (iters.length === 0) {\n return this;\n }\n if (this.size === 0 && !this.__ownerID && iters.length === 1) {\n return this.constructor(iters[0]);\n }\n return this.withMutations(function(set ) {\n for (var ii = 0; ii < iters.length; ii++) {\n SetIterable(iters[ii]).forEach(function(value ) {return set.add(value)});\n }\n });\n };\n\n Set.prototype.intersect = function() {var iters = SLICE$0.call(arguments, 0);\n if (iters.length === 0) {\n return this;\n }\n iters = iters.map(function(iter ) {return SetIterable(iter)});\n var originalSet = this;\n return this.withMutations(function(set ) {\n originalSet.forEach(function(value ) {\n if (!iters.every(function(iter ) {return iter.includes(value)})) {\n set.remove(value);\n }\n });\n });\n };\n\n Set.prototype.subtract = function() {var iters = SLICE$0.call(arguments, 0);\n if (iters.length === 0) {\n return this;\n }\n iters = iters.map(function(iter ) {return SetIterable(iter)});\n var originalSet = this;\n return 
this.withMutations(function(set ) {\n originalSet.forEach(function(value ) {\n if (iters.some(function(iter ) {return iter.includes(value)})) {\n set.remove(value);\n }\n });\n });\n };\n\n Set.prototype.merge = function() {\n return this.union.apply(this, arguments);\n };\n\n Set.prototype.mergeWith = function(merger) {var iters = SLICE$0.call(arguments, 1);\n return this.union.apply(this, iters);\n };\n\n Set.prototype.sort = function(comparator) {\n // Late binding\n return OrderedSet(sortFactory(this, comparator));\n };\n\n Set.prototype.sortBy = function(mapper, comparator) {\n // Late binding\n return OrderedSet(sortFactory(this, comparator, mapper));\n };\n\n Set.prototype.wasAltered = function() {\n return this._map.wasAltered();\n };\n\n Set.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n return this._map.__iterate(function(_, k) {return fn(k, k, this$0)}, reverse);\n };\n\n Set.prototype.__iterator = function(type, reverse) {\n return this._map.map(function(_, k) {return k}).__iterator(type, reverse);\n };\n\n Set.prototype.__ensureOwner = function(ownerID) {\n if (ownerID === this.__ownerID) {\n return this;\n }\n var newMap = this._map.__ensureOwner(ownerID);\n if (!ownerID) {\n this.__ownerID = ownerID;\n this._map = newMap;\n return this;\n }\n return this.__make(newMap, ownerID);\n };\n\n\n function isSet(maybeSet) {\n return !!(maybeSet && maybeSet[IS_SET_SENTINEL]);\n }\n\n Set.isSet = isSet;\n\n var IS_SET_SENTINEL = '@@__IMMUTABLE_SET__@@';\n\n var SetPrototype = Set.prototype;\n SetPrototype[IS_SET_SENTINEL] = true;\n SetPrototype[DELETE] = SetPrototype.remove;\n SetPrototype.mergeDeep = SetPrototype.merge;\n SetPrototype.mergeDeepWith = SetPrototype.mergeWith;\n SetPrototype.withMutations = MapPrototype.withMutations;\n SetPrototype.asMutable = MapPrototype.asMutable;\n SetPrototype.asImmutable = MapPrototype.asImmutable;\n\n SetPrototype.__empty = emptySet;\n SetPrototype.__make = makeSet;\n\n function updateSet(set, newMap) 
{\n if (set.__ownerID) {\n set.size = newMap.size;\n set._map = newMap;\n return set;\n }\n return newMap === set._map ? set :\n newMap.size === 0 ? set.__empty() :\n set.__make(newMap);\n }\n\n function makeSet(map, ownerID) {\n var set = Object.create(SetPrototype);\n set.size = map ? map.size : 0;\n set._map = map;\n set.__ownerID = ownerID;\n return set;\n }\n\n var EMPTY_SET;\n function emptySet() {\n return EMPTY_SET || (EMPTY_SET = makeSet(emptyMap()));\n }\n\n createClass(OrderedSet, Set);\n\n // @pragma Construction\n\n function OrderedSet(value) {\n return value === null || value === undefined ? emptyOrderedSet() :\n isOrderedSet(value) ? value :\n emptyOrderedSet().withMutations(function(set ) {\n var iter = SetIterable(value);\n assertNotInfinite(iter.size);\n iter.forEach(function(v ) {return set.add(v)});\n });\n }\n\n OrderedSet.of = function(/*...values*/) {\n return this(arguments);\n };\n\n OrderedSet.fromKeys = function(value) {\n return this(KeyedIterable(value).keySeq());\n };\n\n OrderedSet.prototype.toString = function() {\n return this.__toString('OrderedSet {', '}');\n };\n\n\n function isOrderedSet(maybeOrderedSet) {\n return isSet(maybeOrderedSet) && isOrdered(maybeOrderedSet);\n }\n\n OrderedSet.isOrderedSet = isOrderedSet;\n\n var OrderedSetPrototype = OrderedSet.prototype;\n OrderedSetPrototype[IS_ORDERED_SENTINEL] = true;\n\n OrderedSetPrototype.__empty = emptyOrderedSet;\n OrderedSetPrototype.__make = makeOrderedSet;\n\n function makeOrderedSet(map, ownerID) {\n var set = Object.create(OrderedSetPrototype);\n set.size = map ? map.size : 0;\n set._map = map;\n set.__ownerID = ownerID;\n return set;\n }\n\n var EMPTY_ORDERED_SET;\n function emptyOrderedSet() {\n return EMPTY_ORDERED_SET || (EMPTY_ORDERED_SET = makeOrderedSet(emptyOrderedMap()));\n }\n\n createClass(Stack, IndexedCollection);\n\n // @pragma Construction\n\n function Stack(value) {\n return value === null || value === undefined ? emptyStack() :\n isStack(value) ? 
value :\n emptyStack().unshiftAll(value);\n }\n\n Stack.of = function(/*...values*/) {\n return this(arguments);\n };\n\n Stack.prototype.toString = function() {\n return this.__toString('Stack [', ']');\n };\n\n // @pragma Access\n\n Stack.prototype.get = function(index, notSetValue) {\n var head = this._head;\n index = wrapIndex(this, index);\n while (head && index--) {\n head = head.next;\n }\n return head ? head.value : notSetValue;\n };\n\n Stack.prototype.peek = function() {\n return this._head && this._head.value;\n };\n\n // @pragma Modification\n\n Stack.prototype.push = function(/*...values*/) {\n if (arguments.length === 0) {\n return this;\n }\n var newSize = this.size + arguments.length;\n var head = this._head;\n for (var ii = arguments.length - 1; ii >= 0; ii--) {\n head = {\n value: arguments[ii],\n next: head\n };\n }\n if (this.__ownerID) {\n this.size = newSize;\n this._head = head;\n this.__hash = undefined;\n this.__altered = true;\n return this;\n }\n return makeStack(newSize, head);\n };\n\n Stack.prototype.pushAll = function(iter) {\n iter = IndexedIterable(iter);\n if (iter.size === 0) {\n return this;\n }\n assertNotInfinite(iter.size);\n var newSize = this.size;\n var head = this._head;\n iter.reverse().forEach(function(value ) {\n newSize++;\n head = {\n value: value,\n next: head\n };\n });\n if (this.__ownerID) {\n this.size = newSize;\n this._head = head;\n this.__hash = undefined;\n this.__altered = true;\n return this;\n }\n return makeStack(newSize, head);\n };\n\n Stack.prototype.pop = function() {\n return this.slice(1);\n };\n\n Stack.prototype.unshift = function(/*...values*/) {\n return this.push.apply(this, arguments);\n };\n\n Stack.prototype.unshiftAll = function(iter) {\n return this.pushAll(iter);\n };\n\n Stack.prototype.shift = function() {\n return this.pop.apply(this, arguments);\n };\n\n Stack.prototype.clear = function() {\n if (this.size === 0) {\n return this;\n }\n if (this.__ownerID) {\n this.size = 0;\n 
this._head = undefined;\n this.__hash = undefined;\n this.__altered = true;\n return this;\n }\n return emptyStack();\n };\n\n Stack.prototype.slice = function(begin, end) {\n if (wholeSlice(begin, end, this.size)) {\n return this;\n }\n var resolvedBegin = resolveBegin(begin, this.size);\n var resolvedEnd = resolveEnd(end, this.size);\n if (resolvedEnd !== this.size) {\n // super.slice(begin, end);\n return IndexedCollection.prototype.slice.call(this, begin, end);\n }\n var newSize = this.size - resolvedBegin;\n var head = this._head;\n while (resolvedBegin--) {\n head = head.next;\n }\n if (this.__ownerID) {\n this.size = newSize;\n this._head = head;\n this.__hash = undefined;\n this.__altered = true;\n return this;\n }\n return makeStack(newSize, head);\n };\n\n // @pragma Mutability\n\n Stack.prototype.__ensureOwner = function(ownerID) {\n if (ownerID === this.__ownerID) {\n return this;\n }\n if (!ownerID) {\n this.__ownerID = ownerID;\n this.__altered = false;\n return this;\n }\n return makeStack(this.size, this._head, ownerID, this.__hash);\n };\n\n // @pragma Iteration\n\n Stack.prototype.__iterate = function(fn, reverse) {\n if (reverse) {\n return this.reverse().__iterate(fn);\n }\n var iterations = 0;\n var node = this._head;\n while (node) {\n if (fn(node.value, iterations++, this) === false) {\n break;\n }\n node = node.next;\n }\n return iterations;\n };\n\n Stack.prototype.__iterator = function(type, reverse) {\n if (reverse) {\n return this.reverse().__iterator(type);\n }\n var iterations = 0;\n var node = this._head;\n return new Iterator(function() {\n if (node) {\n var value = node.value;\n node = node.next;\n return iteratorValue(type, iterations++, value);\n }\n return iteratorDone();\n });\n };\n\n\n function isStack(maybeStack) {\n return !!(maybeStack && maybeStack[IS_STACK_SENTINEL]);\n }\n\n Stack.isStack = isStack;\n\n var IS_STACK_SENTINEL = '@@__IMMUTABLE_STACK__@@';\n\n var StackPrototype = Stack.prototype;\n 
StackPrototype[IS_STACK_SENTINEL] = true;\n StackPrototype.withMutations = MapPrototype.withMutations;\n StackPrototype.asMutable = MapPrototype.asMutable;\n StackPrototype.asImmutable = MapPrototype.asImmutable;\n StackPrototype.wasAltered = MapPrototype.wasAltered;\n\n\n function makeStack(size, head, ownerID, hash) {\n var map = Object.create(StackPrototype);\n map.size = size;\n map._head = head;\n map.__ownerID = ownerID;\n map.__hash = hash;\n map.__altered = false;\n return map;\n }\n\n var EMPTY_STACK;\n function emptyStack() {\n return EMPTY_STACK || (EMPTY_STACK = makeStack(0));\n }\n\n /**\n * Contributes additional methods to a constructor\n */\n function mixin(ctor, methods) {\n var keyCopier = function(key ) { ctor.prototype[key] = methods[key]; };\n Object.keys(methods).forEach(keyCopier);\n Object.getOwnPropertySymbols &&\n Object.getOwnPropertySymbols(methods).forEach(keyCopier);\n return ctor;\n }\n\n Iterable.Iterator = Iterator;\n\n mixin(Iterable, {\n\n // ### Conversion to other types\n\n toArray: function() {\n assertNotInfinite(this.size);\n var array = new Array(this.size || 0);\n this.valueSeq().__iterate(function(v, i) { array[i] = v; });\n return array;\n },\n\n toIndexedSeq: function() {\n return new ToIndexedSequence(this);\n },\n\n toJS: function() {\n return this.toSeq().map(\n function(value ) {return value && typeof value.toJS === 'function' ? value.toJS() : value}\n ).__toJS();\n },\n\n toJSON: function() {\n return this.toSeq().map(\n function(value ) {return value && typeof value.toJSON === 'function' ? 
value.toJSON() : value}\n ).__toJS();\n },\n\n toKeyedSeq: function() {\n return new ToKeyedSequence(this, true);\n },\n\n toMap: function() {\n // Use Late Binding here to solve the circular dependency.\n return Map(this.toKeyedSeq());\n },\n\n toObject: function() {\n assertNotInfinite(this.size);\n var object = {};\n this.__iterate(function(v, k) { object[k] = v; });\n return object;\n },\n\n toOrderedMap: function() {\n // Use Late Binding here to solve the circular dependency.\n return OrderedMap(this.toKeyedSeq());\n },\n\n toOrderedSet: function() {\n // Use Late Binding here to solve the circular dependency.\n return OrderedSet(isKeyed(this) ? this.valueSeq() : this);\n },\n\n toSet: function() {\n // Use Late Binding here to solve the circular dependency.\n return Set(isKeyed(this) ? this.valueSeq() : this);\n },\n\n toSetSeq: function() {\n return new ToSetSequence(this);\n },\n\n toSeq: function() {\n return isIndexed(this) ? this.toIndexedSeq() :\n isKeyed(this) ? this.toKeyedSeq() :\n this.toSetSeq();\n },\n\n toStack: function() {\n // Use Late Binding here to solve the circular dependency.\n return Stack(isKeyed(this) ? this.valueSeq() : this);\n },\n\n toList: function() {\n // Use Late Binding here to solve the circular dependency.\n return List(isKeyed(this) ? 
this.valueSeq() : this);\n },\n\n\n // ### Common JavaScript methods and properties\n\n toString: function() {\n return '[Iterable]';\n },\n\n __toString: function(head, tail) {\n if (this.size === 0) {\n return head + tail;\n }\n return head + ' ' + this.toSeq().map(this.__toStringMapper).join(', ') + ' ' + tail;\n },\n\n\n // ### ES6 Collection methods (ES6 Array and Map)\n\n concat: function() {var values = SLICE$0.call(arguments, 0);\n return reify(this, concatFactory(this, values));\n },\n\n includes: function(searchValue) {\n return this.some(function(value ) {return is(value, searchValue)});\n },\n\n entries: function() {\n return this.__iterator(ITERATE_ENTRIES);\n },\n\n every: function(predicate, context) {\n assertNotInfinite(this.size);\n var returnValue = true;\n this.__iterate(function(v, k, c) {\n if (!predicate.call(context, v, k, c)) {\n returnValue = false;\n return false;\n }\n });\n return returnValue;\n },\n\n filter: function(predicate, context) {\n return reify(this, filterFactory(this, predicate, context, true));\n },\n\n find: function(predicate, context, notSetValue) {\n var entry = this.findEntry(predicate, context);\n return entry ? entry[1] : notSetValue;\n },\n\n forEach: function(sideEffect, context) {\n assertNotInfinite(this.size);\n return this.__iterate(context ? sideEffect.bind(context) : sideEffect);\n },\n\n join: function(separator) {\n assertNotInfinite(this.size);\n separator = separator !== undefined ? '' + separator : ',';\n var joined = '';\n var isFirst = true;\n this.__iterate(function(v ) {\n isFirst ? (isFirst = false) : (joined += separator);\n joined += v !== null && v !== undefined ? 
v.toString() : '';\n });\n return joined;\n },\n\n keys: function() {\n return this.__iterator(ITERATE_KEYS);\n },\n\n map: function(mapper, context) {\n return reify(this, mapFactory(this, mapper, context));\n },\n\n reduce: function(reducer, initialReduction, context) {\n assertNotInfinite(this.size);\n var reduction;\n var useFirst;\n if (arguments.length < 2) {\n useFirst = true;\n } else {\n reduction = initialReduction;\n }\n this.__iterate(function(v, k, c) {\n if (useFirst) {\n useFirst = false;\n reduction = v;\n } else {\n reduction = reducer.call(context, reduction, v, k, c);\n }\n });\n return reduction;\n },\n\n reduceRight: function(reducer, initialReduction, context) {\n var reversed = this.toKeyedSeq().reverse();\n return reversed.reduce.apply(reversed, arguments);\n },\n\n reverse: function() {\n return reify(this, reverseFactory(this, true));\n },\n\n slice: function(begin, end) {\n return reify(this, sliceFactory(this, begin, end, true));\n },\n\n some: function(predicate, context) {\n return !this.every(not(predicate), context);\n },\n\n sort: function(comparator) {\n return reify(this, sortFactory(this, comparator));\n },\n\n values: function() {\n return this.__iterator(ITERATE_VALUES);\n },\n\n\n // ### More sequential methods\n\n butLast: function() {\n return this.slice(0, -1);\n },\n\n isEmpty: function() {\n return this.size !== undefined ? this.size === 0 : !this.some(function() {return true});\n },\n\n count: function(predicate, context) {\n return ensureSize(\n predicate ? 
this.toSeq().filter(predicate, context) : this\n );\n },\n\n countBy: function(grouper, context) {\n return countByFactory(this, grouper, context);\n },\n\n equals: function(other) {\n return deepEqual(this, other);\n },\n\n entrySeq: function() {\n var iterable = this;\n if (iterable._cache) {\n // We cache as an entries array, so we can just return the cache!\n return new ArraySeq(iterable._cache);\n }\n var entriesSequence = iterable.toSeq().map(entryMapper).toIndexedSeq();\n entriesSequence.fromEntrySeq = function() {return iterable.toSeq()};\n return entriesSequence;\n },\n\n filterNot: function(predicate, context) {\n return this.filter(not(predicate), context);\n },\n\n findEntry: function(predicate, context, notSetValue) {\n var found = notSetValue;\n this.__iterate(function(v, k, c) {\n if (predicate.call(context, v, k, c)) {\n found = [k, v];\n return false;\n }\n });\n return found;\n },\n\n findKey: function(predicate, context) {\n var entry = this.findEntry(predicate, context);\n return entry && entry[0];\n },\n\n findLast: function(predicate, context, notSetValue) {\n return this.toKeyedSeq().reverse().find(predicate, context, notSetValue);\n },\n\n findLastEntry: function(predicate, context, notSetValue) {\n return this.toKeyedSeq().reverse().findEntry(predicate, context, notSetValue);\n },\n\n findLastKey: function(predicate, context) {\n return this.toKeyedSeq().reverse().findKey(predicate, context);\n },\n\n first: function() {\n return this.find(returnTrue);\n },\n\n flatMap: function(mapper, context) {\n return reify(this, flatMapFactory(this, mapper, context));\n },\n\n flatten: function(depth) {\n return reify(this, flattenFactory(this, depth, true));\n },\n\n fromEntrySeq: function() {\n return new FromEntriesSequence(this);\n },\n\n get: function(searchKey, notSetValue) {\n return this.find(function(_, key) {return is(key, searchKey)}, undefined, notSetValue);\n },\n\n getIn: function(searchKeyPath, notSetValue) {\n var nested = this;\n // 
Note: in an ES6 environment, we would prefer:\n // for (var key of searchKeyPath) {\n var iter = forceIterator(searchKeyPath);\n var step;\n while (!(step = iter.next()).done) {\n var key = step.value;\n nested = nested && nested.get ? nested.get(key, NOT_SET) : NOT_SET;\n if (nested === NOT_SET) {\n return notSetValue;\n }\n }\n return nested;\n },\n\n groupBy: function(grouper, context) {\n return groupByFactory(this, grouper, context);\n },\n\n has: function(searchKey) {\n return this.get(searchKey, NOT_SET) !== NOT_SET;\n },\n\n hasIn: function(searchKeyPath) {\n return this.getIn(searchKeyPath, NOT_SET) !== NOT_SET;\n },\n\n isSubset: function(iter) {\n iter = typeof iter.includes === 'function' ? iter : Iterable(iter);\n return this.every(function(value ) {return iter.includes(value)});\n },\n\n isSuperset: function(iter) {\n iter = typeof iter.isSubset === 'function' ? iter : Iterable(iter);\n return iter.isSubset(this);\n },\n\n keyOf: function(searchValue) {\n return this.findKey(function(value ) {return is(value, searchValue)});\n },\n\n keySeq: function() {\n return this.toSeq().map(keyMapper).toIndexedSeq();\n },\n\n last: function() {\n return this.toSeq().reverse().first();\n },\n\n lastKeyOf: function(searchValue) {\n return this.toKeyedSeq().reverse().keyOf(searchValue);\n },\n\n max: function(comparator) {\n return maxFactory(this, comparator);\n },\n\n maxBy: function(mapper, comparator) {\n return maxFactory(this, comparator, mapper);\n },\n\n min: function(comparator) {\n return maxFactory(this, comparator ? neg(comparator) : defaultNegComparator);\n },\n\n minBy: function(mapper, comparator) {\n return maxFactory(this, comparator ? 
neg(comparator) : defaultNegComparator, mapper);\n },\n\n rest: function() {\n return this.slice(1);\n },\n\n skip: function(amount) {\n return this.slice(Math.max(0, amount));\n },\n\n skipLast: function(amount) {\n return reify(this, this.toSeq().reverse().skip(amount).reverse());\n },\n\n skipWhile: function(predicate, context) {\n return reify(this, skipWhileFactory(this, predicate, context, true));\n },\n\n skipUntil: function(predicate, context) {\n return this.skipWhile(not(predicate), context);\n },\n\n sortBy: function(mapper, comparator) {\n return reify(this, sortFactory(this, comparator, mapper));\n },\n\n take: function(amount) {\n return this.slice(0, Math.max(0, amount));\n },\n\n takeLast: function(amount) {\n return reify(this, this.toSeq().reverse().take(amount).reverse());\n },\n\n takeWhile: function(predicate, context) {\n return reify(this, takeWhileFactory(this, predicate, context));\n },\n\n takeUntil: function(predicate, context) {\n return this.takeWhile(not(predicate), context);\n },\n\n valueSeq: function() {\n return this.toIndexedSeq();\n },\n\n\n // ### Hashable Object\n\n hashCode: function() {\n return this.__hash || (this.__hash = hashIterable(this));\n }\n\n\n // ### Internal\n\n // abstract __iterate(fn, reverse)\n\n // abstract __iterator(type, reverse)\n });\n\n // var IS_ITERABLE_SENTINEL = '@@__IMMUTABLE_ITERABLE__@@';\n // var IS_KEYED_SENTINEL = '@@__IMMUTABLE_KEYED__@@';\n // var IS_INDEXED_SENTINEL = '@@__IMMUTABLE_INDEXED__@@';\n // var IS_ORDERED_SENTINEL = '@@__IMMUTABLE_ORDERED__@@';\n\n var IterablePrototype = Iterable.prototype;\n IterablePrototype[IS_ITERABLE_SENTINEL] = true;\n IterablePrototype[ITERATOR_SYMBOL] = IterablePrototype.values;\n IterablePrototype.__toJS = IterablePrototype.toArray;\n IterablePrototype.__toStringMapper = quoteString;\n IterablePrototype.inspect =\n IterablePrototype.toSource = function() { return this.toString(); };\n IterablePrototype.chain = IterablePrototype.flatMap;\n 
IterablePrototype.contains = IterablePrototype.includes;\n\n mixin(KeyedIterable, {\n\n // ### More sequential methods\n\n flip: function() {\n return reify(this, flipFactory(this));\n },\n\n mapEntries: function(mapper, context) {var this$0 = this;\n var iterations = 0;\n return reify(this,\n this.toSeq().map(\n function(v, k) {return mapper.call(context, [k, v], iterations++, this$0)}\n ).fromEntrySeq()\n );\n },\n\n mapKeys: function(mapper, context) {var this$0 = this;\n return reify(this,\n this.toSeq().flip().map(\n function(k, v) {return mapper.call(context, k, v, this$0)}\n ).flip()\n );\n }\n\n });\n\n var KeyedIterablePrototype = KeyedIterable.prototype;\n KeyedIterablePrototype[IS_KEYED_SENTINEL] = true;\n KeyedIterablePrototype[ITERATOR_SYMBOL] = IterablePrototype.entries;\n KeyedIterablePrototype.__toJS = IterablePrototype.toObject;\n KeyedIterablePrototype.__toStringMapper = function(v, k) {return JSON.stringify(k) + ': ' + quoteString(v)};\n\n\n\n mixin(IndexedIterable, {\n\n // ### Conversion to other types\n\n toKeyedSeq: function() {\n return new ToKeyedSequence(this, false);\n },\n\n\n // ### ES6 Collection methods (ES6 Array and Map)\n\n filter: function(predicate, context) {\n return reify(this, filterFactory(this, predicate, context, false));\n },\n\n findIndex: function(predicate, context) {\n var entry = this.findEntry(predicate, context);\n return entry ? entry[0] : -1;\n },\n\n indexOf: function(searchValue) {\n var key = this.keyOf(searchValue);\n return key === undefined ? -1 : key;\n },\n\n lastIndexOf: function(searchValue) {\n var key = this.lastKeyOf(searchValue);\n return key === undefined ? 
-1 : key;\n },\n\n reverse: function() {\n return reify(this, reverseFactory(this, false));\n },\n\n slice: function(begin, end) {\n return reify(this, sliceFactory(this, begin, end, false));\n },\n\n splice: function(index, removeNum /*, ...values*/) {\n var numArgs = arguments.length;\n removeNum = Math.max(removeNum | 0, 0);\n if (numArgs === 0 || (numArgs === 2 && !removeNum)) {\n return this;\n }\n // If index is negative, it should resolve relative to the size of the\n // collection. However size may be expensive to compute if not cached, so\n // only call count() if the number is in fact negative.\n index = resolveBegin(index, index < 0 ? this.count() : this.size);\n var spliced = this.slice(0, index);\n return reify(\n this,\n numArgs === 1 ?\n spliced :\n spliced.concat(arrCopy(arguments, 2), this.slice(index + removeNum))\n );\n },\n\n\n // ### More collection methods\n\n findLastIndex: function(predicate, context) {\n var entry = this.findLastEntry(predicate, context);\n return entry ? 
entry[0] : -1;\n },\n\n first: function() {\n return this.get(0);\n },\n\n flatten: function(depth) {\n return reify(this, flattenFactory(this, depth, false));\n },\n\n get: function(index, notSetValue) {\n index = wrapIndex(this, index);\n return (index < 0 || (this.size === Infinity ||\n (this.size !== undefined && index > this.size))) ?\n notSetValue :\n this.find(function(_, key) {return key === index}, undefined, notSetValue);\n },\n\n has: function(index) {\n index = wrapIndex(this, index);\n return index >= 0 && (this.size !== undefined ?\n this.size === Infinity || index < this.size :\n this.indexOf(index) !== -1\n );\n },\n\n interpose: function(separator) {\n return reify(this, interposeFactory(this, separator));\n },\n\n interleave: function(/*...iterables*/) {\n var iterables = [this].concat(arrCopy(arguments));\n var zipped = zipWithFactory(this.toSeq(), IndexedSeq.of, iterables);\n var interleaved = zipped.flatten(true);\n if (zipped.size) {\n interleaved.size = zipped.size * iterables.length;\n }\n return reify(this, interleaved);\n },\n\n keySeq: function() {\n return Range(0, this.size);\n },\n\n last: function() {\n return this.get(-1);\n },\n\n skipWhile: function(predicate, context) {\n return reify(this, skipWhileFactory(this, predicate, context, false));\n },\n\n zip: function(/*, ...iterables */) {\n var iterables = [this].concat(arrCopy(arguments));\n return reify(this, zipWithFactory(this, defaultZipper, iterables));\n },\n\n zipWith: function(zipper/*, ...iterables */) {\n var iterables = arrCopy(arguments);\n iterables[0] = this;\n return reify(this, zipWithFactory(this, zipper, iterables));\n }\n\n });\n\n IndexedIterable.prototype[IS_INDEXED_SENTINEL] = true;\n IndexedIterable.prototype[IS_ORDERED_SENTINEL] = true;\n\n\n\n mixin(SetIterable, {\n\n // ### ES6 Collection methods (ES6 Array and Map)\n\n get: function(value, notSetValue) {\n return this.has(value) ? 
value : notSetValue;\n },\n\n includes: function(value) {\n return this.has(value);\n },\n\n\n // ### More sequential methods\n\n keySeq: function() {\n return this.valueSeq();\n }\n\n });\n\n SetIterable.prototype.has = IterablePrototype.includes;\n SetIterable.prototype.contains = SetIterable.prototype.includes;\n\n\n // Mixin subclasses\n\n mixin(KeyedSeq, KeyedIterable.prototype);\n mixin(IndexedSeq, IndexedIterable.prototype);\n mixin(SetSeq, SetIterable.prototype);\n\n mixin(KeyedCollection, KeyedIterable.prototype);\n mixin(IndexedCollection, IndexedIterable.prototype);\n mixin(SetCollection, SetIterable.prototype);\n\n\n // #pragma Helper functions\n\n function keyMapper(v, k) {\n return k;\n }\n\n function entryMapper(v, k) {\n return [k, v];\n }\n\n function not(predicate) {\n return function() {\n return !predicate.apply(this, arguments);\n }\n }\n\n function neg(predicate) {\n return function() {\n return -predicate.apply(this, arguments);\n }\n }\n\n function quoteString(value) {\n return typeof value === 'string' ? JSON.stringify(value) : String(value);\n }\n\n function defaultZipper() {\n return arrCopy(arguments);\n }\n\n function defaultNegComparator(a, b) {\n return a < b ? 1 : a > b ? -1 : 0;\n }\n\n function hashIterable(iterable) {\n if (iterable.size === Infinity) {\n return 0;\n }\n var ordered = isOrdered(iterable);\n var keyed = isKeyed(iterable);\n var h = ordered ? 
1 : 0;\n var size = iterable.__iterate(\n keyed ?\n ordered ?\n function(v, k) { h = 31 * h + hashMerge(hash(v), hash(k)) | 0; } :\n function(v, k) { h = h + hashMerge(hash(v), hash(k)) | 0; } :\n ordered ?\n function(v ) { h = 31 * h + hash(v) | 0; } :\n function(v ) { h = h + hash(v) | 0; }\n );\n return murmurHashOfSize(size, h);\n }\n\n function murmurHashOfSize(size, h) {\n h = imul(h, 0xCC9E2D51);\n h = imul(h << 15 | h >>> -15, 0x1B873593);\n h = imul(h << 13 | h >>> -13, 5);\n h = (h + 0xE6546B64 | 0) ^ size;\n h = imul(h ^ h >>> 16, 0x85EBCA6B);\n h = imul(h ^ h >>> 13, 0xC2B2AE35);\n h = smi(h ^ h >>> 16);\n return h;\n }\n\n function hashMerge(a, b) {\n return a ^ b + 0x9E3779B9 + (a << 6) + (a >> 2) | 0; // int\n }\n\n var Immutable = {\n\n Iterable: Iterable,\n\n Seq: Seq,\n Collection: Collection,\n Map: Map,\n OrderedMap: OrderedMap,\n List: List,\n Stack: Stack,\n Set: Set,\n OrderedSet: OrderedSet,\n\n Record: Record,\n Range: Range,\n Repeat: Repeat,\n\n is: is,\n fromJS: fromJS\n\n };\n\n return Immutable;\n\n}));//# sourceURL=[module]\n//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"file":"./node_modules/immutable/dist/immutable.js.js","sources":["webpack:///./node_modules/immutable/dist/immutable.js?24f7"],"sourcesContent":["/**\n * Copyright (c) 2014-present, Facebook, Inc.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n(function (global, factory) {\n  typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :\n  typeof define === 'function' && define.amd ? 
define(factory) :\n  (global.Immutable = factory());\n}(this, function () { 'use strict';var SLICE$0 = Array.prototype.slice;\n\n  function createClass(ctor, superClass) {\n    if (superClass) {\n      ctor.prototype = Object.create(superClass.prototype);\n    }\n    ctor.prototype.constructor = ctor;\n  }\n\n  function Iterable(value) {\n      return isIterable(value) ? value : Seq(value);\n    }\n\n\n  createClass(KeyedIterable, Iterable);\n    function KeyedIterable(value) {\n      return isKeyed(value) ? value : KeyedSeq(value);\n    }\n\n\n  createClass(IndexedIterable, Iterable);\n    function IndexedIterable(value) {\n      return isIndexed(value) ? value : IndexedSeq(value);\n    }\n\n\n  createClass(SetIterable, Iterable);\n    function SetIterable(value) {\n      return isIterable(value) && !isAssociative(value) ? value : SetSeq(value);\n    }\n\n\n\n  function isIterable(maybeIterable) {\n    return !!(maybeIterable && maybeIterable[IS_ITERABLE_SENTINEL]);\n  }\n\n  function isKeyed(maybeKeyed) {\n    return !!(maybeKeyed && maybeKeyed[IS_KEYED_SENTINEL]);\n  }\n\n  function isIndexed(maybeIndexed) {\n    return !!(maybeIndexed && maybeIndexed[IS_INDEXED_SENTINEL]);\n  }\n\n  function isAssociative(maybeAssociative) {\n    return isKeyed(maybeAssociative) || isIndexed(maybeAssociative);\n  }\n\n  function isOrdered(maybeOrdered) {\n    return !!(maybeOrdered && maybeOrdered[IS_ORDERED_SENTINEL]);\n  }\n\n  Iterable.isIterable = isIterable;\n  Iterable.isKeyed = isKeyed;\n  Iterable.isIndexed = isIndexed;\n  Iterable.isAssociative = isAssociative;\n  Iterable.isOrdered = isOrdered;\n\n  Iterable.Keyed = KeyedIterable;\n  Iterable.Indexed = IndexedIterable;\n  Iterable.Set = SetIterable;\n\n\n  var IS_ITERABLE_SENTINEL = '@@__IMMUTABLE_ITERABLE__@@';\n  var IS_KEYED_SENTINEL = '@@__IMMUTABLE_KEYED__@@';\n  var IS_INDEXED_SENTINEL = '@@__IMMUTABLE_INDEXED__@@';\n  var IS_ORDERED_SENTINEL = '@@__IMMUTABLE_ORDERED__@@';\n\n  // Used for setting prototype 
methods that IE8 chokes on.\n  var DELETE = 'delete';\n\n  // Constants describing the size of trie nodes.\n  var SHIFT = 5; // Resulted in best performance after ______?\n  var SIZE = 1 << SHIFT;\n  var MASK = SIZE - 1;\n\n  // A consistent shared value representing \"not set\" which equals nothing other\n  // than itself, and nothing that could be provided externally.\n  var NOT_SET = {};\n\n  // Boolean references, Rough equivalent of `bool &`.\n  var CHANGE_LENGTH = { value: false };\n  var DID_ALTER = { value: false };\n\n  function MakeRef(ref) {\n    ref.value = false;\n    return ref;\n  }\n\n  function SetRef(ref) {\n    ref && (ref.value = true);\n  }\n\n  // A function which returns a value representing an \"owner\" for transient writes\n  // to tries. The return value will only ever equal itself, and will not equal\n  // the return of any subsequent call of this function.\n  function OwnerID() {}\n\n  // http://jsperf.com/copy-array-inline\n  function arrCopy(arr, offset) {\n    offset = offset || 0;\n    var len = Math.max(0, arr.length - offset);\n    var newArr = new Array(len);\n    for (var ii = 0; ii < len; ii++) {\n      newArr[ii] = arr[ii + offset];\n    }\n    return newArr;\n  }\n\n  function ensureSize(iter) {\n    if (iter.size === undefined) {\n      iter.size = iter.__iterate(returnTrue);\n    }\n    return iter.size;\n  }\n\n  function wrapIndex(iter, index) {\n    // This implements \"is array index\" which the ECMAString spec defines as:\n    //\n    //     A String property name P is an array index if and only if\n    //     ToString(ToUint32(P)) is equal to P and ToUint32(P) is not equal\n    //     to 2^32−1.\n    //\n    // http://www.ecma-international.org/ecma-262/6.0/#sec-array-exotic-objects\n    if (typeof index !== 'number') {\n      var uint32Index = index >>> 0; // N >>> 0 is shorthand for ToUint32\n      if ('' + uint32Index !== index || uint32Index === 4294967295) {\n        return NaN;\n      }\n      index = 
uint32Index;\n    }\n    return index < 0 ? ensureSize(iter) + index : index;\n  }\n\n  function returnTrue() {\n    return true;\n  }\n\n  function wholeSlice(begin, end, size) {\n    return (begin === 0 || (size !== undefined && begin <= -size)) &&\n      (end === undefined || (size !== undefined && end >= size));\n  }\n\n  function resolveBegin(begin, size) {\n    return resolveIndex(begin, size, 0);\n  }\n\n  function resolveEnd(end, size) {\n    return resolveIndex(end, size, size);\n  }\n\n  function resolveIndex(index, size, defaultIndex) {\n    return index === undefined ?\n      defaultIndex :\n      index < 0 ?\n        Math.max(0, size + index) :\n        size === undefined ?\n          index :\n          Math.min(size, index);\n  }\n\n  /* global Symbol */\n\n  var ITERATE_KEYS = 0;\n  var ITERATE_VALUES = 1;\n  var ITERATE_ENTRIES = 2;\n\n  var REAL_ITERATOR_SYMBOL = typeof Symbol === 'function' && Symbol.iterator;\n  var FAUX_ITERATOR_SYMBOL = '@@iterator';\n\n  var ITERATOR_SYMBOL = REAL_ITERATOR_SYMBOL || FAUX_ITERATOR_SYMBOL;\n\n\n  function Iterator(next) {\n      this.next = next;\n    }\n\n    Iterator.prototype.toString = function() {\n      return '[Iterator]';\n    };\n\n\n  Iterator.KEYS = ITERATE_KEYS;\n  Iterator.VALUES = ITERATE_VALUES;\n  Iterator.ENTRIES = ITERATE_ENTRIES;\n\n  Iterator.prototype.inspect =\n  Iterator.prototype.toSource = function () { return this.toString(); }\n  Iterator.prototype[ITERATOR_SYMBOL] = function () {\n    return this;\n  };\n\n\n  function iteratorValue(type, k, v, iteratorResult) {\n    var value = type === 0 ? k : type === 1 ? v : [k, v];\n    iteratorResult ? 
(iteratorResult.value = value) : (iteratorResult = {\n      value: value, done: false\n    });\n    return iteratorResult;\n  }\n\n  function iteratorDone() {\n    return { value: undefined, done: true };\n  }\n\n  function hasIterator(maybeIterable) {\n    return !!getIteratorFn(maybeIterable);\n  }\n\n  function isIterator(maybeIterator) {\n    return maybeIterator && typeof maybeIterator.next === 'function';\n  }\n\n  function getIterator(iterable) {\n    var iteratorFn = getIteratorFn(iterable);\n    return iteratorFn && iteratorFn.call(iterable);\n  }\n\n  function getIteratorFn(iterable) {\n    var iteratorFn = iterable && (\n      (REAL_ITERATOR_SYMBOL && iterable[REAL_ITERATOR_SYMBOL]) ||\n      iterable[FAUX_ITERATOR_SYMBOL]\n    );\n    if (typeof iteratorFn === 'function') {\n      return iteratorFn;\n    }\n  }\n\n  function isArrayLike(value) {\n    return value && typeof value.length === 'number';\n  }\n\n  createClass(Seq, Iterable);\n    function Seq(value) {\n      return value === null || value === undefined ? emptySequence() :\n        isIterable(value) ? 
value.toSeq() : seqFromValue(value);\n    }\n\n    Seq.of = function(/*...values*/) {\n      return Seq(arguments);\n    };\n\n    Seq.prototype.toSeq = function() {\n      return this;\n    };\n\n    Seq.prototype.toString = function() {\n      return this.__toString('Seq {', '}');\n    };\n\n    Seq.prototype.cacheResult = function() {\n      if (!this._cache && this.__iterateUncached) {\n        this._cache = this.entrySeq().toArray();\n        this.size = this._cache.length;\n      }\n      return this;\n    };\n\n    // abstract __iterateUncached(fn, reverse)\n\n    Seq.prototype.__iterate = function(fn, reverse) {\n      return seqIterate(this, fn, reverse, true);\n    };\n\n    // abstract __iteratorUncached(type, reverse)\n\n    Seq.prototype.__iterator = function(type, reverse) {\n      return seqIterator(this, type, reverse, true);\n    };\n\n\n\n  createClass(KeyedSeq, Seq);\n    function KeyedSeq(value) {\n      return value === null || value === undefined ?\n        emptySequence().toKeyedSeq() :\n        isIterable(value) ?\n          (isKeyed(value) ? value.toSeq() : value.fromEntrySeq()) :\n          keyedSeqFromValue(value);\n    }\n\n    KeyedSeq.prototype.toKeyedSeq = function() {\n      return this;\n    };\n\n\n\n  createClass(IndexedSeq, Seq);\n    function IndexedSeq(value) {\n      return value === null || value === undefined ? emptySequence() :\n        !isIterable(value) ? indexedSeqFromValue(value) :\n        isKeyed(value) ? 
value.entrySeq() : value.toIndexedSeq();\n    }\n\n    IndexedSeq.of = function(/*...values*/) {\n      return IndexedSeq(arguments);\n    };\n\n    IndexedSeq.prototype.toIndexedSeq = function() {\n      return this;\n    };\n\n    IndexedSeq.prototype.toString = function() {\n      return this.__toString('Seq [', ']');\n    };\n\n    IndexedSeq.prototype.__iterate = function(fn, reverse) {\n      return seqIterate(this, fn, reverse, false);\n    };\n\n    IndexedSeq.prototype.__iterator = function(type, reverse) {\n      return seqIterator(this, type, reverse, false);\n    };\n\n\n\n  createClass(SetSeq, Seq);\n    function SetSeq(value) {\n      return (\n        value === null || value === undefined ? emptySequence() :\n        !isIterable(value) ? indexedSeqFromValue(value) :\n        isKeyed(value) ? value.entrySeq() : value\n      ).toSetSeq();\n    }\n\n    SetSeq.of = function(/*...values*/) {\n      return SetSeq(arguments);\n    };\n\n    SetSeq.prototype.toSetSeq = function() {\n      return this;\n    };\n\n\n\n  Seq.isSeq = isSeq;\n  Seq.Keyed = KeyedSeq;\n  Seq.Set = SetSeq;\n  Seq.Indexed = IndexedSeq;\n\n  var IS_SEQ_SENTINEL = '@@__IMMUTABLE_SEQ__@@';\n\n  Seq.prototype[IS_SEQ_SENTINEL] = true;\n\n\n\n  createClass(ArraySeq, IndexedSeq);\n    function ArraySeq(array) {\n      this._array = array;\n      this.size = array.length;\n    }\n\n    ArraySeq.prototype.get = function(index, notSetValue) {\n      return this.has(index) ? this._array[wrapIndex(this, index)] : notSetValue;\n    };\n\n    ArraySeq.prototype.__iterate = function(fn, reverse) {\n      var array = this._array;\n      var maxIndex = array.length - 1;\n      for (var ii = 0; ii <= maxIndex; ii++) {\n        if (fn(array[reverse ? 
maxIndex - ii : ii], ii, this) === false) {\n          return ii + 1;\n        }\n      }\n      return ii;\n    };\n\n    ArraySeq.prototype.__iterator = function(type, reverse) {\n      var array = this._array;\n      var maxIndex = array.length - 1;\n      var ii = 0;\n      return new Iterator(function() \n        {return ii > maxIndex ?\n          iteratorDone() :\n          iteratorValue(type, ii, array[reverse ? maxIndex - ii++ : ii++])}\n      );\n    };\n\n\n\n  createClass(ObjectSeq, KeyedSeq);\n    function ObjectSeq(object) {\n      var keys = Object.keys(object);\n      this._object = object;\n      this._keys = keys;\n      this.size = keys.length;\n    }\n\n    ObjectSeq.prototype.get = function(key, notSetValue) {\n      if (notSetValue !== undefined && !this.has(key)) {\n        return notSetValue;\n      }\n      return this._object[key];\n    };\n\n    ObjectSeq.prototype.has = function(key) {\n      return this._object.hasOwnProperty(key);\n    };\n\n    ObjectSeq.prototype.__iterate = function(fn, reverse) {\n      var object = this._object;\n      var keys = this._keys;\n      var maxIndex = keys.length - 1;\n      for (var ii = 0; ii <= maxIndex; ii++) {\n        var key = keys[reverse ? maxIndex - ii : ii];\n        if (fn(object[key], key, this) === false) {\n          return ii + 1;\n        }\n      }\n      return ii;\n    };\n\n    ObjectSeq.prototype.__iterator = function(type, reverse) {\n      var object = this._object;\n      var keys = this._keys;\n      var maxIndex = keys.length - 1;\n      var ii = 0;\n      return new Iterator(function()  {\n        var key = keys[reverse ? 
maxIndex - ii : ii];\n        return ii++ > maxIndex ?\n          iteratorDone() :\n          iteratorValue(type, key, object[key]);\n      });\n    };\n\n  ObjectSeq.prototype[IS_ORDERED_SENTINEL] = true;\n\n\n  createClass(IterableSeq, IndexedSeq);\n    function IterableSeq(iterable) {\n      this._iterable = iterable;\n      this.size = iterable.length || iterable.size;\n    }\n\n    IterableSeq.prototype.__iterateUncached = function(fn, reverse) {\n      if (reverse) {\n        return this.cacheResult().__iterate(fn, reverse);\n      }\n      var iterable = this._iterable;\n      var iterator = getIterator(iterable);\n      var iterations = 0;\n      if (isIterator(iterator)) {\n        var step;\n        while (!(step = iterator.next()).done) {\n          if (fn(step.value, iterations++, this) === false) {\n            break;\n          }\n        }\n      }\n      return iterations;\n    };\n\n    IterableSeq.prototype.__iteratorUncached = function(type, reverse) {\n      if (reverse) {\n        return this.cacheResult().__iterator(type, reverse);\n      }\n      var iterable = this._iterable;\n      var iterator = getIterator(iterable);\n      if (!isIterator(iterator)) {\n        return new Iterator(iteratorDone);\n      }\n      var iterations = 0;\n      return new Iterator(function()  {\n        var step = iterator.next();\n        return step.done ? 
step : iteratorValue(type, iterations++, step.value);\n      });\n    };\n\n\n\n  createClass(IteratorSeq, IndexedSeq);\n    function IteratorSeq(iterator) {\n      this._iterator = iterator;\n      this._iteratorCache = [];\n    }\n\n    IteratorSeq.prototype.__iterateUncached = function(fn, reverse) {\n      if (reverse) {\n        return this.cacheResult().__iterate(fn, reverse);\n      }\n      var iterator = this._iterator;\n      var cache = this._iteratorCache;\n      var iterations = 0;\n      while (iterations < cache.length) {\n        if (fn(cache[iterations], iterations++, this) === false) {\n          return iterations;\n        }\n      }\n      var step;\n      while (!(step = iterator.next()).done) {\n        var val = step.value;\n        cache[iterations] = val;\n        if (fn(val, iterations++, this) === false) {\n          break;\n        }\n      }\n      return iterations;\n    };\n\n    IteratorSeq.prototype.__iteratorUncached = function(type, reverse) {\n      if (reverse) {\n        return this.cacheResult().__iterator(type, reverse);\n      }\n      var iterator = this._iterator;\n      var cache = this._iteratorCache;\n      var iterations = 0;\n      return new Iterator(function()  {\n        if (iterations >= cache.length) {\n          var step = iterator.next();\n          if (step.done) {\n            return step;\n          }\n          cache[iterations] = step.value;\n        }\n        return iteratorValue(type, iterations, cache[iterations++]);\n      });\n    };\n\n\n\n\n  // # pragma Helper functions\n\n  function isSeq(maybeSeq) {\n    return !!(maybeSeq && maybeSeq[IS_SEQ_SENTINEL]);\n  }\n\n  var EMPTY_SEQ;\n\n  function emptySequence() {\n    return EMPTY_SEQ || (EMPTY_SEQ = new ArraySeq([]));\n  }\n\n  function keyedSeqFromValue(value) {\n    var seq =\n      Array.isArray(value) ? new ArraySeq(value).fromEntrySeq() :\n      isIterator(value) ? new IteratorSeq(value).fromEntrySeq() :\n      hasIterator(value) ? 
new IterableSeq(value).fromEntrySeq() :\n      typeof value === 'object' ? new ObjectSeq(value) :\n      undefined;\n    if (!seq) {\n      throw new TypeError(\n        'Expected Array or iterable object of [k, v] entries, '+\n        'or keyed object: ' + value\n      );\n    }\n    return seq;\n  }\n\n  function indexedSeqFromValue(value) {\n    var seq = maybeIndexedSeqFromValue(value);\n    if (!seq) {\n      throw new TypeError(\n        'Expected Array or iterable object of values: ' + value\n      );\n    }\n    return seq;\n  }\n\n  function seqFromValue(value) {\n    var seq = maybeIndexedSeqFromValue(value) ||\n      (typeof value === 'object' && new ObjectSeq(value));\n    if (!seq) {\n      throw new TypeError(\n        'Expected Array or iterable object of values, or keyed object: ' + value\n      );\n    }\n    return seq;\n  }\n\n  function maybeIndexedSeqFromValue(value) {\n    return (\n      isArrayLike(value) ? new ArraySeq(value) :\n      isIterator(value) ? new IteratorSeq(value) :\n      hasIterator(value) ? new IterableSeq(value) :\n      undefined\n    );\n  }\n\n  function seqIterate(seq, fn, reverse, useKeys) {\n    var cache = seq._cache;\n    if (cache) {\n      var maxIndex = cache.length - 1;\n      for (var ii = 0; ii <= maxIndex; ii++) {\n        var entry = cache[reverse ? maxIndex - ii : ii];\n        if (fn(entry[1], useKeys ? entry[0] : ii, seq) === false) {\n          return ii + 1;\n        }\n      }\n      return ii;\n    }\n    return seq.__iterateUncached(fn, reverse);\n  }\n\n  function seqIterator(seq, type, reverse, useKeys) {\n    var cache = seq._cache;\n    if (cache) {\n      var maxIndex = cache.length - 1;\n      var ii = 0;\n      return new Iterator(function()  {\n        var entry = cache[reverse ? maxIndex - ii : ii];\n        return ii++ > maxIndex ?\n          iteratorDone() :\n          iteratorValue(type, useKeys ? 
entry[0] : ii - 1, entry[1]);\n      });\n    }\n    return seq.__iteratorUncached(type, reverse);\n  }\n\n  function fromJS(json, converter) {\n    return converter ?\n      fromJSWith(converter, json, '', {'': json}) :\n      fromJSDefault(json);\n  }\n\n  function fromJSWith(converter, json, key, parentJSON) {\n    if (Array.isArray(json)) {\n      return converter.call(parentJSON, key, IndexedSeq(json).map(function(v, k)  {return fromJSWith(converter, v, k, json)}));\n    }\n    if (isPlainObj(json)) {\n      return converter.call(parentJSON, key, KeyedSeq(json).map(function(v, k)  {return fromJSWith(converter, v, k, json)}));\n    }\n    return json;\n  }\n\n  function fromJSDefault(json) {\n    if (Array.isArray(json)) {\n      return IndexedSeq(json).map(fromJSDefault).toList();\n    }\n    if (isPlainObj(json)) {\n      return KeyedSeq(json).map(fromJSDefault).toMap();\n    }\n    return json;\n  }\n\n  function isPlainObj(value) {\n    return value && (value.constructor === Object || value.constructor === undefined);\n  }\n\n  /**\n   * An extension of the \"same-value\" algorithm as [described for use by ES6 Map\n   * and Set](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Map#Key_equality)\n   *\n   * NaN is considered the same as NaN, however -0 and 0 are considered the same\n   * value, which is different from the algorithm described by\n   * [`Object.is`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/is).\n   *\n   * This is extended further to allow Objects to describe the values they\n   * represent, by way of `valueOf` or `equals` (and `hashCode`).\n   *\n   * Note: because of this extension, the key equality of Immutable.Map and the\n   * value equality of Immutable.Set will differ from ES6 Map and Set.\n   *\n   * ### Defining custom values\n   *\n   * The easiest way to describe the value an object represents is by implementing\n   * `valueOf`. 
For example, `Date` represents a value by returning a unix\n   * timestamp for `valueOf`:\n   *\n   *     var date1 = new Date(1234567890000); // Fri Feb 13 2009 ...\n   *     var date2 = new Date(1234567890000);\n   *     date1.valueOf(); // 1234567890000\n   *     assert( date1 !== date2 );\n   *     assert( Immutable.is( date1, date2 ) );\n   *\n   * Note: overriding `valueOf` may have other implications if you use this object\n   * where JavaScript expects a primitive, such as implicit string coercion.\n   *\n   * For more complex types, especially collections, implementing `valueOf` may\n   * not be performant. An alternative is to implement `equals` and `hashCode`.\n   *\n   * `equals` takes another object, presumably of similar type, and returns true\n   * if the it is equal. Equality is symmetrical, so the same result should be\n   * returned if this and the argument are flipped.\n   *\n   *     assert( a.equals(b) === b.equals(a) );\n   *\n   * `hashCode` returns a 32bit integer number representing the object which will\n   * be used to determine how to store the value object in a Map or Set. You must\n   * provide both or neither methods, one must not exist without the other.\n   *\n   * Also, an important relationship between these methods must be upheld: if two\n   * values are equal, they *must* return the same hashCode. 
If the values are not\n   * equal, they might have the same hashCode; this is called a hash collision,\n   * and while undesirable for performance reasons, it is acceptable.\n   *\n   *     if (a.equals(b)) {\n   *       assert( a.hashCode() === b.hashCode() );\n   *     }\n   *\n   * All Immutable collections implement `equals` and `hashCode`.\n   *\n   */\n  function is(valueA, valueB) {\n    if (valueA === valueB || (valueA !== valueA && valueB !== valueB)) {\n      return true;\n    }\n    if (!valueA || !valueB) {\n      return false;\n    }\n    if (typeof valueA.valueOf === 'function' &&\n        typeof valueB.valueOf === 'function') {\n      valueA = valueA.valueOf();\n      valueB = valueB.valueOf();\n      if (valueA === valueB || (valueA !== valueA && valueB !== valueB)) {\n        return true;\n      }\n      if (!valueA || !valueB) {\n        return false;\n      }\n    }\n    if (typeof valueA.equals === 'function' &&\n        typeof valueB.equals === 'function' &&\n        valueA.equals(valueB)) {\n      return true;\n    }\n    return false;\n  }\n\n  function deepEqual(a, b) {\n    if (a === b) {\n      return true;\n    }\n\n    if (\n      !isIterable(b) ||\n      a.size !== undefined && b.size !== undefined && a.size !== b.size ||\n      a.__hash !== undefined && b.__hash !== undefined && a.__hash !== b.__hash ||\n      isKeyed(a) !== isKeyed(b) ||\n      isIndexed(a) !== isIndexed(b) ||\n      isOrdered(a) !== isOrdered(b)\n    ) {\n      return false;\n    }\n\n    if (a.size === 0 && b.size === 0) {\n      return true;\n    }\n\n    var notAssociative = !isAssociative(a);\n\n    if (isOrdered(a)) {\n      var entries = a.entries();\n      return b.every(function(v, k)  {\n        var entry = entries.next().value;\n        return entry && is(entry[1], v) && (notAssociative || is(entry[0], k));\n      }) && entries.next().done;\n    }\n\n    var flipped = false;\n\n    if (a.size === undefined) {\n      if (b.size === undefined) {\n        if 
(typeof a.cacheResult === 'function') {\n          a.cacheResult();\n        }\n      } else {\n        flipped = true;\n        var _ = a;\n        a = b;\n        b = _;\n      }\n    }\n\n    var allEqual = true;\n    var bSize = b.__iterate(function(v, k)  {\n      if (notAssociative ? !a.has(v) :\n          flipped ? !is(v, a.get(k, NOT_SET)) : !is(a.get(k, NOT_SET), v)) {\n        allEqual = false;\n        return false;\n      }\n    });\n\n    return allEqual && a.size === bSize;\n  }\n\n  createClass(Repeat, IndexedSeq);\n\n    function Repeat(value, times) {\n      if (!(this instanceof Repeat)) {\n        return new Repeat(value, times);\n      }\n      this._value = value;\n      this.size = times === undefined ? Infinity : Math.max(0, times);\n      if (this.size === 0) {\n        if (EMPTY_REPEAT) {\n          return EMPTY_REPEAT;\n        }\n        EMPTY_REPEAT = this;\n      }\n    }\n\n    Repeat.prototype.toString = function() {\n      if (this.size === 0) {\n        return 'Repeat []';\n      }\n      return 'Repeat [ ' + this._value + ' ' + this.size + ' times ]';\n    };\n\n    Repeat.prototype.get = function(index, notSetValue) {\n      return this.has(index) ? this._value : notSetValue;\n    };\n\n    Repeat.prototype.includes = function(searchValue) {\n      return is(this._value, searchValue);\n    };\n\n    Repeat.prototype.slice = function(begin, end) {\n      var size = this.size;\n      return wholeSlice(begin, end, size) ? 
this :\n        new Repeat(this._value, resolveEnd(end, size) - resolveBegin(begin, size));\n    };\n\n    Repeat.prototype.reverse = function() {\n      return this;\n    };\n\n    Repeat.prototype.indexOf = function(searchValue) {\n      if (is(this._value, searchValue)) {\n        return 0;\n      }\n      return -1;\n    };\n\n    Repeat.prototype.lastIndexOf = function(searchValue) {\n      if (is(this._value, searchValue)) {\n        return this.size;\n      }\n      return -1;\n    };\n\n    Repeat.prototype.__iterate = function(fn, reverse) {\n      for (var ii = 0; ii < this.size; ii++) {\n        if (fn(this._value, ii, this) === false) {\n          return ii + 1;\n        }\n      }\n      return ii;\n    };\n\n    Repeat.prototype.__iterator = function(type, reverse) {var this$0 = this;\n      var ii = 0;\n      return new Iterator(function() \n        {return ii < this$0.size ? iteratorValue(type, ii++, this$0._value) : iteratorDone()}\n      );\n    };\n\n    Repeat.prototype.equals = function(other) {\n      return other instanceof Repeat ?\n        is(this._value, other._value) :\n        deepEqual(other);\n    };\n\n\n  var EMPTY_REPEAT;\n\n  function invariant(condition, error) {\n    if (!condition) throw new Error(error);\n  }\n\n  createClass(Range, IndexedSeq);\n\n    function Range(start, end, step) {\n      if (!(this instanceof Range)) {\n        return new Range(start, end, step);\n      }\n      invariant(step !== 0, 'Cannot step a Range by 0');\n      start = start || 0;\n      if (end === undefined) {\n        end = Infinity;\n      }\n      step = step === undefined ? 
1 : Math.abs(step);\n      if (end < start) {\n        step = -step;\n      }\n      this._start = start;\n      this._end = end;\n      this._step = step;\n      this.size = Math.max(0, Math.ceil((end - start) / step - 1) + 1);\n      if (this.size === 0) {\n        if (EMPTY_RANGE) {\n          return EMPTY_RANGE;\n        }\n        EMPTY_RANGE = this;\n      }\n    }\n\n    Range.prototype.toString = function() {\n      if (this.size === 0) {\n        return 'Range []';\n      }\n      return 'Range [ ' +\n        this._start + '...' + this._end +\n        (this._step !== 1 ? ' by ' + this._step : '') +\n      ' ]';\n    };\n\n    Range.prototype.get = function(index, notSetValue) {\n      return this.has(index) ?\n        this._start + wrapIndex(this, index) * this._step :\n        notSetValue;\n    };\n\n    Range.prototype.includes = function(searchValue) {\n      var possibleIndex = (searchValue - this._start) / this._step;\n      return possibleIndex >= 0 &&\n        possibleIndex < this.size &&\n        possibleIndex === Math.floor(possibleIndex);\n    };\n\n    Range.prototype.slice = function(begin, end) {\n      if (wholeSlice(begin, end, this.size)) {\n        return this;\n      }\n      begin = resolveBegin(begin, this.size);\n      end = resolveEnd(end, this.size);\n      if (end <= begin) {\n        return new Range(0, 0);\n      }\n      return new Range(this.get(begin, this._end), this.get(end, this._end), this._step);\n    };\n\n    Range.prototype.indexOf = function(searchValue) {\n      var offsetValue = searchValue - this._start;\n      if (offsetValue % this._step === 0) {\n        var index = offsetValue / this._step;\n        if (index >= 0 && index < this.size) {\n          return index\n        }\n      }\n      return -1;\n    };\n\n    Range.prototype.lastIndexOf = function(searchValue) {\n      return this.indexOf(searchValue);\n    };\n\n    Range.prototype.__iterate = function(fn, reverse) {\n      var maxIndex = this.size - 1;\n   
   var step = this._step;\n      var value = reverse ? this._start + maxIndex * step : this._start;\n      for (var ii = 0; ii <= maxIndex; ii++) {\n        if (fn(value, ii, this) === false) {\n          return ii + 1;\n        }\n        value += reverse ? -step : step;\n      }\n      return ii;\n    };\n\n    Range.prototype.__iterator = function(type, reverse) {\n      var maxIndex = this.size - 1;\n      var step = this._step;\n      var value = reverse ? this._start + maxIndex * step : this._start;\n      var ii = 0;\n      return new Iterator(function()  {\n        var v = value;\n        value += reverse ? -step : step;\n        return ii > maxIndex ? iteratorDone() : iteratorValue(type, ii++, v);\n      });\n    };\n\n    Range.prototype.equals = function(other) {\n      return other instanceof Range ?\n        this._start === other._start &&\n        this._end === other._end &&\n        this._step === other._step :\n        deepEqual(this, other);\n    };\n\n\n  var EMPTY_RANGE;\n\n  createClass(Collection, Iterable);\n    function Collection() {\n      throw TypeError('Abstract');\n    }\n\n\n  createClass(KeyedCollection, Collection);function KeyedCollection() {}\n\n  createClass(IndexedCollection, Collection);function IndexedCollection() {}\n\n  createClass(SetCollection, Collection);function SetCollection() {}\n\n\n  Collection.Keyed = KeyedCollection;\n  Collection.Indexed = IndexedCollection;\n  Collection.Set = SetCollection;\n\n  var imul =\n    typeof Math.imul === 'function' && Math.imul(0xffffffff, 2) === -2 ?\n    Math.imul :\n    function imul(a, b) {\n      a = a | 0; // int\n      b = b | 0; // int\n      var c = a & 0xffff;\n      var d = b & 0xffff;\n      // Shift by 0 fixes the sign on the high part.\n      return (c * d) + ((((a >>> 16) * d + c * (b >>> 16)) << 16) >>> 0) | 0; // int\n    };\n\n  // v8 has an optimization for storing 31-bit signed numbers.\n  // Values which have either 00 or 11 as the high order bits qualify.\n  // 
This function drops the highest order bit in a signed number, maintaining\n  // the sign bit.\n  function smi(i32) {\n    return ((i32 >>> 1) & 0x40000000) | (i32 & 0xBFFFFFFF);\n  }\n\n  function hash(o) {\n    if (o === false || o === null || o === undefined) {\n      return 0;\n    }\n    if (typeof o.valueOf === 'function') {\n      o = o.valueOf();\n      if (o === false || o === null || o === undefined) {\n        return 0;\n      }\n    }\n    if (o === true) {\n      return 1;\n    }\n    var type = typeof o;\n    if (type === 'number') {\n      if (o !== o || o === Infinity) {\n        return 0;\n      }\n      var h = o | 0;\n      if (h !== o) {\n        h ^= o * 0xFFFFFFFF;\n      }\n      while (o > 0xFFFFFFFF) {\n        o /= 0xFFFFFFFF;\n        h ^= o;\n      }\n      return smi(h);\n    }\n    if (type === 'string') {\n      return o.length > STRING_HASH_CACHE_MIN_STRLEN ? cachedHashString(o) : hashString(o);\n    }\n    if (typeof o.hashCode === 'function') {\n      return o.hashCode();\n    }\n    if (type === 'object') {\n      return hashJSObj(o);\n    }\n    if (typeof o.toString === 'function') {\n      return hashString(o.toString());\n    }\n    throw new Error('Value type ' + type + ' cannot be hashed.');\n  }\n\n  function cachedHashString(string) {\n    var hash = stringHashCache[string];\n    if (hash === undefined) {\n      hash = hashString(string);\n      if (STRING_HASH_CACHE_SIZE === STRING_HASH_CACHE_MAX_SIZE) {\n        STRING_HASH_CACHE_SIZE = 0;\n        stringHashCache = {};\n      }\n      STRING_HASH_CACHE_SIZE++;\n      stringHashCache[string] = hash;\n    }\n    return hash;\n  }\n\n  // http://jsperf.com/hashing-strings\n  function hashString(string) {\n    // This is the hash from JVM\n    // The hash code for a string is computed as\n    // s[0] * 31 ^ (n - 1) + s[1] * 31 ^ (n - 2) + ... + s[n - 1],\n    // where s[i] is the ith character of the string and n is the length of\n    // the string. 
We \"mod\" the result to make it between 0 (inclusive) and 2^31\n    // (exclusive) by dropping high bits.\n    var hash = 0;\n    for (var ii = 0; ii < string.length; ii++) {\n      hash = 31 * hash + string.charCodeAt(ii) | 0;\n    }\n    return smi(hash);\n  }\n\n  function hashJSObj(obj) {\n    var hash;\n    if (usingWeakMap) {\n      hash = weakMap.get(obj);\n      if (hash !== undefined) {\n        return hash;\n      }\n    }\n\n    hash = obj[UID_HASH_KEY];\n    if (hash !== undefined) {\n      return hash;\n    }\n\n    if (!canDefineProperty) {\n      hash = obj.propertyIsEnumerable && obj.propertyIsEnumerable[UID_HASH_KEY];\n      if (hash !== undefined) {\n        return hash;\n      }\n\n      hash = getIENodeHash(obj);\n      if (hash !== undefined) {\n        return hash;\n      }\n    }\n\n    hash = ++objHashUID;\n    if (objHashUID & 0x40000000) {\n      objHashUID = 0;\n    }\n\n    if (usingWeakMap) {\n      weakMap.set(obj, hash);\n    } else if (isExtensible !== undefined && isExtensible(obj) === false) {\n      throw new Error('Non-extensible objects are not allowed as keys.');\n    } else if (canDefineProperty) {\n      Object.defineProperty(obj, UID_HASH_KEY, {\n        'enumerable': false,\n        'configurable': false,\n        'writable': false,\n        'value': hash\n      });\n    } else if (obj.propertyIsEnumerable !== undefined &&\n               obj.propertyIsEnumerable === obj.constructor.prototype.propertyIsEnumerable) {\n      // Since we can't define a non-enumerable property on the object\n      // we'll hijack one of the less-used non-enumerable properties to\n      // save our hash on it. 
Since this is a function it will not show up in\n      // `JSON.stringify` which is what we want.\n      obj.propertyIsEnumerable = function() {\n        return this.constructor.prototype.propertyIsEnumerable.apply(this, arguments);\n      };\n      obj.propertyIsEnumerable[UID_HASH_KEY] = hash;\n    } else if (obj.nodeType !== undefined) {\n      // At this point we couldn't get the IE `uniqueID` to use as a hash\n      // and we couldn't use a non-enumerable property to exploit the\n      // dontEnum bug so we simply add the `UID_HASH_KEY` on the node\n      // itself.\n      obj[UID_HASH_KEY] = hash;\n    } else {\n      throw new Error('Unable to set a non-enumerable property on object.');\n    }\n\n    return hash;\n  }\n\n  // Get references to ES5 object methods.\n  var isExtensible = Object.isExtensible;\n\n  // True if Object.defineProperty works as expected. IE8 fails this test.\n  var canDefineProperty = (function() {\n    try {\n      Object.defineProperty({}, '@', {});\n      return true;\n    } catch (e) {\n      return false;\n    }\n  }());\n\n  // IE has a `uniqueID` property on DOM nodes. 
We can construct the hash from it\n  // and avoid memory leaks from the IE cloneNode bug.\n  function getIENodeHash(node) {\n    if (node && node.nodeType > 0) {\n      switch (node.nodeType) {\n        case 1: // Element\n          return node.uniqueID;\n        case 9: // Document\n          return node.documentElement && node.documentElement.uniqueID;\n      }\n    }\n  }\n\n  // If possible, use a WeakMap.\n  var usingWeakMap = typeof WeakMap === 'function';\n  var weakMap;\n  if (usingWeakMap) {\n    weakMap = new WeakMap();\n  }\n\n  var objHashUID = 0;\n\n  var UID_HASH_KEY = '__immutablehash__';\n  if (typeof Symbol === 'function') {\n    UID_HASH_KEY = Symbol(UID_HASH_KEY);\n  }\n\n  var STRING_HASH_CACHE_MIN_STRLEN = 16;\n  var STRING_HASH_CACHE_MAX_SIZE = 255;\n  var STRING_HASH_CACHE_SIZE = 0;\n  var stringHashCache = {};\n\n  function assertNotInfinite(size) {\n    invariant(\n      size !== Infinity,\n      'Cannot perform this action with an infinite size.'\n    );\n  }\n\n  createClass(Map, KeyedCollection);\n\n    // @pragma Construction\n\n    function Map(value) {\n      return value === null || value === undefined ? emptyMap() :\n        isMap(value) && !isOrdered(value) ? 
value :\n        emptyMap().withMutations(function(map ) {\n          var iter = KeyedIterable(value);\n          assertNotInfinite(iter.size);\n          iter.forEach(function(v, k)  {return map.set(k, v)});\n        });\n    }\n\n    Map.of = function() {var keyValues = SLICE$0.call(arguments, 0);\n      return emptyMap().withMutations(function(map ) {\n        for (var i = 0; i < keyValues.length; i += 2) {\n          if (i + 1 >= keyValues.length) {\n            throw new Error('Missing value for key: ' + keyValues[i]);\n          }\n          map.set(keyValues[i], keyValues[i + 1]);\n        }\n      });\n    };\n\n    Map.prototype.toString = function() {\n      return this.__toString('Map {', '}');\n    };\n\n    // @pragma Access\n\n    Map.prototype.get = function(k, notSetValue) {\n      return this._root ?\n        this._root.get(0, undefined, k, notSetValue) :\n        notSetValue;\n    };\n\n    // @pragma Modification\n\n    Map.prototype.set = function(k, v) {\n      return updateMap(this, k, v);\n    };\n\n    Map.prototype.setIn = function(keyPath, v) {\n      return this.updateIn(keyPath, NOT_SET, function()  {return v});\n    };\n\n    Map.prototype.remove = function(k) {\n      return updateMap(this, k, NOT_SET);\n    };\n\n    Map.prototype.deleteIn = function(keyPath) {\n      return this.updateIn(keyPath, function()  {return NOT_SET});\n    };\n\n    Map.prototype.update = function(k, notSetValue, updater) {\n      return arguments.length === 1 ?\n        k(this) :\n        this.updateIn([k], notSetValue, updater);\n    };\n\n    Map.prototype.updateIn = function(keyPath, notSetValue, updater) {\n      if (!updater) {\n        updater = notSetValue;\n        notSetValue = undefined;\n      }\n      var updatedValue = updateInDeepMap(\n        this,\n        forceIterator(keyPath),\n        notSetValue,\n        updater\n      );\n      return updatedValue === NOT_SET ? 
undefined : updatedValue;\n    };\n\n    Map.prototype.clear = function() {\n      if (this.size === 0) {\n        return this;\n      }\n      if (this.__ownerID) {\n        this.size = 0;\n        this._root = null;\n        this.__hash = undefined;\n        this.__altered = true;\n        return this;\n      }\n      return emptyMap();\n    };\n\n    // @pragma Composition\n\n    Map.prototype.merge = function(/*...iters*/) {\n      return mergeIntoMapWith(this, undefined, arguments);\n    };\n\n    Map.prototype.mergeWith = function(merger) {var iters = SLICE$0.call(arguments, 1);\n      return mergeIntoMapWith(this, merger, iters);\n    };\n\n    Map.prototype.mergeIn = function(keyPath) {var iters = SLICE$0.call(arguments, 1);\n      return this.updateIn(\n        keyPath,\n        emptyMap(),\n        function(m ) {return typeof m.merge === 'function' ?\n          m.merge.apply(m, iters) :\n          iters[iters.length - 1]}\n      );\n    };\n\n    Map.prototype.mergeDeep = function(/*...iters*/) {\n      return mergeIntoMapWith(this, deepMerger, arguments);\n    };\n\n    Map.prototype.mergeDeepWith = function(merger) {var iters = SLICE$0.call(arguments, 1);\n      return mergeIntoMapWith(this, deepMergerWith(merger), iters);\n    };\n\n    Map.prototype.mergeDeepIn = function(keyPath) {var iters = SLICE$0.call(arguments, 1);\n      return this.updateIn(\n        keyPath,\n        emptyMap(),\n        function(m ) {return typeof m.mergeDeep === 'function' ?\n          m.mergeDeep.apply(m, iters) :\n          iters[iters.length - 1]}\n      );\n    };\n\n    Map.prototype.sort = function(comparator) {\n      // Late binding\n      return OrderedMap(sortFactory(this, comparator));\n    };\n\n    Map.prototype.sortBy = function(mapper, comparator) {\n      // Late binding\n      return OrderedMap(sortFactory(this, comparator, mapper));\n    };\n\n    // @pragma Mutability\n\n    Map.prototype.withMutations = function(fn) {\n      var mutable = 
this.asMutable();\n      fn(mutable);\n      return mutable.wasAltered() ? mutable.__ensureOwner(this.__ownerID) : this;\n    };\n\n    Map.prototype.asMutable = function() {\n      return this.__ownerID ? this : this.__ensureOwner(new OwnerID());\n    };\n\n    Map.prototype.asImmutable = function() {\n      return this.__ensureOwner();\n    };\n\n    Map.prototype.wasAltered = function() {\n      return this.__altered;\n    };\n\n    Map.prototype.__iterator = function(type, reverse) {\n      return new MapIterator(this, type, reverse);\n    };\n\n    Map.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n      var iterations = 0;\n      this._root && this._root.iterate(function(entry ) {\n        iterations++;\n        return fn(entry[1], entry[0], this$0);\n      }, reverse);\n      return iterations;\n    };\n\n    Map.prototype.__ensureOwner = function(ownerID) {\n      if (ownerID === this.__ownerID) {\n        return this;\n      }\n      if (!ownerID) {\n        this.__ownerID = ownerID;\n        this.__altered = false;\n        return this;\n      }\n      return makeMap(this.size, this._root, ownerID, this.__hash);\n    };\n\n\n  function isMap(maybeMap) {\n    return !!(maybeMap && maybeMap[IS_MAP_SENTINEL]);\n  }\n\n  Map.isMap = isMap;\n\n  var IS_MAP_SENTINEL = '@@__IMMUTABLE_MAP__@@';\n\n  var MapPrototype = Map.prototype;\n  MapPrototype[IS_MAP_SENTINEL] = true;\n  MapPrototype[DELETE] = MapPrototype.remove;\n  MapPrototype.removeIn = MapPrototype.deleteIn;\n\n\n  // #pragma Trie Nodes\n\n\n\n    function ArrayMapNode(ownerID, entries) {\n      this.ownerID = ownerID;\n      this.entries = entries;\n    }\n\n    ArrayMapNode.prototype.get = function(shift, keyHash, key, notSetValue) {\n      var entries = this.entries;\n      for (var ii = 0, len = entries.length; ii < len; ii++) {\n        if (is(key, entries[ii][0])) {\n          return entries[ii][1];\n        }\n      }\n      return notSetValue;\n    };\n\n    
ArrayMapNode.prototype.update = function(ownerID, shift, keyHash, key, value, didChangeSize, didAlter) {\n      var removed = value === NOT_SET;\n\n      var entries = this.entries;\n      var idx = 0;\n      for (var len = entries.length; idx < len; idx++) {\n        if (is(key, entries[idx][0])) {\n          break;\n        }\n      }\n      var exists = idx < len;\n\n      if (exists ? entries[idx][1] === value : removed) {\n        return this;\n      }\n\n      SetRef(didAlter);\n      (removed || !exists) && SetRef(didChangeSize);\n\n      if (removed && entries.length === 1) {\n        return; // undefined\n      }\n\n      if (!exists && !removed && entries.length >= MAX_ARRAY_MAP_SIZE) {\n        return createNodes(ownerID, entries, key, value);\n      }\n\n      var isEditable = ownerID && ownerID === this.ownerID;\n      var newEntries = isEditable ? entries : arrCopy(entries);\n\n      if (exists) {\n        if (removed) {\n          idx === len - 1 ? newEntries.pop() : (newEntries[idx] = newEntries.pop());\n        } else {\n          newEntries[idx] = [key, value];\n        }\n      } else {\n        newEntries.push([key, value]);\n      }\n\n      if (isEditable) {\n        this.entries = newEntries;\n        return this;\n      }\n\n      return new ArrayMapNode(ownerID, newEntries);\n    };\n\n\n\n\n    function BitmapIndexedNode(ownerID, bitmap, nodes) {\n      this.ownerID = ownerID;\n      this.bitmap = bitmap;\n      this.nodes = nodes;\n    }\n\n    BitmapIndexedNode.prototype.get = function(shift, keyHash, key, notSetValue) {\n      if (keyHash === undefined) {\n        keyHash = hash(key);\n      }\n      var bit = (1 << ((shift === 0 ? keyHash : keyHash >>> shift) & MASK));\n      var bitmap = this.bitmap;\n      return (bitmap & bit) === 0 ? 
notSetValue :\n        this.nodes[popCount(bitmap & (bit - 1))].get(shift + SHIFT, keyHash, key, notSetValue);\n    };\n\n    BitmapIndexedNode.prototype.update = function(ownerID, shift, keyHash, key, value, didChangeSize, didAlter) {\n      if (keyHash === undefined) {\n        keyHash = hash(key);\n      }\n      var keyHashFrag = (shift === 0 ? keyHash : keyHash >>> shift) & MASK;\n      var bit = 1 << keyHashFrag;\n      var bitmap = this.bitmap;\n      var exists = (bitmap & bit) !== 0;\n\n      if (!exists && value === NOT_SET) {\n        return this;\n      }\n\n      var idx = popCount(bitmap & (bit - 1));\n      var nodes = this.nodes;\n      var node = exists ? nodes[idx] : undefined;\n      var newNode = updateNode(node, ownerID, shift + SHIFT, keyHash, key, value, didChangeSize, didAlter);\n\n      if (newNode === node) {\n        return this;\n      }\n\n      if (!exists && newNode && nodes.length >= MAX_BITMAP_INDEXED_SIZE) {\n        return expandNodes(ownerID, nodes, bitmap, keyHashFrag, newNode);\n      }\n\n      if (exists && !newNode && nodes.length === 2 && isLeafNode(nodes[idx ^ 1])) {\n        return nodes[idx ^ 1];\n      }\n\n      if (exists && newNode && nodes.length === 1 && isLeafNode(newNode)) {\n        return newNode;\n      }\n\n      var isEditable = ownerID && ownerID === this.ownerID;\n      var newBitmap = exists ? newNode ? bitmap : bitmap ^ bit : bitmap | bit;\n      var newNodes = exists ? 
newNode ?\n        setIn(nodes, idx, newNode, isEditable) :\n        spliceOut(nodes, idx, isEditable) :\n        spliceIn(nodes, idx, newNode, isEditable);\n\n      if (isEditable) {\n        this.bitmap = newBitmap;\n        this.nodes = newNodes;\n        return this;\n      }\n\n      return new BitmapIndexedNode(ownerID, newBitmap, newNodes);\n    };\n\n\n\n\n    function HashArrayMapNode(ownerID, count, nodes) {\n      this.ownerID = ownerID;\n      this.count = count;\n      this.nodes = nodes;\n    }\n\n    HashArrayMapNode.prototype.get = function(shift, keyHash, key, notSetValue) {\n      if (keyHash === undefined) {\n        keyHash = hash(key);\n      }\n      var idx = (shift === 0 ? keyHash : keyHash >>> shift) & MASK;\n      var node = this.nodes[idx];\n      return node ? node.get(shift + SHIFT, keyHash, key, notSetValue) : notSetValue;\n    };\n\n    HashArrayMapNode.prototype.update = function(ownerID, shift, keyHash, key, value, didChangeSize, didAlter) {\n      if (keyHash === undefined) {\n        keyHash = hash(key);\n      }\n      var idx = (shift === 0 ? 
keyHash : keyHash >>> shift) & MASK;\n      var removed = value === NOT_SET;\n      var nodes = this.nodes;\n      var node = nodes[idx];\n\n      if (removed && !node) {\n        return this;\n      }\n\n      var newNode = updateNode(node, ownerID, shift + SHIFT, keyHash, key, value, didChangeSize, didAlter);\n      if (newNode === node) {\n        return this;\n      }\n\n      var newCount = this.count;\n      if (!node) {\n        newCount++;\n      } else if (!newNode) {\n        newCount--;\n        if (newCount < MIN_HASH_ARRAY_MAP_SIZE) {\n          return packNodes(ownerID, nodes, newCount, idx);\n        }\n      }\n\n      var isEditable = ownerID && ownerID === this.ownerID;\n      var newNodes = setIn(nodes, idx, newNode, isEditable);\n\n      if (isEditable) {\n        this.count = newCount;\n        this.nodes = newNodes;\n        return this;\n      }\n\n      return new HashArrayMapNode(ownerID, newCount, newNodes);\n    };\n\n\n\n\n    function HashCollisionNode(ownerID, keyHash, entries) {\n      this.ownerID = ownerID;\n      this.keyHash = keyHash;\n      this.entries = entries;\n    }\n\n    HashCollisionNode.prototype.get = function(shift, keyHash, key, notSetValue) {\n      var entries = this.entries;\n      for (var ii = 0, len = entries.length; ii < len; ii++) {\n        if (is(key, entries[ii][0])) {\n          return entries[ii][1];\n        }\n      }\n      return notSetValue;\n    };\n\n    HashCollisionNode.prototype.update = function(ownerID, shift, keyHash, key, value, didChangeSize, didAlter) {\n      if (keyHash === undefined) {\n        keyHash = hash(key);\n      }\n\n      var removed = value === NOT_SET;\n\n      if (keyHash !== this.keyHash) {\n        if (removed) {\n          return this;\n        }\n        SetRef(didAlter);\n        SetRef(didChangeSize);\n        return mergeIntoNode(this, ownerID, shift, keyHash, [key, value]);\n      }\n\n      var entries = this.entries;\n      var idx = 0;\n      for (var len = 
entries.length; idx < len; idx++) {\n        if (is(key, entries[idx][0])) {\n          break;\n        }\n      }\n      var exists = idx < len;\n\n      if (exists ? entries[idx][1] === value : removed) {\n        return this;\n      }\n\n      SetRef(didAlter);\n      (removed || !exists) && SetRef(didChangeSize);\n\n      if (removed && len === 2) {\n        return new ValueNode(ownerID, this.keyHash, entries[idx ^ 1]);\n      }\n\n      var isEditable = ownerID && ownerID === this.ownerID;\n      var newEntries = isEditable ? entries : arrCopy(entries);\n\n      if (exists) {\n        if (removed) {\n          idx === len - 1 ? newEntries.pop() : (newEntries[idx] = newEntries.pop());\n        } else {\n          newEntries[idx] = [key, value];\n        }\n      } else {\n        newEntries.push([key, value]);\n      }\n\n      if (isEditable) {\n        this.entries = newEntries;\n        return this;\n      }\n\n      return new HashCollisionNode(ownerID, this.keyHash, newEntries);\n    };\n\n\n\n\n    function ValueNode(ownerID, keyHash, entry) {\n      this.ownerID = ownerID;\n      this.keyHash = keyHash;\n      this.entry = entry;\n    }\n\n    ValueNode.prototype.get = function(shift, keyHash, key, notSetValue) {\n      return is(key, this.entry[0]) ? this.entry[1] : notSetValue;\n    };\n\n    ValueNode.prototype.update = function(ownerID, shift, keyHash, key, value, didChangeSize, didAlter) {\n      var removed = value === NOT_SET;\n      var keyMatch = is(key, this.entry[0]);\n      if (keyMatch ? 
value === this.entry[1] : removed) {\n        return this;\n      }\n\n      SetRef(didAlter);\n\n      if (removed) {\n        SetRef(didChangeSize);\n        return; // undefined\n      }\n\n      if (keyMatch) {\n        if (ownerID && ownerID === this.ownerID) {\n          this.entry[1] = value;\n          return this;\n        }\n        return new ValueNode(ownerID, this.keyHash, [key, value]);\n      }\n\n      SetRef(didChangeSize);\n      return mergeIntoNode(this, ownerID, shift, hash(key), [key, value]);\n    };\n\n\n\n  // #pragma Iterators\n\n  ArrayMapNode.prototype.iterate =\n  HashCollisionNode.prototype.iterate = function (fn, reverse) {\n    var entries = this.entries;\n    for (var ii = 0, maxIndex = entries.length - 1; ii <= maxIndex; ii++) {\n      if (fn(entries[reverse ? maxIndex - ii : ii]) === false) {\n        return false;\n      }\n    }\n  }\n\n  BitmapIndexedNode.prototype.iterate =\n  HashArrayMapNode.prototype.iterate = function (fn, reverse) {\n    var nodes = this.nodes;\n    for (var ii = 0, maxIndex = nodes.length - 1; ii <= maxIndex; ii++) {\n      var node = nodes[reverse ? 
maxIndex - ii : ii];\n      if (node && node.iterate(fn, reverse) === false) {\n        return false;\n      }\n    }\n  }\n\n  ValueNode.prototype.iterate = function (fn, reverse) {\n    return fn(this.entry);\n  }\n\n  createClass(MapIterator, Iterator);\n\n    function MapIterator(map, type, reverse) {\n      this._type = type;\n      this._reverse = reverse;\n      this._stack = map._root && mapIteratorFrame(map._root);\n    }\n\n    MapIterator.prototype.next = function() {\n      var type = this._type;\n      var stack = this._stack;\n      while (stack) {\n        var node = stack.node;\n        var index = stack.index++;\n        var maxIndex;\n        if (node.entry) {\n          if (index === 0) {\n            return mapIteratorValue(type, node.entry);\n          }\n        } else if (node.entries) {\n          maxIndex = node.entries.length - 1;\n          if (index <= maxIndex) {\n            return mapIteratorValue(type, node.entries[this._reverse ? maxIndex - index : index]);\n          }\n        } else {\n          maxIndex = node.nodes.length - 1;\n          if (index <= maxIndex) {\n            var subNode = node.nodes[this._reverse ? 
maxIndex - index : index];\n            if (subNode) {\n              if (subNode.entry) {\n                return mapIteratorValue(type, subNode.entry);\n              }\n              stack = this._stack = mapIteratorFrame(subNode, stack);\n            }\n            continue;\n          }\n        }\n        stack = this._stack = this._stack.__prev;\n      }\n      return iteratorDone();\n    };\n\n\n  function mapIteratorValue(type, entry) {\n    return iteratorValue(type, entry[0], entry[1]);\n  }\n\n  function mapIteratorFrame(node, prev) {\n    return {\n      node: node,\n      index: 0,\n      __prev: prev\n    };\n  }\n\n  function makeMap(size, root, ownerID, hash) {\n    var map = Object.create(MapPrototype);\n    map.size = size;\n    map._root = root;\n    map.__ownerID = ownerID;\n    map.__hash = hash;\n    map.__altered = false;\n    return map;\n  }\n\n  var EMPTY_MAP;\n  function emptyMap() {\n    return EMPTY_MAP || (EMPTY_MAP = makeMap(0));\n  }\n\n  function updateMap(map, k, v) {\n    var newRoot;\n    var newSize;\n    if (!map._root) {\n      if (v === NOT_SET) {\n        return map;\n      }\n      newSize = 1;\n      newRoot = new ArrayMapNode(map.__ownerID, [[k, v]]);\n    } else {\n      var didChangeSize = MakeRef(CHANGE_LENGTH);\n      var didAlter = MakeRef(DID_ALTER);\n      newRoot = updateNode(map._root, map.__ownerID, 0, undefined, k, v, didChangeSize, didAlter);\n      if (!didAlter.value) {\n        return map;\n      }\n      newSize = map.size + (didChangeSize.value ? v === NOT_SET ? -1 : 1 : 0);\n    }\n    if (map.__ownerID) {\n      map.size = newSize;\n      map._root = newRoot;\n      map.__hash = undefined;\n      map.__altered = true;\n      return map;\n    }\n    return newRoot ? 
makeMap(newSize, newRoot) : emptyMap();\n  }\n\n  function updateNode(node, ownerID, shift, keyHash, key, value, didChangeSize, didAlter) {\n    if (!node) {\n      if (value === NOT_SET) {\n        return node;\n      }\n      SetRef(didAlter);\n      SetRef(didChangeSize);\n      return new ValueNode(ownerID, keyHash, [key, value]);\n    }\n    return node.update(ownerID, shift, keyHash, key, value, didChangeSize, didAlter);\n  }\n\n  function isLeafNode(node) {\n    return node.constructor === ValueNode || node.constructor === HashCollisionNode;\n  }\n\n  function mergeIntoNode(node, ownerID, shift, keyHash, entry) {\n    if (node.keyHash === keyHash) {\n      return new HashCollisionNode(ownerID, keyHash, [node.entry, entry]);\n    }\n\n    var idx1 = (shift === 0 ? node.keyHash : node.keyHash >>> shift) & MASK;\n    var idx2 = (shift === 0 ? keyHash : keyHash >>> shift) & MASK;\n\n    var newNode;\n    var nodes = idx1 === idx2 ?\n      [mergeIntoNode(node, ownerID, shift + SHIFT, keyHash, entry)] :\n      ((newNode = new ValueNode(ownerID, keyHash, entry)), idx1 < idx2 ? 
[node, newNode] : [newNode, node]);\n\n    return new BitmapIndexedNode(ownerID, (1 << idx1) | (1 << idx2), nodes);\n  }\n\n  function createNodes(ownerID, entries, key, value) {\n    if (!ownerID) {\n      ownerID = new OwnerID();\n    }\n    var node = new ValueNode(ownerID, hash(key), [key, value]);\n    for (var ii = 0; ii < entries.length; ii++) {\n      var entry = entries[ii];\n      node = node.update(ownerID, 0, undefined, entry[0], entry[1]);\n    }\n    return node;\n  }\n\n  function packNodes(ownerID, nodes, count, excluding) {\n    var bitmap = 0;\n    var packedII = 0;\n    var packedNodes = new Array(count);\n    for (var ii = 0, bit = 1, len = nodes.length; ii < len; ii++, bit <<= 1) {\n      var node = nodes[ii];\n      if (node !== undefined && ii !== excluding) {\n        bitmap |= bit;\n        packedNodes[packedII++] = node;\n      }\n    }\n    return new BitmapIndexedNode(ownerID, bitmap, packedNodes);\n  }\n\n  function expandNodes(ownerID, nodes, bitmap, including, node) {\n    var count = 0;\n    var expandedNodes = new Array(SIZE);\n    for (var ii = 0; bitmap !== 0; ii++, bitmap >>>= 1) {\n      expandedNodes[ii] = bitmap & 1 ? nodes[count++] : undefined;\n    }\n    expandedNodes[including] = node;\n    return new HashArrayMapNode(ownerID, count + 1, expandedNodes);\n  }\n\n  function mergeIntoMapWith(map, merger, iterables) {\n    var iters = [];\n    for (var ii = 0; ii < iterables.length; ii++) {\n      var value = iterables[ii];\n      var iter = KeyedIterable(value);\n      if (!isIterable(value)) {\n        iter = iter.map(function(v ) {return fromJS(v)});\n      }\n      iters.push(iter);\n    }\n    return mergeIntoCollectionWith(map, merger, iters);\n  }\n\n  function deepMerger(existing, value, key) {\n    return existing && existing.mergeDeep && isIterable(value) ?\n      existing.mergeDeep(value) :\n      is(existing, value) ? 
existing : value;\n  }\n\n  function deepMergerWith(merger) {\n    return function(existing, value, key)  {\n      if (existing && existing.mergeDeepWith && isIterable(value)) {\n        return existing.mergeDeepWith(merger, value);\n      }\n      var nextValue = merger(existing, value, key);\n      return is(existing, nextValue) ? existing : nextValue;\n    };\n  }\n\n  function mergeIntoCollectionWith(collection, merger, iters) {\n    iters = iters.filter(function(x ) {return x.size !== 0});\n    if (iters.length === 0) {\n      return collection;\n    }\n    if (collection.size === 0 && !collection.__ownerID && iters.length === 1) {\n      return collection.constructor(iters[0]);\n    }\n    return collection.withMutations(function(collection ) {\n      var mergeIntoMap = merger ?\n        function(value, key)  {\n          collection.update(key, NOT_SET, function(existing )\n            {return existing === NOT_SET ? value : merger(existing, value, key)}\n          );\n        } :\n        function(value, key)  {\n          collection.set(key, value);\n        }\n      for (var ii = 0; ii < iters.length; ii++) {\n        iters[ii].forEach(mergeIntoMap);\n      }\n    });\n  }\n\n  function updateInDeepMap(existing, keyPathIter, notSetValue, updater) {\n    var isNotSet = existing === NOT_SET;\n    var step = keyPathIter.next();\n    if (step.done) {\n      var existingValue = isNotSet ? notSetValue : existing;\n      var newValue = updater(existingValue);\n      return newValue === existingValue ? existing : newValue;\n    }\n    invariant(\n      isNotSet || (existing && existing.set),\n      'invalid keyPath'\n    );\n    var key = step.value;\n    var nextExisting = isNotSet ? NOT_SET : existing.get(key, NOT_SET);\n    var nextUpdated = updateInDeepMap(\n      nextExisting,\n      keyPathIter,\n      notSetValue,\n      updater\n    );\n    return nextUpdated === nextExisting ? existing :\n      nextUpdated === NOT_SET ? 
existing.remove(key) :\n      (isNotSet ? emptyMap() : existing).set(key, nextUpdated);\n  }\n\n  function popCount(x) {\n    x = x - ((x >> 1) & 0x55555555);\n    x = (x & 0x33333333) + ((x >> 2) & 0x33333333);\n    x = (x + (x >> 4)) & 0x0f0f0f0f;\n    x = x + (x >> 8);\n    x = x + (x >> 16);\n    return x & 0x7f;\n  }\n\n  function setIn(array, idx, val, canEdit) {\n    var newArray = canEdit ? array : arrCopy(array);\n    newArray[idx] = val;\n    return newArray;\n  }\n\n  function spliceIn(array, idx, val, canEdit) {\n    var newLen = array.length + 1;\n    if (canEdit && idx + 1 === newLen) {\n      array[idx] = val;\n      return array;\n    }\n    var newArray = new Array(newLen);\n    var after = 0;\n    for (var ii = 0; ii < newLen; ii++) {\n      if (ii === idx) {\n        newArray[ii] = val;\n        after = -1;\n      } else {\n        newArray[ii] = array[ii + after];\n      }\n    }\n    return newArray;\n  }\n\n  function spliceOut(array, idx, canEdit) {\n    var newLen = array.length - 1;\n    if (canEdit && idx === newLen) {\n      array.pop();\n      return array;\n    }\n    var newArray = new Array(newLen);\n    var after = 0;\n    for (var ii = 0; ii < newLen; ii++) {\n      if (ii === idx) {\n        after = 1;\n      }\n      newArray[ii] = array[ii + after];\n    }\n    return newArray;\n  }\n\n  var MAX_ARRAY_MAP_SIZE = SIZE / 4;\n  var MAX_BITMAP_INDEXED_SIZE = SIZE / 2;\n  var MIN_HASH_ARRAY_MAP_SIZE = SIZE / 4;\n\n  createClass(List, IndexedCollection);\n\n    // @pragma Construction\n\n    function List(value) {\n      var empty = emptyList();\n      if (value === null || value === undefined) {\n        return empty;\n      }\n      if (isList(value)) {\n        return value;\n      }\n      var iter = IndexedIterable(value);\n      var size = iter.size;\n      if (size === 0) {\n        return empty;\n      }\n      assertNotInfinite(size);\n      if (size > 0 && size < SIZE) {\n        return makeList(0, size, SHIFT, null, new 
VNode(iter.toArray()));\n      }\n      return empty.withMutations(function(list ) {\n        list.setSize(size);\n        iter.forEach(function(v, i)  {return list.set(i, v)});\n      });\n    }\n\n    List.of = function(/*...values*/) {\n      return this(arguments);\n    };\n\n    List.prototype.toString = function() {\n      return this.__toString('List [', ']');\n    };\n\n    // @pragma Access\n\n    List.prototype.get = function(index, notSetValue) {\n      index = wrapIndex(this, index);\n      if (index >= 0 && index < this.size) {\n        index += this._origin;\n        var node = listNodeFor(this, index);\n        return node && node.array[index & MASK];\n      }\n      return notSetValue;\n    };\n\n    // @pragma Modification\n\n    List.prototype.set = function(index, value) {\n      return updateList(this, index, value);\n    };\n\n    List.prototype.remove = function(index) {\n      return !this.has(index) ? this :\n        index === 0 ? this.shift() :\n        index === this.size - 1 ? 
this.pop() :\n        this.splice(index, 1);\n    };\n\n    List.prototype.insert = function(index, value) {\n      return this.splice(index, 0, value);\n    };\n\n    List.prototype.clear = function() {\n      if (this.size === 0) {\n        return this;\n      }\n      if (this.__ownerID) {\n        this.size = this._origin = this._capacity = 0;\n        this._level = SHIFT;\n        this._root = this._tail = null;\n        this.__hash = undefined;\n        this.__altered = true;\n        return this;\n      }\n      return emptyList();\n    };\n\n    List.prototype.push = function(/*...values*/) {\n      var values = arguments;\n      var oldSize = this.size;\n      return this.withMutations(function(list ) {\n        setListBounds(list, 0, oldSize + values.length);\n        for (var ii = 0; ii < values.length; ii++) {\n          list.set(oldSize + ii, values[ii]);\n        }\n      });\n    };\n\n    List.prototype.pop = function() {\n      return setListBounds(this, 0, -1);\n    };\n\n    List.prototype.unshift = function(/*...values*/) {\n      var values = arguments;\n      return this.withMutations(function(list ) {\n        setListBounds(list, -values.length);\n        for (var ii = 0; ii < values.length; ii++) {\n          list.set(ii, values[ii]);\n        }\n      });\n    };\n\n    List.prototype.shift = function() {\n      return setListBounds(this, 1);\n    };\n\n    // @pragma Composition\n\n    List.prototype.merge = function(/*...iters*/) {\n      return mergeIntoListWith(this, undefined, arguments);\n    };\n\n    List.prototype.mergeWith = function(merger) {var iters = SLICE$0.call(arguments, 1);\n      return mergeIntoListWith(this, merger, iters);\n    };\n\n    List.prototype.mergeDeep = function(/*...iters*/) {\n      return mergeIntoListWith(this, deepMerger, arguments);\n    };\n\n    List.prototype.mergeDeepWith = function(merger) {var iters = SLICE$0.call(arguments, 1);\n      return mergeIntoListWith(this, deepMergerWith(merger), 
iters);\n    };\n\n    List.prototype.setSize = function(size) {\n      return setListBounds(this, 0, size);\n    };\n\n    // @pragma Iteration\n\n    List.prototype.slice = function(begin, end) {\n      var size = this.size;\n      if (wholeSlice(begin, end, size)) {\n        return this;\n      }\n      return setListBounds(\n        this,\n        resolveBegin(begin, size),\n        resolveEnd(end, size)\n      );\n    };\n\n    List.prototype.__iterator = function(type, reverse) {\n      var index = 0;\n      var values = iterateList(this, reverse);\n      return new Iterator(function()  {\n        var value = values();\n        return value === DONE ?\n          iteratorDone() :\n          iteratorValue(type, index++, value);\n      });\n    };\n\n    List.prototype.__iterate = function(fn, reverse) {\n      var index = 0;\n      var values = iterateList(this, reverse);\n      var value;\n      while ((value = values()) !== DONE) {\n        if (fn(value, index++, this) === false) {\n          break;\n        }\n      }\n      return index;\n    };\n\n    List.prototype.__ensureOwner = function(ownerID) {\n      if (ownerID === this.__ownerID) {\n        return this;\n      }\n      if (!ownerID) {\n        this.__ownerID = ownerID;\n        return this;\n      }\n      return makeList(this._origin, this._capacity, this._level, this._root, this._tail, ownerID, this.__hash);\n    };\n\n\n  function isList(maybeList) {\n    return !!(maybeList && maybeList[IS_LIST_SENTINEL]);\n  }\n\n  List.isList = isList;\n\n  var IS_LIST_SENTINEL = '@@__IMMUTABLE_LIST__@@';\n\n  var ListPrototype = List.prototype;\n  ListPrototype[IS_LIST_SENTINEL] = true;\n  ListPrototype[DELETE] = ListPrototype.remove;\n  ListPrototype.setIn = MapPrototype.setIn;\n  ListPrototype.deleteIn =\n  ListPrototype.removeIn = MapPrototype.removeIn;\n  ListPrototype.update = MapPrototype.update;\n  ListPrototype.updateIn = MapPrototype.updateIn;\n  ListPrototype.mergeIn = MapPrototype.mergeIn;\n  
ListPrototype.mergeDeepIn = MapPrototype.mergeDeepIn;\n  ListPrototype.withMutations = MapPrototype.withMutations;\n  ListPrototype.asMutable = MapPrototype.asMutable;\n  ListPrototype.asImmutable = MapPrototype.asImmutable;\n  ListPrototype.wasAltered = MapPrototype.wasAltered;\n\n\n\n    function VNode(array, ownerID) {\n      this.array = array;\n      this.ownerID = ownerID;\n    }\n\n    // TODO: seems like these methods are very similar\n\n    VNode.prototype.removeBefore = function(ownerID, level, index) {\n      if (index === level ? 1 << level : 0 || this.array.length === 0) {\n        return this;\n      }\n      var originIndex = (index >>> level) & MASK;\n      if (originIndex >= this.array.length) {\n        return new VNode([], ownerID);\n      }\n      var removingFirst = originIndex === 0;\n      var newChild;\n      if (level > 0) {\n        var oldChild = this.array[originIndex];\n        newChild = oldChild && oldChild.removeBefore(ownerID, level - SHIFT, index);\n        if (newChild === oldChild && removingFirst) {\n          return this;\n        }\n      }\n      if (removingFirst && !newChild) {\n        return this;\n      }\n      var editable = editableVNode(this, ownerID);\n      if (!removingFirst) {\n        for (var ii = 0; ii < originIndex; ii++) {\n          editable.array[ii] = undefined;\n        }\n      }\n      if (newChild) {\n        editable.array[originIndex] = newChild;\n      }\n      return editable;\n    };\n\n    VNode.prototype.removeAfter = function(ownerID, level, index) {\n      if (index === (level ? 
1 << level : 0) || this.array.length === 0) {\n        return this;\n      }\n      var sizeIndex = ((index - 1) >>> level) & MASK;\n      if (sizeIndex >= this.array.length) {\n        return this;\n      }\n\n      var newChild;\n      if (level > 0) {\n        var oldChild = this.array[sizeIndex];\n        newChild = oldChild && oldChild.removeAfter(ownerID, level - SHIFT, index);\n        if (newChild === oldChild && sizeIndex === this.array.length - 1) {\n          return this;\n        }\n      }\n\n      var editable = editableVNode(this, ownerID);\n      editable.array.splice(sizeIndex + 1);\n      if (newChild) {\n        editable.array[sizeIndex] = newChild;\n      }\n      return editable;\n    };\n\n\n\n  var DONE = {};\n\n  function iterateList(list, reverse) {\n    var left = list._origin;\n    var right = list._capacity;\n    var tailPos = getTailOffset(right);\n    var tail = list._tail;\n\n    return iterateNodeOrLeaf(list._root, list._level, 0);\n\n    function iterateNodeOrLeaf(node, level, offset) {\n      return level === 0 ?\n        iterateLeaf(node, offset) :\n        iterateNode(node, level, offset);\n    }\n\n    function iterateLeaf(node, offset) {\n      var array = offset === tailPos ? tail && tail.array : node && node.array;\n      var from = offset > left ? 0 : left - offset;\n      var to = right - offset;\n      if (to > SIZE) {\n        to = SIZE;\n      }\n      return function()  {\n        if (from === to) {\n          return DONE;\n        }\n        var idx = reverse ? --to : from++;\n        return array && array[idx];\n      };\n    }\n\n    function iterateNode(node, level, offset) {\n      var values;\n      var array = node && node.array;\n      var from = offset > left ? 
0 : (left - offset) >> level;\n      var to = ((right - offset) >> level) + 1;\n      if (to > SIZE) {\n        to = SIZE;\n      }\n      return function()  {\n        do {\n          if (values) {\n            var value = values();\n            if (value !== DONE) {\n              return value;\n            }\n            values = null;\n          }\n          if (from === to) {\n            return DONE;\n          }\n          var idx = reverse ? --to : from++;\n          values = iterateNodeOrLeaf(\n            array && array[idx], level - SHIFT, offset + (idx << level)\n          );\n        } while (true);\n      };\n    }\n  }\n\n  function makeList(origin, capacity, level, root, tail, ownerID, hash) {\n    var list = Object.create(ListPrototype);\n    list.size = capacity - origin;\n    list._origin = origin;\n    list._capacity = capacity;\n    list._level = level;\n    list._root = root;\n    list._tail = tail;\n    list.__ownerID = ownerID;\n    list.__hash = hash;\n    list.__altered = false;\n    return list;\n  }\n\n  var EMPTY_LIST;\n  function emptyList() {\n    return EMPTY_LIST || (EMPTY_LIST = makeList(0, 0, SHIFT));\n  }\n\n  function updateList(list, index, value) {\n    index = wrapIndex(list, index);\n\n    if (index !== index) {\n      return list;\n    }\n\n    if (index >= list.size || index < 0) {\n      return list.withMutations(function(list ) {\n        index < 0 ?\n          setListBounds(list, index).set(0, value) :\n          setListBounds(list, 0, index + 1).set(index, value)\n      });\n    }\n\n    index += list._origin;\n\n    var newTail = list._tail;\n    var newRoot = list._root;\n    var didAlter = MakeRef(DID_ALTER);\n    if (index >= getTailOffset(list._capacity)) {\n      newTail = updateVNode(newTail, list.__ownerID, 0, index, value, didAlter);\n    } else {\n      newRoot = updateVNode(newRoot, list.__ownerID, list._level, index, value, didAlter);\n    }\n\n    if (!didAlter.value) {\n      return list;\n    }\n\n    if 
(list.__ownerID) {\n      list._root = newRoot;\n      list._tail = newTail;\n      list.__hash = undefined;\n      list.__altered = true;\n      return list;\n    }\n    return makeList(list._origin, list._capacity, list._level, newRoot, newTail);\n  }\n\n  function updateVNode(node, ownerID, level, index, value, didAlter) {\n    var idx = (index >>> level) & MASK;\n    var nodeHas = node && idx < node.array.length;\n    if (!nodeHas && value === undefined) {\n      return node;\n    }\n\n    var newNode;\n\n    if (level > 0) {\n      var lowerNode = node && node.array[idx];\n      var newLowerNode = updateVNode(lowerNode, ownerID, level - SHIFT, index, value, didAlter);\n      if (newLowerNode === lowerNode) {\n        return node;\n      }\n      newNode = editableVNode(node, ownerID);\n      newNode.array[idx] = newLowerNode;\n      return newNode;\n    }\n\n    if (nodeHas && node.array[idx] === value) {\n      return node;\n    }\n\n    SetRef(didAlter);\n\n    newNode = editableVNode(node, ownerID);\n    if (value === undefined && idx === newNode.array.length - 1) {\n      newNode.array.pop();\n    } else {\n      newNode.array[idx] = value;\n    }\n    return newNode;\n  }\n\n  function editableVNode(node, ownerID) {\n    if (ownerID && node && ownerID === node.ownerID) {\n      return node;\n    }\n    return new VNode(node ? 
node.array.slice() : [], ownerID);\n  }\n\n  function listNodeFor(list, rawIndex) {\n    if (rawIndex >= getTailOffset(list._capacity)) {\n      return list._tail;\n    }\n    if (rawIndex < 1 << (list._level + SHIFT)) {\n      var node = list._root;\n      var level = list._level;\n      while (node && level > 0) {\n        node = node.array[(rawIndex >>> level) & MASK];\n        level -= SHIFT;\n      }\n      return node;\n    }\n  }\n\n  function setListBounds(list, begin, end) {\n    // Sanitize begin & end using this shorthand for ToInt32(argument)\n    // http://www.ecma-international.org/ecma-262/6.0/#sec-toint32\n    if (begin !== undefined) {\n      begin = begin | 0;\n    }\n    if (end !== undefined) {\n      end = end | 0;\n    }\n    var owner = list.__ownerID || new OwnerID();\n    var oldOrigin = list._origin;\n    var oldCapacity = list._capacity;\n    var newOrigin = oldOrigin + begin;\n    var newCapacity = end === undefined ? oldCapacity : end < 0 ? oldCapacity + end : oldOrigin + end;\n    if (newOrigin === oldOrigin && newCapacity === oldCapacity) {\n      return list;\n    }\n\n    // If it's going to end after it starts, it's empty.\n    if (newOrigin >= newCapacity) {\n      return list.clear();\n    }\n\n    var newLevel = list._level;\n    var newRoot = list._root;\n\n    // New origin might need creating a higher root.\n    var offsetShift = 0;\n    while (newOrigin + offsetShift < 0) {\n      newRoot = new VNode(newRoot && newRoot.array.length ? 
[undefined, newRoot] : [], owner);\n      newLevel += SHIFT;\n      offsetShift += 1 << newLevel;\n    }\n    if (offsetShift) {\n      newOrigin += offsetShift;\n      oldOrigin += offsetShift;\n      newCapacity += offsetShift;\n      oldCapacity += offsetShift;\n    }\n\n    var oldTailOffset = getTailOffset(oldCapacity);\n    var newTailOffset = getTailOffset(newCapacity);\n\n    // New size might need creating a higher root.\n    while (newTailOffset >= 1 << (newLevel + SHIFT)) {\n      newRoot = new VNode(newRoot && newRoot.array.length ? [newRoot] : [], owner);\n      newLevel += SHIFT;\n    }\n\n    // Locate or create the new tail.\n    var oldTail = list._tail;\n    var newTail = newTailOffset < oldTailOffset ?\n      listNodeFor(list, newCapacity - 1) :\n      newTailOffset > oldTailOffset ? new VNode([], owner) : oldTail;\n\n    // Merge Tail into tree.\n    if (oldTail && newTailOffset > oldTailOffset && newOrigin < oldCapacity && oldTail.array.length) {\n      newRoot = editableVNode(newRoot, owner);\n      var node = newRoot;\n      for (var level = newLevel; level > SHIFT; level -= SHIFT) {\n        var idx = (oldTailOffset >>> level) & MASK;\n        node = node.array[idx] = editableVNode(node.array[idx], owner);\n      }\n      node.array[(oldTailOffset >>> SHIFT) & MASK] = oldTail;\n    }\n\n    // If the size has been reduced, there's a chance the tail needs to be trimmed.\n    if (newCapacity < oldCapacity) {\n      newTail = newTail && newTail.removeAfter(owner, 0, newCapacity);\n    }\n\n    // If the new origin is within the tail, then we do not need a root.\n    if (newOrigin >= newTailOffset) {\n      newOrigin -= newTailOffset;\n      newCapacity -= newTailOffset;\n      newLevel = SHIFT;\n      newRoot = null;\n      newTail = newTail && newTail.removeBefore(owner, 0, newOrigin);\n\n    // Otherwise, if the root has been trimmed, garbage collect.\n    } else if (newOrigin > oldOrigin || newTailOffset < oldTailOffset) {\n      offsetShift 
= 0;\n\n      // Identify the new top root node of the subtree of the old root.\n      while (newRoot) {\n        var beginIndex = (newOrigin >>> newLevel) & MASK;\n        if (beginIndex !== (newTailOffset >>> newLevel) & MASK) {\n          break;\n        }\n        if (beginIndex) {\n          offsetShift += (1 << newLevel) * beginIndex;\n        }\n        newLevel -= SHIFT;\n        newRoot = newRoot.array[beginIndex];\n      }\n\n      // Trim the new sides of the new root.\n      if (newRoot && newOrigin > oldOrigin) {\n        newRoot = newRoot.removeBefore(owner, newLevel, newOrigin - offsetShift);\n      }\n      if (newRoot && newTailOffset < oldTailOffset) {\n        newRoot = newRoot.removeAfter(owner, newLevel, newTailOffset - offsetShift);\n      }\n      if (offsetShift) {\n        newOrigin -= offsetShift;\n        newCapacity -= offsetShift;\n      }\n    }\n\n    if (list.__ownerID) {\n      list.size = newCapacity - newOrigin;\n      list._origin = newOrigin;\n      list._capacity = newCapacity;\n      list._level = newLevel;\n      list._root = newRoot;\n      list._tail = newTail;\n      list.__hash = undefined;\n      list.__altered = true;\n      return list;\n    }\n    return makeList(newOrigin, newCapacity, newLevel, newRoot, newTail);\n  }\n\n  function mergeIntoListWith(list, merger, iterables) {\n    var iters = [];\n    var maxSize = 0;\n    for (var ii = 0; ii < iterables.length; ii++) {\n      var value = iterables[ii];\n      var iter = IndexedIterable(value);\n      if (iter.size > maxSize) {\n        maxSize = iter.size;\n      }\n      if (!isIterable(value)) {\n        iter = iter.map(function(v ) {return fromJS(v)});\n      }\n      iters.push(iter);\n    }\n    if (maxSize > list.size) {\n      list = list.setSize(maxSize);\n    }\n    return mergeIntoCollectionWith(list, merger, iters);\n  }\n\n  function getTailOffset(size) {\n    return size < SIZE ? 
0 : (((size - 1) >>> SHIFT) << SHIFT);\n  }\n\n  createClass(OrderedMap, Map);\n\n    // @pragma Construction\n\n    function OrderedMap(value) {\n      return value === null || value === undefined ? emptyOrderedMap() :\n        isOrderedMap(value) ? value :\n        emptyOrderedMap().withMutations(function(map ) {\n          var iter = KeyedIterable(value);\n          assertNotInfinite(iter.size);\n          iter.forEach(function(v, k)  {return map.set(k, v)});\n        });\n    }\n\n    OrderedMap.of = function(/*...values*/) {\n      return this(arguments);\n    };\n\n    OrderedMap.prototype.toString = function() {\n      return this.__toString('OrderedMap {', '}');\n    };\n\n    // @pragma Access\n\n    OrderedMap.prototype.get = function(k, notSetValue) {\n      var index = this._map.get(k);\n      return index !== undefined ? this._list.get(index)[1] : notSetValue;\n    };\n\n    // @pragma Modification\n\n    OrderedMap.prototype.clear = function() {\n      if (this.size === 0) {\n        return this;\n      }\n      if (this.__ownerID) {\n        this.size = 0;\n        this._map.clear();\n        this._list.clear();\n        return this;\n      }\n      return emptyOrderedMap();\n    };\n\n    OrderedMap.prototype.set = function(k, v) {\n      return updateOrderedMap(this, k, v);\n    };\n\n    OrderedMap.prototype.remove = function(k) {\n      return updateOrderedMap(this, k, NOT_SET);\n    };\n\n    OrderedMap.prototype.wasAltered = function() {\n      return this._map.wasAltered() || this._list.wasAltered();\n    };\n\n    OrderedMap.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n      return this._list.__iterate(\n        function(entry ) {return entry && fn(entry[1], entry[0], this$0)},\n        reverse\n      );\n    };\n\n    OrderedMap.prototype.__iterator = function(type, reverse) {\n      return this._list.fromEntrySeq().__iterator(type, reverse);\n    };\n\n    OrderedMap.prototype.__ensureOwner = function(ownerID) {\n      
if (ownerID === this.__ownerID) {\n        return this;\n      }\n      var newMap = this._map.__ensureOwner(ownerID);\n      var newList = this._list.__ensureOwner(ownerID);\n      if (!ownerID) {\n        this.__ownerID = ownerID;\n        this._map = newMap;\n        this._list = newList;\n        return this;\n      }\n      return makeOrderedMap(newMap, newList, ownerID, this.__hash);\n    };\n\n\n  function isOrderedMap(maybeOrderedMap) {\n    return isMap(maybeOrderedMap) && isOrdered(maybeOrderedMap);\n  }\n\n  OrderedMap.isOrderedMap = isOrderedMap;\n\n  OrderedMap.prototype[IS_ORDERED_SENTINEL] = true;\n  OrderedMap.prototype[DELETE] = OrderedMap.prototype.remove;\n\n\n\n  function makeOrderedMap(map, list, ownerID, hash) {\n    var omap = Object.create(OrderedMap.prototype);\n    omap.size = map ? map.size : 0;\n    omap._map = map;\n    omap._list = list;\n    omap.__ownerID = ownerID;\n    omap.__hash = hash;\n    return omap;\n  }\n\n  var EMPTY_ORDERED_MAP;\n  function emptyOrderedMap() {\n    return EMPTY_ORDERED_MAP || (EMPTY_ORDERED_MAP = makeOrderedMap(emptyMap(), emptyList()));\n  }\n\n  function updateOrderedMap(omap, k, v) {\n    var map = omap._map;\n    var list = omap._list;\n    var i = map.get(k);\n    var has = i !== undefined;\n    var newMap;\n    var newList;\n    if (v === NOT_SET) { // removed\n      if (!has) {\n        return omap;\n      }\n      if (list.size >= SIZE && list.size >= map.size * 2) {\n        newList = list.filter(function(entry, idx)  {return entry !== undefined && i !== idx});\n        newMap = newList.toKeyedSeq().map(function(entry ) {return entry[0]}).flip().toMap();\n        if (omap.__ownerID) {\n          newMap.__ownerID = newList.__ownerID = omap.__ownerID;\n        }\n      } else {\n        newMap = map.remove(k);\n        newList = i === list.size - 1 ? 
list.pop() : list.set(i, undefined);\n      }\n    } else {\n      if (has) {\n        if (v === list.get(i)[1]) {\n          return omap;\n        }\n        newMap = map;\n        newList = list.set(i, [k, v]);\n      } else {\n        newMap = map.set(k, list.size);\n        newList = list.set(list.size, [k, v]);\n      }\n    }\n    if (omap.__ownerID) {\n      omap.size = newMap.size;\n      omap._map = newMap;\n      omap._list = newList;\n      omap.__hash = undefined;\n      return omap;\n    }\n    return makeOrderedMap(newMap, newList);\n  }\n\n  createClass(ToKeyedSequence, KeyedSeq);\n    function ToKeyedSequence(indexed, useKeys) {\n      this._iter = indexed;\n      this._useKeys = useKeys;\n      this.size = indexed.size;\n    }\n\n    ToKeyedSequence.prototype.get = function(key, notSetValue) {\n      return this._iter.get(key, notSetValue);\n    };\n\n    ToKeyedSequence.prototype.has = function(key) {\n      return this._iter.has(key);\n    };\n\n    ToKeyedSequence.prototype.valueSeq = function() {\n      return this._iter.valueSeq();\n    };\n\n    ToKeyedSequence.prototype.reverse = function() {var this$0 = this;\n      var reversedSequence = reverseFactory(this, true);\n      if (!this._useKeys) {\n        reversedSequence.valueSeq = function()  {return this$0._iter.toSeq().reverse()};\n      }\n      return reversedSequence;\n    };\n\n    ToKeyedSequence.prototype.map = function(mapper, context) {var this$0 = this;\n      var mappedSequence = mapFactory(this, mapper, context);\n      if (!this._useKeys) {\n        mappedSequence.valueSeq = function()  {return this$0._iter.toSeq().map(mapper, context)};\n      }\n      return mappedSequence;\n    };\n\n    ToKeyedSequence.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n      var ii;\n      return this._iter.__iterate(\n        this._useKeys ?\n          function(v, k)  {return fn(v, k, this$0)} :\n          ((ii = reverse ? 
resolveSize(this) : 0),\n            function(v ) {return fn(v, reverse ? --ii : ii++, this$0)}),\n        reverse\n      );\n    };\n\n    ToKeyedSequence.prototype.__iterator = function(type, reverse) {\n      if (this._useKeys) {\n        return this._iter.__iterator(type, reverse);\n      }\n      var iterator = this._iter.__iterator(ITERATE_VALUES, reverse);\n      var ii = reverse ? resolveSize(this) : 0;\n      return new Iterator(function()  {\n        var step = iterator.next();\n        return step.done ? step :\n          iteratorValue(type, reverse ? --ii : ii++, step.value, step);\n      });\n    };\n\n  ToKeyedSequence.prototype[IS_ORDERED_SENTINEL] = true;\n\n\n  createClass(ToIndexedSequence, IndexedSeq);\n    function ToIndexedSequence(iter) {\n      this._iter = iter;\n      this.size = iter.size;\n    }\n\n    ToIndexedSequence.prototype.includes = function(value) {\n      return this._iter.includes(value);\n    };\n\n    ToIndexedSequence.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n      var iterations = 0;\n      return this._iter.__iterate(function(v ) {return fn(v, iterations++, this$0)}, reverse);\n    };\n\n    ToIndexedSequence.prototype.__iterator = function(type, reverse) {\n      var iterator = this._iter.__iterator(ITERATE_VALUES, reverse);\n      var iterations = 0;\n      return new Iterator(function()  {\n        var step = iterator.next();\n        return step.done ? 
step :\n          iteratorValue(type, iterations++, step.value, step)\n      });\n    };\n\n\n\n  createClass(ToSetSequence, SetSeq);\n    function ToSetSequence(iter) {\n      this._iter = iter;\n      this.size = iter.size;\n    }\n\n    ToSetSequence.prototype.has = function(key) {\n      return this._iter.includes(key);\n    };\n\n    ToSetSequence.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n      return this._iter.__iterate(function(v ) {return fn(v, v, this$0)}, reverse);\n    };\n\n    ToSetSequence.prototype.__iterator = function(type, reverse) {\n      var iterator = this._iter.__iterator(ITERATE_VALUES, reverse);\n      return new Iterator(function()  {\n        var step = iterator.next();\n        return step.done ? step :\n          iteratorValue(type, step.value, step.value, step);\n      });\n    };\n\n\n\n  createClass(FromEntriesSequence, KeyedSeq);\n    function FromEntriesSequence(entries) {\n      this._iter = entries;\n      this.size = entries.size;\n    }\n\n    FromEntriesSequence.prototype.entrySeq = function() {\n      return this._iter.toSeq();\n    };\n\n    FromEntriesSequence.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n      return this._iter.__iterate(function(entry ) {\n        // Check if entry exists first so array access doesn't throw for holes\n        // in the parent iteration.\n        if (entry) {\n          validateEntry(entry);\n          var indexedIterable = isIterable(entry);\n          return fn(\n            indexedIterable ? entry.get(1) : entry[1],\n            indexedIterable ? 
entry.get(0) : entry[0],\n            this$0\n          );\n        }\n      }, reverse);\n    };\n\n    FromEntriesSequence.prototype.__iterator = function(type, reverse) {\n      var iterator = this._iter.__iterator(ITERATE_VALUES, reverse);\n      return new Iterator(function()  {\n        while (true) {\n          var step = iterator.next();\n          if (step.done) {\n            return step;\n          }\n          var entry = step.value;\n          // Check if entry exists first so array access doesn't throw for holes\n          // in the parent iteration.\n          if (entry) {\n            validateEntry(entry);\n            var indexedIterable = isIterable(entry);\n            return iteratorValue(\n              type,\n              indexedIterable ? entry.get(0) : entry[0],\n              indexedIterable ? entry.get(1) : entry[1],\n              step\n            );\n          }\n        }\n      });\n    };\n\n\n  ToIndexedSequence.prototype.cacheResult =\n  ToKeyedSequence.prototype.cacheResult =\n  ToSetSequence.prototype.cacheResult =\n  FromEntriesSequence.prototype.cacheResult =\n    cacheResultThrough;\n\n\n  function flipFactory(iterable) {\n    var flipSequence = makeSequence(iterable);\n    flipSequence._iter = iterable;\n    flipSequence.size = iterable.size;\n    flipSequence.flip = function()  {return iterable};\n    flipSequence.reverse = function () {\n      var reversedSequence = iterable.reverse.apply(this); // super.reverse()\n      reversedSequence.flip = function()  {return iterable.reverse()};\n      return reversedSequence;\n    };\n    flipSequence.has = function(key ) {return iterable.includes(key)};\n    flipSequence.includes = function(key ) {return iterable.has(key)};\n    flipSequence.cacheResult = cacheResultThrough;\n    flipSequence.__iterateUncached = function (fn, reverse) {var this$0 = this;\n      return iterable.__iterate(function(v, k)  {return fn(k, v, this$0) !== false}, reverse);\n    }\n    
flipSequence.__iteratorUncached = function(type, reverse) {\n      if (type === ITERATE_ENTRIES) {\n        var iterator = iterable.__iterator(type, reverse);\n        return new Iterator(function()  {\n          var step = iterator.next();\n          if (!step.done) {\n            var k = step.value[0];\n            step.value[0] = step.value[1];\n            step.value[1] = k;\n          }\n          return step;\n        });\n      }\n      return iterable.__iterator(\n        type === ITERATE_VALUES ? ITERATE_KEYS : ITERATE_VALUES,\n        reverse\n      );\n    }\n    return flipSequence;\n  }\n\n\n  function mapFactory(iterable, mapper, context) {\n    var mappedSequence = makeSequence(iterable);\n    mappedSequence.size = iterable.size;\n    mappedSequence.has = function(key ) {return iterable.has(key)};\n    mappedSequence.get = function(key, notSetValue)  {\n      var v = iterable.get(key, NOT_SET);\n      return v === NOT_SET ?\n        notSetValue :\n        mapper.call(context, v, key, iterable);\n    };\n    mappedSequence.__iterateUncached = function (fn, reverse) {var this$0 = this;\n      return iterable.__iterate(\n        function(v, k, c)  {return fn(mapper.call(context, v, k, c), k, this$0) !== false},\n        reverse\n      );\n    }\n    mappedSequence.__iteratorUncached = function (type, reverse) {\n      var iterator = iterable.__iterator(ITERATE_ENTRIES, reverse);\n      return new Iterator(function()  {\n        var step = iterator.next();\n        if (step.done) {\n          return step;\n        }\n        var entry = step.value;\n        var key = entry[0];\n        return iteratorValue(\n          type,\n          key,\n          mapper.call(context, entry[1], key, iterable),\n          step\n        );\n      });\n    }\n    return mappedSequence;\n  }\n\n\n  function reverseFactory(iterable, useKeys) {\n    var reversedSequence = makeSequence(iterable);\n    reversedSequence._iter = iterable;\n    reversedSequence.size = 
iterable.size;\n    reversedSequence.reverse = function()  {return iterable};\n    if (iterable.flip) {\n      reversedSequence.flip = function () {\n        var flipSequence = flipFactory(iterable);\n        flipSequence.reverse = function()  {return iterable.flip()};\n        return flipSequence;\n      };\n    }\n    reversedSequence.get = function(key, notSetValue) \n      {return iterable.get(useKeys ? key : -1 - key, notSetValue)};\n    reversedSequence.has = function(key )\n      {return iterable.has(useKeys ? key : -1 - key)};\n    reversedSequence.includes = function(value ) {return iterable.includes(value)};\n    reversedSequence.cacheResult = cacheResultThrough;\n    reversedSequence.__iterate = function (fn, reverse) {var this$0 = this;\n      return iterable.__iterate(function(v, k)  {return fn(v, k, this$0)}, !reverse);\n    };\n    reversedSequence.__iterator =\n      function(type, reverse)  {return iterable.__iterator(type, !reverse)};\n    return reversedSequence;\n  }\n\n\n  function filterFactory(iterable, predicate, context, useKeys) {\n    var filterSequence = makeSequence(iterable);\n    if (useKeys) {\n      filterSequence.has = function(key ) {\n        var v = iterable.get(key, NOT_SET);\n        return v !== NOT_SET && !!predicate.call(context, v, key, iterable);\n      };\n      filterSequence.get = function(key, notSetValue)  {\n        var v = iterable.get(key, NOT_SET);\n        return v !== NOT_SET && predicate.call(context, v, key, iterable) ?\n          v : notSetValue;\n      };\n    }\n    filterSequence.__iterateUncached = function (fn, reverse) {var this$0 = this;\n      var iterations = 0;\n      iterable.__iterate(function(v, k, c)  {\n        if (predicate.call(context, v, k, c)) {\n          iterations++;\n          return fn(v, useKeys ? 
k : iterations - 1, this$0);\n        }\n      }, reverse);\n      return iterations;\n    };\n    filterSequence.__iteratorUncached = function (type, reverse) {\n      var iterator = iterable.__iterator(ITERATE_ENTRIES, reverse);\n      var iterations = 0;\n      return new Iterator(function()  {\n        while (true) {\n          var step = iterator.next();\n          if (step.done) {\n            return step;\n          }\n          var entry = step.value;\n          var key = entry[0];\n          var value = entry[1];\n          if (predicate.call(context, value, key, iterable)) {\n            return iteratorValue(type, useKeys ? key : iterations++, value, step);\n          }\n        }\n      });\n    }\n    return filterSequence;\n  }\n\n\n  function countByFactory(iterable, grouper, context) {\n    var groups = Map().asMutable();\n    iterable.__iterate(function(v, k)  {\n      groups.update(\n        grouper.call(context, v, k, iterable),\n        0,\n        function(a ) {return a + 1}\n      );\n    });\n    return groups.asImmutable();\n  }\n\n\n  function groupByFactory(iterable, grouper, context) {\n    var isKeyedIter = isKeyed(iterable);\n    var groups = (isOrdered(iterable) ? OrderedMap() : Map()).asMutable();\n    iterable.__iterate(function(v, k)  {\n      groups.update(\n        grouper.call(context, v, k, iterable),\n        function(a ) {return (a = a || [], a.push(isKeyedIter ? 
[k, v] : v), a)}\n      );\n    });\n    var coerce = iterableClass(iterable);\n    return groups.map(function(arr ) {return reify(iterable, coerce(arr))});\n  }\n\n\n  function sliceFactory(iterable, begin, end, useKeys) {\n    var originalSize = iterable.size;\n\n    // Sanitize begin & end using this shorthand for ToInt32(argument)\n    // http://www.ecma-international.org/ecma-262/6.0/#sec-toint32\n    if (begin !== undefined) {\n      begin = begin | 0;\n    }\n    if (end !== undefined) {\n      if (end === Infinity) {\n        end = originalSize;\n      } else {\n        end = end | 0;\n      }\n    }\n\n    if (wholeSlice(begin, end, originalSize)) {\n      return iterable;\n    }\n\n    var resolvedBegin = resolveBegin(begin, originalSize);\n    var resolvedEnd = resolveEnd(end, originalSize);\n\n    // begin or end will be NaN if they were provided as negative numbers and\n    // this iterable's size is unknown. In that case, cache first so there is\n    // a known size and these do not resolve to NaN.\n    if (resolvedBegin !== resolvedBegin || resolvedEnd !== resolvedEnd) {\n      return sliceFactory(iterable.toSeq().cacheResult(), begin, end, useKeys);\n    }\n\n    // Note: resolvedEnd is undefined when the original sequence's length is\n    // unknown and this slice did not supply an end and should contain all\n    // elements after resolvedBegin.\n    // In that case, resolvedSize will be NaN and sliceSize will remain undefined.\n    var resolvedSize = resolvedEnd - resolvedBegin;\n    var sliceSize;\n    if (resolvedSize === resolvedSize) {\n      sliceSize = resolvedSize < 0 ? 0 : resolvedSize;\n    }\n\n    var sliceSeq = makeSequence(iterable);\n\n    // If iterable.size is undefined, the size of the realized sliceSeq is\n    // unknown at this point unless the number of items to slice is 0\n    sliceSeq.size = sliceSize === 0 ? 
sliceSize : iterable.size && sliceSize || undefined;\n\n    if (!useKeys && isSeq(iterable) && sliceSize >= 0) {\n      sliceSeq.get = function (index, notSetValue) {\n        index = wrapIndex(this, index);\n        return index >= 0 && index < sliceSize ?\n          iterable.get(index + resolvedBegin, notSetValue) :\n          notSetValue;\n      }\n    }\n\n    sliceSeq.__iterateUncached = function(fn, reverse) {var this$0 = this;\n      if (sliceSize === 0) {\n        return 0;\n      }\n      if (reverse) {\n        return this.cacheResult().__iterate(fn, reverse);\n      }\n      var skipped = 0;\n      var isSkipping = true;\n      var iterations = 0;\n      iterable.__iterate(function(v, k)  {\n        if (!(isSkipping && (isSkipping = skipped++ < resolvedBegin))) {\n          iterations++;\n          return fn(v, useKeys ? k : iterations - 1, this$0) !== false &&\n                 iterations !== sliceSize;\n        }\n      });\n      return iterations;\n    };\n\n    sliceSeq.__iteratorUncached = function(type, reverse) {\n      if (sliceSize !== 0 && reverse) {\n        return this.cacheResult().__iterator(type, reverse);\n      }\n      // Don't bother instantiating parent iterator if taking 0.\n      var iterator = sliceSize !== 0 && iterable.__iterator(type, reverse);\n      var skipped = 0;\n      var iterations = 0;\n      return new Iterator(function()  {\n        while (skipped++ < resolvedBegin) {\n          iterator.next();\n        }\n        if (++iterations > sliceSize) {\n          return iteratorDone();\n        }\n        var step = iterator.next();\n        if (useKeys || type === ITERATE_VALUES) {\n          return step;\n        } else if (type === ITERATE_KEYS) {\n          return iteratorValue(type, iterations - 1, undefined, step);\n        } else {\n          return iteratorValue(type, iterations - 1, step.value[1], step);\n        }\n      });\n    }\n\n    return sliceSeq;\n  }\n\n\n  function takeWhileFactory(iterable, predicate, 
context) {\n    var takeSequence = makeSequence(iterable);\n    takeSequence.__iterateUncached = function(fn, reverse) {var this$0 = this;\n      if (reverse) {\n        return this.cacheResult().__iterate(fn, reverse);\n      }\n      var iterations = 0;\n      iterable.__iterate(function(v, k, c) \n        {return predicate.call(context, v, k, c) && ++iterations && fn(v, k, this$0)}\n      );\n      return iterations;\n    };\n    takeSequence.__iteratorUncached = function(type, reverse) {var this$0 = this;\n      if (reverse) {\n        return this.cacheResult().__iterator(type, reverse);\n      }\n      var iterator = iterable.__iterator(ITERATE_ENTRIES, reverse);\n      var iterating = true;\n      return new Iterator(function()  {\n        if (!iterating) {\n          return iteratorDone();\n        }\n        var step = iterator.next();\n        if (step.done) {\n          return step;\n        }\n        var entry = step.value;\n        var k = entry[0];\n        var v = entry[1];\n        if (!predicate.call(context, v, k, this$0)) {\n          iterating = false;\n          return iteratorDone();\n        }\n        return type === ITERATE_ENTRIES ? step :\n          iteratorValue(type, k, v, step);\n      });\n    };\n    return takeSequence;\n  }\n\n\n  function skipWhileFactory(iterable, predicate, context, useKeys) {\n    var skipSequence = makeSequence(iterable);\n    skipSequence.__iterateUncached = function (fn, reverse) {var this$0 = this;\n      if (reverse) {\n        return this.cacheResult().__iterate(fn, reverse);\n      }\n      var isSkipping = true;\n      var iterations = 0;\n      iterable.__iterate(function(v, k, c)  {\n        if (!(isSkipping && (isSkipping = predicate.call(context, v, k, c)))) {\n          iterations++;\n          return fn(v, useKeys ? 
k : iterations - 1, this$0);\n        }\n      });\n      return iterations;\n    };\n    skipSequence.__iteratorUncached = function(type, reverse) {var this$0 = this;\n      if (reverse) {\n        return this.cacheResult().__iterator(type, reverse);\n      }\n      var iterator = iterable.__iterator(ITERATE_ENTRIES, reverse);\n      var skipping = true;\n      var iterations = 0;\n      return new Iterator(function()  {\n        var step, k, v;\n        do {\n          step = iterator.next();\n          if (step.done) {\n            if (useKeys || type === ITERATE_VALUES) {\n              return step;\n            } else if (type === ITERATE_KEYS) {\n              return iteratorValue(type, iterations++, undefined, step);\n            } else {\n              return iteratorValue(type, iterations++, step.value[1], step);\n            }\n          }\n          var entry = step.value;\n          k = entry[0];\n          v = entry[1];\n          skipping && (skipping = predicate.call(context, v, k, this$0));\n        } while (skipping);\n        return type === ITERATE_ENTRIES ? step :\n          iteratorValue(type, k, v, step);\n      });\n    };\n    return skipSequence;\n  }\n\n\n  function concatFactory(iterable, values) {\n    var isKeyedIterable = isKeyed(iterable);\n    var iters = [iterable].concat(values).map(function(v ) {\n      if (!isIterable(v)) {\n        v = isKeyedIterable ?\n          keyedSeqFromValue(v) :\n          indexedSeqFromValue(Array.isArray(v) ? 
v : [v]);\n      } else if (isKeyedIterable) {\n        v = KeyedIterable(v);\n      }\n      return v;\n    }).filter(function(v ) {return v.size !== 0});\n\n    if (iters.length === 0) {\n      return iterable;\n    }\n\n    if (iters.length === 1) {\n      var singleton = iters[0];\n      if (singleton === iterable ||\n          isKeyedIterable && isKeyed(singleton) ||\n          isIndexed(iterable) && isIndexed(singleton)) {\n        return singleton;\n      }\n    }\n\n    var concatSeq = new ArraySeq(iters);\n    if (isKeyedIterable) {\n      concatSeq = concatSeq.toKeyedSeq();\n    } else if (!isIndexed(iterable)) {\n      concatSeq = concatSeq.toSetSeq();\n    }\n    concatSeq = concatSeq.flatten(true);\n    concatSeq.size = iters.reduce(\n      function(sum, seq)  {\n        if (sum !== undefined) {\n          var size = seq.size;\n          if (size !== undefined) {\n            return sum + size;\n          }\n        }\n      },\n      0\n    );\n    return concatSeq;\n  }\n\n\n  function flattenFactory(iterable, depth, useKeys) {\n    var flatSequence = makeSequence(iterable);\n    flatSequence.__iterateUncached = function(fn, reverse) {\n      var iterations = 0;\n      var stopped = false;\n      function flatDeep(iter, currentDepth) {var this$0 = this;\n        iter.__iterate(function(v, k)  {\n          if ((!depth || currentDepth < depth) && isIterable(v)) {\n            flatDeep(v, currentDepth + 1);\n          } else if (fn(v, useKeys ? 
k : iterations++, this$0) === false) {\n            stopped = true;\n          }\n          return !stopped;\n        }, reverse);\n      }\n      flatDeep(iterable, 0);\n      return iterations;\n    }\n    flatSequence.__iteratorUncached = function(type, reverse) {\n      var iterator = iterable.__iterator(type, reverse);\n      var stack = [];\n      var iterations = 0;\n      return new Iterator(function()  {\n        while (iterator) {\n          var step = iterator.next();\n          if (step.done !== false) {\n            iterator = stack.pop();\n            continue;\n          }\n          var v = step.value;\n          if (type === ITERATE_ENTRIES) {\n            v = v[1];\n          }\n          if ((!depth || stack.length < depth) && isIterable(v)) {\n            stack.push(iterator);\n            iterator = v.__iterator(type, reverse);\n          } else {\n            return useKeys ? step : iteratorValue(type, iterations++, v, step);\n          }\n        }\n        return iteratorDone();\n      });\n    }\n    return flatSequence;\n  }\n\n\n  function flatMapFactory(iterable, mapper, context) {\n    var coerce = iterableClass(iterable);\n    return iterable.toSeq().map(\n      function(v, k)  {return coerce(mapper.call(context, v, k, iterable))}\n    ).flatten(true);\n  }\n\n\n  function interposeFactory(iterable, separator) {\n    var interposedSequence = makeSequence(iterable);\n    interposedSequence.size = iterable.size && iterable.size * 2 -1;\n    interposedSequence.__iterateUncached = function(fn, reverse) {var this$0 = this;\n      var iterations = 0;\n      iterable.__iterate(function(v, k) \n        {return (!iterations || fn(separator, iterations++, this$0) !== false) &&\n        fn(v, iterations++, this$0) !== false},\n        reverse\n      );\n      return iterations;\n    };\n    interposedSequence.__iteratorUncached = function(type, reverse) {\n      var iterator = iterable.__iterator(ITERATE_VALUES, reverse);\n      var iterations = 
0;\n      var step;\n      return new Iterator(function()  {\n        if (!step || iterations % 2) {\n          step = iterator.next();\n          if (step.done) {\n            return step;\n          }\n        }\n        return iterations % 2 ?\n          iteratorValue(type, iterations++, separator) :\n          iteratorValue(type, iterations++, step.value, step);\n      });\n    };\n    return interposedSequence;\n  }\n\n\n  function sortFactory(iterable, comparator, mapper) {\n    if (!comparator) {\n      comparator = defaultComparator;\n    }\n    var isKeyedIterable = isKeyed(iterable);\n    var index = 0;\n    var entries = iterable.toSeq().map(\n      function(v, k)  {return [k, v, index++, mapper ? mapper(v, k, iterable) : v]}\n    ).toArray();\n    entries.sort(function(a, b)  {return comparator(a[3], b[3]) || a[2] - b[2]}).forEach(\n      isKeyedIterable ?\n      function(v, i)  { entries[i].length = 2; } :\n      function(v, i)  { entries[i] = v[1]; }\n    );\n    return isKeyedIterable ? KeyedSeq(entries) :\n      isIndexed(iterable) ? IndexedSeq(entries) :\n      SetSeq(entries);\n  }\n\n\n  function maxFactory(iterable, comparator, mapper) {\n    if (!comparator) {\n      comparator = defaultComparator;\n    }\n    if (mapper) {\n      var entry = iterable.toSeq()\n        .map(function(v, k)  {return [v, mapper(v, k, iterable)]})\n        .reduce(function(a, b)  {return maxCompare(comparator, a[1], b[1]) ? b : a});\n      return entry && entry[0];\n    } else {\n      return iterable.reduce(function(a, b)  {return maxCompare(comparator, a, b) ? 
b : a});\n    }\n  }\n\n  function maxCompare(comparator, a, b) {\n    var comp = comparator(b, a);\n    // b is considered the new max if the comparator declares them equal, but\n    // they are not equal and b is in fact a nullish value.\n    return (comp === 0 && b !== a && (b === undefined || b === null || b !== b)) || comp > 0;\n  }\n\n\n  function zipWithFactory(keyIter, zipper, iters) {\n    var zipSequence = makeSequence(keyIter);\n    zipSequence.size = new ArraySeq(iters).map(function(i ) {return i.size}).min();\n    // Note: this a generic base implementation of __iterate in terms of\n    // __iterator which may be more generically useful in the future.\n    zipSequence.__iterate = function(fn, reverse) {\n      /* generic:\n      var iterator = this.__iterator(ITERATE_ENTRIES, reverse);\n      var step;\n      var iterations = 0;\n      while (!(step = iterator.next()).done) {\n        iterations++;\n        if (fn(step.value[1], step.value[0], this) === false) {\n          break;\n        }\n      }\n      return iterations;\n      */\n      // indexed:\n      var iterator = this.__iterator(ITERATE_VALUES, reverse);\n      var step;\n      var iterations = 0;\n      while (!(step = iterator.next()).done) {\n        if (fn(step.value, iterations++, this) === false) {\n          break;\n        }\n      }\n      return iterations;\n    };\n    zipSequence.__iteratorUncached = function(type, reverse) {\n      var iterators = iters.map(function(i )\n        {return (i = Iterable(i), getIterator(reverse ? 
i.reverse() : i))}\n      );\n      var iterations = 0;\n      var isDone = false;\n      return new Iterator(function()  {\n        var steps;\n        if (!isDone) {\n          steps = iterators.map(function(i ) {return i.next()});\n          isDone = steps.some(function(s ) {return s.done});\n        }\n        if (isDone) {\n          return iteratorDone();\n        }\n        return iteratorValue(\n          type,\n          iterations++,\n          zipper.apply(null, steps.map(function(s ) {return s.value}))\n        );\n      });\n    };\n    return zipSequence\n  }\n\n\n  // #pragma Helper Functions\n\n  function reify(iter, seq) {\n    return isSeq(iter) ? seq : iter.constructor(seq);\n  }\n\n  function validateEntry(entry) {\n    if (entry !== Object(entry)) {\n      throw new TypeError('Expected [K, V] tuple: ' + entry);\n    }\n  }\n\n  function resolveSize(iter) {\n    assertNotInfinite(iter.size);\n    return ensureSize(iter);\n  }\n\n  function iterableClass(iterable) {\n    return isKeyed(iterable) ? KeyedIterable :\n      isIndexed(iterable) ? IndexedIterable :\n      SetIterable;\n  }\n\n  function makeSequence(iterable) {\n    return Object.create(\n      (\n        isKeyed(iterable) ? KeyedSeq :\n        isIndexed(iterable) ? IndexedSeq :\n        SetSeq\n      ).prototype\n    );\n  }\n\n  function cacheResultThrough() {\n    if (this._iter.cacheResult) {\n      this._iter.cacheResult();\n      this.size = this._iter.size;\n      return this;\n    } else {\n      return Seq.prototype.cacheResult.call(this);\n    }\n  }\n\n  function defaultComparator(a, b) {\n    return a > b ? 1 : a < b ? 
-1 : 0;\n  }\n\n  function forceIterator(keyPath) {\n    var iter = getIterator(keyPath);\n    if (!iter) {\n      // Array might not be iterable in this environment, so we need a fallback\n      // to our wrapped type.\n      if (!isArrayLike(keyPath)) {\n        throw new TypeError('Expected iterable or array-like: ' + keyPath);\n      }\n      iter = getIterator(Iterable(keyPath));\n    }\n    return iter;\n  }\n\n  createClass(Record, KeyedCollection);\n\n    function Record(defaultValues, name) {\n      var hasInitialized;\n\n      var RecordType = function Record(values) {\n        if (values instanceof RecordType) {\n          return values;\n        }\n        if (!(this instanceof RecordType)) {\n          return new RecordType(values);\n        }\n        if (!hasInitialized) {\n          hasInitialized = true;\n          var keys = Object.keys(defaultValues);\n          setProps(RecordTypePrototype, keys);\n          RecordTypePrototype.size = keys.length;\n          RecordTypePrototype._name = name;\n          RecordTypePrototype._keys = keys;\n          RecordTypePrototype._defaultValues = defaultValues;\n        }\n        this._map = Map(values);\n      };\n\n      var RecordTypePrototype = RecordType.prototype = Object.create(RecordPrototype);\n      RecordTypePrototype.constructor = RecordType;\n\n      return RecordType;\n    }\n\n    Record.prototype.toString = function() {\n      return this.__toString(recordName(this) + ' {', '}');\n    };\n\n    // @pragma Access\n\n    Record.prototype.has = function(k) {\n      return this._defaultValues.hasOwnProperty(k);\n    };\n\n    Record.prototype.get = function(k, notSetValue) {\n      if (!this.has(k)) {\n        return notSetValue;\n      }\n      var defaultVal = this._defaultValues[k];\n      return this._map ? 
this._map.get(k, defaultVal) : defaultVal;\n    };\n\n    // @pragma Modification\n\n    Record.prototype.clear = function() {\n      if (this.__ownerID) {\n        this._map && this._map.clear();\n        return this;\n      }\n      var RecordType = this.constructor;\n      return RecordType._empty || (RecordType._empty = makeRecord(this, emptyMap()));\n    };\n\n    Record.prototype.set = function(k, v) {\n      if (!this.has(k)) {\n        throw new Error('Cannot set unknown key \"' + k + '\" on ' + recordName(this));\n      }\n      if (this._map && !this._map.has(k)) {\n        var defaultVal = this._defaultValues[k];\n        if (v === defaultVal) {\n          return this;\n        }\n      }\n      var newMap = this._map && this._map.set(k, v);\n      if (this.__ownerID || newMap === this._map) {\n        return this;\n      }\n      return makeRecord(this, newMap);\n    };\n\n    Record.prototype.remove = function(k) {\n      if (!this.has(k)) {\n        return this;\n      }\n      var newMap = this._map && this._map.remove(k);\n      if (this.__ownerID || newMap === this._map) {\n        return this;\n      }\n      return makeRecord(this, newMap);\n    };\n\n    Record.prototype.wasAltered = function() {\n      return this._map.wasAltered();\n    };\n\n    Record.prototype.__iterator = function(type, reverse) {var this$0 = this;\n      return KeyedIterable(this._defaultValues).map(function(_, k)  {return this$0.get(k)}).__iterator(type, reverse);\n    };\n\n    Record.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n      return KeyedIterable(this._defaultValues).map(function(_, k)  {return this$0.get(k)}).__iterate(fn, reverse);\n    };\n\n    Record.prototype.__ensureOwner = function(ownerID) {\n      if (ownerID === this.__ownerID) {\n        return this;\n      }\n      var newMap = this._map && this._map.__ensureOwner(ownerID);\n      if (!ownerID) {\n        this.__ownerID = ownerID;\n        this._map = newMap;\n        return 
this;\n      }\n      return makeRecord(this, newMap, ownerID);\n    };\n\n\n  var RecordPrototype = Record.prototype;\n  RecordPrototype[DELETE] = RecordPrototype.remove;\n  RecordPrototype.deleteIn =\n  RecordPrototype.removeIn = MapPrototype.removeIn;\n  RecordPrototype.merge = MapPrototype.merge;\n  RecordPrototype.mergeWith = MapPrototype.mergeWith;\n  RecordPrototype.mergeIn = MapPrototype.mergeIn;\n  RecordPrototype.mergeDeep = MapPrototype.mergeDeep;\n  RecordPrototype.mergeDeepWith = MapPrototype.mergeDeepWith;\n  RecordPrototype.mergeDeepIn = MapPrototype.mergeDeepIn;\n  RecordPrototype.setIn = MapPrototype.setIn;\n  RecordPrototype.update = MapPrototype.update;\n  RecordPrototype.updateIn = MapPrototype.updateIn;\n  RecordPrototype.withMutations = MapPrototype.withMutations;\n  RecordPrototype.asMutable = MapPrototype.asMutable;\n  RecordPrototype.asImmutable = MapPrototype.asImmutable;\n\n\n  function makeRecord(likeRecord, map, ownerID) {\n    var record = Object.create(Object.getPrototypeOf(likeRecord));\n    record._map = map;\n    record.__ownerID = ownerID;\n    return record;\n  }\n\n  function recordName(record) {\n    return record._name || record.constructor.name || 'Record';\n  }\n\n  function setProps(prototype, names) {\n    try {\n      names.forEach(setProp.bind(undefined, prototype));\n    } catch (error) {\n      // Object.defineProperty failed. Probably IE8.\n    }\n  }\n\n  function setProp(prototype, name) {\n    Object.defineProperty(prototype, name, {\n      get: function() {\n        return this.get(name);\n      },\n      set: function(value) {\n        invariant(this.__ownerID, 'Cannot set on an immutable record.');\n        this.set(name, value);\n      }\n    });\n  }\n\n  createClass(Set, SetCollection);\n\n    // @pragma Construction\n\n    function Set(value) {\n      return value === null || value === undefined ? emptySet() :\n        isSet(value) && !isOrdered(value) ? 
value :\n        emptySet().withMutations(function(set ) {\n          var iter = SetIterable(value);\n          assertNotInfinite(iter.size);\n          iter.forEach(function(v ) {return set.add(v)});\n        });\n    }\n\n    Set.of = function(/*...values*/) {\n      return this(arguments);\n    };\n\n    Set.fromKeys = function(value) {\n      return this(KeyedIterable(value).keySeq());\n    };\n\n    Set.prototype.toString = function() {\n      return this.__toString('Set {', '}');\n    };\n\n    // @pragma Access\n\n    Set.prototype.has = function(value) {\n      return this._map.has(value);\n    };\n\n    // @pragma Modification\n\n    Set.prototype.add = function(value) {\n      return updateSet(this, this._map.set(value, true));\n    };\n\n    Set.prototype.remove = function(value) {\n      return updateSet(this, this._map.remove(value));\n    };\n\n    Set.prototype.clear = function() {\n      return updateSet(this, this._map.clear());\n    };\n\n    // @pragma Composition\n\n    Set.prototype.union = function() {var iters = SLICE$0.call(arguments, 0);\n      iters = iters.filter(function(x ) {return x.size !== 0});\n      if (iters.length === 0) {\n        return this;\n      }\n      if (this.size === 0 && !this.__ownerID && iters.length === 1) {\n        return this.constructor(iters[0]);\n      }\n      return this.withMutations(function(set ) {\n        for (var ii = 0; ii < iters.length; ii++) {\n          SetIterable(iters[ii]).forEach(function(value ) {return set.add(value)});\n        }\n      });\n    };\n\n    Set.prototype.intersect = function() {var iters = SLICE$0.call(arguments, 0);\n      if (iters.length === 0) {\n        return this;\n      }\n      iters = iters.map(function(iter ) {return SetIterable(iter)});\n      var originalSet = this;\n      return this.withMutations(function(set ) {\n        originalSet.forEach(function(value ) {\n          if (!iters.every(function(iter ) {return iter.includes(value)})) {\n            
set.remove(value);\n          }\n        });\n      });\n    };\n\n    Set.prototype.subtract = function() {var iters = SLICE$0.call(arguments, 0);\n      if (iters.length === 0) {\n        return this;\n      }\n      iters = iters.map(function(iter ) {return SetIterable(iter)});\n      var originalSet = this;\n      return this.withMutations(function(set ) {\n        originalSet.forEach(function(value ) {\n          if (iters.some(function(iter ) {return iter.includes(value)})) {\n            set.remove(value);\n          }\n        });\n      });\n    };\n\n    Set.prototype.merge = function() {\n      return this.union.apply(this, arguments);\n    };\n\n    Set.prototype.mergeWith = function(merger) {var iters = SLICE$0.call(arguments, 1);\n      return this.union.apply(this, iters);\n    };\n\n    Set.prototype.sort = function(comparator) {\n      // Late binding\n      return OrderedSet(sortFactory(this, comparator));\n    };\n\n    Set.prototype.sortBy = function(mapper, comparator) {\n      // Late binding\n      return OrderedSet(sortFactory(this, comparator, mapper));\n    };\n\n    Set.prototype.wasAltered = function() {\n      return this._map.wasAltered();\n    };\n\n    Set.prototype.__iterate = function(fn, reverse) {var this$0 = this;\n      return this._map.__iterate(function(_, k)  {return fn(k, k, this$0)}, reverse);\n    };\n\n    Set.prototype.__iterator = function(type, reverse) {\n      return this._map.map(function(_, k)  {return k}).__iterator(type, reverse);\n    };\n\n    Set.prototype.__ensureOwner = function(ownerID) {\n      if (ownerID === this.__ownerID) {\n        return this;\n      }\n      var newMap = this._map.__ensureOwner(ownerID);\n      if (!ownerID) {\n        this.__ownerID = ownerID;\n        this._map = newMap;\n        return this;\n      }\n      return this.__make(newMap, ownerID);\n    };\n\n\n  function isSet(maybeSet) {\n    return !!(maybeSet && maybeSet[IS_SET_SENTINEL]);\n  }\n\n  Set.isSet = isSet;\n\n  var 
IS_SET_SENTINEL = '@@__IMMUTABLE_SET__@@';\n\n  var SetPrototype = Set.prototype;\n  SetPrototype[IS_SET_SENTINEL] = true;\n  SetPrototype[DELETE] = SetPrototype.remove;\n  SetPrototype.mergeDeep = SetPrototype.merge;\n  SetPrototype.mergeDeepWith = SetPrototype.mergeWith;\n  SetPrototype.withMutations = MapPrototype.withMutations;\n  SetPrototype.asMutable = MapPrototype.asMutable;\n  SetPrototype.asImmutable = MapPrototype.asImmutable;\n\n  SetPrototype.__empty = emptySet;\n  SetPrototype.__make = makeSet;\n\n  function updateSet(set, newMap) {\n    if (set.__ownerID) {\n      set.size = newMap.size;\n      set._map = newMap;\n      return set;\n    }\n    return newMap === set._map ? set :\n      newMap.size === 0 ? set.__empty() :\n      set.__make(newMap);\n  }\n\n  function makeSet(map, ownerID) {\n    var set = Object.create(SetPrototype);\n    set.size = map ? map.size : 0;\n    set._map = map;\n    set.__ownerID = ownerID;\n    return set;\n  }\n\n  var EMPTY_SET;\n  function emptySet() {\n    return EMPTY_SET || (EMPTY_SET = makeSet(emptyMap()));\n  }\n\n  createClass(OrderedSet, Set);\n\n    // @pragma Construction\n\n    function OrderedSet(value) {\n      return value === null || value === undefined ? emptyOrderedSet() :\n        isOrderedSet(value) ? 
value :\n        emptyOrderedSet().withMutations(function(set ) {\n          var iter = SetIterable(value);\n          assertNotInfinite(iter.size);\n          iter.forEach(function(v ) {return set.add(v)});\n        });\n    }\n\n    OrderedSet.of = function(/*...values*/) {\n      return this(arguments);\n    };\n\n    OrderedSet.fromKeys = function(value) {\n      return this(KeyedIterable(value).keySeq());\n    };\n\n    OrderedSet.prototype.toString = function() {\n      return this.__toString('OrderedSet {', '}');\n    };\n\n\n  function isOrderedSet(maybeOrderedSet) {\n    return isSet(maybeOrderedSet) && isOrdered(maybeOrderedSet);\n  }\n\n  OrderedSet.isOrderedSet = isOrderedSet;\n\n  var OrderedSetPrototype = OrderedSet.prototype;\n  OrderedSetPrototype[IS_ORDERED_SENTINEL] = true;\n\n  OrderedSetPrototype.__empty = emptyOrderedSet;\n  OrderedSetPrototype.__make = makeOrderedSet;\n\n  function makeOrderedSet(map, ownerID) {\n    var set = Object.create(OrderedSetPrototype);\n    set.size = map ? map.size : 0;\n    set._map = map;\n    set.__ownerID = ownerID;\n    return set;\n  }\n\n  var EMPTY_ORDERED_SET;\n  function emptyOrderedSet() {\n    return EMPTY_ORDERED_SET || (EMPTY_ORDERED_SET = makeOrderedSet(emptyOrderedMap()));\n  }\n\n  createClass(Stack, IndexedCollection);\n\n    // @pragma Construction\n\n    function Stack(value) {\n      return value === null || value === undefined ? emptyStack() :\n        isStack(value) ? value :\n        emptyStack().unshiftAll(value);\n    }\n\n    Stack.of = function(/*...values*/) {\n      return this(arguments);\n    };\n\n    Stack.prototype.toString = function() {\n      return this.__toString('Stack [', ']');\n    };\n\n    // @pragma Access\n\n    Stack.prototype.get = function(index, notSetValue) {\n      var head = this._head;\n      index = wrapIndex(this, index);\n      while (head && index--) {\n        head = head.next;\n      }\n      return head ? 
head.value : notSetValue;\n    };\n\n    Stack.prototype.peek = function() {\n      return this._head && this._head.value;\n    };\n\n    // @pragma Modification\n\n    Stack.prototype.push = function(/*...values*/) {\n      if (arguments.length === 0) {\n        return this;\n      }\n      var newSize = this.size + arguments.length;\n      var head = this._head;\n      for (var ii = arguments.length - 1; ii >= 0; ii--) {\n        head = {\n          value: arguments[ii],\n          next: head\n        };\n      }\n      if (this.__ownerID) {\n        this.size = newSize;\n        this._head = head;\n        this.__hash = undefined;\n        this.__altered = true;\n        return this;\n      }\n      return makeStack(newSize, head);\n    };\n\n    Stack.prototype.pushAll = function(iter) {\n      iter = IndexedIterable(iter);\n      if (iter.size === 0) {\n        return this;\n      }\n      assertNotInfinite(iter.size);\n      var newSize = this.size;\n      var head = this._head;\n      iter.reverse().forEach(function(value ) {\n        newSize++;\n        head = {\n          value: value,\n          next: head\n        };\n      });\n      if (this.__ownerID) {\n        this.size = newSize;\n        this._head = head;\n        this.__hash = undefined;\n        this.__altered = true;\n        return this;\n      }\n      return makeStack(newSize, head);\n    };\n\n    Stack.prototype.pop = function() {\n      return this.slice(1);\n    };\n\n    Stack.prototype.unshift = function(/*...values*/) {\n      return this.push.apply(this, arguments);\n    };\n\n    Stack.prototype.unshiftAll = function(iter) {\n      return this.pushAll(iter);\n    };\n\n    Stack.prototype.shift = function() {\n      return this.pop.apply(this, arguments);\n    };\n\n    Stack.prototype.clear = function() {\n      if (this.size === 0) {\n        return this;\n      }\n      if (this.__ownerID) {\n        this.size = 0;\n        this._head = undefined;\n        this.__hash = 
undefined;\n        this.__altered = true;\n        return this;\n      }\n      return emptyStack();\n    };\n\n    Stack.prototype.slice = function(begin, end) {\n      if (wholeSlice(begin, end, this.size)) {\n        return this;\n      }\n      var resolvedBegin = resolveBegin(begin, this.size);\n      var resolvedEnd = resolveEnd(end, this.size);\n      if (resolvedEnd !== this.size) {\n        // super.slice(begin, end);\n        return IndexedCollection.prototype.slice.call(this, begin, end);\n      }\n      var newSize = this.size - resolvedBegin;\n      var head = this._head;\n      while (resolvedBegin--) {\n        head = head.next;\n      }\n      if (this.__ownerID) {\n        this.size = newSize;\n        this._head = head;\n        this.__hash = undefined;\n        this.__altered = true;\n        return this;\n      }\n      return makeStack(newSize, head);\n    };\n\n    // @pragma Mutability\n\n    Stack.prototype.__ensureOwner = function(ownerID) {\n      if (ownerID === this.__ownerID) {\n        return this;\n      }\n      if (!ownerID) {\n        this.__ownerID = ownerID;\n        this.__altered = false;\n        return this;\n      }\n      return makeStack(this.size, this._head, ownerID, this.__hash);\n    };\n\n    // @pragma Iteration\n\n    Stack.prototype.__iterate = function(fn, reverse) {\n      if (reverse) {\n        return this.reverse().__iterate(fn);\n      }\n      var iterations = 0;\n      var node = this._head;\n      while (node) {\n        if (fn(node.value, iterations++, this) === false) {\n          break;\n        }\n        node = node.next;\n      }\n      return iterations;\n    };\n\n    Stack.prototype.__iterator = function(type, reverse) {\n      if (reverse) {\n        return this.reverse().__iterator(type);\n      }\n      var iterations = 0;\n      var node = this._head;\n      return new Iterator(function()  {\n        if (node) {\n          var value = node.value;\n          node = node.next;\n          return 
iteratorValue(type, iterations++, value);\n        }\n        return iteratorDone();\n      });\n    };\n\n\n  function isStack(maybeStack) {\n    return !!(maybeStack && maybeStack[IS_STACK_SENTINEL]);\n  }\n\n  Stack.isStack = isStack;\n\n  var IS_STACK_SENTINEL = '@@__IMMUTABLE_STACK__@@';\n\n  var StackPrototype = Stack.prototype;\n  StackPrototype[IS_STACK_SENTINEL] = true;\n  StackPrototype.withMutations = MapPrototype.withMutations;\n  StackPrototype.asMutable = MapPrototype.asMutable;\n  StackPrototype.asImmutable = MapPrototype.asImmutable;\n  StackPrototype.wasAltered = MapPrototype.wasAltered;\n\n\n  function makeStack(size, head, ownerID, hash) {\n    var map = Object.create(StackPrototype);\n    map.size = size;\n    map._head = head;\n    map.__ownerID = ownerID;\n    map.__hash = hash;\n    map.__altered = false;\n    return map;\n  }\n\n  var EMPTY_STACK;\n  function emptyStack() {\n    return EMPTY_STACK || (EMPTY_STACK = makeStack(0));\n  }\n\n  /**\n   * Contributes additional methods to a constructor\n   */\n  function mixin(ctor, methods) {\n    var keyCopier = function(key ) { ctor.prototype[key] = methods[key]; };\n    Object.keys(methods).forEach(keyCopier);\n    Object.getOwnPropertySymbols &&\n      Object.getOwnPropertySymbols(methods).forEach(keyCopier);\n    return ctor;\n  }\n\n  Iterable.Iterator = Iterator;\n\n  mixin(Iterable, {\n\n    // ### Conversion to other types\n\n    toArray: function() {\n      assertNotInfinite(this.size);\n      var array = new Array(this.size || 0);\n      this.valueSeq().__iterate(function(v, i)  { array[i] = v; });\n      return array;\n    },\n\n    toIndexedSeq: function() {\n      return new ToIndexedSequence(this);\n    },\n\n    toJS: function() {\n      return this.toSeq().map(\n        function(value ) {return value && typeof value.toJS === 'function' ? 
value.toJS() : value}\n      ).__toJS();\n    },\n\n    toJSON: function() {\n      return this.toSeq().map(\n        function(value ) {return value && typeof value.toJSON === 'function' ? value.toJSON() : value}\n      ).__toJS();\n    },\n\n    toKeyedSeq: function() {\n      return new ToKeyedSequence(this, true);\n    },\n\n    toMap: function() {\n      // Use Late Binding here to solve the circular dependency.\n      return Map(this.toKeyedSeq());\n    },\n\n    toObject: function() {\n      assertNotInfinite(this.size);\n      var object = {};\n      this.__iterate(function(v, k)  { object[k] = v; });\n      return object;\n    },\n\n    toOrderedMap: function() {\n      // Use Late Binding here to solve the circular dependency.\n      return OrderedMap(this.toKeyedSeq());\n    },\n\n    toOrderedSet: function() {\n      // Use Late Binding here to solve the circular dependency.\n      return OrderedSet(isKeyed(this) ? this.valueSeq() : this);\n    },\n\n    toSet: function() {\n      // Use Late Binding here to solve the circular dependency.\n      return Set(isKeyed(this) ? this.valueSeq() : this);\n    },\n\n    toSetSeq: function() {\n      return new ToSetSequence(this);\n    },\n\n    toSeq: function() {\n      return isIndexed(this) ? this.toIndexedSeq() :\n        isKeyed(this) ? this.toKeyedSeq() :\n        this.toSetSeq();\n    },\n\n    toStack: function() {\n      // Use Late Binding here to solve the circular dependency.\n      return Stack(isKeyed(this) ? this.valueSeq() : this);\n    },\n\n    toList: function() {\n      // Use Late Binding here to solve the circular dependency.\n      return List(isKeyed(this) ? 
this.valueSeq() : this);\n    },\n\n\n    // ### Common JavaScript methods and properties\n\n    toString: function() {\n      return '[Iterable]';\n    },\n\n    __toString: function(head, tail) {\n      if (this.size === 0) {\n        return head + tail;\n      }\n      return head + ' ' + this.toSeq().map(this.__toStringMapper).join(', ') + ' ' + tail;\n    },\n\n\n    // ### ES6 Collection methods (ES6 Array and Map)\n\n    concat: function() {var values = SLICE$0.call(arguments, 0);\n      return reify(this, concatFactory(this, values));\n    },\n\n    includes: function(searchValue) {\n      return this.some(function(value ) {return is(value, searchValue)});\n    },\n\n    entries: function() {\n      return this.__iterator(ITERATE_ENTRIES);\n    },\n\n    every: function(predicate, context) {\n      assertNotInfinite(this.size);\n      var returnValue = true;\n      this.__iterate(function(v, k, c)  {\n        if (!predicate.call(context, v, k, c)) {\n          returnValue = false;\n          return false;\n        }\n      });\n      return returnValue;\n    },\n\n    filter: function(predicate, context) {\n      return reify(this, filterFactory(this, predicate, context, true));\n    },\n\n    find: function(predicate, context, notSetValue) {\n      var entry = this.findEntry(predicate, context);\n      return entry ? entry[1] : notSetValue;\n    },\n\n    forEach: function(sideEffect, context) {\n      assertNotInfinite(this.size);\n      return this.__iterate(context ? sideEffect.bind(context) : sideEffect);\n    },\n\n    join: function(separator) {\n      assertNotInfinite(this.size);\n      separator = separator !== undefined ? '' + separator : ',';\n      var joined = '';\n      var isFirst = true;\n      this.__iterate(function(v ) {\n        isFirst ? (isFirst = false) : (joined += separator);\n        joined += v !== null && v !== undefined ? 
v.toString() : '';\n      });\n      return joined;\n    },\n\n    keys: function() {\n      return this.__iterator(ITERATE_KEYS);\n    },\n\n    map: function(mapper, context) {\n      return reify(this, mapFactory(this, mapper, context));\n    },\n\n    reduce: function(reducer, initialReduction, context) {\n      assertNotInfinite(this.size);\n      var reduction;\n      var useFirst;\n      if (arguments.length < 2) {\n        useFirst = true;\n      } else {\n        reduction = initialReduction;\n      }\n      this.__iterate(function(v, k, c)  {\n        if (useFirst) {\n          useFirst = false;\n          reduction = v;\n        } else {\n          reduction = reducer.call(context, reduction, v, k, c);\n        }\n      });\n      return reduction;\n    },\n\n    reduceRight: function(reducer, initialReduction, context) {\n      var reversed = this.toKeyedSeq().reverse();\n      return reversed.reduce.apply(reversed, arguments);\n    },\n\n    reverse: function() {\n      return reify(this, reverseFactory(this, true));\n    },\n\n    slice: function(begin, end) {\n      return reify(this, sliceFactory(this, begin, end, true));\n    },\n\n    some: function(predicate, context) {\n      return !this.every(not(predicate), context);\n    },\n\n    sort: function(comparator) {\n      return reify(this, sortFactory(this, comparator));\n    },\n\n    values: function() {\n      return this.__iterator(ITERATE_VALUES);\n    },\n\n\n    // ### More sequential methods\n\n    butLast: function() {\n      return this.slice(0, -1);\n    },\n\n    isEmpty: function() {\n      return this.size !== undefined ? this.size === 0 : !this.some(function()  {return true});\n    },\n\n    count: function(predicate, context) {\n      return ensureSize(\n        predicate ? 
this.toSeq().filter(predicate, context) : this\n      );\n    },\n\n    countBy: function(grouper, context) {\n      return countByFactory(this, grouper, context);\n    },\n\n    equals: function(other) {\n      return deepEqual(this, other);\n    },\n\n    entrySeq: function() {\n      var iterable = this;\n      if (iterable._cache) {\n        // We cache as an entries array, so we can just return the cache!\n        return new ArraySeq(iterable._cache);\n      }\n      var entriesSequence = iterable.toSeq().map(entryMapper).toIndexedSeq();\n      entriesSequence.fromEntrySeq = function()  {return iterable.toSeq()};\n      return entriesSequence;\n    },\n\n    filterNot: function(predicate, context) {\n      return this.filter(not(predicate), context);\n    },\n\n    findEntry: function(predicate, context, notSetValue) {\n      var found = notSetValue;\n      this.__iterate(function(v, k, c)  {\n        if (predicate.call(context, v, k, c)) {\n          found = [k, v];\n          return false;\n        }\n      });\n      return found;\n    },\n\n    findKey: function(predicate, context) {\n      var entry = this.findEntry(predicate, context);\n      return entry && entry[0];\n    },\n\n    findLast: function(predicate, context, notSetValue) {\n      return this.toKeyedSeq().reverse().find(predicate, context, notSetValue);\n    },\n\n    findLastEntry: function(predicate, context, notSetValue) {\n      return this.toKeyedSeq().reverse().findEntry(predicate, context, notSetValue);\n    },\n\n    findLastKey: function(predicate, context) {\n      return this.toKeyedSeq().reverse().findKey(predicate, context);\n    },\n\n    first: function() {\n      return this.find(returnTrue);\n    },\n\n    flatMap: function(mapper, context) {\n      return reify(this, flatMapFactory(this, mapper, context));\n    },\n\n    flatten: function(depth) {\n      return reify(this, flattenFactory(this, depth, true));\n    },\n\n    fromEntrySeq: function() {\n      return new 
FromEntriesSequence(this);\n    },\n\n    get: function(searchKey, notSetValue) {\n      return this.find(function(_, key)  {return is(key, searchKey)}, undefined, notSetValue);\n    },\n\n    getIn: function(searchKeyPath, notSetValue) {\n      var nested = this;\n      // Note: in an ES6 environment, we would prefer:\n      // for (var key of searchKeyPath) {\n      var iter = forceIterator(searchKeyPath);\n      var step;\n      while (!(step = iter.next()).done) {\n        var key = step.value;\n        nested = nested && nested.get ? nested.get(key, NOT_SET) : NOT_SET;\n        if (nested === NOT_SET) {\n          return notSetValue;\n        }\n      }\n      return nested;\n    },\n\n    groupBy: function(grouper, context) {\n      return groupByFactory(this, grouper, context);\n    },\n\n    has: function(searchKey) {\n      return this.get(searchKey, NOT_SET) !== NOT_SET;\n    },\n\n    hasIn: function(searchKeyPath) {\n      return this.getIn(searchKeyPath, NOT_SET) !== NOT_SET;\n    },\n\n    isSubset: function(iter) {\n      iter = typeof iter.includes === 'function' ? iter : Iterable(iter);\n      return this.every(function(value ) {return iter.includes(value)});\n    },\n\n    isSuperset: function(iter) {\n      iter = typeof iter.isSubset === 'function' ? 
iter : Iterable(iter);\n      return iter.isSubset(this);\n    },\n\n    keyOf: function(searchValue) {\n      return this.findKey(function(value ) {return is(value, searchValue)});\n    },\n\n    keySeq: function() {\n      return this.toSeq().map(keyMapper).toIndexedSeq();\n    },\n\n    last: function() {\n      return this.toSeq().reverse().first();\n    },\n\n    lastKeyOf: function(searchValue) {\n      return this.toKeyedSeq().reverse().keyOf(searchValue);\n    },\n\n    max: function(comparator) {\n      return maxFactory(this, comparator);\n    },\n\n    maxBy: function(mapper, comparator) {\n      return maxFactory(this, comparator, mapper);\n    },\n\n    min: function(comparator) {\n      return maxFactory(this, comparator ? neg(comparator) : defaultNegComparator);\n    },\n\n    minBy: function(mapper, comparator) {\n      return maxFactory(this, comparator ? neg(comparator) : defaultNegComparator, mapper);\n    },\n\n    rest: function() {\n      return this.slice(1);\n    },\n\n    skip: function(amount) {\n      return this.slice(Math.max(0, amount));\n    },\n\n    skipLast: function(amount) {\n      return reify(this, this.toSeq().reverse().skip(amount).reverse());\n    },\n\n    skipWhile: function(predicate, context) {\n      return reify(this, skipWhileFactory(this, predicate, context, true));\n    },\n\n    skipUntil: function(predicate, context) {\n      return this.skipWhile(not(predicate), context);\n    },\n\n    sortBy: function(mapper, comparator) {\n      return reify(this, sortFactory(this, comparator, mapper));\n    },\n\n    take: function(amount) {\n      return this.slice(0, Math.max(0, amount));\n    },\n\n    takeLast: function(amount) {\n      return reify(this, this.toSeq().reverse().take(amount).reverse());\n    },\n\n    takeWhile: function(predicate, context) {\n      return reify(this, takeWhileFactory(this, predicate, context));\n    },\n\n    takeUntil: function(predicate, context) {\n      return 
this.takeWhile(not(predicate), context);\n    },\n\n    valueSeq: function() {\n      return this.toIndexedSeq();\n    },\n\n\n    // ### Hashable Object\n\n    hashCode: function() {\n      return this.__hash || (this.__hash = hashIterable(this));\n    }\n\n\n    // ### Internal\n\n    // abstract __iterate(fn, reverse)\n\n    // abstract __iterator(type, reverse)\n  });\n\n  // var IS_ITERABLE_SENTINEL = '@@__IMMUTABLE_ITERABLE__@@';\n  // var IS_KEYED_SENTINEL = '@@__IMMUTABLE_KEYED__@@';\n  // var IS_INDEXED_SENTINEL = '@@__IMMUTABLE_INDEXED__@@';\n  // var IS_ORDERED_SENTINEL = '@@__IMMUTABLE_ORDERED__@@';\n\n  var IterablePrototype = Iterable.prototype;\n  IterablePrototype[IS_ITERABLE_SENTINEL] = true;\n  IterablePrototype[ITERATOR_SYMBOL] = IterablePrototype.values;\n  IterablePrototype.__toJS = IterablePrototype.toArray;\n  IterablePrototype.__toStringMapper = quoteString;\n  IterablePrototype.inspect =\n  IterablePrototype.toSource = function() { return this.toString(); };\n  IterablePrototype.chain = IterablePrototype.flatMap;\n  IterablePrototype.contains = IterablePrototype.includes;\n\n  mixin(KeyedIterable, {\n\n    // ### More sequential methods\n\n    flip: function() {\n      return reify(this, flipFactory(this));\n    },\n\n    mapEntries: function(mapper, context) {var this$0 = this;\n      var iterations = 0;\n      return reify(this,\n        this.toSeq().map(\n          function(v, k)  {return mapper.call(context, [k, v], iterations++, this$0)}\n        ).fromEntrySeq()\n      );\n    },\n\n    mapKeys: function(mapper, context) {var this$0 = this;\n      return reify(this,\n        this.toSeq().flip().map(\n          function(k, v)  {return mapper.call(context, k, v, this$0)}\n        ).flip()\n      );\n    }\n\n  });\n\n  var KeyedIterablePrototype = KeyedIterable.prototype;\n  KeyedIterablePrototype[IS_KEYED_SENTINEL] = true;\n  KeyedIterablePrototype[ITERATOR_SYMBOL] = IterablePrototype.entries;\n  KeyedIterablePrototype.__toJS = 
IterablePrototype.toObject;\n  KeyedIterablePrototype.__toStringMapper = function(v, k)  {return JSON.stringify(k) + ': ' + quoteString(v)};\n\n\n\n  mixin(IndexedIterable, {\n\n    // ### Conversion to other types\n\n    toKeyedSeq: function() {\n      return new ToKeyedSequence(this, false);\n    },\n\n\n    // ### ES6 Collection methods (ES6 Array and Map)\n\n    filter: function(predicate, context) {\n      return reify(this, filterFactory(this, predicate, context, false));\n    },\n\n    findIndex: function(predicate, context) {\n      var entry = this.findEntry(predicate, context);\n      return entry ? entry[0] : -1;\n    },\n\n    indexOf: function(searchValue) {\n      var key = this.keyOf(searchValue);\n      return key === undefined ? -1 : key;\n    },\n\n    lastIndexOf: function(searchValue) {\n      var key = this.lastKeyOf(searchValue);\n      return key === undefined ? -1 : key;\n    },\n\n    reverse: function() {\n      return reify(this, reverseFactory(this, false));\n    },\n\n    slice: function(begin, end) {\n      return reify(this, sliceFactory(this, begin, end, false));\n    },\n\n    splice: function(index, removeNum /*, ...values*/) {\n      var numArgs = arguments.length;\n      removeNum = Math.max(removeNum | 0, 0);\n      if (numArgs === 0 || (numArgs === 2 && !removeNum)) {\n        return this;\n      }\n      // If index is negative, it should resolve relative to the size of the\n      // collection. However size may be expensive to compute if not cached, so\n      // only call count() if the number is in fact negative.\n      index = resolveBegin(index, index < 0 ? 
this.count() : this.size);\n      var spliced = this.slice(0, index);\n      return reify(\n        this,\n        numArgs === 1 ?\n          spliced :\n          spliced.concat(arrCopy(arguments, 2), this.slice(index + removeNum))\n      );\n    },\n\n\n    // ### More collection methods\n\n    findLastIndex: function(predicate, context) {\n      var entry = this.findLastEntry(predicate, context);\n      return entry ? entry[0] : -1;\n    },\n\n    first: function() {\n      return this.get(0);\n    },\n\n    flatten: function(depth) {\n      return reify(this, flattenFactory(this, depth, false));\n    },\n\n    get: function(index, notSetValue) {\n      index = wrapIndex(this, index);\n      return (index < 0 || (this.size === Infinity ||\n          (this.size !== undefined && index > this.size))) ?\n        notSetValue :\n        this.find(function(_, key)  {return key === index}, undefined, notSetValue);\n    },\n\n    has: function(index) {\n      index = wrapIndex(this, index);\n      return index >= 0 && (this.size !== undefined ?\n        this.size === Infinity || index < this.size :\n        this.indexOf(index) !== -1\n      );\n    },\n\n    interpose: function(separator) {\n      return reify(this, interposeFactory(this, separator));\n    },\n\n    interleave: function(/*...iterables*/) {\n      var iterables = [this].concat(arrCopy(arguments));\n      var zipped = zipWithFactory(this.toSeq(), IndexedSeq.of, iterables);\n      var interleaved = zipped.flatten(true);\n      if (zipped.size) {\n        interleaved.size = zipped.size * iterables.length;\n      }\n      return reify(this, interleaved);\n    },\n\n    keySeq: function() {\n      return Range(0, this.size);\n    },\n\n    last: function() {\n      return this.get(-1);\n    },\n\n    skipWhile: function(predicate, context) {\n      return reify(this, skipWhileFactory(this, predicate, context, false));\n    },\n\n    zip: function(/*, ...iterables */) {\n      var iterables = 
[this].concat(arrCopy(arguments));\n      return reify(this, zipWithFactory(this, defaultZipper, iterables));\n    },\n\n    zipWith: function(zipper/*, ...iterables */) {\n      var iterables = arrCopy(arguments);\n      iterables[0] = this;\n      return reify(this, zipWithFactory(this, zipper, iterables));\n    }\n\n  });\n\n  IndexedIterable.prototype[IS_INDEXED_SENTINEL] = true;\n  IndexedIterable.prototype[IS_ORDERED_SENTINEL] = true;\n\n\n\n  mixin(SetIterable, {\n\n    // ### ES6 Collection methods (ES6 Array and Map)\n\n    get: function(value, notSetValue) {\n      return this.has(value) ? value : notSetValue;\n    },\n\n    includes: function(value) {\n      return this.has(value);\n    },\n\n\n    // ### More sequential methods\n\n    keySeq: function() {\n      return this.valueSeq();\n    }\n\n  });\n\n  SetIterable.prototype.has = IterablePrototype.includes;\n  SetIterable.prototype.contains = SetIterable.prototype.includes;\n\n\n  // Mixin subclasses\n\n  mixin(KeyedSeq, KeyedIterable.prototype);\n  mixin(IndexedSeq, IndexedIterable.prototype);\n  mixin(SetSeq, SetIterable.prototype);\n\n  mixin(KeyedCollection, KeyedIterable.prototype);\n  mixin(IndexedCollection, IndexedIterable.prototype);\n  mixin(SetCollection, SetIterable.prototype);\n\n\n  // #pragma Helper functions\n\n  function keyMapper(v, k) {\n    return k;\n  }\n\n  function entryMapper(v, k) {\n    return [k, v];\n  }\n\n  function not(predicate) {\n    return function() {\n      return !predicate.apply(this, arguments);\n    }\n  }\n\n  function neg(predicate) {\n    return function() {\n      return -predicate.apply(this, arguments);\n    }\n  }\n\n  function quoteString(value) {\n    return typeof value === 'string' ? JSON.stringify(value) : String(value);\n  }\n\n  function defaultZipper() {\n    return arrCopy(arguments);\n  }\n\n  function defaultNegComparator(a, b) {\n    return a < b ? 1 : a > b ? 
-1 : 0;\n  }\n\n  function hashIterable(iterable) {\n    if (iterable.size === Infinity) {\n      return 0;\n    }\n    var ordered = isOrdered(iterable);\n    var keyed = isKeyed(iterable);\n    var h = ordered ? 1 : 0;\n    var size = iterable.__iterate(\n      keyed ?\n        ordered ?\n          function(v, k)  { h = 31 * h + hashMerge(hash(v), hash(k)) | 0; } :\n          function(v, k)  { h = h + hashMerge(hash(v), hash(k)) | 0; } :\n        ordered ?\n          function(v ) { h = 31 * h + hash(v) | 0; } :\n          function(v ) { h = h + hash(v) | 0; }\n    );\n    return murmurHashOfSize(size, h);\n  }\n\n  function murmurHashOfSize(size, h) {\n    h = imul(h, 0xCC9E2D51);\n    h = imul(h << 15 | h >>> -15, 0x1B873593);\n    h = imul(h << 13 | h >>> -13, 5);\n    h = (h + 0xE6546B64 | 0) ^ size;\n    h = imul(h ^ h >>> 16, 0x85EBCA6B);\n    h = imul(h ^ h >>> 13, 0xC2B2AE35);\n    h = smi(h ^ h >>> 16);\n    return h;\n  }\n\n  function hashMerge(a, b) {\n    return a ^ b + 0x9E3779B9 + (a << 6) + (a >> 2) | 0; // int\n  }\n\n  var Immutable = {\n\n    Iterable: Iterable,\n\n    Seq: Seq,\n    Collection: Collection,\n    Map: Map,\n    OrderedMap: OrderedMap,\n    List: List,\n    Stack: Stack,\n    Set: Set,\n    OrderedSet: OrderedSet,\n\n    Record: Record,\n    Range: Range,\n    Repeat: Repeat,\n\n    is: is,\n    fromJS: fromJS\n\n  };\n\n  return 
Immutable;\n\n}));"],"mappings":"AAAA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,WACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AA
CA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA","sourceRoot":""}\n//# sourceURL=webpack-internal:///./node_modules/immutable/dist/immutable.js\n");
/***/ })
}]);
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
# credit goes to @snapdragon and @devpatel_73 for making it work on this userbot.
#
# Original author of the UniBorg module 'lydia' @Zero_cool7870 (Jaskaran)
#
"""
Userbot module to use an AI To respond to people
"""
import asyncio
from coffeehouse.lydia import LydiaAI
from coffeehouse.api import API
from userbot import LYDIA_API_KEY
from userbot import CMD_HELP
from userbot.events import register
# Non-SQL Mode
# In-memory session stores, keyed by "<chat_id> <user_id>"; contents are
# lost when the bot restarts (no database persistence).
ACC_LYDIA = {}
SESSION_ID = {}
# Only create the Lydia client when an API key is configured; the handlers
# below assume `lydiaAI` exists, so they are effectively no-ops otherwise.
if LYDIA_API_KEY:
    lydiaAI = LydiaAI(API(LYDIA_API_KEY))
@register(outgoing=True, pattern="^.repcf$")
async def repcf(event):
    """Reply once with a Lydia AI response to the replied-to message.

    Edits the command message in place with the AI answer, or with the
    error text if the Lydia call fails.
    """
    if event.fwd_from:
        return
    await event.edit("Processing...")
    try:
        reply = await event.get_reply_message()
        # Guard: without a replied-to text message the original code crashed
        # on `reply.text` with an opaque AttributeError.
        if reply is None or not reply.text:
            await event.edit("Reply to a text message to get a Lydia response.")
            return
        session = lydiaAI.create_session()
        text_rep = session.think_thought(reply.text)
        await event.edit("**Lydia says**: {0}".format(text_rep))
    except Exception as e:
        # Surface API/network errors to the user instead of failing silently.
        await event.edit(str(e))
@register(outgoing=True, pattern="^.addcf$")
async def addcf(event):
    """Enable Lydia auto-replies for the replied-to user in this chat."""
    if event.fwd_from:
        return
    await event.edit("Running on SQL mode for now...")
    await asyncio.sleep(4)
    await event.edit("Processing...")
    target = await event.get_reply_message()
    # Early return when the command is not a reply to anyone.
    if not target:
        await event.edit("Reply to a user to activate Lydia AI on them")
        return
    # Sessions are keyed per (chat, user) pair so the same user can have
    # independent conversations in different chats.
    session_key = str(event.chat_id) + " " + str(target.from_id)
    ACC_LYDIA[session_key] = lydiaAI.create_session()
    await event.edit("Lydia successfully enabled for user: {} in chat: {}"
                     .format(str(target.from_id), str(event.chat_id)))
@register(outgoing=True, pattern="^.remcf$")
async def remcf(event):
    """Disable Lydia auto-replies for the replied-to user in this chat."""
    if event.fwd_from:
        return
    await event.edit("Running on SQL mode for now...")
    await asyncio.sleep(4)
    await event.edit("Processing...")
    reply_msg = await event.get_reply_message()
    # Guard: the original code dereferenced `reply_msg.from_id` before the
    # try/except, so using .remcf without a reply raised an uncaught
    # AttributeError instead of a helpful message.
    if reply_msg is None:
        await event.edit("Reply to a user to deactivate Lydia AI on them.")
        return
    try:
        del ACC_LYDIA[str(event.chat_id) + " " + str(reply_msg.from_id)]
        await event.edit("Lydia successfully disabled for user: {} in chat: {}"
                         .format(str(reply_msg.from_id), str(event.chat_id)))
    except KeyError:
        await event.edit("This person does not have Lydia activated on him/her.")
@register(incoming=True, disable_edited=True)
async def user(event):
    """Auto-reply with Lydia to users enabled via .addcf in this chat."""
    # Narrow try: only the dict lookup is expected to raise KeyError; the
    # original wrapped the whole handler, which could also mask KeyErrors
    # raised from within the Lydia client.
    try:
        session = ACC_LYDIA[str(event.chat_id) + " " + str(event.from_id)]
    except KeyError:
        # Lydia is not enabled for this (chat, user) pair.
        return
    msg = event.text
    async with event.client.action(event.chat_id, "typing"):
        text_rep = session.think_thought(msg)
        # Simulate typing: ~0.1s per character of the response. The original
        # accumulated 0.1 per iteration and slept once after the loop, which
        # is exactly 0.1 * len(text_rep).
        await asyncio.sleep(0.1 * len(text_rep))
        await event.reply(text_rep)
# Register user-facing help text for the lydia module commands.
# (Typos in the original help strings fixed: "add's" -> "Adds",
# "remove's" -> "Removes", "repling"/"perticular" -> "replying"/"particular".)
CMD_HELP.update({
    "lydia":
    ">`.addcf <username/reply>`"
    "\nUsage: Adds Lydia auto chat for the replied user in the chat."
    "\n\n>`.remcf <username/reply>`"
    "\nUsage: Removes Lydia auto chat for the replied user in the chat."
    "\n\n>`.repcf <username/reply>`"
    "\nUsage: Starts Lydia replying to a particular person in the chat."
})
|
"""Count pi0/rho0/eta particles in an input file and report the total
momentum parsed from a sampler printout.

Usage: script.py <input_file> <sampler_printout>
Output (stdout): "<momentum> <pi0_count> <rho0_count> <eta_count>"

Modernized to Python 3 (print function). Also fixes an infinite loop:
the original `while True: f.readline()` never checked for EOF, so a
printout without the marker line would hang forever.
"""
import sys

input_file = sys.argv[1]
sampler_printout = sys.argv[2]

# Extract the reported total momentum, e.g. from a line like
# "Obtained total momentum: (<value> ...)".
energy = None
with open(sampler_printout, 'r') as f:
    for line in f:
        if 'Obtained total momentum:' in line:
            energy = float(line.split('(')[1].split()[0])
            break
if energy is None:
    sys.exit("'Obtained total momentum:' not found in " + sampler_printout)

# Count occurrences of selected PDG particle ids, skipping comment lines.
pi0 = 0
rho0 = 0
eta = 0
with open(input_file, 'r') as f:
    for line in f:
        if '#' in line:
            continue
        if ' 111 ' in line:
            pi0 += 1
        if ' 113 ' in line:
            rho0 += 1
        if ' 221 ' in line:
            eta += 1

# Same space-separated output format as the original Python 2 print.
print(energy, pi0, rho0, eta)
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common layers."""
import copy
import math
import numbers
from typing import Optional, Tuple, Union
import lingvo.compat as tf
from lingvo.core import activations
from lingvo.core import base_layer
from lingvo.core import bn_layers
from lingvo.core import builder_layers
from lingvo.core import computation_cost
from lingvo.core import conv_layers_with_time_padding
from lingvo.core import gshard_utils
from lingvo.core import pruning_utils
from lingvo.core import py_utils
from lingvo.core import quant_utils
from lingvo.core import recurrent
from lingvo.core import schedule
from lingvo.core import summary_utils
from lingvo.core import symbolic
from lingvo.core import tshape
import numpy as np
import sympy
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.ops import inplace_ops
# pylint:enable=g-direct-tensorflow-import
class DeconvLayer(base_layer.BaseLayer):
  """Deconv (transposed conv2d) layer.

  DeconvLayer is different from ConvTransposeLayer in that
  DeconvLayer does not support padding and biasing. Hence,
  it's simpler and more basic than ConvTransposeLayer.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'filter_shape', (0, 0, 0, 0),
        'Filter shape. Must be a sequence of length 4. Elements are in'
        ' the order of height, width, out_channel, in_channel.')
    p.Define(
        'filter_stride', (0, 0),
        'Filter stride to use. Must be a pair of ints. The first int'
        ' specifies the stride on the height dimension. The second int'
        ' specifies the stride on the width dimension.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    # Validate the static configuration up front so misconfigured layers
    # fail at construction time rather than at first FProp.
    assert len(p.filter_shape) == 4
    assert len(p.filter_stride) == 2
    assert all(x > 0 for x in p.filter_shape)
    assert all(x > 0 for x in p.filter_stride)

  def _CreateLayerVariables(self):
    super()._CreateLayerVariables()
    p = self.params
    # NOTE: filter_shape is [height, width, out_channel, in_channel] —
    # the layout expected by tf.nn.conv2d_transpose (not forward conv2d).
    w_pc = py_utils.WeightParams(
        shape=p.filter_shape,
        init=p.params_init,
        dtype=p.dtype,
        collections=[self.__class__.__name__ + '_vars'])
    self.CreateVariable('w', w_pc)

  def OutShape(self, in_shape):
    """Compute the output shape given the input shape."""
    p = self.params
    t_stride = p.filter_stride[0]
    f_stride = p.filter_stride[1]
    # Transposed convolution upsamples each spatial dimension by its stride.
    return tf.stack([
        in_shape[0], in_shape[1] * t_stride, in_shape[2] * f_stride,
        p.filter_shape[2]
    ])

  def _ApplyConv(self, theta, inputs):
    # Runs the transposed convolution in float32, casting back afterwards.
    p = self.params
    w = theta.w
    strides = [1, p.filter_stride[0], p.filter_stride[1], 1]
    # TODO(miachen): remove casting once tf.nn.conv2d supports tf.float64.
    assert inputs.dtype == w.dtype
    dtype = inputs.dtype
    if dtype != tf.float32:
      inputs = tf.cast(inputs, tf.float32)
      w = tf.cast(w, tf.float32)
    # TODO(zhifengc): Try some better way to do Deconv. Search for
    # "resize-convolution".
    out = tf.nn.conv2d_transpose(
        inputs,
        w,
        output_shape=self.OutShape(tf.shape(inputs)),
        strides=strides,
        padding='SAME')
    if dtype != tf.float32:
      out = tf.cast(out, dtype)
    return py_utils.HasShape(out, [-1, -1, -1, p.filter_shape[2]])

  def FProp(self, theta, inputs):
    """Apply deconvolution to inputs.

    Args:
      theta: A NestedMap object containing weights' values of this layer and its
        children layers.
      inputs: The inputs tensor. It is expected to be of shape [batch, height,
        width, channel].

    Returns:
      outputs. outputs is expected to have shape [batch, height * height_stride,
      width * width_stride, out_channel].
    """
    p = self.params
    inputs = py_utils.HasShape(inputs, [-1, -1, -1, p.filter_shape[3]])
    return self._ApplyConv(theta, inputs)
# A subset of activation functions are supported by TFLite as fused activation
# functions with a preceding matmul or conv. If this is the case, then they
# require special treatment for quantization.
_TFLITE_FUSED_ACTIVATION_NAMES = (
    'RELU',
    'RELU6',
)

# Clamp bound applied to log-scale values (symmetric: [-20, 20]) to keep
# exp(log_scale) numerically safe.
LOG_SCALE_CLAMP_BOUND = 20.0
class IdentityLayer(base_layer.BaseLayer):
  """Identity layer, adds name and propagates its input."""

  def FProp(self, theta, inputs, *args):
    """Identity mapping.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: The input tensor or the input NestedMap.
      *args: Arguments to be ignored.

    Returns:
      Tensor with the same shape and type of inputs.
    """
    with tf.name_scope(self.params.name):
      # Pass every leaf of the (possibly nested) input through tf.identity
      # so the op carries this layer's name scope.
      out = tf.nest.map_structure(tf.identity, inputs)
    return out

  @classmethod
  def FPropMeta(cls, p, inputs, *args):
    # Identity costs nothing and preserves the input shape.
    py_utils.CheckShapes((inputs,))
    return py_utils.NestedMap(flops=0, out_shapes=(inputs,))
# TODO(yonghui/jonathanasdf): Remove the forwarded links.
# Backwards-compatibility aliases; the implementations now live in
# conv_layers_with_time_padding and bn_layers respectively.
_ComputeConvOutputShape = conv_layers_with_time_padding.ComputeConvOutputShape
_ComputeConvOutputPadding = (
    conv_layers_with_time_padding.ComputeConvOutputPadding)
BatchNormLayer = bn_layers.BatchNormLayer
BatchNormLayerNoPadding = bn_layers.BatchNormLayerNoPadding
AddingAccumulator = bn_layers.AddingAccumulator
class BaseConv2DLayer(quant_utils.QuantizableLayer):
  """Base class for 2D convolution layers.

  Has support for optional batch-normalization, activation and sequence
  padding.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'filter_shape', (0, 0, 0, 0),
        'Filter shape. Must be a sequence of length 4. Elements are in'
        ' the order of height (time), width (frequency), in_channel,'
        ' out_channel. When causal_convolution is True, filter_shape[1]'
        ' is the actual number of trained weights in the time dimension'
        ' of the kernel.')
    p.Define(
        'filter_stride', (0, 0),
        'Filter stride to use. Must be a pair of ints. The first int'
        ' specifies the stride on the time dimension. The second int'
        ' specifies the stride on the frequency dimension.')
    p.Define(
        'dilation_rate', (1, 1),
        'If > 1, dilation rate for atrous convolution. '
        'Must be a pair of ints. '
        'The first int specifies the dilation rate on the time dimension. '
        'The second int specifies the dilation rate on the frequency '
        'dimension. '
        'If any value of dilation_rate is > 1, then all values of strides '
        'must be 1.')
    p.Define(
        'activation', 'RELU',
        'Activation function to use. Options are RELU, RELU6, SIGMOID, '
        'TANH, NONE.')
    p.Define('bias', False, 'Whether or not to apply a bias before activation.')
    p.Define('batch_norm', True, 'Whether or not to apply batch norm.')
    p.Define(
        'bn_decay', 0.999,
        'Decay in updating the mean and variance moving average used in'
        ' batch normalization.')
    p.Define(
        'bn_fold_weights', None,
        'Fold the batch norm parameters into the convolution weights at '
        'eval/inference time as per https://arxiv.org/pdf/1712.05877.pdf. '
        'Requires that batch_norm be True and is incompatible with some other '
        'parameters (conv_last=True).')
    p.Define(
        'causal_convolution', False,
        'If true, conv layer output only depends on time steps in'
        ' the past.')
    p.Define(
        'conv_last', False,
        'If true, apply the convolution transformation as the last step, '
        'i.e., first apply batch normalization on the input, followed '
        'by activation, and finally the convolution. '
        'Otherwise, apply convolution first, followed by batch '
        'normalization and activation. Not compatible with bn_fold_weights '
        'or quantization.')
    p.Define(
        'weight_norm', False,
        'If true, apply weight normalization to weights as proposed by'
        ' Salimans and Kingma, 2016: https://arxiv.org/abs/1602.07868')
    p.Define(
        'disable_activation_quantization', False,
        'Disables the quantization tracking/clamping for the output '
        'activation. This is most often used in conjunction with a concat '
        'layer which needs to have a merged set of statistics.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    # Validate static configuration early; dilation > 1 requires stride 1.
    assert len(p.filter_shape) == 4
    assert len(p.filter_stride) == 2
    assert len(p.dilation_rate) == 2
    assert all(x > 0 for x in p.filter_stride)
    assert all(x > 0 for x in p.dilation_rate)
    if any(x > 1 for x in p.dilation_rate):
      assert all(x == 1 for x in p.filter_stride)
    # Bias is not needed with batch_norm=True.
    if p.batch_norm:
      assert not p.bias
    assert (p.activation == 'NONE' or activations.IsSupported(p.activation))
    if p.batch_norm:
      # batch normalization dimension is number of input channels
      # (filter_shape[2]) if we apply batch_norm on input and convolution
      # in the end, number of output channels otherwise.
      bn_dim = p.filter_shape[2] if p.conv_last else self.output_channels
      bn_params = BatchNormLayer.Params().Set(
          dim=bn_dim, decay=p.bn_decay, name=p.name, params_init=p.params_init)
      self.CreateChild('bn', bn_params)
    if self._is_bn_folded:
      assert p.batch_norm, 'bn_fold_weights requires batch_norm = True'
      assert not p.conv_last, 'bn_fold_weights requires conv_last = False'
    # TODO(yonghui): implement the variational noise logic.

  def _CreateLayerVariables(self):
    super()._CreateLayerVariables()
    p = self.params
    w_pc = py_utils.WeightParams(
        shape=p.filter_shape,
        init=p.params_init,
        dtype=p.dtype,
        collections=[self.__class__.__name__ + '_vars'])
    self.CreateVariable('w', w_pc)
    if p.bias:
      self.CreateVariable(
          'b',
          py_utils.WeightParams(
              shape=[self.output_channels],
              init=py_utils.WeightInit.Constant(0.0),
              dtype=p.dtype,
              collections=[self.__class__.__name__ + '_vars']))
    if p.weight_norm:
      # 'g' is the per-output-channel gain used by weight normalization;
      # the effective scale is (g + 1), so a zero init means unit scale.
      self.CreateVariable(
          'g',
          py_utils.WeightParams(
              shape=self.filter_output_shape,
              init=py_utils.WeightInit.Constant(0.0),
              dtype=p.dtype,
              collections=[self.__class__.__name__ + '_vars']))
    if not p.disable_activation_quantization:
      self.TrackQTensor('activation')
      if (p.activation not in _TFLITE_FUSED_ACTIVATION_NAMES and
          p.activation != 'NONE'):
        self.TrackQTensor('pre_activation')

  def _child_variable_scope_override(self):
    return {**super()._child_variable_scope_override(), 'bn': []}

  @property
  def output_channels(self):
    """The number of output channels for this conv layer."""
    # Normal convolution filter shape is [..., out_channels].
    p = self.params
    return p.filter_shape[-1]

  @property
  def filter_output_shape(self):
    """Final dims of the filter corresponding to the output channels.

    Returns:
      A one (standard conv) or two (depthwise conv) element shape representing
      the final dimensions of the filter weights that are output channel
      specific for this layer. This shape is needed for any arithmetic that
      needs to convert between a linear list of filter weights and the
      arrangement in the actual filter.
    """
    # Standard convolution has all output channels in the last dim.
    p = self.params
    return [p.filter_shape[-1]]

  @property
  def _is_bn_folded(self):
    """Whether batchnorm folded weights are effectively enabled."""
    p = self.params
    if not p.batch_norm:
      return False
    # Folding is on when explicitly requested, or by default when a
    # quantization domain is configured.
    return (p.bn_fold_weights or
            (p.bn_fold_weights is None and p.qdomain.default is not None))

  def _EvaluateConvKernel(self, inputs, filter_w, strides, dilation_rate,
                          padding_algorithm, data_format):
    """Evaluates the lower level convolution kernel.

    Args:
      inputs: As to tf.nn.convolution.
      filter_w: As to tf.nn.depthwise_conv2d.
      strides: As to tf.nn.convolution.
      dilation_rate: As to tf.nn.convolution.
      padding_algorithm: As to tf.nn.convolution (padding argument).
      data_format: As to tf.nn.convolution.

    Returns:
      Convolution kernel output.
    """
    raise NotImplementedError()

  @classmethod
  def OutputShape(cls, params, in_shape):
    return _ComputeConvOutputShape(in_shape, params.filter_stride[0],
                                   params.filter_stride[1],
                                   params.filter_shape[-1])

  def OutShape(self, in_shape):
    """Compute the output shape given the input shape."""
    p = self.params
    return _ComputeConvOutputShape(in_shape, p.filter_stride[0],
                                   p.filter_stride[1], self.output_channels)

  def _GetWeights(self,
                  theta,
                  convolution_lambda,
                  folded_bn_padding,
                  cast_dtype=None):
    """Gets a dictionary of weights and biases for the convolution.

    This is necessary for some operating modes where the weights are fused
    with batch normalization differently for training vs eval.

    Args:
      theta: A `.NestedMap` object containing underlying weights values of this
        layer and its children layers.
      convolution_lambda: Lambda which takes the convolution weights and runs
        the convolution.
      folded_bn_padding: Padding to apply to folded batch normalization moment
        computation (or None for no padding).
      cast_dtype: If not None, cast weights to the given dtype.

    Returns:
      Tuple of (filter, biases).
    """
    p = self.params
    # Original weights.
    filter_w = theta.w
    filter_output_shape = self.filter_output_shape
    # TODO(miachen): remove casting once tf.nn.conv2d supports tf.float64.
    if cast_dtype:
      filter_w = tf.cast(filter_w, tf.float32)
    if p.weight_norm:
      if len(filter_output_shape) == 1:
        # Normalize along the last dim (standard conv).
        filter_w = tf.nn.l2_normalize(filter_w, [0, 1, 2]) * tf.reshape(
            (theta.g + 1.0), [1, 1, 1, p.filter_shape[-1]])
      elif len(filter_output_shape) == 2:
        # Normalize along the last two dimensions (depthwise conv).
        filter_w = tf.nn.l2_normalize(filter_w, [0, 1]) * tf.reshape(
            (theta.g + 1.0), [1, 1] + filter_output_shape)
      else:
        assert False, 'Unsupported weight norm filter shape'
    # Original bias.
    if p.bias:
      b = theta.b
    else:
      b = tf.zeros([symbolic.ToStatic(self.output_channels)],
                   dtype=filter_w.dtype)
    # Pass-through if weights are not folded with batch normalization.
    if not self._is_bn_folded:
      return filter_w, b
    # If batch norm is fused with weights, then compute the weights as from
    # figure C.8 of https://arxiv.org/pdf/1712.05877.pdf for training and
    # figure C.6 for eval.
    if self.do_eval:
      # Gets current moments without updating.
      mean, variance, beta, gamma = self.bn.GetCurrentMoments(theta.bn)
    else:
      # Updates moments based on a trial run of the convolution.
      raw_conv_output = convolution_lambda(filter_w)
      mean, variance, beta, gamma = self.bn.ComputeAndUpdateMoments(
          theta.bn, raw_conv_output, folded_bn_padding)
    # Fold weights and bias. Note that this layer's bias is not used (not
    # applicable for batch norm case).
    sigma_recip = tf.math.rsqrt(variance + self.bn.epsilon)
    scale_correction = gamma * sigma_recip
    # Normal conv will have all weights in the last dim
    # ([_, _, _, output_channels]), which matches the 1D layout from
    # batch norm. Depthwise uses the last two dims so reshape
    # ([_, _, in_c, c_multiplier]).
    scale_correction = tf.reshape(scale_correction, filter_output_shape)
    filter_w = filter_w * scale_correction
    b = (beta - (gamma * mean * sigma_recip))
    return filter_w, b

  def _ApplyConv(self, theta, inputs, folded_bn_padding=None):
    # Runs (possibly causal / dilated) convolution plus bias in float32.
    p = self.params
    strides = [p.filter_stride[0], p.filter_stride[1]]
    dtype = inputs.dtype
    cast_dtype = None
    if dtype != tf.float32:
      cast_dtype = tf.float32
      inputs = tf.cast(inputs, cast_dtype)
    padding_algorithm = 'SAME'
    if p.causal_convolution:
      # Causal convolution is only applied in time (height) dimension.
      # Use VALID padding and shift the inputs to the right to ensure that the
      # first output only depends on the first input and so on. The output is
      # the same size as the input, as if the convolution used SAME padding.
      padding_algorithm = 'VALID'
      # The effective spatial filter size for dilated convolutions is
      # (kernel - 1) * dilation_rate + 1 as according to
      # https://www.tensorflow.org/api_docs/python/tf/nn/convolution.
      causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0]
      # Apply padding in width dimension to mimic SAME padding.
      # Using the similar logic as above to produce the same number of output
      # as if SAME padding is used.
      width_pad_size = (p.filter_shape[1] - 1) * p.dilation_rate[1]
      # The amount of padding on the left is tricky. If stride > 1, total
      # padding required for SAME padding would be:
      #   pad = ceil(input_size / stride - 1) * stride + eff_kernel - input_size
      # where eff_kernel = (kernel - 1) * dilation_rate + 1
      # TensorFlow also pads more on the right / bottom side if total padding
      # required is an odd number, so pad_left = pad // 2
      # Therefore pad_left could depend on input size, which might be dynamic.
      # Here we only handle two special cases where 1) stride = 1, then
      #   pad_left = (eff_kernel - 1) // 2
      # and 2) kernel = 1, then
      #   pad_left = 0
      if p.filter_stride[1] > 1 and p.filter_shape[1] > 1:
        raise ValueError('Causal convolution only supports width stride = 1 '
                         'or filter width = 1.')
      width_pad_left = max(0, width_pad_size - 1) // 2
      width_pad_right = width_pad_size - width_pad_left
      inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0],
                               [width_pad_left, width_pad_right], [0, 0]])

    # Lambda for computing the actual convolution.
    def ComputeRawConvolution(filter_w):
      return self._EvaluateConvKernel(
          inputs,
          filter_w=filter_w,
          strides=strides,
          dilation_rate=p.dilation_rate,
          data_format='NHWC',
          padding_algorithm=padding_algorithm)

    filter_w, b = self._GetWeights(
        theta, ComputeRawConvolution, folded_bn_padding, cast_dtype=cast_dtype)
    # TODO(miachen): remove casting once tf.nn.conv2d supports tf.float64.
    assert inputs.dtype == filter_w.dtype
    filter_w = self.QWeight(filter_w)
    out = ComputeRawConvolution(filter_w)
    # Note that we always apply the bias (which may be zero) because some
    # normalization mechanisms do implicitly produce a bias.
    b = tf.cast(b, tf.float32)
    out = tf.nn.bias_add(out, b)
    if dtype != tf.float32:
      out = tf.cast(out, dtype)
    return out

  def FProp(self, theta, inputs, paddings=None):
    """Apply convolution to inputs.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: The inputs tensor. It is expected to be of shape [batch, time,
        frequency, channel]. The time dimension corresponds to the height
        dimension as in images and the frequency dimension corresponds to the
        width dimension as in images.
      paddings: The paddings tensor. If None, the inputs have no paddings in the
        sense of sequence training (e.g., in CNN models). Otherwise, it is
        expected to be of shape [batch, time].

    Returns:
      outputs, out_paddings pair.
    """
    p = self.params
    if paddings is None:
      inputs = py_utils.with_dependencies([
          py_utils.assert_shape_match(
              tf.shape(inputs), [-1, -1, -1, p.filter_shape[2]])
      ], inputs)
    else:
      inputs = py_utils.with_dependencies([
          py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]),
          py_utils.assert_shape_match(
              tf.shape(inputs),
              tf.concat([tf.shape(paddings), [-1, p.filter_shape[2]]], 0))
      ], inputs)
      # Zeroing out padded inputs.
      qpadding = self.QRAct(
          tf.expand_dims(tf.expand_dims(paddings, -1), -1),
          quant_utils.QDistribution.PADDING)
      # Select based padding is required for quantized inference but is
      # causing regressions on other platforms. TODO: Remove use_select
      # attribute when root-caused/resolved.
      inputs = py_utils.ApplyPadding(
          qpadding,
          inputs,
          use_select=p.is_inference and p.qdomain.default is not None)
    with tf.name_scope(p.name):
      input_shape = tf.shape(inputs)
      if paddings is None:
        conv_padding = None
      else:
        # NOTE: this may be slightly inaccurate when p.dilation_rate[0] > 1.
        # But there's likely no real problems. Trying to set it gives an error:
        # pooling with SAME padding is not implemented for dilation_rate > 1.
        # NOTE: window=p.filter_stride[0] means output i will be padded if any
        # input in the stride between the two conv centers are padded.
        conv_padding = _ComputeConvOutputPadding(
            paddings, window=p.filter_stride[0], stride=p.filter_stride[0])
      if p.conv_last:
        out = self._ComputeConvLast(theta, inputs, paddings, conv_padding)
      else:
        out = self._Compute(theta, inputs, paddings, conv_padding)
      # Lastly zeroing out padded states.
      if conv_padding is not None:
        qpadding = self.QRAct(
            tf.expand_dims(tf.expand_dims(conv_padding, -1), -1),
            quant_utils.QDistribution.PADDING)
        # Select based padding is required for quantized inference but is
        # causing regressions on other platforms. TODO: Remove use_select
        # attribute when root-caused/resolved.
        out = py_utils.ApplyPadding(
            qpadding,
            out,
            use_select=p.is_inference and p.qdomain.default is not None)
      out = py_utils.HasShape(
          out, symbolic.ToStatic(BaseConv2DLayer.OutShape(self, input_shape)))
      return out, conv_padding

  def _Compute(self, theta, inputs, paddings, conv_padding):
    """Computes the forward prop (conv, bn, act)."""
    p = self.params
    bn_padding = conv_padding
    if bn_padding is None:
      bn_padding_expanded = None
    else:
      batch_time = tf.shape(bn_padding)
      batch_time_any_any = tf.concat([batch_time, [-1, -1]], 0)
      bn_padding_expanded = tf.reshape(bn_padding,
                                       tf.concat([batch_time, [1, 1]], 0))
    out = self._ApplyConv(theta, inputs, bn_padding_expanded)
    if bn_padding is not None:
      out = py_utils.with_dependencies([
          py_utils.assert_shape_match(batch_time, [-1, -1]),
          py_utils.assert_shape_match(tf.shape(out), batch_time_any_any)
      ], out)
    # Only apply batch norm if it was not folded into the weights.
    if p.batch_norm and not p.bn_fold_weights:
      out = self.bn.FProp(theta.bn, out, bn_padding_expanded)
    # Apply activation.
    if p.activation != 'NONE':
      if p.activation not in _TFLITE_FUSED_ACTIVATION_NAMES:
        out = self.QTensor('pre_activation', out)
      out = activations.GetFn(p.activation)(out)
    if not p.disable_activation_quantization:
      out = self.QTensor('activation', out)
    return out

  def _ComputeConvLast(self, theta, inputs, paddings, conv_padding):
    """Computes the forward prop in conv_last mode (bn, act, conv)."""
    p = self.params
    out = inputs
    out_padding = paddings
    if p.batch_norm:
      if out_padding is None:
        out_padding_expanded = None
      else:
        batch_time = tf.shape(out_padding)
        batch_time_any_any = tf.concat([batch_time, [-1, -1]], 0)
        out = py_utils.with_dependencies([
            py_utils.assert_shape_match(batch_time, [-1, -1]),
            py_utils.assert_shape_match(tf.shape(out), batch_time_any_any)
        ], out)
        out_padding_expanded = tf.reshape(out_padding,
                                          tf.concat([batch_time, [1, 1]], 0))
      out = self.bn.FProp(theta.bn, out, out_padding_expanded)
    if p.activation != 'NONE':
      out = activations.GetFn(p.activation)(out)
    out = self._ApplyConv(theta, out)
    return out
class Conv2DLayer(BaseConv2DLayer):
  """2D convolution layer, with optional batch-normalization and activation."""

  def _EvaluateConvKernel(self, inputs, filter_w, strides, dilation_rate,
                          padding_algorithm, data_format):
    """Evaluates the underlying convolution via tf.nn.convolution."""
    params = self.params
    return tf.nn.convolution(
        inputs,
        filter_w,
        padding=padding_algorithm,
        strides=strides,
        dilations=params.dilation_rate,
        data_format='NHWC')
class ConvNN2DLayer(BaseConv2DLayer):
  """Convolution layer, based on tf.nn.conv2d instead of tf.nn.convolution.

  tf.nn.convolution is using a different implementation on atrous convolutions,
  by wrapping the actual convolution with space_to_batch and batch_to_space.
  This implementation is not supported in tflite conversion, hence we need
  a different layer for using atrous convolutions.
  """

  def _EvaluateConvKernel(self, inputs, filter_w, strides, dilation_rate,
                          padding_algorithm, data_format):
    """Evaluates the convolution directly via tf.nn.conv2d."""
    p = self.params
    # NOTE(review): padding is hard-coded to 'SAME' and the
    # `padding_algorithm`, `dilation_rate` and `data_format` arguments are
    # ignored (p.dilation_rate and 'NHWC' are used instead) — confirm this
    # is intentional for tflite-compatible atrous convolution.
    return tf.nn.conv2d(
        inputs,
        filter_w,
        strides=strides,
        dilations=p.dilation_rate,
        data_format='NHWC',
        padding='SAME')
# Alias of Conv2DLayer (for compatibility with historical uses that
# predate the Conv2DLayer name).
ConvLayer = Conv2DLayer
class DepthwiseConv2DLayer(BaseConv2DLayer):
  """Depthwise conv 2D layer.

  Each input channel is convolved with its own set of `channel_multipliers`
  filters, so the output has in_channel * channel_multipliers channels.

  paper: https://arxiv.org/abs/1610.02357
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    # Redefine 'filter_shape' since the semantic of shape elements is different
    # from regular Conv2D.
    p.Delete('filter_shape')
    p.Define(
        'filter_shape', (0, 0, 0, 0),
        'Filter shape. Must be a sequence of length 4. Elements are in'
        ' the order of height (time), width (frequency), in_channel,'
        ' channel_multipliers. ')
    return p

  @property
  def output_channels(self):
    """The number of output channels for this conv layer."""
    p = self.params
    # Depthwise convolution filter shape is:
    # [..., in_channels, channel_multiplier].
    return p.filter_shape[-2] * p.filter_shape[-1]

  @property
  def filter_output_shape(self):
    """Final dims of the filter corresponding to the output channels."""
    # Depthwise convolution uses the final two dims for output channels.
    p = self.params
    _, _, in_c, c_mul = p.filter_shape
    return [in_c, c_mul]

  def _EvaluateConvKernel(self, inputs, filter_w, strides, dilation_rate,
                          padding_algorithm, data_format):
    """Evaluates the depthwise convolution via tf.nn.depthwise_conv2d."""
    p = self.params
    # tf.nn.depthwise_conv2d expects NHWC strides of length 4; the passed
    # `strides` only carries the spatial (height, width) components.
    return tf.nn.depthwise_conv2d(
        inputs,
        filter=filter_w,
        strides=[1, strides[0], strides[1], 1],
        dilations=p.dilation_rate,
        data_format='NHWC',
        padding=padding_algorithm)
class SeparableConv2DLayer(Conv2DLayer):
  """Separable 2D convolution.

  This class aggregates a DepthwiseConv2DLayer that feeds in to the point
  wise convolution defined by this layer. Since the point wise convolution
  controls the output, this class is defined in terms of that and delegates
  to a depthwise sub-layer.

  The `filter_shape` parameter is rewritten on initialization from the form:
    (h, w, cin, cout)
  To:
    Depthwise filter: (h, w, cin, p.depth_multiplier)
    Pointwise filter (on this instance): (1, 1, cin * p.depth_multiplier, cout)

  This way, the layer is configured as if it were a normal 2D convolution
  but is internally reconfigured to be separable.

  paper: https://arxiv.org/abs/1610.02357
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'depth_multiplier', 1,
        'Number of depthwise convolution output channels per input channel. '
        'The total number of depthwise convolution output channels will be.'
        'equal to in_channel * depth_multiplier.')
    p.Define('depthwise_tpl',
             DepthwiseConv2DLayer.Params().Set(activation='NONE'),
             'Template for the depthwise conv sub-layer.')
    return p

  def __init__(self, params):
    """Initializes; rewrites params so this layer becomes the pointwise conv.

    The incoming (h, w, cin, cout) filter is split: spatial extent, stride
    and dilation go to the depthwise sub-layer, while this instance keeps a
    1x1 pointwise filter.
    """
    # Rewrite the filter.
    params = params.Copy()
    h, w, cin, cout = params.filter_shape
    params.filter_shape = (1, 1, cin * params.depth_multiplier, cout)
    depthwise_filter_shape = (h, w, cin, params.depth_multiplier)
    # Dilation rate and stride go to the depthwise layer and reset ours.
    depthwise_filter_stride = params.filter_stride
    depthwise_dilation_rate = params.dilation_rate
    params.filter_stride = (1, 1)
    params.dilation_rate = (1, 1)
    super().__init__(params)
    p = self.params
    del params
    # Create the depthwise sub-layer.
    depthwise_params = p.depthwise_tpl.Copy().Set(
        filter_shape=depthwise_filter_shape,
        filter_stride=depthwise_filter_stride,
        dilation_rate=depthwise_dilation_rate,
        causal_convolution=p.causal_convolution,
        weight_norm=p.weight_norm,
        batch_norm=p.batch_norm,
        bn_decay=p.bn_decay,
        bn_fold_weights=p.bn_fold_weights)
    depthwise_params.qdomain.default = p.qdomain.default
    self.CreateChild('depthwise_conv', depthwise_params)

  def FProp(self, theta, inputs, paddings=None):
    """Applies the depthwise conv, then the pointwise conv of this layer."""
    inputs, paddings = self.depthwise_conv.FProp(theta.depthwise_conv, inputs,
                                                 paddings)
    return super().FProp(theta, inputs, paddings)

  def OutShape(self, in_shape):
    """Compute the output shape given the input shape."""
    in_shape = self.depthwise_conv.OutShape(in_shape)
    return super().OutShape(in_shape)
class ProjectionLayer(quant_utils.QuantizableLayer):
  """Projection layer, with batch normalization and relu activation."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('input_dim', 0, 'Depth of the input.')
    p.Define('output_dim', 0, 'Depth of the output.')
    p.Define(
        'activation', 'RELU',
        'Activation function to use. Options are RELU, RELU6, SIGMOID, '
        'TANH, NONE.')
    p.Define('batch_norm', None, 'Whether or not to apply batch norm.')
    p.Define('has_bias', False,
             'Whether or not to introduce the bias params to the layer.')
    p.Define('bias_init', 0.0, 'Initial value for the bias')
    p.Define(
        'affine_last', False,
        'If true, apply the affine transformation as the last step, i.e., '
        'first apply batch normalization on the input, followed '
        'by activation, and finally the affine transformation. '
        'Otherwise, apply affine transformation first, followed by batch '
        'normalization and activation.')
    p.Define(
        'weight_norm', False,
        'If true, apply weight normalization to weights as proposed by'
        ' Salimans and Kingma, 2016: https://arxiv.org/abs/1602.07868')
    p.Define(
        'bn_fold_weights', None,
        'Fold the batch norm parameters into the convolution weights at '
        'eval/inference time as per https://arxiv.org/pdf/1712.05877.pdf. '
        'Defaults to None which means that it will be disabled by default '
        'and enabled when quantized training is enabled. Not compatible with '
        'affine_last=True')
    p.Define('bn_params',
             BatchNormLayer.Params().Set(decay=0.999),
             'Default params for batch norm layer.')
    p.Define('apply_pruning', False,
             'Whether to prune the weights while training')
    p.Define(
        'pruning_hparams_dict', None, 'Pruning related hyperparameters. A dict '
        'with hyperparameter: value pairs. See google-research.model_pruning.')
    p.Define(
        'use_einsum', True, 'Whether to use tf.einsum for optimizing '
        'computations. When this is set to False, this causes an increase in '
        'TPU memory usage (b/158336491). When this is set to True, it might '
        ' cause problems with model quantization for on device inference '
        '(b/146421936)')
    p.Define(
        'use_blocked_matmul', False, 'Whether to use blocked matrix '
        'multiplications. This allows for weight updates to be paralellized '
        'across the cores for Shampoo optimizer.')
    p.Define('block_dim', 1024, 'Dimension of the block')
    p.Define('use_block_diagonal_matmul', False, 'If True, use block diagonal '
             'matmul.')
    p.Define(
        'bd_num_blocks', 1, 'Number of blocks for the block diagonal matmul '
        'which should divide both input_dim and output_dim')
    p.Define('use_bd_mix', False,
             'If True, add a linear mixing for block diagonal '
             'matmul.')
    # Non-default quantization behaviour for weights.
    p.qdomain.Define('weight', None, 'Quantization domain for the weights.')
    p.Define('xla_num_partitions', None,
             'Obsolete. Kept for backwards compatibility.')
    p.Define('w_dtype', None, 'Obsolete. Kept for backwards compatibility.')
    return p

  def __init__(self, params):
    """Validates params and creates the batch-norm child and AQT weight."""
    super().__init__(params)
    p = self.params
    assert p.name
    assert symbolic.EvalExpr(symbolic.STATIC_VALUES, p.input_dim) > 0
    assert symbolic.EvalExpr(symbolic.STATIC_VALUES, p.output_dim) > 0
    assert p.activation == 'NONE' or activations.IsSupported(p.activation)
    assert p.xla_num_partitions is None
    if p.batch_norm is None:
      # batch_norm must be set to an explicit bool by the caller.
      raise RuntimeError(
          'ProjectionLayer.batch_norm not set explicitly for %s' % self.path)
    if p.batch_norm and p.has_bias:
      tf.logging.warning(
          'Projection layer enables both batch_norm and has_bias. '
          'This is generally redundant/wasteful and may introduce '
          'accuracy problems in some inference scenarios.')
    if self._is_bn_folded:
      assert not p.use_blocked_matmul, (
          'bn_fold_weights requires use_blocked_matmul = False')
      assert not p.affine_last, (
          'Folded batchnorm is not compatible with affine_last')
    if p.use_einsum:
      assert not p.use_blocked_matmul, (
          'use_einsum requires use_blocked_matmul = False')
    if p.device_mesh is not None:
      assert not p.use_blocked_matmul, (
          'Enabling xla_sharding requires use_blocked_matmul = False')
      assert p.weight_split_dims_mapping is not None, self.path
      assert len(p.weight_split_dims_mapping) == 2
    if p.batch_norm:
      bn_params = p.bn_params.Copy()
      bn_params.name = p.name
      # In affine_last mode BN runs on the input, so it uses input_dim.
      bn_params.dim = p.input_dim if p.affine_last else p.output_dim
      self.CreateChild('bn', bn_params)
    # TODO(yonghui): implement the variational noise logic.
    self.CreateAqtWeight(
        'w',
        shape=[p.input_dim, p.output_dim],
        feature_axis=-1,
        legacy_aqt_w_name='projection_aqt')
    if p.pruning_hparams_dict:
      self.compression_op = None
      # only apply compression on tall matrices (input_dim > output_dim)
      # NOTE(review): self.apply_compression is only assigned when
      # pruning_hparams_dict is truthy, yet _ApplyProjectionKernel reads it
      # unconditionally — verify the base class provides a default.
      self.apply_compression = pruning_utils.ApplyCompression(p) and (
          p.input_dim > p.output_dim)
    if p.use_block_diagonal_matmul:
      assert p.bd_num_blocks > 0
      assert p.input_dim % p.bd_num_blocks == 0
      assert p.output_dim % p.bd_num_blocks == 0

  def _GetBlockedMatMulInputOutputMultipliers(self):
    """Get number of input and output blocks."""
    p = self.params
    # Number of input and output blocks.
    w_im = p.input_dim // p.block_dim
    w_om = p.output_dim // p.block_dim
    # Add padding if input_dim / output_dim is not divisible by block_dim.
    if p.input_dim % p.block_dim != 0:
      w_im += 1
    if p.output_dim % p.block_dim != 0:
      w_om += 1
    return w_im, w_om

  def _GetBlockedWeightMatrix(self, w):
    """Reshapes the blocked weight `w` for efficient forward prop."""
    p = self.params
    # w is 3D Tensor of shape [i * o, block_dim, block_dim] such that
    # i * block_dim = num_inputs (modulo padding).
    # j * block_dim = num_outputs
    #
    # To efficiently apply forward prop, we transpose and reshape w into
    # shape [i * block_dim, o, block_dim]
    w_im, w_om = self._GetBlockedMatMulInputOutputMultipliers()
    block_dim = p.block_dim
    w_4d = tf.reshape(w, [w_im, w_om, block_dim, block_dim])
    # Transpose to [i, block_dim, o, block_dim].
    w_4d_t = tf.transpose(w_4d, [0, 2, 1, 3])
    w = tf.reshape(w_4d_t, [w_im * block_dim, w_om, block_dim])
    # Slice out padding from the weight matrix.
    if p.input_dim % p.block_dim != 0:
      w = tf.slice(w, [0, 0, 0], [p.input_dim, w_om, block_dim])
    return w

  def _GetBlockDiagonalInitScale(self, num_blocks, dense_shape, dtype=None):
    """Xavier-style init scale for one block of a block-diagonal matrix."""
    m, n = dense_shape
    if not dtype:
      dtype = tf.float32
    # Fan-in/fan-out are per-block, hence the division by num_blocks.
    scale = math.sqrt(6.0 / (m // num_blocks + n // num_blocks))
    return scale

  def _CreateLayerVariables(self):
    """Creates the weight, bias, mask/threshold and weight-norm variables."""
    super()._CreateLayerVariables()
    p = self.params
    if p.use_blocked_matmul:
      w_im, w_om = self._GetBlockedMatMulInputOutputMultipliers()
      w_pc = py_utils.WeightParams(
          shape=[w_im * w_om, p.block_dim, p.block_dim],
          init=p.params_init,
          dtype=p.dtype,
          collections=[self.__class__.__name__ + '_vars'])
    elif p.use_block_diagonal_matmul:
      w_pc = py_utils.WeightParams(
          shape=(p.bd_num_blocks, p.input_dim // p.bd_num_blocks,
                 p.output_dim // p.bd_num_blocks),
          init=py_utils.WeightInit.Xavier(
              scale=self._GetBlockDiagonalInitScale(
                  p.bd_num_blocks, (p.input_dim, p.output_dim), dtype=p.dtype)),
          dtype=p.dtype,
          device_mesh=p.device_mesh,
          tensor_split_dims_mapping=p.weight_split_dims_mapping,
          collections=[self.__class__.__name__ + '_vars'])
      # NOTE(review): mix_kernel_pc is only defined on this branch; the later
      # `use_block_diagonal_matmul and use_bd_mix` check assumes
      # use_blocked_matmul is False in that configuration.
      mix_kernel_pc = py_utils.WeightParams(
          shape=(p.bd_num_blocks, p.bd_num_blocks),
          init=py_utils.WeightInit.CustomVarInit(
              tf.keras.initializers.Identity()),
          dtype=p.dtype,
          collections=[self.__class__.__name__ + '_vars'])
    else:
      w_pc = py_utils.WeightParams(
          shape=[p.input_dim, p.output_dim],
          init=p.params_init,
          dtype=p.dtype,
          device_mesh=p.device_mesh,
          tensor_split_dims_mapping=p.weight_split_dims_mapping,
          collections=[self.__class__.__name__ + '_vars'])
    if p.apply_pruning:
      mask_w_pc = py_utils.WeightParams(w_pc.shape,
                                        py_utils.WeightInit.Constant(1.0),
                                        p.dtype)
      threshold_w_pc = py_utils.WeightParams([],
                                             py_utils.WeightInit.Constant(0.0),
                                             tf.float32)
    if p.has_bias:
      if p.device_mesh is not None:
        bias_split_dims_mapping = [p.weight_split_dims_mapping[1]]
      else:
        bias_split_dims_mapping = None
      b_pc = py_utils.WeightParams(
          shape=[p.output_dim],
          init=py_utils.WeightInit.Constant(scale=p.bias_init),
          dtype=p.dtype,
          device_mesh=p.device_mesh,
          tensor_split_dims_mapping=bias_split_dims_mapping,
          collections=[self.__class__.__name__ + '_vars'])
    if p.weight_norm:
      g_pc = py_utils.WeightParams(
          shape=[p.output_dim],
          init=py_utils.WeightInit.Constant(0.0),
          dtype=p.dtype,
          collections=[self.__class__.__name__ + '_vars'])
    weights_var_name = 'w'
    if p.apply_pruning:
      mask_var_name = 'mask'
      threshold_var_name = 'threshold'
      self.CreateVariable(mask_var_name, mask_w_pc, trainable=False)
      self.CreateVariable(threshold_var_name, threshold_w_pc, trainable=False)
      self.CreateVariable(weights_var_name, w_pc)
      pruning_utils.AddToPruningCollections(
          getattr(self.vars, weights_var_name), getattr(self.vars,
                                                        mask_var_name),
          getattr(self.vars, threshold_var_name))
    else:
      self.CreateVariable(weights_var_name, w_pc)
      if pruning_utils.ApplyCompression(p):
        pruning_utils.PruningOp.ApplyPruning(p.pruning_hparams_dict, self,
                                             weights_var_name, w_pc, p.dtype,
                                             p.name)
        self.compression_op = pruning_utils.PruningOp.GetLastCompressionOp()
    if p.use_block_diagonal_matmul and p.use_bd_mix:
      self.CreateVariable('mix_kernel', mix_kernel_pc)
    if p.has_bias:
      self.CreateVariable('b', b_pc)
    if p.weight_norm:
      self.CreateVariable('g', g_pc)
    # Determine quantization needs based on whether fusing activation
    # or not.
    self._pre_activation_qt_name = None
    self._output_qt_name = ('activation'
                            if p.activation != 'NONE' else 'affine_matmul')
    if (p.activation != 'NONE' and
        p.activation not in _TFLITE_FUSED_ACTIVATION_NAMES):
      # Not a fused activation function.
      # Need a qtensor to track the pre-activation tensor. The name is
      # compatible with older checkpoints.
      self._pre_activation_qt_name = 'affine_matmul'
    self.TrackQTensor(self._output_qt_name)
    if self._pre_activation_qt_name:
      self.TrackQTensor(self._pre_activation_qt_name)

  def _child_variable_scope_override(self):
    # The batch-norm child reuses this layer's scope (empty override list).
    return {**super()._child_variable_scope_override(), 'bn': []}

  def AddGlobalVN(self, theta):
    """Adds global variational noise; also covers the masked weight."""
    theta = super().AddGlobalVN(theta)
    if self.params.apply_pruning:
      theta.w = self.AddVN(theta.w)
    return theta

  @classmethod
  def NumOutputNodes(cls, p):
    return p.output_dim

  @property
  def output_qt_name(self):
    """Name of QTensor used for the output value.

    Useful for grabbing the quantization of the output.

    Returns:
      String name of output qtensor.
    """
    return self._output_qt_name

  def FProp(self, theta, inputs, paddings=None):
    """Apply projection to inputs.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: The inputs tensor.  Shaped [..., input_dim].
      paddings: The paddings tensor.  Shaped [..., 1], where all but the last
        dimension match.

    Returns:
      Output after applying projection, and optionally batch normalization and
      relu non-linearity.
    """
    p = self.params
    with tf.name_scope(p.name):
      inputs, paddings = self._CastToFPropDtype((inputs, paddings))
      if paddings is None:
        # Synthesize all-zero paddings matching the input's leading dims.
        shape = tf.convert_to_tensor(py_utils.GetShape(inputs)[:-1], tf.int32)
        paddings = tf.zeros(tf.concat([shape, [1]], axis=0), dtype=inputs.dtype)
      w, b = self._GetWeights(theta, inputs, paddings)
      if pruning_utils.ApplyCompression(p):
        if p.pruning_hparams_dict[
            'compression_option'] == 9 and self.apply_compression:
          # compression_option 9 corresponds to input compression
          # redirect w to point to c
          w = theta.c_matrix_tfvar
      w = self.QWeight(w)
      proj_kwargs = {
          'mix_kernel': theta.mix_kernel
      } if p.use_block_diagonal_matmul and p.use_bd_mix else {}
      if p.affine_last:
        # Reversed computation. Does not handle folding.
        out = inputs
        if p.batch_norm:
          out = self.bn.FProp(theta.bn, out, paddings)
        if p.activation != 'NONE':
          if not p.is_inference:
            out = py_utils.CheckNumerics(out)
          out = activations.GetFn(p.activation)(out)
        out = self._ApplyProjectionKernel(
            w, b, out, with_activation=False, **proj_kwargs)
      else:
        # Normal ordered projection.
        if self._is_bn_folded or not p.batch_norm:
          # Everything folded together. This is the only variant that supports
          # quantization.
          out = self._ApplyProjectionKernel(
              w, b, inputs, quant=True, **proj_kwargs)
        else:
          # Projection kernel(no activation fn) -> BN -> Activation fn.
          out = self._ApplyProjectionKernel(
              w, b, inputs, with_activation=False, **proj_kwargs)
          if p.batch_norm:
            out = self.bn.FProp(theta.bn, out, paddings)
          if p.activation != 'NONE':
            if not p.is_inference:
              out = py_utils.CheckNumerics(out)
            out = activations.GetFn(p.activation)(out)
      paddings = self.QRAct(paddings, quant_utils.QDistribution.PADDING)
      return py_utils.ApplyPadding(paddings, out)

  def FPropFullSequence(self, theta, inputs, paddings):
    # Projection is stateless across time, so full-sequence FProp is FProp.
    return self.FProp(theta, inputs, paddings)

  @property
  def _is_bn_folded(self):
    """Whether batchnorm folded weights are effectively enabled."""
    p = self.params
    if not p.batch_norm:
      return False
    # bn_fold_weights=None means "fold iff quantization is enabled".
    return (p.bn_fold_weights or
            (p.bn_fold_weights is None and p.qdomain.default is not None))

  def _GetWeights(self, theta, inputs, paddings):
    """Gets the weights for the computation.

    Weights will always have weight_norm applied and may have batch_norm
    folded if enabled.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: Inputs (needed for batchnorm folding).
      paddings: Paddings (needed for batchnorm folding).

    Returns:
      Tuple of (w, b) to use for the forward pass. b may be None if bias is
      disabled.
    """
    p = self.params
    w = theta.w
    if p.apply_pruning:
      w = tf.multiply(w, theta.mask, 'masked_w')
    b = theta.b if p.has_bias else None
    # Cast to fprop_dtype.
    fprop_dtype = py_utils.FPropDtype(p)
    if w.dtype != fprop_dtype:
      w = tf.cast(w, fprop_dtype)
    if b is not None and b.dtype != fprop_dtype:
      b = tf.cast(b, fprop_dtype)
    if p.use_blocked_matmul:
      w = self._GetBlockedWeightMatrix(w)
      if p.weight_norm:
        w = tf.nn.l2_normalize(w, 0)
    else:
      if p.weight_norm:
        # Scale-per-output-column weight normalization with learned gain g.
        w = tf.reshape((theta.g + 1.0) * tf.nn.l2_normalize(w, [0]),
                       py_utils.ToStaticShape([p.input_dim, p.output_dim]))
    if not self._is_bn_folded:
      return w, b
    # If batch norm is fused with weights, then compute the weights as from
    # figure C.8 of https://arxiv.org/pdf/1712.05877.pdf for training and
    # figure C.6 for eval.
    if self.do_eval:
      # Gets current moments without updating.
      mean, variance, beta, gamma = self.bn.GetCurrentMoments(theta.bn)
    else:
      # Updates moments based on a trial run of the kernel (without activation
      # function).
      proj_kwargs = {
          'mix_kernel': theta.mix_kernel
      } if p.use_block_diagonal_matmul and p.use_bd_mix else {}
      raw_output = self._ApplyProjectionKernel(
          w, b, inputs, with_activation=False, **proj_kwargs)
      mean, variance, beta, gamma = self.bn.ComputeAndUpdateMoments(
          theta.bn, raw_output, paddings)
    # Fold weights and bias.
    sigma_recip = tf.math.rsqrt(variance + self.bn.epsilon)
    scale_correction = gamma * sigma_recip
    w = w * scale_correction
    b = beta - (gamma * mean * sigma_recip)
    return w, b

  def _ApplyProjectionKernel(self,
                             w,
                             b,
                             inputs,
                             with_activation=True,
                             quant=False,
                             bn=False,
                             mix_kernel=None):
    """Applies matmul/bias/activation in one step.

    Note that it is important that these three ops be computed in this way as
    downstream inference engines (esp. for quantized inference) can recognize
    and fuse them. For floating point, this is an optimization, but for
    quantization, it is required.

    Args:
      w: Weight matrix.
      b: Bias vector (or None).
      inputs: FProp inputs.
      with_activation: Whether to also compute the activation function.
      quant: Whether to apply quantization.
      bn: Apply batchnorm.
      mix_kernel: (optional) mix_kernel for block diagonal matmul.

    Returns:
      Output tensor reshaped.
    """
    p = self.params
    if not p.use_blocked_matmul:
      inputs, w = self.ToAqtInputs('w', act=inputs, weight=w, w_feature_axis=-1)
      if p.use_einsum:
        if self.apply_compression:
          out = pruning_utils.PruningOp.GetProjectLastDim(
              inputs, w, p.input_dim, p.output_dim, self)
        elif p.use_block_diagonal_matmul:
          if mix_kernel is not None:
            out = py_utils.BlockDiagonalProjectLastDimWithMix(
                inputs, w, p.input_dim, p.output_dim, mix_kernel,
                p.bd_num_blocks)
          else:
            out = py_utils.BlockDiagonalProjectLastDim(inputs, w, p.input_dim,
                                                       p.output_dim,
                                                       p.bd_num_blocks)
        else:
          out = py_utils.ProjectLastDim(inputs, w, p.input_dim, p.output_dim)
      else:
        if p.use_block_diagonal_matmul:
          if mix_kernel is not None:
            out = py_utils.BlockDiagonalMatmulWithMix(
                tf.reshape(inputs, py_utils.ToStaticShape([-1, p.input_dim])),
                w, mix_kernel, p.bd_num_blocks)
          else:
            out = py_utils.BlockDiagonalMatmul(
                tf.reshape(inputs, py_utils.ToStaticShape([-1, p.input_dim])),
                w, p.bd_num_blocks)
        else:
          out = py_utils.Matmul(
              tf.reshape(inputs, py_utils.ToStaticShape([-1, p.input_dim])), w)
      out = self.FromAqtMatmul('w', out)
    else:
      x = tf.reshape(inputs, py_utils.ToStaticShape([-1, p.input_dim]))
      # TODO(shivaniagrawal): There are the following dimensions: bn, nmk;
      # the correct thing to do here might be scaling on every m and every k,
      # while we are doing every k only.
      x, w = self.ToAqtInputs('w', act=x, weight=w, w_feature_axis=-1)
      out = tf.einsum('bn,nmk->bmk', x, w)
      out = self.FromAqtMatmul('w', out)
      # Create an output layer [b, num_outputs].
      bsz = py_utils.GetShape(out)[0]
      out = tf.reshape(out, [bsz, -1])
      if p.output_dim % p.block_dim != 0:
        # Drop the block padding added in _CreateLayerVariables.
        out_shape = [bsz, p.output_dim]
        out = tf.slice(out, [0, 0], out_shape)
    if b is not None:
      out += b  # NOTE: Bias on matmul is never quantized.
    out = gshard_utils.MeshSplit(out, p.device_mesh,
                                 p.activation_split_dims_mapping)
    return self._ApplyActivationFunction(out, inputs, with_activation, quant)

  def _ApplyActivationFunction(self,
                               out,
                               inputs,
                               with_activation=True,
                               quant=False):
    """Applies the activation function in one step.

    Args:
      out: The result of applying the weight matrix (and bias) to the inputs.
      inputs: FProp inputs.
      with_activation: Whether to also compute the activation function.
      quant: Whether to apply quantization.

    Returns:
      Output tensor reshaped.
    """
    p = self.params
    if with_activation and p.activation != 'NONE':
      if self._pre_activation_qt_name:
        # Track quantization for unfused activation function.
        out = self.QTensor(self._pre_activation_qt_name, out)
      if not p.is_inference:
        out = py_utils.CheckNumerics(out)
      out = activations.GetFn(p.activation)(out)
    if quant:
      out = self.QTensor(self._output_qt_name, out)
    if not p.use_einsum:
      # Restore the original leading dims; einsum paths keep them intact.
      out = tf.reshape(
          out,
          tf.concat([
              py_utils.GetShape(inputs)[:-1],
              py_utils.ToStaticShape([p.output_dim])
          ],
                    axis=0))
    return out

  @classmethod
  def FPropMeta(cls, p, inputs, paddings=None):
    """Returns FLOPs and output shapes for a symbolic FProp."""
    py_utils.CheckShapes((inputs,))
    assert inputs[-1] == p.input_dim
    flops = 0
    in_dim = inputs[-1]
    other_dims = inputs.num_elements() / in_dim
    # matmuls.
    flops += other_dims * p.input_dim * p.output_dim * 2
    # activations.
    flops += other_dims * p.output_dim * activations.GetFlops(p.activation)
    if p.has_bias:
      flops += p.output_dim
    out_shape = tshape.Shape(inputs[:-1] + [p.output_dim])
    if p.batch_norm:
      bn_meta = p.bn_params.cls.FPropMeta(
          p.bn_params.Copy().Set(dim=p.output_dim), out_shape)
      flops += bn_meta.flops
    if p.weight_norm:
      # l2 normalize + element-wise multiply.
      flops += 2 * p.input_dim + 2 * p.input_dim * p.output_dim + 2
    return py_utils.NestedMap(flops=flops, out_shapes=(out_shape,))
class FCLayer(ProjectionLayer):
  """Fully-connected layer: matmul + bias, with an optional activation.

  A ProjectionLayer preconfigured with bias enabled and batch norm disabled.
  """

  @classmethod
  def Params(cls):
    params = super().Params()
    params.has_bias = True
    params.batch_norm = False
    return params
class FeedForwardNet(quant_utils.QuantizableLayer):
  """A simple multiple layer feedforward network.

  This class represents a stack of fully connected feedforward network. Each
  layer in the network can be configured for whether or not to have batch-norm
  applied to its output, its activation function, whether or not to apply
  dropout to post-activation output.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('input_dim', 0, 'Depth of the input to the network.')
    p.Define('hidden_layer_dims', [], 'Depth of the hidden layer outputs.')
    p.Define(
        'projection', ProjectionLayer.Params(),
        'Projection layer params. A single parameter that will be shared by'
        'all layers, or a list of params matching the number of layers.')
    p.Define(
        'dropout', DropoutLayer.Params(),
        'Dropout layer params. Can be a single params or a tuple/list of params'
        ' having the same length as the number of layers.')
    p.Define(
        'batch_norm', False,
        'Whether or not to apply BN to hidden layer output. '
        'This can be a single bool or a tuple/list of bools having the'
        ' same length as the number of layers.')
    p.Define(
        'activation', 'RELU',
        'The activation function to use. Can be a single string, or a'
        ' tuple/list of strings having the same length as the number'
        ' of layers.')
    p.Define(
        'has_bias', None, 'Whether or not to use bias for projection layers.'
        'This can be a None, single bool or a tuple/list of bools having the '
        'same length as the number of layers. If None, the has_bias is set to '
        'True whenever batch_norm is False for each projection layer.')
    p.Define(
        'weight_norm', False,
        'Whether or not to apply weight normalization to weights. This can be '
        'a single bool or a tuple/list of bools having the same length as the '
        'number of layers.')
    p.Define('skip_connections', None, 'Must be None.')
    p.Define(
        'bn_fold_weights', None, 'Force folding the batch normalization '
        'weights in the projection layer.')
    # TODO(rpang): retire weight_split_dims_mapping_list and
    # activation_split_dims_mapping_list. Use
    # {weight,activation}_split_dims_mapping (defined in BaseLayer) instead.
    p.Define('weight_split_dims_mapping_list', None,
             'A list of weight_split_dims_mapping for each sub-layer.')
    p.Define('activation_split_dims_mapping_list', None,
             'A list of activation_split_dims_mapping for each sub-layer.')
    # Non-default quantization behaviour for the weights.
    p.qdomain.Define('weight', None, 'Quantization domain for the weights.')
    # Block Diagonal matmul parameters.
    p.Define(
        'use_block_diagonal_matmul_pl', [], 'Boolean array to determine '
        'whether to use block diagonal matmul in the projection layer.')
    p.Define('num_blocks_pl', [], 'Int array for number of blocks for input '
             'and output.')
    return p

  def __init__(self, params):
    """Broadcasts per-layer options and creates the fc/dropout children."""
    super().__init__(params)
    p = self.params
    assert p.name
    assert symbolic.ToStatic(p.input_dim) > 0
    assert all(symbolic.ToStatic(x) > 0 for x in p.hidden_layer_dims)
    assert p.skip_connections is None
    # Each scalar option below may also be a per-layer list; broadcast
    # scalars to one entry per hidden layer.
    batch_norm = p.batch_norm
    num_layers = len(p.hidden_layer_dims)
    if isinstance(batch_norm, (list, tuple)):
      assert len(batch_norm) == num_layers
    else:
      batch_norm = [batch_norm] * num_layers
    weight_norm = p.weight_norm
    if isinstance(weight_norm, (list, tuple)):
      assert len(weight_norm) == num_layers
    else:
      weight_norm = [weight_norm] * num_layers
    activation = p.activation
    if isinstance(activation, str):
      activation = [activation] * num_layers
    else:
      assert len(activation) == num_layers
    has_bias = p.has_bias
    if isinstance(has_bias, (list, tuple)):
      assert len(has_bias) == num_layers
    else:
      has_bias = [has_bias] * num_layers
    # Set has_bias to (not batch_norm) if None.
    for i in range(num_layers):
      if has_bias[i] is None:
        has_bias[i] = (not batch_norm[i])
    params_proj_layers = p.projection
    if isinstance(params_proj_layers, (list, tuple)):
      assert len(params_proj_layers) == num_layers
    else:
      params_proj_layers = [params_proj_layers] * num_layers
    params_dropout_layers = p.dropout
    if isinstance(params_dropout_layers, (list, tuple)):
      assert len(params_dropout_layers) == num_layers
    else:
      params_dropout_layers = [params_dropout_layers] * num_layers
    if p.device_mesh is not None:
      weight_split_dims_mapping_list = p.weight_split_dims_mapping_list
      activation_split_dims_mapping_list = p.activation_split_dims_mapping_list
      if activation_split_dims_mapping_list is None:
        activation_split_dims_mapping_list = [None] * num_layers
    else:
      weight_split_dims_mapping_list = [None] * num_layers
      activation_split_dims_mapping_list = [None] * num_layers
    assert len(weight_split_dims_mapping_list) == num_layers
    assert len(activation_split_dims_mapping_list) == num_layers
    use_block_diagonal_matmul_pl = [False] * num_layers
    num_blocks_pl = [1] * num_layers
    if p.use_block_diagonal_matmul_pl and any(p.use_block_diagonal_matmul_pl):
      use_block_diagonal_matmul_pl = p.use_block_diagonal_matmul_pl
      num_blocks_pl = p.num_blocks_pl
    # Residual connections work better in the form of:
    #   y = x + Affine(Activation(BatchNorm(x)))
    params_fc_layers = []
    in_dim = p.input_dim
    for i in range(num_layers):
      out_dim = p.hidden_layer_dims[i]
      proj_out_dim = out_dim
      name = f'{p.name}_{i}'
      params_i = params_proj_layers[i].Copy()
      # Some projection templates wrap the dense config in 'dense_tpl'.
      if 'dense_tpl' in params_i:
        dense_params = params_i.dense_tpl
      else:
        dense_params = params_i
      dense_params.Set(
          batch_norm=batch_norm[i],
          weight_norm=weight_norm[i],
          has_bias=has_bias[i],
          bn_fold_weights=p.bn_fold_weights,
          device_mesh=p.device_mesh,
          weight_split_dims_mapping=weight_split_dims_mapping_list[i],
          activation_split_dims_mapping=activation_split_dims_mapping_list[i],
          use_block_diagonal_matmul=use_block_diagonal_matmul_pl[i],
          bd_num_blocks=num_blocks_pl[i])
      params_i.Set(
          input_dim=in_dim,
          output_dim=proj_out_dim,
          activation=activation[i],
          name=name)
      params_fc_layers.append(params_i)
      in_dim = out_dim
      # params_i is appended by reference, so mutating qdomain here still
      # affects the appended entry.
      if p.qdomain.default is not None:
        params_i.qdomain.default = p.qdomain.default.Copy()
      if p.qdomain.weight is not None:
        params_i.qdomain.weight = p.qdomain.weight.Copy()
    self.CreateChildren('fc', params_fc_layers)
    self.CreateChildren('dropout', params_dropout_layers)

  @property
  def output_dim(self):
    """Returns output dimension of the FeedForwardNet."""
    return self.params.hidden_layer_dims[-1]

  @classmethod
  def NumOutputNodes(cls, p):
    return p.hidden_layer_dims[-1]

  def FPropAllLayers(self, theta, inputs, paddings=None):
    """FProp, returns all layers including the input and output layers."""
    p = self.params
    num_layers = len(self.fc)
    in_dim, layer_in = p.input_dim, inputs
    all_layers = [layer_in]
    for i in range(num_layers):
      # Runtime check that the input depth matches this layer's input_dim.
      layer_in = py_utils.with_dependencies([
          py_utils.assert_shape_match([tf.shape(layer_in)[-1]],
                                      [symbolic.ToStatic(in_dim)])
      ], layer_in)
      out_dim = p.hidden_layer_dims[i]
      layer_out = self.fc[i].FProp(theta.fc[i], layer_in, paddings)
      layer_out = self.dropout[i].FProp(theta.dropout[i], layer_out)
      all_layers.append(layer_out)
      layer_in = layer_out
      in_dim = out_dim
    return all_layers

  def FProp(self, theta, inputs, paddings=None):
    """Computes the output of the feed-forward network.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: The inputs tensor.  Shaped [..., input_dim].
      paddings: The paddings tensor.  Shaped [..., 1], where all but the last
        dimension match.

    Returns:
      Output after applying all layers. Shaped [..., p.hidden_layer_dims[-1]].
    """
    return self.FPropAllLayers(theta, inputs, paddings)[-1]

  @classmethod
  def FPropMeta(cls, p, inputs, paddings=None):
    """Accumulates FLOPs over all projection sub-layers."""
    py_utils.CheckShapes((inputs,))
    assert inputs[-1] == p.input_dim
    flops = 0
    with tf.Graph().as_default():  # throw-away graph.
      instance = p.Instantiate()
      for fc in instance.fc:
        proj_params = fc.params
        proj_shape = tshape.Shape(inputs[:-1] + [proj_params.input_dim])
        proj_meta = proj_params.cls.FPropMeta(proj_params, proj_shape)
        flops += proj_meta.flops
    out_shape = tshape.Shape(inputs[:-1] + [p.hidden_layer_dims[-1]])
    return py_utils.NestedMap(flops=flops, out_shapes=(out_shape,))
class StackingOverTime(base_layer.BaseLayer):
  """Stacking applied along the time axis.

  At each time step of an input sequence, elements are stacked over the
  window of ('left_context' + 1 + 'right_context') steps around the current
  time step. Zeros will be padded to the left or right of the sequence for
  elements around the boundaries. Finally the stacked outputs are emitted
  once every 'stride' steps.

  E.g. if an input sequence is: [4], [1], [9], [3], [5], [2], [8]
  left_context = 1, right_context = 1, stride = 3,
  then the output sequence would be: [0, 4, 1], [9, 3, 5], [2, 8, 0]

  Note that this layer only performs tensor transformation, so there are no
  learnable parameters.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('left_context', 0,
             'Number of time steps to stack on the left to the central step.')
    p.Define('right_context', 0,
             'Number of time steps to stack on the right to the central step.')
    p.Define('stride', 1, 'The stride for emitting the stacked output.')
    p.Define('pad_with_left_frame', False,
             'Whether to use the left frame for padding instead of 0s.')
    p.Define('pad_with_right_frame', False,
             'Whether to use the right frame for padding instead of 0s.')
    p.Define(
        'padding_reduce_option', 'reduce_min',
        'reduce_max or reduce_min. How to reduce stacked padding from '
        '[b, t / stride, stride] to [b, t / stride, 1].')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    assert p.left_context >= 0, p.left_context
    assert p.right_context >= 0, p.right_context
    assert p.stride >= 1
    assert p.padding_reduce_option in ('reduce_min', 'reduce_max')

  @classmethod
  def WindowSize(cls, p):
    """Returns the stacking window size."""
    return p.left_context + p.right_context + 1

  @property
  def window_size(self):
    """Returns the stacking window size.

    The output dimension will be window_size * the input dimension.

    Returns:
      Window size.
    """
    p = self.params
    return self.WindowSize(p)

  def _ApplyStack(self, inputs, pad_value=0.0):
    """The core function to apply the stacking to inputs.

    Args:
      inputs: [batch, time, depth].
      pad_value: the padding value for left/right context.

    Returns:
      [batch, ceil(time / stride), depth * stacking_window_length] tensor.
    """
    p = self.params
    if p.left_context == 0 and p.right_context == 0:
      # Window size 1: stacking is the identity; only striding applies below.
      out = inputs
    else:
      inputs_max_len = py_utils.GetShape(inputs, 3)[1]
      left_to_pad = p.left_context
      right_to_pad = p.right_context
      # Optionally replicate the boundary frames (instead of padding with
      # pad_value) so windows near the edges contain real signal.
      if p.pad_with_left_frame:
        left_pad = tf.repeat(inputs[:, :1, :], repeats=p.left_context, axis=1)
        inputs = tf.concat([left_pad, inputs], axis=1)
        left_to_pad = 0
      if p.pad_with_right_frame:
        right_pad = tf.repeat(
            inputs[:, -1:, :], repeats=p.right_context, axis=1)
        inputs = tf.concat([inputs, right_pad], axis=1)
        right_to_pad = 0
      # Add zero paddings to the left and right of the input sequence.
      inputs = tf.pad(
          inputs, [[0, 0], [left_to_pad, right_to_pad], [0, 0]],
          constant_values=pad_value)
      # Make window_size() copies of the padded sequence with the original
      # sequence length, where each copy is offset by 1 time step.
      pieces = []
      for i in range(self.window_size):
        pieces.append(inputs[:, i:i + inputs_max_len])
      # Apply stacking.
      out = tf.concat(pieces, 2)
    # Apply striding.
    out = out[:, ::p.stride]
    return out

  def FProp(self, inputs, paddings=None):
    """Apply the stacking to inputs along the time axis.

    Args:
      inputs: The inputs tensor. It is expected to be of shape [batch, time,
        feature].
      paddings: The paddings tensor. It is expected to be of shape [batch, time,
        1], where all but the last dimension match inputs. Each value is 0 or 1
        indicating whether a time step of a sequence is padded in the inputs to
        reach the max length in the batch.

    Returns:
      (outputs, out_paddings) pair.
        outputs is of shape [batch, ceil(time / stride), feature * stacking].
        out_paddings is of shape [batch, ceil(time / stride), 1]. out_paddings
        will be 0 if any of the corresponding input padding is 0.
    """
    p = self.params
    if paddings is None:
      # Treat every input step as valid (non-padded).
      paddings = tf.zeros(
          tf.concat([py_utils.GetShape(inputs)[:-1], [1]], 0),
          dtype=inputs.dtype)
    inputs = py_utils.with_dependencies(
        [
            # Checks the inputs shape has 3 dimensions.
            py_utils.assert_shape_match(tf.shape(inputs), [-1, -1, -1]),
            # Checks the paddings shape has 3 dimensions, and the last one is 1.
            py_utils.assert_shape_match(tf.shape(paddings), [-1, -1, 1]),
            # Checks the first two dimensions of inputs and paddings match.
            py_utils.assert_shape_match(
                tf.shape(inputs)[:-1],
                tf.shape(paddings)[:-1])
        ],
        inputs)
    # Trivial case: window size 1 and stride 1 means identity.
    if 0 == p.left_context == p.right_context and 1 == p.stride:
      return inputs, paddings
    with tf.name_scope(p.name):
      outputs = self._ApplyStack(inputs)
      # Stack the padding values with the same context and stride parameters.
      # Then take the minimum padding values within each stacking window, since
      # an output time step becomes a padded one only if all of the underlying
      # stacked steps are padded ones.
      out_paddings = self._ApplyStack(paddings, pad_value=1)
      if p.padding_reduce_option == 'reduce_min':
        out_paddings = tf.reduce_min(out_paddings, axis=2, keepdims=True)
      else:
        out_paddings = tf.reduce_max(out_paddings, axis=2, keepdims=True)
      return outputs, out_paddings

  def Unstack(self, stacked):
    """Inverts stacking over time.

    Given 'stacked' outputs from this StackingOverTime layer,

      stacked, _ = this_layer.FProp(inputs),

    this method attempts to reconstruct the original 'inputs'.

    If stride > window_size, the original input cannot be recovered, and a
    ValueError is raised.

    Otherwise, if right_context + 1 >= stride, this method returns a Tensor that
    is identical to 'inputs' but potentially longer due to paddings.

    If right_context + 1 < stride, this method returns a Tensor that may be up
    to ```stride - right_context - 1``` frames shorter than the original input,
    but identical in the frames that are returned. e.g.::

      left_context = 2, right_context = 1, stride = 4
      input sequence:     1 2 3 4 5 6 7 8
      after padding:  0 0 1 2 3 4 5 6 7 8 0
      windows:
        [0 0 (1) 2] 3 4 5 6 7 8 0
         0 0 1 2 [3 4 (5) 6] 7 8 0
      stacked:
        [[0 0 1 2], [3 4 5 6]]
      unstacked:
        [1 2 3 4 5 6], which is 4 - 1 - 1 = 2 (stride - right_context - 1)
        frames shorter than the original input.

    `Unstack()` can be used to project the outputs of downstream layers back to
    the shape of the original unstacked inputs. For example::

        inputs = ...  # [batch, length, input_dim]
        # [batch, ceil(length / stride), rnn_dim]
        rnn_out = rnn.FProp(stacking.FProp(inputs)[0])
        # [batch, length, rnn_dim]
        back_projected_rnn_out = py_utils.PadOrTrimTo(
            stacking.Unstack(tf.tile(rnn_out, [1, 1, stacking.window_size])),
            py_utils.GetShape(inputs))

    Note this method does not take or return a separate padding tensor. The
    caller is responsible for knowing which of outputs are padding (e.g. based
    on the padding of the original FProp inputs).

    Args:
      stacked: Tensor of shape [batch, time, window_size * feature_dim], assumed
        to be the output of `FProp`.

    Returns:
      The reconstructed input Tensor, with shape
      [batch, (frames - 1) * stride + right_context + 1, feature_dim].

    Raises:
      ValueError: if stride > window_size.
    """
    p = self.params
    if 0 == p.left_context == p.right_context and 1 == p.stride:
      # FProp was an identity in this configuration; nothing to invert.
      return stacked
    if p.stride > self.window_size:
      raise ValueError(
          "Can't invert StackingOverTime with stride (%d) > window_size (%d)" %
          (p.stride, self.window_size))
    # Reshape to allow indexing individual frames within each stacked window.
    batch_size, stacked_length, _ = py_utils.GetShape(stacked, 3)
    stacked = tf.reshape(stacked,
                         [batch_size, stacked_length, self.window_size, -1])
    # Compute the index of the window and frame in 'stacked' where each frame of
    # the original input is located, and extract them with tf.gather_nd.
    # First compute for all except the last window, since these elements have
    # the potential of being looked up from the next window.
    input_indices = tf.range(0, (stacked_length - 1) * p.stride)
    mod = input_indices % p.stride
    # A frame whose offset within its stride exceeds right_context is only
    # present in the *next* window (shifted by the left context there).
    in_next_window = tf.cast(tf.greater(mod, p.right_context), tf.int32)
    window_index = input_indices // p.stride + in_next_window
    frame_index = p.left_context + mod - p.stride * in_next_window
    # Now handle the last window explicitly and concatenate onto the existing
    # window_index/frame_index tensors.
    last_window_length = p.right_context + 1
    window_index = tf.concat(
        [window_index,
         tf.fill([last_window_length], stacked_length - 1)],
        axis=0)
    frame_index = tf.concat(
        [frame_index, p.left_context + tf.range(last_window_length)], axis=0)
    # Stack the indices for tf.gather_nd.
    window_and_frame_indices = tf.stack([window_index, frame_index], axis=1)
    window_and_frame_indices = tf.tile(
        tf.expand_dims(window_and_frame_indices, 0), [batch_size, 1, 1])
    return tf.gather_nd(stacked, window_and_frame_indices, batch_dims=1)
class PoolingLayer(quant_utils.QuantizableLayer):
  """Pooling layer, by default performs max-pooling.

  Quantization notes: Unlike the common pattern, the pooling layer inputs
  and output must be quantized to the same range, so it tracks both (vs
  just the output). The preceding layer must have its output quantization
  disabled.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'window_shape', (0, 0),
        'Window shape. Must be a pair of ints. Elements are in'
        ' the order of height (time), width (frequency).')
    p.Define(
        'window_stride', (0, 0),
        'Window stride to use. Must be a pair of ints. The first int'
        ' specifies the stride on the time dimension. The second int'
        ' specifies the stride on the frequency dimension.')
    p.Define('pooling_type', 'MAX', 'Pooling type: MAX|AVG')
    p.Define(
        'padding_algorithm', 'SAME',
        'Padding algorithm. See the "returns" section of '
        '`tf.nn.convolution` for details. '
        'Roughly, VALID = NO_PADDING and SAME (default) = PAD INPUT')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    assert len(p.window_shape) == 2
    assert len(p.window_stride) == 2
    assert all([x > 0 for x in p.window_shape])
    assert all([x > 0 for x in p.window_stride])
    assert p.pooling_type in ['MAX', 'AVG']

  def _CreateLayerVariables(self):
    super()._CreateLayerVariables()
    # Track a single quantization range used for both inputs and output
    # (see the class docstring).
    self.TrackQTensor('output')

  @classmethod
  def OutputShape(cls, params, in_shape):
    """Computes the output shape from `in_shape` given the pooling strides."""
    p = params
    return _ComputeConvOutputShape(
        in_shape,
        p.window_stride[0],
        p.window_stride[1],
        padding=p.padding_algorithm)

  def OutShape(self, in_shape):
    """Compute the output shape given the input shape."""
    return self.OutputShape(self.params, in_shape)

  def FProp(
      self,
      theta: py_utils.NestedMap,
      inputs: tf.Tensor,
      paddings: Optional[tf.Tensor] = None,
  ) -> Union[tf.Tensor, Tuple[tf.Tensor, tf.Tensor]]:
    """Apply pooling to inputs.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: The inputs tensor. It is expected to be of shape [batch, time,
        frequency, channel]. The time dimension corresponds to the height
        dimension as in images and the frequency dimension corresponds to the
        width dimension as in images.
      paddings: The paddings tensor. It is expected to be of shape [batch,
        time]. Defaults to None, which means there are no paddings.

    Returns:
      An (output, paddings) tensor tuple if paddings is not None, else just
      output tensor.
    """
    p = self.params
    stride = p.window_stride
    window = p.window_shape
    if paddings is not None:
      inputs = py_utils.with_dependencies([
          py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]),
          py_utils.assert_shape_match(tf.shape(inputs)[:2], tf.shape(paddings))
      ], inputs)
    with tf.name_scope(p.name):
      if paddings is not None:
        out_padding = _ComputeConvOutputPadding(paddings, window[0], stride[0],
                                                p.padding_algorithm)
        if p.pooling_type == 'MAX':
          # Fill dtype.min in padded positions so they never win the max.
          inputs = py_utils.ApplyPadding(paddings[..., tf.newaxis, tf.newaxis],
                                         inputs, inputs.dtype.min)
      else:
        out_padding = None
      # Quantize inputs and output to the same tracked range.
      inputs = self.QTensor('output', inputs)
      out = tf.nn.pool(
          inputs,
          window,
          p.pooling_type,
          strides=stride,
          padding=p.padding_algorithm,
      )
      if paddings is not None and p.pooling_type == 'AVG':
        # Count the fraction of non-padding elements inside each pooling window.
        in_mask = 1.0 - paddings
        non_padding_ratio = tf.nn.pool(
            in_mask[:, :, tf.newaxis],
            window_shape=(p.window_shape[0],),
            pooling_type='AVG',
            strides=(p.window_stride[0],),
            padding=p.padding_algorithm)
        # Divide by non-padding ratios to eliminate the effect of padded values.
        out *= tf.math.reciprocal_no_nan(non_padding_ratio)[..., tf.newaxis]
      out = self.QTensor('output', out)
      if out_padding is not None:
        # Zero out fully-padded output steps.
        out *= tf.expand_dims(tf.expand_dims(1.0 - out_padding, -1), -1)
        return out, out_padding
      return out
class BlurPoolLayer(base_layer.BaseLayer):
  """BlurPool from https://arxiv.org/pdf/1904.11486.pdf.

  This layer blurs the input with a fixed filter and performs subsampling
  afterwards. Only supports 2x1 or 2x2 spatial reduction.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('blur_filter', 'B5', 'One of [R2, T3, B5]; the fixed blur filter.')
    p.Define('subsample_type', '1D', 'Choose between [1D, 2D] subsampling.')
    p.Define('input_channels', None, 'Number of input channels.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    assert p.blur_filter in ['R2', 'T3', 'B5']
    assert p.subsample_type in ['1D', '2D']
    assert p.input_channels
    # Fixed (non-learnable) binomial/triangle/rect blur kernels.
    filter_dict = {
        'B5': np.array([1, 4, 6, 4, 1], dtype=np.float32),
        'T3': np.array([1, 2, 1], dtype=np.float32),
        'R2': np.array([1, 1], dtype=np.float32)
    }
    base_filter = filter_dict[p.blur_filter]
    if p.subsample_type == '2D':
      # Outer product makes a separable 2D kernel from the 1D base filter.
      base_filter = base_filter[:, np.newaxis] * base_filter[np.newaxis, :]
    else:
      base_filter = base_filter[:, np.newaxis]
    # Normalize so the blur preserves overall magnitude.
    base_filter /= base_filter.sum()
    # Replicate the kernel across channels for a depthwise convolution.
    self._blur_filter = np.tile(base_filter[..., np.newaxis, np.newaxis],
                                (1, 1, p.input_channels, 1))
    conv_params = DepthwiseConv2DLayer.Params().Set(
        activation='NONE',
        batch_norm=False,
        filter_stride=(1, 1),
        filter_shape=self._blur_filter.shape)
    self.CreateChild('blur_conv', conv_params)

  def FProp(
      self,
      theta: py_utils.NestedMap,
      inputs: tf.Tensor,
      paddings: Optional[tf.Tensor] = None,
  ) -> Union[tf.Tensor, Tuple[tf.Tensor, tf.Tensor]]:
    """Apply blur pooling.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: The inputs tensor. It is expected to be of shape [batch, time,
        frequency, channel]. The time dimension corresponds to the height
        dimension as in images and the frequency dimension corresponds to the
        width dimension as in images.
      paddings: The paddings tensor. It is expected to be of shape [batch,
        time]. Defaults to None, which means there are no paddings.

    Returns:
      An (output, paddings) tensor tuple if paddings is not None, else just
      output tensor.
    """
    p = self.params
    if paddings is not None:
      inputs = py_utils.with_dependencies([
          py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]),
          py_utils.assert_shape_match(tf.shape(inputs)[:2], tf.shape(paddings))
      ], inputs)
    # blur: override the child conv's weight with the fixed (non-learnable)
    # blur kernel; copy theta so the caller's theta is not mutated.
    theta_cp = copy.copy(theta.blur_conv)
    theta_cp.w = tf.convert_to_tensor(self._blur_filter, dtype=p.dtype)
    out, out_padding = self.blur_conv.FProp(theta_cp, inputs, paddings)
    # b/142399320
    # Use stride in blur conv for subsampling once non-square stride gets
    # supported.
    if p.subsample_type == '2D':
      out = out[:, ::2, ::2, :]
    else:
      out = out[:, ::2, :, :]
    if out_padding is not None:
      out_padding = _ComputeConvOutputPadding(
          out_padding, window=2, stride=2, padding_algorithm='SAME')
      # Zero out fully-padded output steps.
      out *= (1.0 - out_padding)[..., tf.newaxis, tf.newaxis]
      return out, out_padding
    return out
class SingleShardEmbeddingLayer(base_layer.BaseLayer):
  """Embedding layer that is not sharded.

  This embedding layer is expected to be replicated over all compute devices
  (e.g. tpu cores). It is intended to support small to medium embedding tables
  (< 50k) only.

  This is intended to be a unification of EmbeddingLayer and
  SimpleEmbeddingLayer (and cleanup of both). It is targeting the most common
  use-case we have in speech/nmt/tts/deeprank. Currently we often first
  configure a model using EmbeddingLayer, and then call ChangeToSimpleEmbedding
  to switch to SimpleEmbedding where we lose some configuration (e.g.
  scale_by_sqrt_dim).

  TODO(lingvo): Implement the matmul option which should be more efficient for
  small vocabs (e.g. < 1k vocab).
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('vocab_size', 0, 'Num tokens in vocab.')
    p.Define('embedding_dim', 0, 'Depth of the output.')
    p.Define(
        'scale_sqrt_depth', False, 'If set True, activations are scaled'
        ' with sqrt(embedding_dim) in EmbLookup.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.vocab_size > 0
    assert p.embedding_dim > 0
    assert p.name

  def _CreateLayerVariables(self):
    """Creates the single [vocab_size, embedding_dim] embedding table."""
    super()._CreateLayerVariables()
    p = self.params
    emb_weight_params = py_utils.WeightParams(
        shape=[p.vocab_size, p.embedding_dim],
        init=p.params_init,
        dtype=p.dtype,
        collections=[self.__class__.__name__ + '_vars'])
    self.CreateVariable('emb_var', emb_weight_params)

  def AddGlobalVN(self, theta):
    """Applies global variational noise to the embedding table."""
    theta = super().AddGlobalVN(theta)
    theta.emb_var = self.AddVN(theta.emb_var)
    return theta

  def EmbLookupDefaultTheta(self, ids):
    """Same as `EmbLookup`, using this layer's own theta."""
    return self.EmbLookup(self.theta, ids)

  def EmbLookup(self, theta, ids):
    """Looks up embedding vectors for ids.

    Args:
      theta: Named tuple with the weight matrix for the embedding.
      ids: A rank-N int32 tensor.

    Returns:
      A rank-(N+1) params.dtype tensor.
      embs[indices, :] is the embedding vector for ids[indices].
    """
    p = self.params
    ids = tf.convert_to_tensor(ids)
    # Runtime-check that every id is a valid row of the table.
    ids = py_utils.with_dependencies([
        py_utils.assert_between(
            ids, 0, p.vocab_size, name='vocab_id_validation')
    ], ids)
    flat_ids = tf.reshape(ids, [-1])
    embs = tf.nn.embedding_lookup(theta.emb_var, flat_ids)
    if p.scale_sqrt_depth:
      embs *= p.embedding_dim**0.5
    # Restore the leading dims of `ids`, appending the embedding dim.
    out_shape = tf.concat([tf.shape(ids), [p.embedding_dim]], 0)
    return tf.reshape(embs, out_shape)

  def FProp(self, theta, ids):
    """Alias of `EmbLookup` so the layer composes like any other layer."""
    return self.EmbLookup(theta, ids)
class EmbeddingLayer(base_layer.BaseLayer):
  """Embedding layer."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('vocab_size', 0, 'Depth of the input.')
    p.Define('embedding_dim', 0, 'Depth of the output.')
    p.Define('max_num_shards', 0, 'Num param shards.')
    p.Define('on_ps', True, 'True if to perform the embedding lookup on ps.')
    p.Define(
        'scale_sqrt_depth', False, 'If set True, activations are scaled'
        ' with sqrt(embedding_dim) in EmbLookup.')
    return p

  # Min number of params per shard.
  MIN_PARAMS_PER_SHARD = 1024 * 256

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.vocab_size > 0
    assert p.embedding_dim > 0
    assert p.max_num_shards > 0
    assert p.name
    total_size = p.vocab_size * p.embedding_dim
    # Use at most max_num_shards shards, but never so many that a shard would
    # hold fewer than MIN_PARAMS_PER_SHARD parameters.
    self._actual_shards = min(
        p.max_num_shards,
        int(math.ceil(float(total_size) / self.MIN_PARAMS_PER_SHARD)))
    self._ids_per_shard = int(
        math.ceil(float(p.vocab_size) / self._actual_shards))

  def _CreateLayerVariables(self):
    super()._CreateLayerVariables()
    p = self.params
    w_pc = py_utils.WeightParams(
        shape=[self._ids_per_shard, p.embedding_dim],
        init=p.params_init,
        dtype=p.dtype,
        collections=[self.__class__.__name__ + '_vars'])
    # EmbeddingLayer handles vars/theta differently from other layers
    # because when embedding shards are placed on ps, it's more
    # efficient to do embedding lookups on ps and send the result
    # back to the worker.
    emb_vars = []
    emb_shards = []
    for i in range(self._actual_shards):
      var_name = f'var_{i}'
      self.CreateVariable(var_name, w_pc)
      emb_vars.append(self.vars[var_name])
      v = self._private_theta[var_name]
      if not p.on_ps:
        # tf.identity copies the shard to the worker for local lookups.
        v = tf.identity(v)
      if p.fprop_dtype is not None and p.fprop_dtype != p.dtype:
        v = tf.cast(v, p.fprop_dtype)
      emb_shards.append(v)
      # Remove from _private_vars / _private_thetas to be added later as wm.
      del self._private_vars[var_name]
      del self._private_theta[var_name]
    # Expose all shards under a single 'wm' entry (a list of tensors), which
    # tf.nn.embedding_lookup treats as a partitioned embedding table.
    self._private_vars['wm'] = emb_vars
    self._private_theta['wm'] = emb_shards

  def AddGlobalVN(self, theta):
    theta = super().AddGlobalVN(theta)
    theta.wm = [self.AddVN(wm) for wm in theta.wm]
    return theta

  def EmbLookupDefaultTheta(self, ids):
    # Convenience wrapper that uses this layer's own theta.
    return self.EmbLookup(self.theta, ids)

  def EmbLookup(self, theta, ids):
    """Looks up embedding vectors for ids.

    Args:
      theta: Named tuple with the weight matrix for the embedding.
      ids: A rank-N int32 tensor.

    Returns:
      A rank-(N+1) params.dtype tensor.
      embs[indices, :] is the embedding vector for ids[indices].
    """
    p = self.params
    ids = tf.convert_to_tensor(ids)
    ids = py_utils.with_dependencies([
        py_utils.assert_between(
            ids, 0, p.vocab_size, name='vocab_id_validation')
    ], ids)
    # theta.wm is a list of shards; embedding_lookup handles the partitioning.
    embs = tf.nn.embedding_lookup(theta.wm, tf.reshape(ids, [-1]))
    if p.scale_sqrt_depth:
      embs *= p.embedding_dim**0.5
    out_shape = tf.concat([tf.shape(ids), [p.embedding_dim]], 0)
    return tf.reshape(embs, out_shape)
class SimpleEmbeddingLayer(quant_utils.QuantizableLayer):
  """An embedding layer that is simple to compile (by XLA and Toco).

  The params use_matmul and use_gather control how the lookup is performed.
  If neither is True, then a loop is used to compute the embedding.

  This layer is "simple" in comparison to 'EmbeddingLayer' in that it does
  not shard the embeddings.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('vocab_size', 0,
             'Depth of the input. I.e., the number of classes.')
    p.Define('embedding_dim', 0, 'Depth of the output.')
    p.Define(
        'use_matmul', False, 'If True, use a matmul to implement '
        'the embedding lookup. Depending on vocab_size and #ids, '
        'e.g., when vocab_size is small, use_matmul can be more '
        'efficient. On the other hand, use_matmul creates a 0/1 '
        'sparse matrix and hence may use more memory than the '
        'final output.')
    p.Define(
        'fprop_mode', None, 'Sets the mode used for computing the fprop '
        '(different inference engines have different capabilities and this '
        'accomodates them). Can be "loop", "matmul" or "gather". If None, '
        'defaults to "matmul" if use_matmul or "loop" if false.')
    p.Define(
        'use_3d_weight_tensor', False, 'If True, and use_matmul is False,'
        'in TPU compatibility mode, we reshape the normal 2D weight'
        'tensor to [num_rows, embed_dim] to be '
        '[num_rows, embed_dim // 128, 128].')
    p.Define('apply_pruning', False,
             'Whether to prune the weights while training')
    p.Define(
        'scale_sqrt_depth', False, 'If set True, activations are scaled'
        ' with sqrt(embedding_dim) in EmbLookup.')
    p.Define(
        'pruning_hparams_dict', None, 'Pruning related hyperparameters. A dict '
        'with hyperparameter: value pairs. See google-research.model_pruning.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.vocab_size > 0
    assert symbolic.ToStatic(p.embedding_dim) > 0
    valid_fprop_modes = ['loop', 'matmul', 'gather']
    self._fprop_mode = p.fprop_mode
    if not self._fprop_mode:
      # NOTE(review): the fprop_mode help text says the default is 'loop' when
      # use_matmul is False, but the code defaults to 'gather' — confirm which
      # is intended.
      self._fprop_mode = 'matmul' if p.use_matmul else 'gather'
    assert self._fprop_mode in valid_fprop_modes, (
        'fprop_mode must be one of %r' % valid_fprop_modes)
    _, weight_shape = self._GetWeightShape()
    self.CreateAqtWeight(
        'wm', shape=weight_shape, feature_axis=-1, legacy_aqt_w_name='emb_aqt')
    # NOTE(review): self.apply_compression is only assigned when
    # p.pruning_hparams_dict is truthy, yet _FlatFProp reads it
    # unconditionally — confirm a base class provides a default.
    if p.pruning_hparams_dict:
      self.compression_op = None
      self.apply_compression = pruning_utils.ApplyCompression(p)

  def _FpropImpl(self, embs, ids_vec):
    """The embedding lookup implementation.

    Dispatches to a matmul-, loop-, or gather-based lookup depending on
    self._fprop_mode (validated in __init__).

    Args:
      embs: The embedding weight tensor.
      ids_vec: A rank-1 vector of int32 embedding ids.

    Returns:
      A matrix of looked-up embeddings, [num ids, embedding dims].
    """
    p = self.params
    emb_shape_suf, weight_shape = self._GetWeightShape()

    def EmbBprop(xs, ys, dys):
      """Embedding backprop.

      Effectively, it computes:
        num = size of xs.ids_vec
        dembs = zeros_like(xs.embs)
        for i in range(num):
          dembs[xs.ids_vec[i], :] += dys[i, :]
        return dembs, zeros_like(xs.ids_vec)

      Args:
        xs: A NestedMap containing:

          - embs: The embedding matrix. Unused in the backprop.
          - ids_vec: A vector of int32 embedding ids.
        ys: Required by py_utils._DefineDefun, not used here.
        dys: A matrix of size (size of xs.ids_vec, embedding dims).

      Returns:
        A NestedMap containing:

          - embs: A matrix of the same shape of xs.embs. Gradients for xs.embs.
          - ids_vec: Zeros. Same shape as xs.ids_vec.
      """
      del ys
      num = tf.shape(xs.ids_vec)[0]
      dembs = inplace_ops.empty(weight_shape, py_utils.FPropDtype(p), init=True)
      if len(weight_shape) != 2:
        # 3D (TPU-compatible) weight: reshape incoming grads to match.
        dys_shape = tf.shape(dys)
        dys = tf.reshape(dys, [dys_shape[0]] + emb_shape_suf)

      def EmbBpropLoop(i, state):
        # row_id = state.ids_vec[i]
        row_id = tf.gather(state.ids_vec, i)
        # row = state.drets[i]
        row = tf.reshape(tf.gather(state.drets, i), [1] + emb_shape_suf)
        # state.dembs[row_id] = row
        state.dembs = inplace_ops.alias_inplace_add(state.dembs, [row_id], row)
        return state

      dembs = py_utils.ForLoop(
          body=EmbBpropLoop,
          start=0,
          limit=num,
          delta=1,
          loop_state=py_utils.NestedMap(
              ids_vec=xs.ids_vec, drets=dys, dembs=dembs)).dembs

      if p.scale_sqrt_depth:
        dembs *= p.embedding_dim**0.5

      return py_utils.NestedMap(embs=dembs, ids_vec=tf.zeros_like(ids_vec))

    def EmbFprop(xs):
      """Embedding forward prop.

      Effectively, it computes:
        num = size of xs.ids_vec
        rets = zeros([num, embedding dim])
        for i in range(num):
          rets[i, :] = xs.embs[xs.ids_vec[i], :]
        return rets

      Args:
        xs: A NestedMap containing:

          - embs: The embedding matrix.
          - ids_vec: A vector of int32 embedding ids.

      Returns:
        The result of embedding lookups. A matrix of shape
        [num ids in xs.ids_vec, embedding dims].
      """
      num = tf.shape(xs.ids_vec)[0]
      rets = inplace_ops.empty([num] + emb_shape_suf, py_utils.FPropDtype(p))

      def EmbFpropLoop(i, state):
        # row_id = state.ids_vec[i]
        row_id = tf.gather(state.ids_vec, i)
        # row = state.embs[row_id]
        row = tf.reshape(tf.gather(state.embs, row_id), [1] + emb_shape_suf)
        # state.rets[i] = row
        state.rets = inplace_ops.alias_inplace_update(state.rets, [i], row)
        return state

      rets = py_utils.ForLoop(
          body=EmbFpropLoop,
          start=0,
          limit=num,
          delta=1,
          loop_state=py_utils.NestedMap(
              embs=xs.embs, ids_vec=xs.ids_vec, rets=rets)).rets
      if len(weight_shape) > 2:
        # Collapse the 3D weight layout back to [num ids, embedding_dim].
        rets = tf.reshape(rets, [num, symbolic.ToStatic(p.embedding_dim)])
      return rets

    def EmbMatmul(xs):
      """Lookups embedding vectors by doing Matmul with one-hot vector."""
      # lhs[i, j] is True iff xs.ids_vec[i] == j.
      lhs = tf.equal(
          tf.expand_dims(xs.ids_vec, 1),
          tf.range(p.vocab_size, dtype=xs.ids_vec.dtype))
      return tf.matmul(tf.cast(lhs, xs.embs.dtype), xs.embs)

    def EmbGather(xs):
      """Lookups embedding vectors."""
      if not self.do_eval:
        # If tf.gather is used, the gradient for the wm will be represented as
        # IndexedSlices which is sparse. tf.tpu.cross_replica_sum turns
        # IndexedSlices into a dense tensor with undefined first dimension.
        # This may cause issues on TPU so instead we just wrap this with
        # tf.identity which allows tf.tpu.cross_replica_sum to properly compute
        # the first dim.
        return tf.nn.embedding_lookup(tf.identity(xs.embs), xs.ids_vec)
      else:
        # The above fix tf.tpu_cross_replica_sum causes issues
        # on inference graphs in which the EmbeddingLayer is on the host
        # as the tf.identity prevents ResourceGather from being used.
        return tf.nn.embedding_lookup(xs.embs, xs.ids_vec)

    xs = py_utils.NestedMap(embs=embs, ids_vec=ids_vec)
    if self._fprop_mode == 'matmul':
      return py_utils.CallDefun(EmbMatmul, xs)
    elif self._fprop_mode == 'loop':
      # The loop mode needs a custom backprop (EmbBprop) since inplace_ops
      # based fprop is not differentiable by default.
      return py_utils.CallDefun(
          EmbFprop, xs, bak=EmbBprop, bak_as_function=True)
    elif self._fprop_mode == 'gather':
      return EmbGather(xs)

  def _GetWeightShape(self):
    """Returns (emb_shape_suf, weight_shape) for the embedding table.

    In TPU-compatibility mode (non-matmul), the embedding dim may be folded
    into [embed_dim // 128, 128] to match the TPU lane width when
    use_3d_weight_tensor is set.
    """
    p = self.params
    if py_utils.tpu_compat() and self._fprop_mode != 'matmul':
      if p.use_3d_weight_tensor:
        assert symbolic.ToStatic(p.embedding_dim) % 128 == 0
        emb_shape_suf = [symbolic.ToStatic(p.embedding_dim) // 128, 128]
      else:
        emb_shape_suf = [symbolic.ToStatic(p.embedding_dim)]
    else:
      emb_shape_suf = [symbolic.ToStatic(p.embedding_dim)]
    weight_shape = [p.vocab_size] + emb_shape_suf
    return emb_shape_suf, weight_shape

  def _CreateLayerVariables(self):
    super()._CreateLayerVariables()
    p = self.params
    _, weight_shape = self._GetWeightShape()

    # Define weights
    pc = py_utils.WeightParams(
        shape=weight_shape,
        init=p.params_init,
        dtype=p.dtype,
        device_mesh=p.device_mesh,
        tensor_split_dims_mapping=p.weight_split_dims_mapping,
        collections=[self.__class__.__name__ + '_vars'])

    if p.apply_pruning:
      # Pruning needs a companion mask (elementwise keep/drop) and a scalar
      # threshold variable; neither is trained directly.
      mask_pc = py_utils.WeightParams(pc.shape,
                                      py_utils.WeightInit.Constant(1.0),
                                      p.dtype)
      threshold_pc = py_utils.WeightParams([],
                                           py_utils.WeightInit.Constant(0.0),
                                           tf.float32)
      self.CreateVariable('mask', mask_pc, trainable=False)
      self.CreateVariable('threshold', threshold_pc, trainable=False)
      self.CreateVariable('wm', pc)
      pruning_utils.AddToPruningCollections(self.vars.wm, self.vars.mask,
                                            self.vars.threshold)
    else:
      self.CreateVariable('wm', pc)

    if pruning_utils.ApplyCompression(p):
      pruning_utils.PruningOp.ApplyPruning(p.pruning_hparams_dict, self, 'wm',
                                           pc, p.dtype, p.name)
      self.compression_op = pruning_utils.PruningOp.GetLastCompressionOp()

  def AddGlobalVN(self, theta):
    theta = super().AddGlobalVN(theta)
    theta.wm = self.AddVN(theta.wm)
    return theta

  def EmbLookupDefaultTheta(self, ids):
    """Lookups embedding vectors for ids."""
    return self.FProp(self.theta, ids)

  def EmbLookup(self, theta, ids):
    # Alias of FProp for API parity with the other embedding layers.
    return self.FProp(theta, ids)

  def EmbLookupDefaultThetaOnCpu(self, ids):
    """A faster path for CPU inference than the default gather."""
    p = self.params
    wm = self.theta.wm
    if p.apply_pruning:
      wm = tf.multiply(wm, self.theta.mask, 'masked_weights')
    embs = tf.nn.embedding_lookup(wm, tf.reshape(ids, [-1]))
    out_shape = tf.concat([tf.shape(ids), [symbolic.ToStatic(p.embedding_dim)]],
                          0)
    if p.scale_sqrt_depth:
      embs *= p.embedding_dim**0.5
    return tf.reshape(embs, out_shape)

  def _FlatFProp(self, theta, ids):
    """Lookups embedding vectors for ids.

    Args:
      theta: Named tuple collection of weights for the layer.
      ids: A rank-N int32 tensor.

    Returns:
      A tuple of the flattened inputs to the embedding lookup, and a tensor that
      is ready to be reshaped into the final shape in FProp.
    """
    if not isinstance(ids, tf.Tensor):
      tf.logging.warning('ids should be a tf.Tensor!')
      ids = tf.convert_to_tensor(ids, tf.int32)
    elif ids.dtype != tf.int32:
      tf.logging.warning('ids should be tf.int32, but is %s!', ids.dtype)
      ids = tf.cast(ids, tf.int32)
    p = self.params
    ids = py_utils.with_dependencies([
        py_utils.assert_between(
            ids, 0, p.vocab_size, name='vocab_id_validation')
    ], ids)
    flat_ids = tf.reshape(ids, [-1])
    wm = theta.wm
    if p.apply_pruning:
      wm = tf.multiply(wm, theta.mask, 'masked_weights')
    wm = self.QWeight(wm)
    # TODO(shivaniagrawal): Determine if quantizing flat_ids would be useful.
    wm = self.ToAqtWeight('wm', wm, feature_axis=-1)

    if self.apply_compression:
      embs_result = pruning_utils.PruningOp.GetEmbeddingLookupResult(
          theta, flat_ids, self._fprop_mode, self)
    else:
      embs_result = self._FpropImpl(wm, flat_ids)

    # Undo the AQT weight quantization on the looked-up rows.
    embs_result = self.FromAqtWeight('wm', embs_result)

    if p.scale_sqrt_depth:
      embs_result *= p.embedding_dim**0.5
    return flat_ids, embs_result

  def FProp(self, theta, ids):
    """Lookups embedding vectors for ids.

    Args:
      theta: Named tuple collection of weights for the layer.
      ids: A rank-N int32 tensor.

    Returns:
      A rank-(N+1) params.dtype tensor.
      embs[indices, :] is the embedding vector for ids[indices].
    """
    p = self.params
    _, embs_result = self._FlatFProp(theta, ids)
    out_shape = tf.concat(
        [tf.shape(ids), [symbolic.ToStatic(self.params.embedding_dim)]], 0)
    embs_result = tf.reshape(embs_result, out_shape)
    # Annotate the activation sharding for GShard/SPMD partitioning.
    embs_result = gshard_utils.MeshSplit(embs_result, p.device_mesh,
                                         p.activation_split_dims_mapping)
    return embs_result
class EinsumEmbeddingLayer(base_layer.BaseLayer):
  """An embedding layer that uses einsum to avoid reshaping."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('vocab_size', 0,
             'Depth of the input. I.e., the number of classes.')
    p.Define('embedding_dim', 0, 'Depth of the output.')
    p.Define(
        'scale_sqrt_depth', False, 'If set True, activations are scaled'
        ' with sqrt(embedding_dim) in EmbLookup.')
    p.params_init = py_utils.WeightInit.Uniform(1.)
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.vocab_size > 0
    assert symbolic.ToStatic(p.embedding_dim) > 0

  def _CreateLayerVariables(self):
    """Creates the [vocab_size, embedding_dim] embedding table."""
    super()._CreateLayerVariables()
    p = self.params
    # Define weights
    weight_params = py_utils.WeightParams(
        shape=[p.vocab_size, symbolic.ToStatic(p.embedding_dim)],
        init=p.params_init,
        dtype=p.dtype,
        tensor_split_dims_mapping=p.weight_split_dims_mapping,
        collections=[self.__class__.__name__ + '_vars'])
    # Apply VN on theta.wm so that this layer can be used within a recurrent
    # loop.
    self.CreateVariable('wm', weight_params)

  def AddGlobalVN(self, theta):
    """Applies global variational noise to the embedding table."""
    theta = super().AddGlobalVN(theta)
    theta.wm = self.AddVN(theta.wm)
    return theta

  def EmbLookup(self, theta, ids):
    """Alias of `FProp` for API parity with the other embedding layers."""
    return self.FProp(theta, ids)

  def FProp(self, theta, ids):
    """Lookups embedding vectors for ids.

    Args:
      theta: Named tuple collection of weights for the layer.
      ids: A rank-N int32 tensor.

    Returns:
      A rank-(N+1) params.dtype tensor.
      embs[indices, :] is the embedding vector for ids[indices].
    """
    p = self.params
    # Emulate tf.nn.embedding_lookup(theta.wm, ids) with tf.einsum.
    one_hot_ids = tf.one_hot(ids, p.vocab_size)
    embs_result = py_utils.ProjectLastDim(
        one_hot_ids,
        theta.wm,
        input_dim=p.vocab_size,
        output_dim=p.embedding_dim)
    if p.scale_sqrt_depth:
      embs_result = embs_result * p.embedding_dim**0.5
    # Annotate the activation sharding for GShard/SPMD partitioning.
    return gshard_utils.MeshSplit(embs_result, p.device_mesh,
                                  p.activation_split_dims_mapping)
class OneHotEmbeddingLayer(base_layer.BaseLayer):
  """Generates one-hot embeddings with uncertainties."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('vocab_size', 0,
             'Depth of the input. I.e., the number of classes.')
    p.Define('embedding_dim', 0, 'Depth of the output.')
    p.Define('uncertainty', 0.0, 'Uncertainty of the correct ID.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    assert p.vocab_size > 1
    assert p.embedding_dim == p.vocab_size

  def EmbLookupDefaultTheta(self, ids):
    """Looks up embedding vectors for ids using this layer's own theta."""
    return self.FProp(self.theta, ids)

  def EmbLookup(self, theta, ids):
    """Alias of FProp, for API parity with other embedding layers."""
    return self.FProp(theta, ids)

  def FProp(self, theta, ids):
    """Looks up (label-smoothed) one-hot embedding vectors for ids.

    Args:
      theta: Named tuple collection of weights for the layer (unused; this
        layer has no trainable parameters).
      ids: A rank-N int32 tensor.

    Returns:
      A rank-(N+1) params.dtype tensor; embs[indices, :] is the smoothed
      one-hot vector for ids[indices].
    """
    del theta  # No trainable weights in this layer.
    p = self.params
    checked_ids = py_utils.with_dependencies([
        py_utils.assert_between(
            ids, 0, p.vocab_size, name='vocab_id_validation')
    ], ids)
    # Spread p.uncertainty probability mass uniformly over incorrect classes.
    off_value = p.uncertainty / tf.cast(p.vocab_size - 1, tf.float32)
    on_value = 1.0 - p.uncertainty
    one_hot = tf.one_hot(
        checked_ids, depth=p.vocab_size, on_value=on_value,
        off_value=off_value)
    if p.fprop_dtype is not None:
      one_hot = tf.cast(one_hot, p.fprop_dtype)
    return one_hot
class PositionalEmbeddingLayer(base_layer.BaseLayer):
  """Generates sinusoidals with respect to the position in time and dimension.

  Implements the positional embedding layer from 'Attention is All You Need',
  the Transformer Network.

  Code and comments are adapted from tensor2tensor/layers/common_attention.py
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'min_timescale', 1, 'Start of the geometric index.'
        'Determines the periodicity of the added signal.')
    p.Define(
        'max_timescale', 10000, 'End of the geometric index. '
        'Determines the frequency of the added signal.')
    p.Define('embedding_dim', 0, 'Dimension of the embedding to be generated.')
    p.Define(
        'trainable_scaling', False,
        'Introduces a trainable scaling parameter (a scalar) that'
        ' multiplies the positional embedding in FProp.')
    p.Define('trainable_scaling_init', 1.0,
             'Initial value of the scaling parameter.')
    p.Define(
        'frequency_scaling', False,
        'Introduces a trainable frequency scaling parameter (a scalar) that'
        ' multiplies the frequency of the sinusoids.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    assert p.min_timescale
    assert p.max_timescale
    # Half the channels carry sin, the other half cos, so dim must be even.
    assert p.embedding_dim % 2 == 0

  def _CreateLayerVariables(self):
    """Creates the optional scalar scaling variables.

    Both scales are parameterized as a learned delta initialized to 0, so
    training starts from the unscaled embedding (see FProp / the helper for
    how they are combined with the respective init constants).
    """
    super()._CreateLayerVariables()
    p = self.params
    if p.trainable_scaling:
      pc = py_utils.WeightParams(
          shape=[1],
          init=py_utils.WeightInit.Constant(0.0),
          dtype=p.dtype,
          collections=[self.__class__.__name__ + '_vars'])
      self.CreateVariable('scale', pc)
    if p.frequency_scaling:
      pc = py_utils.WeightParams(
          shape=[1],
          init=py_utils.WeightInit.Constant(0.0),
          dtype=p.dtype,
          collections=[self.__class__.__name__ + '_vars'])
      self.CreateVariable('freq_scale', pc)

  def _PosEmbeddingsFromPositions(self, theta, position):
    """Generates the positional embeddings given the position tensor.

    Factors out the common code from FProp and FPropWithPosition. Returns
    positional embeddings corresponding to the input position tensor.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      position: Position tensor of dtype float and shape [bs, seq_length] to
        generate positional embeddings.

    Returns:
      a Tensor of shape [bs, seq_length, embedding_dim].
    """
    p = self.params
    seq_length = py_utils.GetShape(position)[1]
    num_timescales = p.embedding_dim // 2
    # Timescales form a geometric sequence from min_timescale to
    # max_timescale; this is the log-ratio between consecutive timescales.
    # The tf.maximum guards against division by zero when num_timescales == 1.
    log_timescale_increment = (
        math.log(float(p.max_timescale) / float(p.min_timescale)) / tf.maximum(
            tf.cast(1.0, py_utils.FPropDtype(p)),
            tf.cast(num_timescales, py_utils.FPropDtype(p)) - 1))
    inv_timescales = p.min_timescale * tf.exp(
        tf.cast(tf.range(num_timescales), py_utils.FPropDtype(p)) *
        -log_timescale_increment)
    # [bs, seq_length, num_timescales] phases: position / timescale.
    scaled_time = tf.expand_dims(position, 2) * tf.reshape(
        inv_timescales, [1, 1, -1])
    if p.frequency_scaling:
      scaled_time *= (1.0 + theta.freq_scale)
    signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
    # NOTE(review): floormod(x, -1) is always 0 and embedding_dim is asserted
    # even in __init__, so this pad is a no-op kept for parity with the
    # tensor2tensor original (which padded odd dims).
    signal = tf.pad(
        signal, [[0, 0], [0, 0], [0, tf.math.floormod(p.embedding_dim, -1)]])
    signal = tf.reshape(signal, [-1, seq_length, p.embedding_dim])
    if p.trainable_scaling:
      signal *= (p.trainable_scaling_init + theta.scale)
    return signal

  def FProp(self, theta, seq_length):
    """Generates a Tensor of sinusoids with different frequencies.

    Each channel (dimension) of the generated positional embedding Tensor
    corresponds to a sinusoid of different frequency and phase.

    This allows attention to learn to use absolute and relative positions.
    Timing signals should be added to some precursors of both the query and the
    memory inputs to attention.

    The use of relative position is possible because sin(x+y) and cos(x+y) can
    be expressed in terms of y, sin(x) and cos(x).

    In particular, we use a geometric sequence of timescales starting with
    min_timescale and ending with max_timescale. The number of different
    timescales is equal to channels (dimension) / 2. For each timescale, we
    generate the two sinusoidal signals sin(timestep/timescale) and
    cos(timestep/timescale). All of these sinusoids are concatenated in
    the channels dimension.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      seq_length: Sequence length of the embeddings to be generated

    Returns:
      a Tensor of shape [seq_length, embedding_dim].
    """
    p = self.params
    # Positions [0, seq_length) shaped [1, seq_length] for the shared helper.
    position = tf.reshape(
        tf.cast(tf.range(seq_length), py_utils.FPropDtype(p)), [1, seq_length])
    pos_emb = self._PosEmbeddingsFromPositions(theta, position)
    return tf.reshape(pos_emb, [seq_length, -1])

  def FPropWithPosition(self, theta, position_tensor):
    """Generates a Tensor of sinusoids with different frequencies.

    Uses the provided position tensor to generate positional embeddings. Refer
    to FProp description for details of sinusoidal positional embeddings.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      position_tensor: Position tensor of shape [bs, seq_length] to generate
        positional embeddings.

    Returns:
      a Tensor of shape [bs, seq_length, embedding_dim].
    """
    position = tf.cast(position_tensor, py_utils.FPropDtype(self.params))
    return self._PosEmbeddingsFromPositions(theta, position)
class LearnablePositionalEmbeddingLayer(base_layer.BaseLayer):
  """Learnable positional embedding."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('embedding_dim', 0, 'Dimension of the positional embeddings.')
    p.Define(
        'max_pos', 0, 'Maximum position, positional embeddings are generated'
        'for the interval [0, max_pos)')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    if not isinstance(p.embedding_dim,
                      numbers.Integral) or p.embedding_dim <= 0:
      # Bug fix: the message previously interpolated params.radius, a param
      # this layer does not define (copied from the relative-embedding layer),
      # so the intended ValueError was masked by an AttributeError. Report the
      # offending embedding_dim value instead.
      raise ValueError('params.embedding_dim must be a positive int, '
                       'but is %s' % p.embedding_dim)
    if not isinstance(p.max_pos, numbers.Integral) or p.max_pos <= 0:
      raise ValueError('params.max_pos must be a positive int, but is %s' %
                       p.max_pos)

  def _CreateLayerVariables(self):
    """Creates the [max_pos, embedding_dim] position embedding table."""
    super()._CreateLayerVariables()
    p = self.params
    pc = py_utils.WeightParams(
        shape=[p.max_pos, p.embedding_dim],
        init=p.params_init,
        dtype=p.dtype,
        collections=[self.__class__.__name__ + '_vars'])
    self.CreateVariable('pos_embs', pc)

  def _PosEmbeddingsFromPositions(self, theta, position):
    """Generates the positional embeddings given the position tensor.

    Factors out the common code from FProp and FPropWithPosition. Returns
    positional embeddings corresponding to the input position tensor.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      position: Position tensor of dtype float and shape [bs, seq_length] to
        generate positional embeddings.

    Returns:
      a Tensor of shape [bs, seq_length, embedding_dim].
    """
    # Positions arrive as floats (see callers); cast back to int32 for the
    # table lookup.
    pos_embs = tf.gather(theta.pos_embs, tf.cast(position, tf.int32))
    return pos_embs

  def FProp(self, theta, seq_length):
    """Computes the positional embeddings for seq_length.

    Args:
      theta: A `.NestedMap` object containing weights of this layer.
      seq_length: Sequence length of the embeddings to be generated

    Returns:
      a Tensor of shape [seq_length, embedding_dim].
    """
    p = self.params
    # Positions [0, seq_length) shaped [1, seq_length] for the shared helper.
    position = tf.reshape(
        tf.cast(tf.range(seq_length), py_utils.FPropDtype(p)), [1, seq_length])
    pos_emb = self._PosEmbeddingsFromPositions(theta, position)
    return tf.reshape(pos_emb, [seq_length, -1])

  def FPropWithPosition(self, theta, position_tensor):
    """Computes the positional embeddings for position_tensor.

    Uses the provided position tensor to look up learned positional embeddings.

    Args:
      theta: A `.NestedMap` object containing weights of this layer.
      position_tensor: Position tensor of shape [bs, seq_length].

    Returns:
      a Tensor of shape [bs, seq_length, embedding_dim].
    """
    position = tf.cast(position_tensor, py_utils.FPropDtype(self.params))
    return self._PosEmbeddingsFromPositions(theta, position)
class RelativePositionalEmbeddingLayer(base_layer.BaseLayer):
  """Relative positional embedding.

  Section 3.2 of https://arxiv.org/pdf/1803.02155.pdf
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'radius', None,
        'Radius of the relative window size. Distance are clipped to '
        '[-radius, radius].')
    p.Define('dim', None, 'Dimension of embedding.')
    return p

  def __init__(self, params):
    super().__init__(params)
    params = self.params
    if not isinstance(params.radius, numbers.Integral) or params.radius <= 0:
      raise ValueError('params.radius must be a positive int, but is %s' %
                       params.radius)
    if not isinstance(params.dim, numbers.Integral) or params.dim <= 0:
      # Bug fix: this message previously printed params.radius instead of the
      # offending params.dim value (copy-paste error).
      raise ValueError('params.dim must be a positive int, but is %s' %
                       params.dim)

  def _CreateLayerVariables(self):
    """Creates the [2 * radius + 1, dim] relative embedding table 'w'.

    Row i holds the embedding for clipped relative distance (i - radius).
    """
    super()._CreateLayerVariables()
    pc = py_utils.WeightParams(
        shape=[2 * self.params.radius + 1, self.params.dim],
        init=py_utils.WeightInit.Constant(0.0),
        dtype=self.params.dtype,
        collections=[self.__class__.__name__ + '_vars'])
    self.CreateVariable('w', pc)

  def FProp(self, theta, relative_distance):
    """Computes relative positional embedding.

    Args:
      theta: A NestedMap of Tensors of layer weights.
      relative_distance: A Tensor of relative distances; values outside
        [-radius, radius] are clipped to that interval.

    Returns:
      A Tensor of shape relative_distance.shape + [params.dim]
    """
    params = self.params
    clipped_indices = tf.clip_by_value(relative_distance, -params.radius,
                                       params.radius)
    # Right-shift indices to make them all non-negative.
    calibrated_indices = clipped_indices + params.radius
    return tf.gather_nd(theta.w, tf.expand_dims(calibrated_indices, -1))
class SinusoidalPositionalEmbeddingLayer(base_layer.BaseLayer):
  """Generates sinusoidals with respect to the position in time and dimension.

  Implements a variant of the positional embedding layer from 'Attention is
  All You Need' (the Transformer Network) that doesn't require tuning of the
  max_timescale/min_timescale hyperparameters. See this blog post and Ron's
  colab: https://kazemnejad.com/blog/transformer_architecture_positional_encoding
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('embedding_dim', 0, 'Dimension of the embedding to be generated.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    if p.embedding_dim % 2 != 0:
      raise ValueError('embedding_dim needs to be even.')

  def FProp(self, theta, seq_length):
    """Generates a Tensor of sinusoids with different frequencies.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      seq_length: Sequence length of the embeddings to be generated

    Returns:
      a Tensor of shape [seq_length, embedding_dim].
    """
    p = self.params
    dtype = py_utils.FPropDtype(p)
    half_dim = p.embedding_dim // 2
    # Integer positions [0, seq_length), cast to the fprop dtype.
    pos = tf.cast(tf.range(seq_length), dtype)
    # Angular frequencies k * (2*pi / seq_length) for k in [1, half_dim].
    omega = tf.range(1, half_dim + 1, dtype=dtype) * (2 * math.pi / seq_length)
    # Outer product gives the [seq_length, half_dim] phase matrix.
    phase = tf.matmul(tf.expand_dims(pos, 1), tf.expand_dims(omega, 0))
    # First half of the channels carry sin, second half cos.
    emb = tf.concat([tf.sin(phase), tf.cos(phase)], axis=-1)
    return tf.reshape(emb, [seq_length, -1])
class SoftmaxLayer(quant_utils.QuantizableLayer):
  """Base class for softmax layers."""

  @classmethod
  def Params(cls):
    """Params for SoftmaxLayer."""
    p = super().Params()
    p.Define('input_dim', 0, 'Dimension of the input.')
    p.Define('num_classes', 0, 'Total number of target classes.')
    p.Define(
        'logits_abs_max', None, 'If not None, logits are clipped to be within'
        ' [-logits_abs_max, logits_abs_max]. This can be a scalar'
        ' or a scalar tensor. Applies back pressure at training time; ignored'
        ' for inference.')
    p.Define(
        'logits_soft_max', 0.0,
        'If positive, soft cap logits to be within (-x, x) where x is'
        ' this value.')
    p.Define(
        'chunk_size', 0, 'If non-zero, computes the per example '
        'xent by small chunks along the batch dimension.')
    p.qdomain.Define('logits', None, 'Quantization domain for logits.')
    p.qdomain.Define('weight', None, 'Quantization domain for the weights.')
    return p

  @property
  def wm_transposed(self):
    """Whether wm (as returned by DenseWeights) is transposed."""
    return False

  def DenseWeights(self, theta):
    """Returns a NestedMap containing dense weights for 'wm'/'bias'."""
    raise NotImplementedError(
        f'DenseWeights is not implemented: {self.params.cls}.')

  # Bug fix: the abstract stubs below previously took only **kwargs
  # (`**unused`), so calling them positionally — the convention subclasses
  # use, e.g. Logits(theta, inputs) — raised TypeError instead of the intended
  # NotImplementedError. Accept any call shape so the intent is clear.
  def Logits(self, *unused_args, **unused_kwargs):
    """Returns the logits computed before the softmax."""
    # Bug fix: the message previously named 'GetLogits', a method that does
    # not exist; report the actual method name.
    raise NotImplementedError(
        f'Logits is not implemented: {self.params.cls}.')

  def XentLossFromLogits(self, *unused_args, **unused_kwargs):
    """Returns the Xent loss from pre-computed logits."""
    raise NotImplementedError(
        f'XentLossFromLogits is not implemented: {self.params.cls}.')

  def XentLoss(self, *args, **kwargs):
    """Computes cross entropy."""
    return self.FProp(self.theta, *args, **kwargs)

  def _FProp2D(self,
               theta,
               inputs,
               class_weights,
               class_ids=None,
               class_probabilities=None):
    """Specialized FProp for matrix inputs."""
    raise NotImplementedError(
        f'Subclasses of SoftmaxLayer must implement _FProp2D: {self.params.cls}'
    )

  def FProp(self,
            theta,
            inputs,
            class_weights,
            class_ids=None,
            class_probabilities=None):
    """Computes logit, cross entropy etc.

    This function can both work with class_ids, or probability distributions
    over classes. Exactly one of class_ids or class_probabilities must be
    provided.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: a list of a single tensor, or a single tensor with the shape [...,
        input_dim].
      class_weights: a tensor with shape [...] containing the weights for each
        target word.
      class_ids: a tensor with shape [..., 1] of int32 dtype containing the
        target class labels.
      class_probabilities: a tensor with shape [..., num_classes] of float
        values indicating class-membership probabilities.

    Returns:
      A `.NestedMap` containing the following fields

      - logits: with shape [..., num_classes]. Unnormalized softmax's logits.
      - per_example_argmax: with shape [...]. argmax of i-th example.
      - per_example_xent: with shape [...]. Cross entropy between i-th example's
        prediction and its label.
      - per_example_weight: with shape [...]. class_weights casted to
        this layer's dtype.
      - total_xent: A scalar. The sum of per_example_weight * per_example_xent.
      - total_weight: A scalar. The sum of per_example_weight.
      - avg_xent: A scalar. total_loss / total_weight.
    """
    p = self.params
    # Consolidate list/single value into a list.
    if not isinstance(inputs, list):
      inputs = [inputs]
    # If inputs are matrices already, delegate to _FProp2D.
    if inputs[0].shape.ndims == 2:
      return self._FProp2D(theta, inputs, class_weights, class_ids,
                           class_probabilities)
    # Remembers the original shape[1:-1].
    shape_mid = tf.shape(inputs[0])[1:-1]
    # Reshape inputs to matrices, labels to vectors, etc.
    inputs = [
        tf.reshape(x, py_utils.ToStaticShape([-1, p.input_dim])) for x in inputs
    ]
    class_weights = tf.reshape(class_weights, [-1])
    if class_ids is not None:
      class_ids = tf.reshape(class_ids, [-1, 1])
    if class_probabilities is not None:
      class_probabilities = tf.reshape(class_probabilities, [-1, p.num_classes])
    # Delegates to _FProp2D.
    xent_loss = self._FProp2D(theta, inputs, class_weights, class_ids,
                              class_probabilities)
    # Reshapes xent_loss fields according to the inputs' shape.
    xent_loss.logits = tf.reshape(
        xent_loss.logits, tf.concat([[-1], shape_mid, [p.num_classes]], axis=0))
    per_example_shape = tf.concat([[-1], shape_mid], axis=0)
    xent_loss.per_example_argmax = tf.reshape(xent_loss.per_example_argmax,
                                              per_example_shape)
    xent_loss.per_example_xent = tf.reshape(xent_loss.per_example_xent,
                                            per_example_shape)
    xent_loss.per_example_weight = tf.reshape(xent_loss.per_example_weight,
                                              per_example_shape)
    return xent_loss
class SimpleFullSoftmax(SoftmaxLayer):
  """A somewhat simple softmax layer."""

  @classmethod
  def Params(cls):
    """Params for SimpleFullSoftmax."""
    p = super().Params()
    p.Define(
        'num_sampled', 0, 'Number of samples to use for the sampled soft-max. '
        'Default value of 0 means no sampling is done; if set to > 0 then '
        'training will use sampled soft-max when both chunk_size == 0 and '
        'FProp is called with class_probabilities=None.')
    p.Define(
        'num_shards', 1,
        'Number of shards to split params into. num_shards should'
        ' divide num_classes.')
    p.Define('apply_pruning', False,
             'Whether to prune the weights while training')
    p.Define(
        'pruning_hparams_dict', None,
        'Pruning related hyperparameters. A dict with hyperparameter: value'
        'pairs. See google-research.model_pruning.')
    p.Define(
        'use_num_classes_major_weight', False,
        'Whether to use num_classes as major dimension for weight params. '
        'This shows performance benefit especially when sharing embedding '
        'and softmax. By removing the transpose before gather, it allows '
        'better XLA fusions and optimizations.')
    p.Define(
        'use_bias', True, 'Whether or not to use a bias variable.'
        'Not using bias is not compatible with sampled softmax '
        '(num_sampled > 0).')
    p.Define('bias_init', 0, 'Weight initialization constant for bias.')
    return p

  def __init__(self, params):
    """Constructs a SimpleFullSoftmax layer."""
    super().__init__(params)
    p = self.params
    assert p.name
    # We shard params across the class dimension.
    assert p.num_classes % p.num_shards == 0
    if not p.use_bias:
      assert p.num_sampled == 0, 'Sampled softmax requires bias.'
    if p.num_shards == 1:
      # AQT (accurate quantized training) weight tracking; only supported for
      # the unsharded case.
      self.CreateAqtWeight(
          'weight_0',
          shape=[p.input_dim, p.num_classes],
          feature_axis=-1,
          legacy_aqt_w_name='softmax_aqt')
    self.compression_ops = []

  def _CreateLayerVariables(self):
    """Creates the sharded weight (and optional bias/pruning) variables."""
    super()._CreateLayerVariables()
    p = self.params
    num_classes_per_shard = p.num_classes // p.num_shards
    # When using sampled soft-max we'd rather work with weights of
    # shape=[num_classes_per_shard, p.input_dim] to avoid an expensive transpose
    # op before computing the sampled_softmax_loss.
    self._transpose_weight_params = False
    weights_shard_shape = [p.input_dim, num_classes_per_shard]
    weight_split_dims_mapping = p.weight_split_dims_mapping
    # The bias is sharded along the class dimension of the weight.
    bias_split_dims_mapping = (None if weight_split_dims_mapping is None else
                               weight_split_dims_mapping[-1:])
    if p.num_sampled or p.use_num_classes_major_weight:
      self._transpose_weight_params = True
      weights_shard_shape = [num_classes_per_shard, p.input_dim]
      if weight_split_dims_mapping is not None:
        weight_split_dims_mapping = weight_split_dims_mapping[::-1]
    pc = py_utils.WeightParams(
        shape=weights_shard_shape,
        init=p.params_init,
        dtype=p.dtype,
        tensor_split_dims_mapping=weight_split_dims_mapping,
        collections=[self.__class__.__name__ + '_vars'])
    if p.apply_pruning:
      # Per-shard pruning mask and threshold (not trained directly).
      mask_pc = py_utils.WeightParams(pc.shape,
                                      py_utils.WeightInit.Constant(1.0),
                                      p.dtype)
      threshold_pc = py_utils.WeightParams([],
                                           py_utils.WeightInit.Constant(0.0),
                                           tf.float32)
    for i in range(p.num_shards):
      weights_var_name = f'weight_{i}'
      if p.apply_pruning:
        mask_var_name = f'mask_{i}'
        threshold_var_name = f'threshold_{i}'
        self.CreateVariable(mask_var_name, mask_pc, trainable=False)
        self.CreateVariable(threshold_var_name, threshold_pc, trainable=False)
        self.CreateVariable(weights_var_name, pc)
        pruning_utils.AddToPruningCollections(
            getattr(self.vars, weights_var_name),
            getattr(self.vars, mask_var_name),
            getattr(self.vars, threshold_var_name))
      else:
        self.CreateVariable(weights_var_name, pc)
        if pruning_utils.ApplyCompression(p):
          # matrix compression path. call ApplyPruning to setup compression op
          pruning_utils.PruningOp.ApplyPruning(p.pruning_hparams_dict, self,
                                               weights_var_name, pc, p.dtype,
                                               p.name)
          self.compression_ops.append(
              pruning_utils.PruningOp.GetLastCompressionOp())
    pc = py_utils.WeightParams(
        shape=[num_classes_per_shard],
        init=py_utils.WeightInit.Constant(scale=p.bias_init),
        dtype=p.dtype,
        tensor_split_dims_mapping=bias_split_dims_mapping,
        collections=[self.__class__.__name__ + '_vars'])
    if p.use_bias:
      for i in range(p.num_shards):
        self.CreateVariable(f'bias_{i}', pc)
    # Track tensors for quantization of activations and logits.
    self.TrackQTensor('inputs')
    self.TrackQTensor('logits', domain='logits')

  def AddGlobalVN(self, theta):
    """Applies variational noise to each weight (and bias) shard."""
    theta = super().AddGlobalVN(theta)
    for i in range(self.params.num_shards):
      theta[f'weight_{i}'] = self.AddVN(theta[f'weight_{i}'])
      if self.params.use_bias:
        theta[f'bias_{i}'] = self.AddVN(theta[f'bias_{i}'])
    return theta

  def _GetInputs(self, inputs):
    """Unwraps a single-element input list into its tensor."""
    if isinstance(inputs, list):
      assert len(inputs) == 1
      return inputs[0]
    return inputs

  @property
  def wm_transposed(self):
    """Whether wm is [num_classes, input_dim] rather than the default."""
    return self._transpose_weight_params

  def DenseWeights(self, theta):
    """Returns a NestedMap with concatenated (de-sharded) 'wm'/'bias'."""
    p = self.params
    # Add per-step noise if configured so.
    concat_axis = 1
    if self._transpose_weight_params:
      concat_axis = 0
    weights = []
    for i in range(p.num_shards):
      weight = theta[f'weight_{i}']
      if p.apply_pruning:
        weight = tf.multiply(weight, theta[f'mask_{i}'], 'masked_weights')
      weights.append(self.QWeight(weight))
    new_theta = py_utils.NestedMap()
    if p.use_bias:
      biases = [self.QWeight(theta[f'bias_{i}']) for i in range(p.num_shards)]
      new_theta.bias = self.AddVN(tf.concat(biases, axis=0), per_step=True)
    if p.num_shards == 1:
      new_theta.wm = self.AddVN(weights[0], per_step=True)
    else:
      # Concatenate shards along the class dimension.
      new_theta.wm = self.AddVN(
          tf.concat(weights, axis=concat_axis), per_step=True)
    return new_theta

  def _LogitsUsingConcatenatedWeightsHelper(self, theta, inputs):
    """Computes logits from already-concatenated weights (pre logits-quant)."""
    p = self.params
    inputs = self.QTensor('inputs', inputs)
    wm = self.QWeight(theta.wm)
    if p.num_shards == 1:
      if self._transpose_weight_params:
        # TODO(shivaniagrawal): having two transpose is expensive, we should
        # optimize this by allowing feature axis to other that last axis.
        # For this particular case num_classes is the first dimension, transpose
        # of weight would make it last dimension; we scale on the axis
        # corresponding to num_classes.
        wm = tf.transpose(wm)
        inputs, wm = self.ToAqtInputs('weight_0', inputs, wm, w_feature_axis=-1)
        wm = tf.transpose(wm)
      else:
        inputs, wm = self.ToAqtInputs(
            'weight_0', act=inputs, weight=wm, w_feature_axis=-1)
      if pruning_utils.ApplyCompression(p):
        # compression path. call GetMatmulResult.
        # inputs and wm are both rank 2. using GetMatmulResult
        logits = pruning_utils.PruningOp.GetMatmulResult(
            inputs, wm, self, transpose_b=self._transpose_weight_params)
      else:
        logits = py_utils.Matmul(
            inputs, wm, transpose_b=self._transpose_weight_params)
      # We used weight's output_dimension, i.e. p.num_classes as feature axis
      # while quantizing weight.
      logits = self.FromAqtMatmul('weight_0', logits)
    else:
      logits = py_utils.Matmul(
          inputs, wm, transpose_b=self._transpose_weight_params)
    if p.use_bias:
      bias = self.QWeight(theta.bias)
      # x * w + b
      # Note that theta.wm and theta.bias are transformed to concated/clipped
      # by caller.
      logits = tf.nn.bias_add(logits, bias)
    # Clip logits by range.
    # Note that this is generally not used in conjunction with quantization and
    # shouldn't be needed at inference time as the quantized matmul above will
    # take care of clipping naturally based on the data type and qparams.
    abs_max = p.logits_abs_max
    if abs_max is not None and not p.is_inference:
      abs_min = -abs_max  # pylint: disable=invalid-unary-operand-type
      logits = py_utils.clip_by_value(logits, abs_min, abs_max)
    return logits

  def _LogitsUsingConcatenatedWeights(self, theta, inputs):
    """Same as the helper above, plus logits quantization tracking."""
    logits = self._LogitsUsingConcatenatedWeightsHelper(theta, inputs)
    return self.QTensor('logits', logits)

  def SimpleLogits(self, theta, inputs):
    """Returns the simple logits computed before the softmax.

    Compared to the Logits function, this one has only weights, no bias for the
    linear projection.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: A tensor with the shape [N, input_dim].

    Returns:
      logits: [N, num_classes]
    """
    inputs = self.QTensor('inputs', inputs)
    theta = self.DenseWeights(theta)
    wm = self.QWeight(theta.wm)
    logits = py_utils.Matmul(
        inputs, wm, transpose_b=self._transpose_weight_params)
    return self.QTensor('logits', logits)

  def Logits(self, theta, inputs):
    """Returns the logits computed before the softmax.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: a list of a single tensor, or a single tensor with the shape [N,
        input_dim].

    Returns:
      logits [batch, num_classes]
    """
    return self._LogitsUsingConcatenatedWeights(
        self.DenseWeights(theta), self._GetInputs(inputs))

  def _XentLossByChunk(self, theta, activation, class_ids):
    """Computes per-example xent loss between activation and class_ids."""
    p = self.params
    # We reshape activation from a matrix to a 3-D tensor (a sequence
    # of matrices), where the 2nd dimenion is p.chunk_size. Because
    # the batch dimenion may not be multiple of p.chunk_size, we pad
    # zeros.
    activation = py_utils.HasRank(activation, 2)
    batch, input_dim = tf.unstack(tf.shape(activation))
    dim0, dim1 = (batch + p.chunk_size - 1) // p.chunk_size, p.chunk_size
    pad = dim0 * dim1 - batch
    padded_activation = tf.concat(
        [activation,
         tf.zeros([pad, input_dim], dtype=activation.dtype)],
        axis=0)
    class_ids = py_utils.HasShape(class_ids, [batch, 1])
    padded_class_ids = tf.concat(
        [class_ids, tf.zeros([pad, 1], dtype=class_ids.dtype)], axis=0)
    if py_utils.use_tpu():
      id_dtype = tf.int32
    else:
      id_dtype = tf.int64
    padded_class_ids = tf.cast(padded_class_ids, id_dtype)

    # For each chunk, we compute logits of padded_activation[i, :, :],
    # and its xent loss with padded_class_ids[i, :].
    def ChunkFn(theta, state0, inputs):
      del state0
      activation, class_ids = inputs.activation, inputs.class_ids
      logits = self._LogitsUsingConcatenatedWeights(theta, activation)
      xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=logits, labels=class_ids)
      amax = tf.stop_gradient(py_utils.ArgMax(logits))
      return py_utils.NestedMap(xent=xent, amax=amax), py_utils.NestedMap()

    acc, _ = recurrent.Recurrent(
        theta=self.DenseWeights(theta),
        state0=py_utils.NestedMap(
            xent=tf.zeros([p.chunk_size], dtype=p.dtype),
            amax=tf.zeros([p.chunk_size], dtype=id_dtype)),
        inputs=py_utils.NestedMap(
            activation=tf.reshape(padded_activation, [dim0, dim1, input_dim]),
            class_ids=tf.reshape(padded_class_ids, [dim0, dim1])),
        cell_fn=ChunkFn)

    # acc.xent has the shape [dim0, dim1]. acc.xent[i, :] are
    # per-example xent loss for examples in the i-th chunk. We
    # reshape acc.xent to a vector and slice the first 'batch' values.
    def GetBatch(x):
      return tf.reshape(x, [-1])[:batch]

    return GetBatch(acc.xent), GetBatch(acc.amax)

  def _FProp2D(self,
               theta,
               inputs,
               class_weights,
               class_ids=None,
               class_probabilities=None):
    """Computes xent loss and log-prob logit."""
    p = self.params
    inputs = self._GetInputs(inputs)
    logits = self.Logits(theta, inputs)
    if class_probabilities is not None:
      per_example_xent, per_example_argmax = self.XentLossFromLogits(
          theta, logits, class_weights, class_ids, class_probabilities)
    elif p.chunk_size:
      class_ids = py_utils.HasShape(class_ids, [-1, 1])
      per_example_xent, per_example_argmax = self._XentLossByChunk(
          theta, inputs, class_ids)
    elif p.num_sampled == 0 or self.do_eval:
      per_example_xent, per_example_argmax = self.XentLossFromLogits(
          theta, logits, class_weights, class_ids, class_probabilities)
    else:  # Use sampled soft-max in training mode with p.num_sampled set.
      assert p.num_sampled > 0
      assert p.use_bias
      tf.logging.vlog(
          0, 'Using sampled_softmax_loss(..., num_sampled=%d, '
          'num_classes=%d) in SimpleFullSoftmax::_FProp2D', p.num_sampled,
          p.num_classes)
      # tf.nn.sampled_softmax_loss will call tf.embedding_lookup. And when
      # tf.embedding_lookup is used, the gradient for the weights will be
      # represented as IndexedSlices which is sparse. tf.tpu.cross_replica_sum
      # turns IndexedSlices into a dense tensor with undefined first dimension.
      # This may cause issues on TPU so instead we just wrap this with
      # tf.identity which allows tf.tpu.cross_replica_sum to properly compute
      # the first dim.
      per_example_xent = tf.nn.sampled_softmax_loss(
          weights=[
              tf.identity(theta[f'weight_{i}']) for i in range(p.num_shards)
          ],
          biases=tf.concat([theta[f'bias_{i}'] for i in range(p.num_shards)],
                           axis=0),
          labels=tf.reshape(class_ids, [-1, 1]),
          inputs=self._GetInputs(inputs),
          num_sampled=p.num_sampled,
          num_classes=p.num_classes,
          seed=p.random_seed)
      # Avoid computing logits; per_example_argmax is going to be always right.
      per_example_argmax = tf.identity(class_ids)
    label_weights = tf.reshape(
        tf.cast(class_weights, py_utils.FPropDtype(p)), [-1])
    total_xent = tf.reduce_sum(per_example_xent * label_weights)
    total_weights = tf.reduce_sum(label_weights)
    return py_utils.NestedMap(
        logits=logits,
        log_probs=tf.nn.log_softmax(logits),
        per_example_argmax=per_example_argmax,
        per_example_xent=per_example_xent,
        per_example_weight=label_weights,
        total_xent=total_xent,
        total_weight=total_weights,
        avg_xent=total_xent / tf.maximum(total_weights, 1e-6))

  def XentLossFromLogits(self,
                         theta,
                         logits,
                         class_weights,
                         class_ids=None,
                         class_probabilities=None):
    """Computes cross-entropy, argmax etc. from logits."""
    p = self.params
    assert logits is not None
    per_example_argmax = py_utils.ArgMax(logits)
    if class_probabilities is not None:
      # Soft labels: full cross-entropy against the given distribution.
      per_example_xent = tf.nn.softmax_cross_entropy_with_logits(
          labels=class_probabilities, logits=logits)
    elif p.num_sampled == 0 or self.do_eval:
      assert class_ids is not None
      tf.logging.vlog(
          0, 'Using sparse_softmax_cross_entropy_with_logits() in '
          'SimpleFullSoftmax::_FProp2D logits_shape=%r',
          py_utils.GetShape(logits))
      per_example_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=tf.reshape(class_ids, [-1]), logits=logits)
    else:
      # Sampled softmax training is handled in _FProp2D, not here.
      raise ValueError(
          'This set of arguments is not supported for XentLossFromLogits.')
    return per_example_xent, per_example_argmax
class FocalFullSoftmax(SimpleFullSoftmax):
  """An extended softmax layer with focal loss.

  Focal loss: https://arxiv.org/abs/1708.02002, Eq (3) and (4).
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'focal_loss_alpha', None,
        'The weighting factor alpha with shape [#classes] for focal loss.')
    p.Define('focal_loss_gamma', None,
             'The modulating factor scalar gamma for focal loss.')
    return p

  def XentLossFromLogits(self,
                         theta,
                         logits,
                         class_weights,
                         class_ids=None,
                         class_probabilities=None):
    """Computes focal-loss cross-entropy and per-example argmax from logits."""
    p = self.params
    assert logits is not None
    argmax = py_utils.ArgMax(logits)
    # Hard labels are flattened to a vector; soft labels pass through as-is.
    flat_ids = None if class_ids is None else tf.reshape(class_ids, [-1])
    xent = py_utils.SoftmaxCrossEntropyFocalLoss(
        logits=logits,
        label_ids=flat_ids,
        label_probs=class_probabilities,
        alpha=p.focal_loss_alpha,
        gamma=p.focal_loss_gamma)
    return xent, argmax
class EinsumSoftmax(base_layer.BaseLayer):
"""A simple softmax layer implemented with Einsum to avoid reshape ops."""
  @classmethod
  def Params(cls):
    """Params for EinsumSoftmax."""
    p = super().Params()
    p.Define('input_dim', 0, 'Dimension of the input.')
    p.Define('num_classes', 0, 'Total number of target classes.')
    p.Define(
        'focal_loss_alpha', None,
        'The weighting factor alpha with shape [#classes] for focal loss.')
    p.Define('focal_loss_gamma', None,
             'The modulating factor scalar gamma for focal loss.')
    p.Define('use_bias', True, 'Whether or not to use a bias variable.')
    return p
def _CreateLayerVariables(self):
super()._CreateLayerVariables()
p = self.params
weight_split_dims_mapping = p.weight_split_dims_mapping
bias_split_dims_mapping = (None if weight_split_dims_mapping is None else
weight_split_dims_mapping[-1:])
w_pc = py_utils.WeightParams(
shape=(p.input_dim, p.num_classes),
init=p.params_init,
dtype=p.dtype,
tensor_split_dims_mapping=weight_split_dims_mapping,
collections=[self.__class__.__name__ + '_vars'])
self.CreateVariable('w', w_pc)
if p.use_bias:
self.CreateVariable(
'b',
py_utils.WeightParams(
shape=[p.num_classes],
init=py_utils.WeightInit.Constant(0.0),
dtype=p.dtype,
tensor_split_dims_mapping=bias_split_dims_mapping,
collections=[self.__class__.__name__ + '_vars']))
@property
def wm_transposed(self):
"""Whether wm (as returned by DenseWeights) is transposed."""
return False
def DenseWeights(self, theta):
ret = py_utils.NestedMap()
ret.wm = theta.w
if self.params.use_bias:
ret.bias = theta.b
return ret
def Logits(self, theta, inputs):
"""Returns the logits computed before the softmax.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: a single tensor with the shape [..., input_dim].
Returns:
logits [..., num_classes].
"""
p = self.params
inputs = self._CastToFPropDtype(inputs)
if (inputs.shape
is not None) and (inputs.shape.rank
is not None) and (inputs.shape.rank < 26):
# A common path.
s = ''.join([chr(x) for x in range(97, 123)]) # abc...xyz
r = inputs.shape.rank
logits = tf.einsum('{0}y,yz->{0}z'.format(s[:r - 1]), inputs, theta.w)
else:
logits = tf.einsum('...d,dv->...v', inputs, theta.w)
logits = gshard_utils.MeshSplit(
logits,
p.device_mesh,
tensor_split_dims_mapping=p.activation_split_dims_mapping)
if p.use_bias:
logits = tf.nn.bias_add(logits, theta.b)
return logits
def XentLossFromLogits(self,
theta,
logits,
class_weights,
class_ids=None,
class_probabilities=None):
"""Computes cross-entropy, argmax etc. from logits."""
p = self.params
assert logits is not None
per_example_argmax = py_utils.ArgMax(logits)
per_example_xent = py_utils.SoftmaxCrossEntropyFocalLoss(
logits=logits,
label_ids=class_ids,
label_probs=class_probabilities,
alpha=p.focal_loss_alpha,
gamma=p.focal_loss_gamma)
return per_example_xent, per_example_argmax
def FProp(self, theta, inputs, class_weights, *args, **kwargs):
logits = self.Logits(theta, inputs)
per_example_xent, per_example_argmax = self.XentLossFromLogits(
theta, logits, class_weights, *args, **kwargs)
return py_utils.NestedMap(
per_example_xent=per_example_xent,
per_example_argmax=per_example_argmax)
class SharedSoftmaxLayer(base_layer.BaseLayer):
  """Shared softmax layer for decoder embedding/softmax matrix.

  This implements weight tying, where the softmax weights are the transpose of
  the embedding matrix.
  """

  @classmethod
  def Params(cls):
    """Params for SharedSoftmaxLayer."""
    p = super().Params()
    p.Define('softmax', SimpleFullSoftmax.Params(), 'Softmax params.')
    p.Define('input_dim', 0,
             'Dimension of the input. Overrides softmax.input_dim.')
    p.Define('num_classes', 0,
             'Total number of target classes. Overrides softmax.num_classes.')
    # Fixed copy-paste error: this overrides softmax.chunk_size, not
    # softmax.num_classes (see __init__ below).
    p.Define(
        'chunk_size', 0,
        'If non-zero, computes the per example xent by small chunks along '
        'the batch dimension. Overrides softmax.chunk_size.')
    # Embedding params.
    p.Define(
        'scale_sqrt_depth', False, 'If set True, activations are scaled'
        ' with sqrt(input_dim) in EmbLookup.')
    p.Define(
        'embedding_dim', 0, 'Set to be compatible with embedding layer, '
        ' and it is equivalent to input_dim')
    p.Define(
        'vocab_size', 0, 'Set to be compatible with embedding layer, and '
        'it is equivalent to num_classes')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    # Non-zero overrides on this layer win over the wrapped softmax's params.
    softmax_params = p.softmax.Copy().Set(name=p.name)
    if p.input_dim:
      softmax_params.input_dim = p.input_dim
    if p.num_classes:
      softmax_params.num_classes = p.num_classes
    if p.chunk_size:
      softmax_params.chunk_size = p.chunk_size
    # vocab_size defaults to the softmax's num_classes; when both are set they
    # must agree because embedding and softmax share one weight matrix.
    if not p.vocab_size:
      p.vocab_size = softmax_params.num_classes
    if p.vocab_size != softmax_params.num_classes:
      raise ValueError('SharedSoftmaxLayer vocab_size must equal num_classes.')
    if p.scale_sqrt_depth and softmax_params.input_dim == 0:
      tf.logging.warning(
          'Input_dim is not set for scaled embedding! Outputs will be 0s!')
    self.CreateChild('softmax', softmax_params)

  def _child_variable_scope_override(self):
    # NOTE(review): the empty scope list for 'softmax' presumably flattens the
    # child's variable scope into this layer's — confirm against base_layer.
    return {**super()._child_variable_scope_override(), 'softmax': []}

  def Logits(self, theta, *args, **kwargs):
    """Delegates to the wrapped softmax."""
    return self.softmax.Logits(theta.softmax, *args, **kwargs)

  def SimpleLogits(self, theta, *args, **kwargs):
    """Delegates to the wrapped softmax."""
    return self.softmax.SimpleLogits(theta.softmax, *args, **kwargs)

  def XentLossFromLogits(self, theta, *args, **kwargs):
    """Delegates to the wrapped softmax."""
    return self.softmax.XentLossFromLogits(theta.softmax, *args, **kwargs)

  def FProp(self, theta, *args, **kwargs):
    """Delegates to the wrapped softmax."""
    return self.softmax.FProp(theta.softmax, *args, **kwargs)

  def EmbLookup(self, theta, ids):
    """Looks up embedding vectors for ids using the tied softmax weights.

    Args:
      theta: A `.NestedMap` with this layer's (and the child softmax's) weights.
      ids: An integer tensor of ids in [0, vocab_size).

    Returns:
      Embedding vectors gathered from the (transposed) softmax weight matrix,
      shaped like `ids` with a trailing input_dim axis.
    """
    p = self.params
    ids = py_utils.with_dependencies([
        py_utils.assert_between(
            ids,
            0,
            p.vocab_size,
            summarize=100000,
            message='{}:class_id_validation'.format(p.cls))
    ], ids)
    # The gather below needs rows indexed by class id; transpose unless the
    # softmax already stores wm that way.
    wm = self.softmax.DenseWeights(theta.softmax).wm
    if not self.softmax.wm_transposed:
      wm = tf.transpose(wm)
    embs_result = tf.gather(wm, ids)
    if p.scale_sqrt_depth:
      assert self.softmax.params.input_dim > 0
      embs_result *= self.softmax.params.input_dim**0.5
    embs_result = gshard_utils.MeshSplit(embs_result, p.device_mesh,
                                         p.activation_split_dims_mapping)
    return embs_result
class SingleShardFullSoftmax(SoftmaxLayer):
  """Full softmax layer."""

  def __init__(self, params):
    """Constructs a SingleShardFullSoftmax layer."""
    super().__init__(params)
    p = self.params
    assert p.name
    if p.device_mesh is not None:
      assert p.weight_split_dims_mapping is not None
      assert len(p.weight_split_dims_mapping) == 2
    # The softmax projection is a plain linear layer followed by a bias layer.
    linear_p = builder_layers.LinearLayer.Params().Set(
        name='linear',
        input_dims=p.input_dim,
        output_dims=p.num_classes,
        device_mesh=p.device_mesh,
        weight_split_dims_mapping=p.weight_split_dims_mapping)
    self.CreateChild('linear', linear_p)
    if p.device_mesh is not None:
      # The bias shards the same way as the class (output) dim of the weight.
      bias_split_dims_mapping = [p.weight_split_dims_mapping[1]]
    else:
      bias_split_dims_mapping = None
    bias_p = builder_layers.BiasLayer.Params().Set(
        name='bias',
        dims=p.num_classes,
        device_mesh=p.device_mesh,
        weight_split_dims_mapping=bias_split_dims_mapping)
    self.CreateChild('bias', bias_p)

  def DenseWeights(self, theta):
    # Weight/bias in the shared NestedMap format (see SharedSoftmaxLayer).
    return py_utils.NestedMap(wm=theta.linear.w, bias=theta.bias.b)

  def Logits(self, theta, inputs):
    """Returns the logits computed before the softmax.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: A single tensor with shape [..., input_dim].

    Returns:
      logits [..., num_classes]
    """
    p = self.params
    # A single-element list/tuple input is unwrapped for convenience.
    if isinstance(inputs, (list, tuple)):
      assert len(inputs) == 1
      inputs = inputs[0]
    after_proj = self.linear.FProp(theta.linear, inputs)
    logits = self.bias.FProp(theta.bias, after_proj)
    # Clip logits by range.
    # Note that this is generally not used in conjunction with quantization and
    # shouldn't be needed at inference time as the quantized matmul above will
    # take care of clipping naturally based on the data type and qparams.
    abs_max = p.logits_abs_max
    if abs_max is not None and not p.is_inference:
      abs_min = -abs_max  # pylint: disable=invalid-unary-operand-type
      logits = py_utils.clip_by_value(logits, abs_min, abs_max)
    if p.logits_soft_max > 0.0:
      logits = py_utils.MaybeSoftCapLogits(logits, p.logits_soft_max)
    return logits

  def XentLossFromLogits(self,
                         theta,
                         logits,
                         class_ids=None,
                         class_probabilities=None):
    """Computes cross-entropy, argmax etc. from logits.

    Note: unlike other softmax layers in this file, this signature takes no
    class_weights argument; weighting happens in FProp.
    """
    assert logits is not None
    if class_probabilities is not None:
      # Soft-label path.
      per_example_xent = tf.nn.softmax_cross_entropy_with_logits(
          labels=class_probabilities, logits=logits)
      per_example_argmax = tf.stop_gradient(py_utils.ArgMax(logits))
    else:
      assert class_ids is not None
      fpdtype = logits.dtype
      if fpdtype == tf.bfloat16:
        # This is needed in order to workaround the limitation that
        # tf.nn.sparse_softmax_cross_entropy_with_logits is not implemented for
        # bf16 on cpu.
        logits = tf.cast(logits, tf.float32)
      per_example_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=class_ids, logits=logits)
      if fpdtype == tf.bfloat16:
        # Cast the loss back to the caller's dtype.
        per_example_xent = tf.cast(per_example_xent, fpdtype)
      per_example_argmax = tf.stop_gradient(py_utils.ArgMax(logits))
    return per_example_xent, per_example_argmax

  def XentLossByChunk(self, theta, activation, class_ids, class_probabilities):
    """Computes per-example xent loss.

    Processes the leading (batch) dimension in p.chunk_size slices via
    recurrent.Recurrent to bound peak memory. The batch size must be an
    exact multiple of chunk_size (asserted at runtime).
    """
    p = self.params
    act_orig_shape = tf.shape(activation)
    batch_size = act_orig_shape[0]
    chunk_size = p.chunk_size
    num_chunks = batch_size // chunk_size
    # Runtime check that the batch splits exactly into chunks.
    num_chunks = py_utils.with_dependencies([
        py_utils.assert_equal(
            0,
            tf.math.floormod(batch_size, chunk_size),
            summarize=2,
            message='assert_equal')
    ], num_chunks)

    def ReshapeX(x):
      # [batch, ...] -> [num_chunks, chunk_size, ...]; None passes through.
      if x is None:
        return None
      x_shape = tf.shape(x)
      new_shape = tf.concat([[num_chunks, chunk_size], x_shape[1:]], 0)
      return tf.reshape(x, new_shape)

    activation = ReshapeX(activation)
    class_ids = ReshapeX(class_ids)
    class_probabilities = ReshapeX(class_probabilities)

    # For each chunk, we compute logits of activation[i, :, :],
    # and its xent loss with class_ids[i, :].
    def ChunkFn(theta, state0, inputs):
      del state0
      activation = inputs.activation
      class_ids = inputs.get('class_ids', None)
      class_probabilities = inputs.get('class_probabilities', None)
      logits = self.Logits(theta, activation)
      per_example_xent, per_example_argmax = self.XentLossFromLogits(
          theta, logits, class_ids, class_probabilities)
      return py_utils.NestedMap(
          xent=per_example_xent, amax=per_example_argmax), py_utils.NestedMap()

    inputs_nmap = py_utils.NestedMap(activation=activation)
    if class_ids is not None:
      inputs_nmap.class_ids = class_ids
    if class_probabilities is not None:
      inputs_nmap.class_probabilities = class_probabilities
    # Zero-valued initial states; only their shape/dtype matter to Recurrent.
    xent_state0 = tf.zeros(tf.shape(activation)[1:-1], dtype=p.dtype)
    argmax_out_dtype = tf.int32 if py_utils.use_tpu() else tf.int64
    amax_state0 = tf.zeros(tf.shape(activation)[1:-1], dtype=argmax_out_dtype)
    acc, _ = recurrent.Recurrent(
        theta=theta,
        state0=py_utils.NestedMap(xent=xent_state0, amax=amax_state0),
        inputs=inputs_nmap,
        cell_fn=ChunkFn)

    # acc.xent has the shape [dim0, dim1]. acc.xent[i, :] are
    # per-example xent loss for examples in the i-th chunk. We
    # reshape acc.xent to a vector and slice the first 'batch' values.
    def GetBatch(x):
      # Undo the chunk split: back to the original batch-major shape.
      return tf.reshape(x, act_orig_shape[:-1])

    return GetBatch(acc.xent), GetBatch(acc.amax)

  def FProp(self,
            theta,
            inputs,
            class_weights,
            class_ids=None,
            class_probabilities=None):
    """Computes logits, cross entropy etc.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: a single tensor with shape [..., input_dim].
      class_weights: a tensor with shape [..., 1] containing the weights for
        each target word.
      class_ids: a tensor with shape [..., 1] of int32 dtype containing the
        target class labels.
      class_probabilities: a tensor with shape [..., num_classes] of float
        values indicating class-membership probabilities.

    Returns:
      A `.NestedMap` containing the following fields

      - logits: with shape [..., num_classes]. Unnormalized softmax's logits.
      - per_example_argmax: with shape [...]. argmax of i-th example.
      - per_example_xent: with shape [...]. Cross entropy between i-th example's
        prediction and its label.
      - per_example_weight: with shape [...]. class_weights casted to
        this layer's dtype.
      - total_xent: A scalar. The sum of per_example_weight * per_example_xent.
      - total_weight: A scalar. The sum of per_example_weight.
      - avg_xent: A scalar. total_loss / total_weight.
    """
    p = self.params
    if isinstance(inputs, (list, tuple)):
      assert len(inputs) == 1
      inputs = inputs[0]
    # Shape checks: weights/ids carry a trailing singleton dim that is
    # squeezed away before the loss computation.
    inputs_shape = tf.shape(inputs)
    ids_shape = tf.concat([inputs_shape[:-1], [1]], 0)
    probs_shape = tf.concat([inputs_shape[:-1], [p.num_classes]], 0)
    class_weights = py_utils.HasShape(class_weights, ids_shape)
    class_weights = tf.squeeze(class_weights, -1)
    if class_ids is not None:
      class_ids = py_utils.HasShape(class_ids, ids_shape)
      class_ids = tf.squeeze(class_ids, -1)
    if class_probabilities is not None:
      class_probabilities = py_utils.HasShape(class_probabilities, probs_shape)
    if (not self.do_eval) and (p.chunk_size > 0):
      # Chunking.
      # The chunked path never materializes the full logits tensor, so
      # logits/log_probs are not available in the output.
      logits = None
      log_probs = None
      per_example_xent, per_example_argmax = self.XentLossByChunk(
          theta, inputs, class_ids, class_probabilities)
    else:
      logits = self.Logits(theta, inputs)
      log_probs = tf.nn.log_softmax(logits)
      per_example_xent, per_example_argmax = self.XentLossFromLogits(
          theta, logits, class_ids, class_probabilities)
    label_weights = tf.cast(class_weights, py_utils.FPropDtype(p))
    total_xent = tf.reduce_sum(per_example_xent * label_weights)
    total_weights = tf.reduce_sum(label_weights)
    output_nmap = py_utils.NestedMap(
        per_example_argmax=per_example_argmax,
        per_example_xent=per_example_xent,
        per_example_weight=label_weights,
        total_xent=total_xent,
        total_weight=total_weights,
        # The 1e-6 guards against division by an all-zero weight sum.
        avg_xent=total_xent / (total_weights + 1e-6))
    if logits is not None:
      output_nmap.logits = logits
      output_nmap.log_probs = log_probs
    return output_nmap
class SingleShardSharedEmbeddingSoftmax(SingleShardFullSoftmax):
  """A shared softmax/embedding layer."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('vocab_size', 0, 'Num tokens in vocab.')
    p.Define('embedding_dim', 0, 'Depth of the output.')
    p.Define(
        'scale_sqrt_depth', False, 'If set True, activations are scaled'
        ' with sqrt(embedding_dim) in EmbLookup.')
    p.Define('emb_with_matmul', False, 'use one-hot vector to perform matmul.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    # The embedding aliases the softmax projection, so sizes must line up.
    assert p.vocab_size == p.num_classes
    assert p.embedding_dim == p.input_dim

  def EmbLookupDefaultTheta(self, ids):
    # Convenience wrapper using the layer's own theta.
    return self.EmbLookup(self.theta, ids)

  def EmbLookup(self, theta, ids):
    """Looks up embedding vectors for ids.

    Args:
      theta: Named tuple with the weight matrix for the embedding.
      ids: A rank-N int32 tensor.

    Returns:
      A rank-(N+1) params.dtype tensor.
      embs[indices, :] is the embedding vector for ids[indices].
    """
    p = self.params
    ids = tf.convert_to_tensor(ids)
    ids = py_utils.with_dependencies([
        py_utils.assert_between(
            ids, 0, p.vocab_size, name='vocab_id_validation')
    ], ids)
    if p.emb_with_matmul:
      # [b, t, vocab_size]
      # One-hot matmul path: theta.linear.w is [embedding_dim, vocab_size]
      # (i.e. input_dim x num_classes), contracted against the vocab axis.
      one_hot = tf.one_hot(ids, p.vocab_size, dtype=theta.linear.w.dtype)
      if one_hot.shape.is_fully_defined() and len(one_hot.shape.as_list()) == 3:
        embs = tf.einsum('blv,kv->blk', one_hot, theta.linear.w)
      else:
        embs = tf.einsum('kv,...v->...k', theta.linear.w, one_hot)
    else:
      # TODO(yonghui): Get rid of this extra copy (tf.transpose).
      # Gather path: transpose to [vocab_size, embedding_dim] so rows are
      # indexed by token id.
      emb_vars = tf.transpose(theta.linear.w)
      embs = tf.nn.embedding_lookup(emb_vars, tf.reshape(ids, [-1]))
    if p.scale_sqrt_depth:
      embs *= p.embedding_dim**0.5
    # Restore the caller's id shape with a trailing embedding axis.
    out_shape = tf.concat([tf.shape(ids), [p.embedding_dim]], 0)
    return tf.reshape(embs, out_shape)
class ConvSoftmax(quant_utils.QuantizableLayer):
  """A softmax implementation based on 1x1 convolution.

  On TPU this is much more memory efficient than MatMul after reshaping logits
  to a matrix.
  """

  @classmethod
  def Params(cls):
    """Params for SoftmaxLayer."""
    p = super().Params()
    p.Define('input_dim', 0, 'Dimension of the input.')
    p.Define('hidden_dim', 0, 'Dimension of the hidden layer.')
    p.Define('num_classes', 0, 'Total number of target classes.')
    return p

  def _CreateLayerVariables(self):
    """Creates the optional projection, the softmax weight and the bias."""
    super()._CreateLayerVariables()
    p = self.params
    if p.hidden_dim:
      # Optional bottleneck projection applied before the softmax conv.
      self.CreateVariable(
          'w_proj',
          py_utils.WeightParams(
              shape=(1, p.input_dim, p.hidden_dim),
              init=p.params_init,
              dtype=p.dtype,
              collections=[self.__class__.__name__ + '_vars']))
    softmax_in_dim = p.hidden_dim or p.input_dim
    self.CreateVariable(
        'w',
        py_utils.WeightParams(
            shape=(1, softmax_in_dim, p.num_classes),
            init=p.params_init,
            dtype=p.dtype,
            collections=[self.__class__.__name__ + '_vars']))
    self.CreateVariable(
        'b',
        py_utils.WeightParams(
            shape=[p.num_classes],
            init=py_utils.WeightInit.Constant(0.0),
            dtype=p.dtype,
            collections=[self.__class__.__name__ + '_vars']))

  def Logits(self, theta, inputs):
    """Returns unnormalized logits via 1x1 convs; rank-2 input is preserved."""
    p = self.params
    with tf.name_scope(p.name):
      is_rank2 = inputs.shape.ndims == 2
      if is_rank2:
        # Insert a singleton time axis: [batch, 1, depth].
        act = inputs[:, tf.newaxis, :]
      else:
        act = py_utils.HasShape(inputs, [-1, -1, -1])
      if p.hidden_dim:
        act = tf.nn.conv1d(act, theta.w_proj, 1, 'VALID')
      logits = tf.nn.bias_add(tf.nn.conv1d(act, theta.w, 1, 'VALID'), theta.b)
      # Squeeze the inserted axis back out for rank-2 callers.
      return logits[:, 0, :] if is_rank2 else logits
class DropoutLayer(base_layer.BaseLayer):
  """Apply dropout during training."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('keep_prob', 1.0, 'Keep probability.')
    # noise_shape is unknown when building layer params.
    p.Define(
        'noise_shape', None, 'A 1-D `Tensor` of type `int32`, representing'
        ' the shape for randomly generated keep/drop flags.')
    p.Define(
        'noise_shape_broadcast_dims', None,
        'A list of dimension where the noise shape is broadcasted. For '
        'example, noise_shape = [n, h, w, 1] when '
        'noise_shape_broadcast_dims=[-1] ')
    # We typically want to replace dropout by expectation during eval.
    # However, in certain cases E(f(x)) != f(E(x)), and replacing dropout by its
    # expectation during eval leads to worse quality.
    p.Define('dropout_at_eval', False,
             'Whether or not to also perform dropout at eval time.')
    return p

  def _Dropout(self, theta, inputs, noise_shape):
    # Standard TF dropout; subclasses may override for deterministic variants.
    return tf.nn.dropout(
        inputs,
        rate=1 - self.params.keep_prob,
        noise_shape=noise_shape,
        seed=self.params.random_seed)

  @classmethod
  def NumOutputNodes(cls, p):
    # The layer does element-wise processing thus is input-shape agnostic.
    return

  def FProp(self, theta, inputs):
    """Apply dropout to inputs.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: The inputs tensor.

    Returns:
      inputs with dropout applied at training time.
    """
    p = self.params
    if not self.do_eval or p.dropout_at_eval:
      if isinstance(p.keep_prob, numbers.Real) and p.keep_prob == 1.0:
        return inputs
      if p.noise_shape_broadcast_dims:
        # Copy before mutating: writing the broadcast dims directly into
        # p.noise_shape would permanently mutate the layer params (bug in the
        # previous version, which did `noise_shape[dim] = 1` on p.noise_shape).
        noise_shape = list(p.noise_shape or py_utils.GetShape(inputs))
        for dim in p.noise_shape_broadcast_dims:
          # Negative dims index from the end (see the Params example above).
          if dim >= len(noise_shape):
            raise ValueError('Invalid broadcasted dim {}'.format(dim))
          noise_shape[dim] = 1
      else:
        noise_shape = p.noise_shape
      ret = self._Dropout(theta, inputs, noise_shape)
      # Preserve the (possibly more precise) static shape of the input.
      ret.set_shape(inputs.get_shape())
      return ret
    else:
      return inputs

  @classmethod
  def FPropMeta(cls, p, inputs, *args):
    py_utils.CheckShapes((inputs,))
    flops_per_element = 10  # Approximately 10 flops per element.
    return py_utils.NestedMap(
        flops=inputs.num_elements() * flops_per_element, out_shapes=(inputs,))
class DeterministicDropoutLayer(DropoutLayer):
  """Apply dropout during training.

  Uses step-seeded random numbers (py_utils.GenerateStepSeedPair), presumably
  so the mask is reproducible for a given global seed and step — confirm
  against py_utils.DeterministicDropout.
  """

  def _Dropout(self, theta, inputs, noise_shape):
    # Overrides the base tf.nn.dropout with the deterministic variant.
    return py_utils.DeterministicDropout(
        inputs,
        keep_prob=self.params.keep_prob,
        seeds=py_utils.GenerateStepSeedPair(self.params),
        noise_shape=noise_shape)
class LayerNorm(base_layer.BaseLayer):
  """Layer normalization.

  Implements layer normalization:
  https://arxiv.org/abs/1607.06450
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('input_dim', 0, 'Depth of the input to the network.')
    p.Define('epsilon', 1e-6, 'Tiny value to guard rsqrt.')
    p.Define('use_fused_layernorm', False, 'Whether to use fused layernorm.')
    p.Define(
        'direct_scale', False, 'Whether to apply scale directly '
        'without a +1.0. Var is initialized to 1.0 instead. This makes '
        'the layer weight-compatible with the implementation in '
        'contrib.layers.')
    p.Define('bias', True, 'Whether to use bias.')
    p.Define('center', True,
             'Whether to subtract the mean when computing variance.')
    p.Define('use_defun', True, 'Whether to use CallDefun for normalization.')
    p.Define(
        'use_batch_norm_backend', False,
        'Whether to use the implementation based on '
        'tf.nn.batch_normalization.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    assert p.input_dim > 0, p.input_dim

  def _CreateLayerVariables(self):
    # Creates optional 'bias' and mandatory 'scale', both [input_dim] and
    # excluded from LP regularization. With direct_scale the scale is
    # initialized to 1.0 and applied as-is; otherwise it is initialized to
    # 0.0 and applied as (1.0 + scale) in FProp.
    super()._CreateLayerVariables()
    p = self.params
    if p.bias:
      pc = py_utils.WeightParams(
          shape=[p.input_dim],
          init=py_utils.WeightInit.Constant(0.0),
          dtype=p.dtype,
          collections=[self.__class__.__name__ + '_vars'] +
          [py_utils.SKIP_LP_REGULARIZATION])
      self.CreateVariable('bias', pc)
    if p.direct_scale:
      scale_pc = py_utils.WeightParams(
          shape=[p.input_dim],
          init=py_utils.WeightInit.Constant(1.0),
          dtype=p.dtype,
          collections=[self.__class__.__name__ + '_vars'] +
          [py_utils.SKIP_LP_REGULARIZATION])
    else:
      scale_pc = py_utils.WeightParams(
          shape=[p.input_dim],
          init=py_utils.WeightInit.Constant(0.0),
          dtype=p.dtype,
          collections=[self.__class__.__name__ + '_vars'] +
          [py_utils.SKIP_LP_REGULARIZATION])
    self.CreateVariable('scale', scale_pc)

  def _GetScaleAndBias(self, theta):
    # When bias is disabled, substitute zeros so FProp stays branch-free.
    if self.params.bias:
      bias = theta.bias
    else:
      bias = tf.zeros_like(theta.scale)
    return theta.scale, bias

  def FProp(self, theta, inputs):
    """Applies normalization over the last dimension (layer).

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: A tensor of shape [..., hidden_dim].

    Returns:
      tensor of the same shape with inputs
    """
    # Test hook: allows unit tests to bypass normalization entirely.
    if py_utils.testonly_skip_norm_layers():
      return inputs
    p = self.params
    with tf.name_scope(p.name):
      inputs = py_utils.with_dependencies(
          [py_utils.assert_equal(tf.shape(inputs)[-1], p.input_dim)], inputs)
      inputs = self._CastToFPropDtype(inputs)
      cur_scale, cur_bias = self._GetScaleAndBias(theta)
      if p.direct_scale:
        scale = cur_scale
      else:
        scale = 1.0 + cur_scale
      # Backend 1: fused statistics via tf.nn.sufficient_statistics.
      if p.use_fused_layernorm:
        if not p.center:
          raise ValueError('use_fused_layernorm does not support center=false.')
        counts, means_ss, variance_ss, _, = tf.nn.sufficient_statistics(
            inputs, axes=[-1], keepdims=True)
        mean, variance = tf.nn.normalize_moments(counts, means_ss, variance_ss,
                                                 None)
        # Adding a cast here. Sometimes, inputs/mean/variance/p.epsilon are in
        # float32 while scale and cur_bias are in bf16.
        inputs_norm = tf.cast(
            (inputs - mean) * tf.math.rsqrt(variance + p.epsilon),
            dtype=scale.dtype)
        return inputs_norm * scale + cur_bias
      # Backend 2: tf.nn.batch_normalization over the last axis.
      if p.use_batch_norm_backend:
        # Calculate the moments on the last axis (layer activations).
        mean, variance = tf.nn.moments(inputs, -1, keepdims=True)
        # Compute layer normalization using the batch_normalization function.
        output = tf.nn.batch_normalization(
            inputs,
            mean,
            variance,
            offset=cur_bias,
            scale=scale,
            variance_epsilon=p.epsilon)
        return output

      # Backend 3 (default): hand-written normalization, optionally wrapped
      # in CallDefun.
      def Normalize(xs):
        """Normalize `xs.x` w/ `xs.scale` and `xs.bias` gain/shift."""
        x_shape = py_utils.GetShape(xs.x)
        # Flatten all leading dims; normalize rows of [-1, hidden_dim].
        x_reshaped = tf.reshape(xs.x, [-1, x_shape[-1]])
        mean = tf.reduce_mean(x_reshaped, axis=[1], keepdims=True)
        if p.center:
          x_in = x_reshaped - mean
        else:
          x_in = x_reshaped
        if x_in.dtype == tf.bfloat16:
          # tf.rsqrt and SquaredDifference are not implemented for bfloat16,
          # hence we always cast into tf.float32.
          x_cast = tf.cast(x_in, tf.float32)
        else:
          x_cast = x_in
        # Variance is the mean of squares of the (optionally centered) values.
        variance = tf.reduce_mean(tf.square(x_cast), axis=[1], keepdims=True)
        x_norm_den_inv = tf.cast(
            tf.math.rsqrt(variance + p.epsilon), x_in.dtype)
        x_norm = x_in * x_norm_den_inv
        x_norm = tf.reshape(x_norm, x_shape)
        return x_norm * xs.scale + xs.bias

      if p.use_defun:
        return py_utils.CallDefun(
            Normalize, py_utils.NestedMap(x=inputs, scale=scale, bias=cur_bias))
      return Normalize(py_utils.NestedMap(x=inputs, scale=scale, bias=cur_bias))

  @classmethod
  def NumOutputNodes(cls, p):
    # Output depth equals input depth: normalization is shape-preserving.
    return p.input_dim

  @classmethod
  def FPropMeta(cls, p, inputs):
    py_utils.CheckShapes((inputs,))
    # Rough cost estimate: ~10 flops per element.
    return py_utils.NestedMap(
        flops=inputs.num_elements() * 10, out_shapes=(inputs,))
# TODO(shibow/wangtao) remove this after b/174094694 is done.
class ReshapedLayerNorm(LayerNorm):
  """Customized LayerNorm with model dim D reshaped as Md."""

  def FProp(self, theta, inputs):
    """Applies normalization over the last two dimensions.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: A 4D tensor of shape [a, b, dim_reshape_segments, hidden_dim //
        dim_reshape_segments]. If a 3D tensor [time, batch, dim], the input
        (resp. output) rank is first augmented (resp. reduced) by splitting
        the last dimension according to the device_mesh (resp. merging the
        last two dimensions).

    Returns:
      tensor of the same shape with inputs.
    """
    p = self.params
    with tf.name_scope(p.name):
      inputs_shape = py_utils.GetShape(inputs)
      # 3D inputs have their model dim split into two axes using the second
      # device-mesh dimension; the split is undone before returning.
      do_reshape = len(inputs_shape) == 3
      if do_reshape:
        inputs = gshard_utils.ReshapeDim(inputs, 2, p.device_mesh.shape[1])
      inputs = self._CastToFPropDtype(inputs)
      cur_scale, cur_bias = self._GetScaleAndBias(theta)
      if p.direct_scale:
        scale = cur_scale
      else:
        scale = 1.0 + cur_scale
      # Moments over the last two axes (the split model dimension).
      axes = list(range(len(inputs.shape) - 2, len(inputs.shape)))
      counts, means_ss, variance_ss, _, = tf.nn.sufficient_statistics(
          inputs, axes=axes, keepdims=True)
      mean, variance = tf.nn.normalize_moments(counts, means_ss, variance_ss,
                                               None)
      # Scale/bias are stored flat [input_dim]; reshape them to match the
      # split trailing dims of inputs.
      scale = tf.reshape(scale, tf.shape(inputs)[-2:])
      cur_bias = tf.reshape(cur_bias, tf.shape(inputs)[-2:])
      # Adding a cast here. Sometimes, inputs/mean/variance/p.epsilon are in
      # float32 while scale and cur_bias are in bf16.
      inputs_norm = tf.cast(
          (inputs - mean) * tf.math.rsqrt(variance + p.epsilon),
          dtype=scale.dtype)
      output = inputs_norm * scale + cur_bias
      if do_reshape:
        # Merge the last two dims back to the caller's 3D shape.
        shape = inputs_shape[:2] + [-1]
        output = tf.reshape(output, shape)
      return output
class CategoricalLayerNorm(LayerNorm):
  """Categorical layer normalization.

  Allow dynamic switch of normalization params based on given class_index.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('num_classes', 1,
             'Number of privatized copies of layer norm params.')
    return p

  def _BiasVarName(self, i):
    # Variable name for the i-th class's bias copy.
    return 'bias_' + str(i)

  def _ScaleVarName(self, i):
    # Variable name for the i-th class's scale copy.
    return 'scale_' + str(i)

  def _CreateLayerVariables(self):
    # Skip LayerNorm's _CreateLayerVariables() as bias and scale variables will
    # be created in this function.
    super(LayerNorm, self)._CreateLayerVariables()  # pylint: disable=bad-super-call
    p = self.params
    # All copies share one spec: zero-init, LP-regularization excluded. Note
    # the scale copies are also zero-init, matching LayerNorm's
    # non-direct_scale (1.0 + scale) convention.
    pc = py_utils.WeightParams(
        shape=[self.params.input_dim],
        init=py_utils.WeightInit.Constant(0.0),
        dtype=p.dtype,
        collections=[self.__class__.__name__ + '_vars'] +
        [py_utils.SKIP_LP_REGULARIZATION])
    for i in range(p.num_classes):
      self.CreateVariable(self._BiasVarName(i), pc)
      self.CreateVariable(self._ScaleVarName(i), pc)

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert isinstance(p.num_classes, int)
    assert p.num_classes > 0
    # class_index is an extra theta (not a variable), so callers can swap it
    # at run time to select which privatized copy is used.
    self.AddExtraTheta('class_index', tf.constant(0, dtype=tf.int32))

  def _GetScaleAndBias(self, theta):
    # Selects the scale/bias copy addressed by theta.class_index, guarded by
    # a bounds assertion on the index.
    p = self.params
    with tf.control_dependencies(
        [py_utils.assert_between(theta.class_index, 0, p.num_classes)]):
      biases = [theta[self._BiasVarName(i)] for i in range(p.num_classes)]
      cur_bias = tf.gather(biases, theta.class_index)
      scales = [theta[self._ScaleVarName(i)] for i in range(p.num_classes)]
      cur_scale = tf.gather(scales, theta.class_index)
    return cur_scale, cur_bias
class ConvSetLayer(quant_utils.QuantizableLayer):
  """Set of Convolutions with different filter sizes in a single layer.

  Applies a set of convolutions with different filter shapes to the inputs and
  returns the concatenated outputs.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('cnn_tpl',
             ConvLayer.Params().Set(filter_stride=(1, 1)),
             'Conv layer template for the set of conv layers.')
    p.Define(
        'filter_shapes', [(0, 0, 0, 0)],
        'Must be a list of sequences of 4. Elements are in order of height'
        ' (time), width (frequency), in_channel, out_channel')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    filter_set = set()
    input_shape = None
    # Asserting kernel sizes are different and input sizes are the same.
    for filter_shape in p.filter_shapes:
      key = '%d_%d' % (filter_shape[0], filter_shape[1])
      assert key not in filter_set
      filter_set.add(key)
      if input_shape is None:
        input_shape = filter_shape[2]
      assert input_shape == filter_shape[2]

    # One child conv layer per filter shape, named by its kernel size.
    params_conv_set = []
    for filter_shape in p.filter_shapes:
      conv_p = p.cnn_tpl.Copy()
      conv_p.name = '%d_%d' % (filter_shape[0], filter_shape[1])
      # Important: combined quantization will be done pre-concat versus
      # by each layer on its output. Otherwise, inherit quantization params
      # from this layer.
      if p.qdomain.default is not None:
        conv_p.qdomain.default = p.qdomain.default.Copy()
      conv_p.disable_activation_quantization = True
      conv_p.filter_shape = filter_shape
      params_conv_set.append(conv_p)
    self.CreateChildren('conv_set', params_conv_set)

  def _CreateLayerVariables(self):
    super()._CreateLayerVariables()
    # The same QTensor is used for all inputs to the concat.
    self.TrackQTensor('activation')

  def FProp(self, theta, inputs, paddings):
    """Apply all convolution sets to inputs and concatenate outputs.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: The inputs tensor. It is expected to be of shape [batch, time,
        frequency, channel]. The time dimension corresponds to the height
        dimension as in images and the frequency dimension corresponds to the
        width dimension as in images.
      paddings: The paddings tensor. It is expected to be of shape [batch,
        time].

    Returns:
      A tuple (out, output_paddings).

      - out: output tensor. Expected to be of shape [batch, time_mod,
        frequency_mod, out_channel_1 + out_channel_2 ...] where time_mod and
        frequency_mod depend on the conv layer strides and out_channel_i is
        the output channel size of the i-th conv layer in the set.
      - output_paddings: Modified paddings generated within `ConvLayer.FProp`.
        Expected to be of the shape [batch, time_mod].
    """
    p = self.params
    # Runtime checks: paddings is rank-2 and inputs matches [batch, time,
    # freq, in_channel] with in_channel taken from the first filter shape.
    inputs = py_utils.with_dependencies([
        py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]),
        py_utils.assert_shape_match(
            tf.shape(inputs),
            tf.concat([tf.shape(paddings), [-1, p.filter_shapes[0][2]]], 0))
    ], inputs)

    conv_outputs = []
    output_paddings = None
    # output_padding should be same for all filters for the same stride.
    for i, conv_i in enumerate(self.conv_set):
      conv_i_output, conv_i_padding = conv_i.FProp(theta.conv_set[i], inputs,
                                                   paddings)
      if output_paddings is None:
        output_paddings = conv_i_padding
      conv_outputs.append(conv_i_output)

    # Track for quantization.
    conv_outputs = [self.QTensor('activation', t) for t in conv_outputs]

    # Concatenate along the channel dimension.
    out = tf.concat(conv_outputs, -1)
    return out, output_paddings
class LocalizedLabelSmoother(base_layer.BaseLayer):
  """Smooths labels given as class ids.

  Implements the smoothing from https://arxiv.org/abs/1612.02695. Instead of
  1-hot class ids the model is trained to predict a distribution over classes
  that includes the correct class label and with a small probability the labels
  of tokens that appear nearby in time in the ground truth. This typically acts
  as a strong regularizer.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('num_classes', 0, 'Number of classes')
    p.Define(
        'offsets', [], 'Offset (over time) for smoothing. At time T the '
        'smoothed target is class[T] + sum_i weights[i]*class[T+offset[i]]')
    p.Define('weights', [], 'Weight of the smoothing at corresponding offset')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.num_classes > 0
    # Each offset needs a matching weight.
    assert len(p.offsets) == len(p.weights)
    assert p.name

  def FProp(self, theta, target_paddings, target_labels, target_ids):
    """Convert class_ids to 1hot and smooth by neighborhood.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      target_paddings: float32 matrix [bs, seq_len]
      target_labels: int32 matrix [bs, seq_len]. This stores the target label
        output at each decoder step as generated by the speech input generator
        input_batch.tgt.labels
      target_ids: int32 matrix [bs, seq_len]. This stores the target_id that is
        fed to the decoder, as generated by the speech input generator
        input_batch.tgt.ids

    Returns:
      A tensor [bs, seq_len, num_classes] denoting a smoothed distribution over
      num_classes.
    """
    del target_ids  # Unused.
    p = self.params
    class_probabilities = tf.one_hot(
        target_labels, p.num_classes, dtype=py_utils.FPropDtype(p))

    # Start list keeping the scaled class-probabilities at different offsets.
    output_distributions = [class_probabilities]
    seq_len = tf.shape(class_probabilities)[1]
    # If offsets < 0 we force a future output_act to be like a past token.
    # If offsets > 0 we force a past output_act to be like a future token.
    min_offset = np.min(p.offsets + [0])
    max_offset = np.max(p.offsets + [0])
    # Pad the time axis so every offset slice below stays in range.
    class_probabilities = tf.pad(class_probabilities,
                                 [[0, 0], [-min_offset, max_offset], [0, 0]])
    # Shift the weights to the left by one location - we don't make the
    # EOS more probable.
    class_weights = tf.pad(1.0 - target_paddings[:, 1:],
                           [[0, 0], [-min_offset, max_offset + 1]])
    class_weights = tf.expand_dims(class_weights, 2)

    # Accumulate the weighted, shifted one-hot distributions per offset.
    for offset, weight in zip(p.offsets, p.weights):
      offset_in_padded = offset - min_offset
      output_distributions.append(
          class_probabilities[:, offset_in_padded:offset_in_padded + seq_len, :]
          * class_weights[:, offset_in_padded:offset_in_padded + seq_len, :] *
          weight)
    output_distributions = tf.add_n(output_distributions)
    # Renormalize so each time step sums to 1.
    output_distributions /= tf.reduce_sum(
        output_distributions, axis=-1, keepdims=True)
    return output_distributions
class UniformLabelSmoother(base_layer.BaseLayer):
  """Smooths labels given as class ids and confidence.

  Implements the smoothing from https://arxiv.org/abs/1512.00567. Correct class
  label confidence is dropped by eps and all the other classes are increased
  by eps/(num_classes - 1).
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('num_classes', 0, 'Number of classes')
    p.Define('uncertainty', 0.1, 'Uncertainty of correct label, eps.')
    p.Define(
        'uncertainty_larger', 0.1,
        'Apply a larger uncertainty to specific tokens, as specified '
        'by token_from_target_ids.')
    p.Define('token_id_uncertainty_larger', None, 'Id of token from target_ids '
             'to apply uncertainty_larger to.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    # Validate configuration eagerly, before any graph construction.
    assert p.num_classes > 0
    assert 0.0 <= p.uncertainty < 1.0
    assert p.token_id_uncertainty_larger is None or (
        p.token_id_uncertainty_larger >= 0)
    assert p.name

  def FProp(self, theta, target_paddings, target_labels, target_ids):
    """Convert target_labels to 1hot and smooth uniformly.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      target_paddings: float32 matrix [bs, seq_len]
      target_labels: int32 matrix [bs, seq_len]. This stores the target label
        output at each decoder step as generated by the speech input generator
        input_batch.tgt.labels
      target_ids: int32 matrix [bs, seq_len]. This stores the target_id that is
        fed to the decoder, as generated by the speech input generator
        input_batch.tgt.ids

    Returns:
      A tensor of float32 [bs, seq_len, num_classes] denoting a smoothed
      distribution over num_classes.
    """
    del target_paddings  # Unused by FProp.
    p = self.params

    # Mass assigned to each incorrect class; the correct class keeps 1 - eps.
    low_confidence = p.uncertainty / tf.cast(p.num_classes - 1, tf.float32)
    high_confidence = (1.0 - p.uncertainty)

    smooth_targets = tf.one_hot(
        tf.cast(target_labels, tf.int32),
        depth=p.num_classes,
        on_value=high_confidence,
        off_value=low_confidence)
    if p.token_id_uncertainty_larger is not None:
      assert target_ids is not None
      # Build a second, more heavily smoothed distribution using
      # uncertainty_larger instead of uncertainty.
      low_confidence_larger = p.uncertainty_larger / tf.cast(
          p.num_classes - 1, tf.float32)
      high_confidence_larger = (1.0 - p.uncertainty_larger)
      smooth_targets_larger = tf.one_hot(
          tf.cast(target_labels, tf.int32),
          depth=p.num_classes,
          on_value=high_confidence_larger,
          off_value=low_confidence_larger)
      # Positions whose *input* id equals the designated token use the larger
      # uncertainty; all other positions keep the default smoothing.
      should_smooth_larger = tf.tile(
          tf.expand_dims(
              tf.equal(target_ids, p.token_id_uncertainty_larger), -1),
          multiples=[1, 1, p.num_classes])
      smooth_targets = tf.where(should_smooth_larger, smooth_targets_larger,
                                smooth_targets)
    return smooth_targets
class HighwaySkipLayer(base_layer.BaseLayer):
  """A highway skip layer.

  This class represents a highway skip layer, which takes multiple
  inputs (from different layers of the network) and gates them.
  This returns C(x)x + T(x)h, initially biasing C to be open.

  For some discussion about initialization please see:
  Section 2.2 in [Srivastava, 2015]: https://arxiv.org/pdf/1505.00387v2.pdf
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('input_dim', 0, 'Dimension of the input to the network.')
    p.Define(
        'batch_norm', False,
        'Whether or not to apply BN to the highway skip layer output. '
        'Note this is only a single bool.')
    p.Define('carry_bias_init', 1.0, 'carry gates bias initialization')
    p.Define('couple_carry_transform_gates', False,
             'Boolean on whether to couple the transform and carry gates.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    # Carry gate C(x): a sigmoid projection whose positive bias init keeps the
    # gate initially open, i.e. output starts close to the input x.
    carry_gate_params = ProjectionLayer.Params().Set(
        batch_norm=p.batch_norm,
        has_bias=True,
        activation='SIGMOID',
        input_dim=p.input_dim,
        output_dim=p.input_dim,
        bias_init=p.carry_bias_init,
        name='%s_carry_gate' % p.name)
    self.CreateChild('carry_gate', carry_gate_params)
    # A separate transform gate T(x) is only created when the gates are not
    # coupled (when coupled, T = 1 - C in FProp). Note the mirrored, negative
    # bias init so both start consistent with an "open carry" configuration.
    if not p.couple_carry_transform_gates:
      transform_gate_params = ProjectionLayer.Params().Set(
          batch_norm=p.batch_norm,
          has_bias=True,
          activation='SIGMOID',
          input_dim=p.input_dim,
          output_dim=p.input_dim,
          bias_init=-p.carry_bias_init,
          name='%s_transform_gate' % p.name)
      self.CreateChild('transform_gate', transform_gate_params)

  def FProp(self, theta, x, transformed_x, paddings=None):
    """Fprop for Highway Skip layer.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      x: feature at the lower layer.
      transformed_x: transformation of x at a higher layer.
      paddings: padding applied to the features.

    Returns:
      layer_out - activations after forward propagation.
    """
    p = self.params
    assert self.carry_gate is not None
    carry = self.carry_gate.FProp(theta.carry_gate, x, paddings)
    if p.couple_carry_transform_gates:
      # Coupled gates: the transform gate is fully determined by the carry.
      transform = 1 - carry
    else:
      assert self.transform_gate is not None
      transform = self.transform_gate.FProp(theta.transform_gate, x, paddings)
    # C(x) * x + T(x) * transformed_x.
    layer_out = x * carry + transformed_x * transform
    return layer_out
class GatingLayer(base_layer.BaseLayer):
  """Sigmoid-gated blend of two equally shaped inputs.

  Computes ``carry * x + (1 - carry) * y`` where the gate is
  ``carry = sigmoid(x @ w_1 + y @ w_2 + bias)``, implemented as a single
  sigmoid projection over the concatenation [x, y].

  Unlike `HighwaySkipLayer` (where the gate depends only on x), the carry here
  is a function of both inputs.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('input_dim', 0, 'Dimension of the input to the network.')
    p.Define('has_bias', False, 'Whether carry has a bias term.')
    p.Define('carry_bias_init', 0.0, 'carry gates bias initialization')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    # One sigmoid projection over the concatenated pair produces the gate.
    gate_params = ProjectionLayer.Params().Set(
        batch_norm=False,
        has_bias=p.has_bias,
        activation='SIGMOID',
        input_dim=p.input_dim * 2,
        output_dim=p.input_dim,
        bias_init=p.carry_bias_init,
        name='carry')
    self.CreateChild('carry_gate', gate_params)

  def FProp(self, theta, x, y, paddings=None):
    """Fprop for the gating layer.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      x: An input feature, the last dimension must match p.input_dim.
      y: Another input feature. Must have the same shape as 'x'.
      paddings: padding applied to the features. When x and y have shape [...,
        input_dim], 'paddings', when specified, must have shaped [..., 1], where
        all but the last dimension match.

    Returns:
      layer_out - activations after forward propagation. Same shape as x and y.
    """
    # Guard against mismatched inputs before any computation happens.
    shape_check = py_utils.assert_shape_match(tf.shape(x), tf.shape(y))
    y = py_utils.with_dependencies([shape_check], y)
    gate_input = tf.concat([x, y], axis=-1)
    carry = self.carry_gate.FProp(theta.carry_gate, gate_input, paddings)
    # Convex combination of the two inputs, weighted by the learned gate.
    return carry * x + (1.0 - carry) * y
class GradNormTracker(base_layer.BaseLayer):
  """A helper class to keep track of gradient norm stats."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('decay', 0.995,
             'Decay in updating the moving avgs in grad norm stats')
    p.Define('grad_norm_lower_cap', 1e-2, 'The minimal gradient norm value.')
    p.Define(
        'clip_threshold', 4.0,
        'Distance threshold at which gradients are clipped to 0.0.'
        ' Distance is measured in the number of standard deviations a'
        ' given gradient norm is from the mean gradient norm. The'
        ' default value of 4.0 means we are throwing away roughly'
        ' 0.15% of steps.')
    p.Define(
        'grad_norm_clip_cap_min', 0.0,
        'We stop clipping if grad norm is already smaller than this'
        ' value.')
    p.Define(
        'dry_run', False, 'If True, always return 1.0 in FProp() to signify '
        'no grad clipping suggested, in which case the class only collects '
        'stats and summaries.')
    return p

  def __init__(self, params):
    super().__init__(params)
    self._decay = params.decay

  def _CreateLayerVariables(self):
    super()._CreateLayerVariables()
    # Scalar, non-trainable accumulators for the running log-norm statistics.
    pc = py_utils.WeightParams(
        shape=[],
        init=py_utils.WeightInit.Constant(0.0),
        dtype=tf.float32,
        collections=[self.__class__.__name__ + '_vars'])
    # Decayed sum of log(grad_norm + 1).
    self.CreateVariable('log_mean', pc, trainable=False)
    # Decayed sum of log(grad_norm + 1)^2, used for the variance estimate.
    self.CreateVariable('log_mean_squared', pc, trainable=False)
    # Decayed sum of the constant 1.0; normalizer for the two sums above.
    self.CreateVariable('total_weight', pc, trainable=False)
    # Running count of steps whose gradients were rejected (clipped to 0).
    self.CreateVariable('total_rejections', pc, trainable=False)

  def FProp(self, theta, grad_norm, has_nan=None):
    """Update gradient norm moving avgs, and returns whether or not ...

    to clip gradients to 0.0. If the current batch has NaN grads, does not
    update the moving avgs and forces to clip the gradients to 0.0.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      grad_norm: A float scalar tensor.
      has_nan: A boolean scalar tensor to indicate if the current batch has nan.

    Returns:
      A scalar float tensor with value of either 1.0 or 0.0. The value of 0.0
      means the gradient norm is excessively large or contains NaN, and the step
      should be aborted completely.
    """
    p = self.params
    with tf.name_scope(p.name):
      grad_norm = tf.maximum(grad_norm, p.grad_norm_lower_cap)

      # Exponentially decayed moving avg of log(grad_norm) mean.
      mean = theta.log_mean / tf.maximum(theta.total_weight, 1e-6)
      # Exponentially decayed moving avg of log(grad_norm) variance.
      var = ((theta.log_mean_squared / tf.maximum(theta.total_weight, 1e-6)) -
             mean * mean)
      std = tf.sqrt(tf.maximum(var, 1e-6))
      summary_utils.scalar('log_grad_norm_mean', mean)
      summary_utils.scalar('log_grad_norm_std', std)
      summary_utils.scalar('clip_ratio_threshold',
                           tf.exp(std * p.clip_threshold))
      summary_utils.scalar('clip_threshold',
                           tf.exp(mean + std * p.clip_threshold) - 1.0)
      summary_utils.scalar('total_rejections', theta.total_rejections)

      # Statistics are tracked in log(norm + 1) space.
      log_grad_norm = tf.math.log(grad_norm + 1.0)
      log_grad_norm_cap = tf.cast(mean + std * p.clip_threshold, tf.float32)
      log_grad_norm_cap_min = tf.math.log(p.grad_norm_clip_cap_min + 1.0)
      # Never clip below grad_norm_clip_cap_min.
      log_grad_norm_cap = tf.maximum(log_grad_norm_cap, log_grad_norm_cap_min)

      def UpdateExpMovingAvg(ref_var, val, ignore):
        # When `ignore` is true (NaN batch), leave the accumulator unchanged.
        if ignore is not None:
          delta = tf.where(ignore, tf.zeros([]),
                           (1.0 - p.decay) * (val - ref_var))
        else:
          delta = (1.0 - p.decay) * (val - ref_var)
        return tf.assign_add(ref_var, delta)

      # We trigger a rejection when the norm exceeds the cap AND the stats have
      # warmed up: total_weight is an EMA of the constant 1.0 (limit 1.0), so
      # the 0.75 threshold means the averages have accumulated enough history.
      # A NaN batch triggers unconditionally (below).
      trigger = tf.math.logical_and(log_grad_norm > log_grad_norm_cap,
                                    theta.total_weight > 0.75)
      if has_nan is not None:
        trigger = tf.math.logical_or(trigger, has_nan)
      # Fold the *capped* log-norm into the stats so one outlier cannot drag
      # the moving averages upward.
      log_grad_norm_capped = tf.minimum(log_grad_norm, log_grad_norm_cap)

      update_moving_avg = tf.group(
          UpdateExpMovingAvg(self.vars.log_mean, log_grad_norm_capped, has_nan),
          UpdateExpMovingAvg(self.vars.log_mean_squared,
                             log_grad_norm_capped * log_grad_norm_capped,
                             has_nan),
          UpdateExpMovingAvg(self.vars.total_weight, tf.constant(1.0), has_nan),
          tf.assign_add(self.vars.total_rejections,
                        tf.cast(trigger, tf.float32)))

      # dry_run: collect stats/summaries but always report "keep the step".
      return py_utils.with_dependencies([update_moving_avg],
                                        1.0 if p.dry_run else 1.0 -
                                        tf.cast(trigger, tf.float32))
class WeightedSumLayer(base_layer.BaseLayer):
  """Returns the weighted sum of a list of input tensors."""

  @classmethod
  def Params(cls):
    """Params for this MergerLayer class."""
    p = super().Params()
    p.Define('num_sources', 0, 'Number of input sources to combine.')
    p.Define('weighted_merger_dropout_prob', 0.1,
             'Applies dropout to the weights.')
    p.Define(
        'weighted_merger_softmax', True, 'If set, applies a softmax '
        'layer on top of the weights for normalization.')
    p.Define('global_weight_scale', 1.0, 'A global scale put on weights.')
    p.Define('minimal_prob', 0.0, 'The minimal weight for each component.')
    p.Define('add_weight_summaries', False, 'If set, creates summaries for the '
             'sum weights.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    if not p.name:
      raise ValueError('Layer must have a specified name!')
    assert p.num_sources > 0, ('Must specify num_sources > 0.')
    # Dropout on the mixing weights; with prob 0.0 an identity child keeps the
    # layer structure (and theta layout) uniform across configurations.
    if p.weighted_merger_dropout_prob > 0.0:
      dropout_tpl = DropoutLayer.Params()
      dropout_tpl.keep_prob = (1.0 - p.weighted_merger_dropout_prob)
      self.CreateChild('weighted_merger_dropout', dropout_tpl)
    else:
      self.CreateChild('weighted_merger_dropout', IdentityLayer.Params())

  def _CreateLayerVariables(self):
    super()._CreateLayerVariables()
    p = self.params
    params_init = py_utils.WeightInit.Constant(0.0)
    # Weights to be learned: one scalar per source.
    pw = py_utils.WeightParams(
        shape=[p.num_sources],
        init=params_init,
        dtype=p.dtype,
        collections=[self.__class__.__name__ + '_vars'])
    self.CreateVariable('sum_weight', pw)

  def FProp(self, theta, inputs):
    """Combines the list of input tensors into a single tensor.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: A list of tensors of shape [time, batch, hidden_dim]

    Returns:
      A tensor of the same shape with input tensors.
    """
    p = self.params
    n_sources = len(inputs)

    # A single source needs no weighting at all.
    if n_sources == 1:
      return inputs[0]

    # Weighted sum of all sources, all dims must match.
    # For weighted_sum, assume input is a list of rank 3 tensors
    inputs = tf.stack(inputs)
    inputs = py_utils.HasRank(inputs, 4)

    # The constant factor is just meant to support the non-normalized scenario.
    # If softmax is applied, this factor will cancel out.
    w = theta.sum_weight * p.global_weight_scale + (1 / p.num_sources)
    w = self.weighted_merger_dropout.FProp(theta.weighted_merger_dropout, w)
    if p.weighted_merger_softmax:
      # Reserve minimal_prob mass per source and spread the softmax over the
      # remainder, so every source keeps at least minimal_prob weight.
      residual_weights = p.minimal_prob * p.num_sources
      assert residual_weights >= 0.0
      assert residual_weights < 1.0
      w = tf.nn.softmax(w, axis=0) * (1.0 - residual_weights) + p.minimal_prob
    if p.add_weight_summaries:
      for i in range(p.num_sources):
        summary_utils.scalar(p.name + f'weight_{i}', w[i])
    # Broadcast one scalar weight per source over [time, batch, hidden].
    w = tf.reshape(w, [p.num_sources, 1, 1, 1])
    output = tf.reduce_sum(inputs * w, axis=0)

    return output
class GatedAverageLayer(base_layer.BaseLayer):
  """Gated combination of n input vectors.

  Given n inputs, x_1 ... x_n. First learns a gate g in a single layer.
  Returns g_1 * x_1 + ... g_n * x_n.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('num_nodes', 0, 'Number of nodes in each input vector.')
    p.Define('num_inputs', 0, 'Number of input vectors to combine.')
    return p

  def __init__(self, params):
    """Initializes GatedAverageLayer."""
    super().__init__(params)
    p = self.params
    assert p.num_nodes > 0, 'Number of dimensions should be greater than 0.'
    assert p.num_inputs > 0, 'Number of inputs should be greater than 0.'

  def _CreateLayerVariables(self):
    super()._CreateLayerVariables()
    p = self.params
    in_size = p.num_inputs * p.num_nodes
    # Weight matrix for scalar gates: maps the concatenation of all inputs to
    # one gate logit per source.
    gm_pc = py_utils.WeightParams(
        shape=[in_size, p.num_inputs], init=p.params_init, dtype=p.dtype)
    self.CreateVariable('gm', gm_pc)

  def FProp(self, theta, inputs):
    """Gates, then merges a list of n input vectors.

    Args:
      theta: gm (gate matrix)
      inputs: List of inputs, each of shape [..., num_nodes]

    Returns:
      a gated output vector [..., num_nodes]
    """
    p = self.params
    assert len(inputs) == p.num_inputs, 'Number of inputs should match params.'

    # Attach the shape checks via identities collected into a fresh list.
    # (The previous implementation wrote the wrapped tensors back into the
    # caller-owned `inputs` list, mutating the caller's data structure as a
    # side effect.)
    checked_inputs = [
        py_utils.with_dependencies([
            py_utils.assert_shape_match([tf.shape(inp)[-1]], [p.num_nodes]),
            py_utils.assert_shape_match(tf.shape(inp), tf.shape(inputs[0])),
        ], inp) for inp in inputs
    ]

    input_shape = tf.shape(checked_inputs[0])
    # Flatten every input to [batch', num_nodes] and concatenate along the
    # feature axis to form the gate input.
    reshaped_inputs = [
        tf.reshape(inp, [-1, p.num_nodes]) for inp in checked_inputs
    ]
    concat_inputs = tf.concat(reshaped_inputs, axis=1)

    # [batch', num_inputs]: softmax-normalized gate, one weight per source.
    xmg = tf.nn.softmax(py_utils.Matmul(concat_inputs, theta.gm))
    xmg = tf.expand_dims(xmg, 2)
    # [batch', num_inputs, num_nodes]: gate-weighted sum over sources.
    stacked_inputs = tf.reshape(concat_inputs, [-1, p.num_inputs, p.num_nodes])
    gated_sum = tf.reduce_sum(xmg * stacked_inputs, axis=1)

    return tf.reshape(gated_sum, input_shape)
class LHUCLayer(base_layer.BaseLayer):
  """`Learning Hidden Unit Contribution (LHUC)` layer.

  Adaptation technique from http://aclweb.org/anthology/N18-2080: every hidden
  unit receives a learnable scalar gate ``2 * sigmoid(w)`` in (0, 2). The gate
  parameter is zero-initialized, so the layer starts as the identity
  (``2 * sigmoid(0) == 1``) and base-model training is unaffected. During
  adaptation only this layer is trained while all other model parameters are
  frozen.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('input_dim', 0, 'Dimension of the input and output.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    assert p.input_dim > 0

  def _CreateLayerVariables(self):
    super()._CreateLayerVariables()
    p = self.params
    # Zero init => gate of exactly 1.0 (identity) before any adaptation.
    gate_pc = py_utils.WeightParams(
        shape=[p.input_dim],
        init=py_utils.WeightInit.Constant(0.0),
        dtype=p.dtype)
    self.CreateVariable('w', gate_pc)

  def FProp(self, theta, inp):
    """Scales `inp` elementwise by the learnt gate ``2 * sigmoid(w)``."""
    gate = 2.0 * tf.sigmoid(theta.w)
    return gate * inp
class ResidualAdapterLayer(base_layer.BaseLayer):
  """Residual Adapter layer for NLP tasks.

  Implements the adapters of https://arxiv.org/pdf/1902.00751.pdf: a
  layer-normalized bottleneck feedforward block whose output is added back to
  its input. Adapter layers can be inserted into a frozen pre-trained model
  and trained on a new task while every other parameter stays fixed.

  Operationally this is identical to a vanilla Transformer feedforward layer;
  the separate class exists to make the intent explicit.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('input_dim', 0, 'Dimension of the input to the adapter.')
    p.Define('bottleneck_dim', 0, 'Dimension of the feedforward inner layer.')
    p.Define('ln_tpl', LayerNorm.Params(), 'Layer norm default params.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    # Down-projection to bottleneck_dim with RELU, followed by a linear
    # up-projection back to input_dim.
    self.CreateChild(
        'bottleneck',
        FeedForwardNet.Params().Set(
            name='bottleneck',
            activation=['RELU', 'NONE'],
            input_dim=p.input_dim,
            hidden_layer_dims=[p.bottleneck_dim, p.input_dim]))
    ln_params = p.ln_tpl.Copy()
    ln_params.name = 'adapter_ln'
    ln_params.input_dim = p.input_dim
    self.CreateChild('layer_norm', ln_params)

  def FProp(self, theta, x, paddings=None):
    """Fprop for Residual Adapter.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      x: [..., input_dim].
      paddings: padding applied to the features.

    Returns:
      layer_out - [..., input_dim].
    """
    # LayerNorm -> bottleneck FFN -> residual add.
    h = self.layer_norm.FProp(theta.layer_norm, x)
    h = self.bottleneck.FProp(theta.bottleneck, h, paddings)
    return x + h
def Conv2DFlops(inputs, filter_shape, stride, padding):
  """Returns number of float operations (mult/adds) for a Conv2D op.

  Args:
    inputs: the input shape. Must have four elements.
    filter_shape: the convolution filter shape. Must have four elements.
    stride: the strides along height and width, respectively.
    padding: 'SAME' or 'VALID'.

  Returns:
    Number of multiplications and additions.
  """

  def _CeilDiv(x, y):
    # Integer ceil(x / y) via floordiv.
    return tf.math.floordiv(x + y - 1, y)

  b, h, w = inputs[0], inputs[1], inputs[2]
  fh, fw, ic, oc = filter_shape
  sh, sw = stride
  if padding == 'SAME':
    # SAME keeps ceil(dim / stride) output positions per spatial dim.
    oh, ow = _CeilDiv(h, sh), _CeilDiv(w, sw)
  else:
    assert padding == 'VALID'
    # VALID only places the filter where it fully fits.
    oh, ow = _CeilDiv(h - fh + 1, sh), _CeilDiv(w - fw + 1, sw)
  output_positions = tf.cast(b * oh * ow, tf.int64)
  flops_per_position = tf.cast(fh * fw * ic * oc, tf.int64)
  # Mul/add counts as 2 flops.
  return output_positions * flops_per_position * 2
class Conv2DLayerNoPadding(base_layer.BaseLayer):
  """2-D Convolution layer w/o padding.

  TODO(laurenzo): Dedup in favor of SeparableConv2DLayer where possible.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'filter_shape', (0, 0, 0, 0),
        'Filter shape. Must be a sequence of length 4. Elements are in'
        ' the order of height (time), width (frequency), in_channel,'
        ' out_channel. ')
    p.Define(
        'filter_stride', (0, 0),
        'Filter stride to use. Must be a pair of ints. The first int'
        ' specifies the stride on the height dimension. The second int'
        ' specifies the stride on the width dimension.')
    p.Define(
        'dilations', (1, 1), ' An optional list of ints. Defaults to [1, 1]. '
        '1-D tensor of length 2. The dilation factor for each dimension '
        'of input. If set to k > 1, there will be k-1 skipped cells '
        'between each filter element on that dimension.')
    p.Define('padding', 'SAME', 'SAME|VALID')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    # Fail at construction time on misconfiguration, not at graph build time.
    assert p.name
    assert p.padding in ['SAME', 'VALID']
    assert len(p.filter_shape) == 4
    assert len(p.filter_stride) == 2
    assert len(p.dilations) == 2
    assert all(x > 0 for x in p.filter_stride)

  def _CreateLayerVariables(self):
    super()._CreateLayerVariables()
    p = self.params
    # Convolution kernel, laid out [height, width, in_channel, out_channel].
    w_pc = py_utils.WeightParams(
        shape=p.filter_shape,
        init=p.params_init,
        dtype=p.dtype,
        collections=[self.__class__.__name__ + '_vars'])
    self.CreateVariable('w', w_pc)

  def FProp(self, theta, x):
    """Apply convolution to inputs.

    Args:
      theta: A NestedMap object containing weights' values of this layer and its
        children layers.
      x: The inputs tensor. It is expected to be of shape [batch, height, width,
        channel].

    Returns:
      Convolution output.
    """
    p = self.params
    with tf.name_scope(p.name):
      # Record the estimated flop count of this op for cost accounting.
      # filter_shape may contain symbolic dims; evaluate them to concrete
      # values first.
      computation_cost.Add(
          self, 'flops',
          Conv2DFlops(
              tf.shape(x),
              filter_shape=symbolic.EvalExpr(symbolic.TENSOR_VALUES,
                                             p.filter_shape),
              stride=p.filter_stride,
              padding=p.padding))
      return tf.nn.conv2d(
          input=x,
          filters=theta.w,
          strides=[1, p.filter_stride[0], p.filter_stride[1], 1],
          padding=p.padding,
          dilations=[1, p.dilations[0], p.dilations[1], 1],
          data_format='NHWC')

  @classmethod
  def FPropMeta(cls, p, inputs):
    # Symbolic flop/shape accounting; mirrors the SAME/VALID output-size
    # formulas used by Conv2DFlops above, but with sympy expressions.
    py_utils.CheckShapes((inputs,))
    b, h, w, c = inputs
    fh, fw, ic, oc = p.filter_shape
    assert ic == c
    sh, sw = p.filter_stride
    if p.padding == 'SAME':
      oh = sympy.ceiling(h / sh)
      ow = sympy.ceiling(w / sw)
    else:
      oh = sympy.ceiling((h - fh + 1) / sh)
      ow = sympy.ceiling((w - fw + 1) / sw)
    flops = b * oh * ow * fh * fw * ic * oc * 2  # mul/add counts as 2 flop.
    outputs = tshape.Shape([b, oh, ow, oc])
    return py_utils.NestedMap(flops=flops, out_shapes=(outputs,))
class FetchLayer(base_layer.BaseLayer):
  """A layer facilitating fetching activations and their gradients."""

  def __init__(self, params):
    super().__init__(params)
    assert self.params.name
    # Populated on each FProp call: one slot per positional input.
    self._activations = None
    self._gradients = None

  @classmethod
  def FPropMeta(cls, params, *args):
    # Identity layer: zero flops, shapes pass through unchanged.
    return py_utils.NestedMap(flops=0, out_shapes=args)

  def _ReturnSingleValueOrList(self, lst):
    assert lst is not None
    assert isinstance(lst, list)
    # Unwrap the single-element case for caller convenience.
    return lst if len(lst) > 1 else lst[0]

  @property
  def activation(self):
    # Activation(s) captured by the most recent FProp.
    return self._ReturnSingleValueOrList(self._activations)

  @property
  def gradient(self):
    # Gradient(s) captured during the backward pass of the most recent FProp.
    return self._ReturnSingleValueOrList(self._gradients)

  def FProp(self, theta, *args):
    del theta
    num = len(args)
    self._activations = [None] * num
    self._gradients = [None] * num
    for i, v in enumerate(args):

      # `index=i` binds the loop index at function-definition time, so each
      # backward hook writes into its own slot (a bare closure over `i` would
      # make every hook see the final loop value).
      def FetchBak(xs, ys, dys, index=i):
        del xs, ys
        # Stash the incoming gradient, then pass it through unchanged.
        self._gradients[index] = dys
        return dys

      # Forward computation is the identity.
      def FetchFwd(x):
        return x

      self._activations[i] = py_utils.CallDefun(FetchFwd, v, bak=FetchBak)

    return tuple(self._activations) if num > 1 else self._activations[0]
class GluLayer(base_layer.BaseLayer):
  """Gated Linear Unit.

  See https://arxiv.org/abs/1612.08083 for more details.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('input_dim', 0, 'Dimension of the layer input.')
    p.Define('output_dim', 0, 'Dimension of the layer output.')
    p.Define('ln_tpl', LayerNorm.Params(), 'Layer norm default params.')
    p.Define('dense_tpl', FCLayer.Params().Set(), 'Fully connected layer.')
    p.Define(
        'activation', 'NONE',
        'Non-linearity applied after the dense layer in the value branch.')
    p.Define('gate_activation', 'SIGMOID',
             'Non-linearity applied for the gating.')
    p.Define('dropout_tpl', DropoutLayer.Params(), 'Dropout applied to output.')
    p.Define('apply_residual', True, 'Whether or not to add inputs to outputs.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    assert p.input_dim
    # output_dim of 0 (unset) defaults to input_dim.
    if p.output_dim:
      output_dim = p.output_dim
    else:
      output_dim = p.input_dim
    # The residual add requires matching input/output dimensions.
    if p.apply_residual:
      assert output_dim == p.input_dim

    # Initialize value feed-forward layer.
    params = p.dense_tpl.Copy()
    params.name = 'value_layer'
    params.input_dim = p.input_dim
    params.activation = p.activation
    params.output_dim = output_dim
    self.CreateChild('value_layer', params)

    # Initialize gate feed-forward layer.
    params = p.dense_tpl.Copy()
    params.name = 'gate_layer'
    params.input_dim = p.input_dim
    params.activation = p.gate_activation
    params.output_dim = output_dim
    self.CreateChild('gate_layer', params)

    # Initialize layer norm. Optional: skipped when ln_tpl is falsy.
    if p.ln_tpl:
      params = p.ln_tpl.Copy()
      params.name = 'layer_norm'
      params.input_dim = p.input_dim
      self.CreateChild('layer_norm', params)

    # Initialize dropout.
    dropout_tpl = p.dropout_tpl.Copy()
    self.CreateChild('dropout', dropout_tpl)

  def FProp(self, theta, inputs, paddings):
    """Computes ``value(x) * gate(x)``, with dropout and optional residual.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: Activations of shape [..., input_dim].
      paddings: Padding tensor; a trailing axis is added when its rank does not
        match `inputs`.

    Returns:
      Gated activations of shape [..., output_dim] (with `inputs` added when
      p.apply_residual is set).
    """
    # Optional pre-normalization of the shared branch input.
    if 'layer_norm' in self.children:
      inputs_normalized = self.layer_norm.FProp(theta.layer_norm, inputs)
    else:
      inputs_normalized = inputs
    # Expand paddings with a trailing axis so it broadcasts over features.
    if (paddings.shape.ndims is None or
        paddings.shape.ndims != inputs_normalized.shape.ndims):
      paddings = tf.expand_dims(paddings, -1)
    values = self.value_layer.FProp(theta.value_layer, inputs_normalized,
                                    paddings)
    gates = self.gate_layer.FProp(theta.gate_layer, inputs_normalized, paddings)
    glu_output = values * gates
    glu_output = self.dropout.FProp(theta.dropout, glu_output)
    if self.params.apply_residual:
      return inputs + glu_output
    return glu_output
class MultitaskAdapterBaseLayer(base_layer.BaseLayer):
  """Residual adapter layer for multilingual models.

  Residual adapters can be used to fine-tune a single model to multiple
  domains, tasks, or languages: https://arxiv.org/pdf/1902.00751.pdf

  Each adapter consists of a "down" projection to a smaller dimension followed
  by an "up" projection, the result of which is added back to the input
  activation. The projection weights and biases are task-specific.

  Whereas ResidualAdapterLayer learns and applies the parameters for a single
  task, this layer learns and applies the parameters for multiple tasks so that
  we have a single model serving the different tasks. The parameters can be
  trained for all tasks at the same time, or in one-off per-task training jobs.
  """

  @classmethod
  def Params(cls):
    # Shared configuration only; concrete subclasses create the task-specific
    # variables and implement FProp.
    p = super().Params()
    p.Define('num_tasks', 0, 'Number of tasks.')
    p.Define('input_dim', 0, 'Dimension of the input to the adapter.')
    p.Define('bottleneck_dim', 0, 'Dimension of the bottleneck.')
    p.Define('layer_norm_tpl', LayerNorm.Params(), 'Layer norm default params.')
    p.Define(
        'data_format', 'TBC', 'String(enum) specifying the input and output '
        'data format for this layer. Supported formats: '
        '"TBC": [time, batch, input_dim] and "BTC": [batch, time, input_dim].')
    p.Define('clip_task_ids', False,
             'If True, clips the given task ids to [0, p.num_tasks - 1].')
    return p
class MultitaskAdapterLayer(MultitaskAdapterBaseLayer):
  """MultitaskAdapterBaseLayer implemented with EmbeddingLayers."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'projection_params_init', None,
        'Weight initialization for up and down projections. Only used for '
        'weights, not biases. If None, uses default weight init, which is '
        'typically Xavier with scale of 1.0.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    # Data format is either 'TBC' (time-major) or 'BTC' (batch-major).
    assert p.data_format in ('TBC', 'BTC')
    # Task-specific projection weights and biases are stored as embedding
    # tables keyed by task id: one row per task, holding the flattened
    # parameter tensor for that task.
    base_emb_params = EmbeddingLayer.Params().Set(
        vocab_size=p.num_tasks, max_num_shards=1)
    down_proj_w_params = base_emb_params.Copy()
    down_proj_w_params.Set(
        embedding_dim=p.input_dim * p.bottleneck_dim, name='down_proj_w')
    if p.projection_params_init:
      down_proj_w_params.params_init = p.projection_params_init
    down_proj_b_params = base_emb_params.Copy()
    down_proj_b_params.Set(embedding_dim=p.bottleneck_dim, name='down_proj_b')
    up_proj_w_params = base_emb_params.Copy()
    up_proj_w_params.Set(
        embedding_dim=p.bottleneck_dim * p.input_dim, name='up_proj_w')
    if p.projection_params_init:
      up_proj_w_params.params_init = p.projection_params_init
    up_proj_b_params = base_emb_params.Copy()
    up_proj_b_params.Set(embedding_dim=p.input_dim, name='up_proj_b')
    self.CreateChild('down_proj_w', down_proj_w_params)
    self.CreateChild('down_proj_b', down_proj_b_params)
    self.CreateChild('up_proj_w', up_proj_w_params)
    self.CreateChild('up_proj_b', up_proj_b_params)
    params = p.layer_norm_tpl.Copy()
    params.name = 'adapter_ln'
    params.input_dim = p.input_dim
    self.CreateChild('layer_norm', params)

  def FProp(self, theta, inputs, tasks):
    """Fprop for multitask adapter.

    Args:
      theta: A NestedMap object containing weights' values of this layer and its
        children layers.
      inputs: A tensor containing the activations from the previous layer. For
        'TBC', the shape is [time, batch, input_dim] and for 'BTC', it's [batch,
        time, input_dim].
      tasks: An int32 tensor containing the task ID for each input. If 'tasks'
        is of rank 2, we assume it to be of shape [time, batch] if 'TBC' and
        [batch, time] if 'BTC' (i.e. laid out like `inputs`, minus the hidden
        dimension), indicating a different task for each timestep. In this
        case we look up adapter params for each timestep. If 'tasks' is of
        rank 1, we assume it to be of shape [batch], indicating a single task
        for all timesteps of a sequence. This latter setup uses substantially
        less memory and is generally preferred.

    Returns:
      A tensor containing the adapted activations with shape
      [time, batch, input_dim] for 'TBC' and [batch, time, input_dim] for 'BTC'.
    """
    p = self.params
    inputs_shape = tf.shape(inputs)
    per_timestep_task = (tasks.shape.ndims == 2)
    # For 'TBC' the batch axis is 1; for 'BTC' it is 0.
    batch_index = 1 if p.data_format == 'TBC' else 0
    time_index = 1 - batch_index
    inputs = py_utils.with_dependencies(
        [
            # Checks that inputs has 3 dimensions, last is hidden dim.
            py_utils.assert_shape_match(inputs_shape, [-1, -1, p.input_dim]),
            # Checks that inputs and tasks have same batch dimension.
            py_utils.assert_shape_match([inputs_shape[batch_index]], [
                tf.shape(tasks)[batch_index]
                if per_timestep_task else tf.shape(tasks)[0]
            ])
        ],
        inputs)
    if p.clip_task_ids:
      tasks = tf.clip_by_value(tasks, 0, p.num_tasks - 1)

    # To support different task for each timetstep, flatten inputs and
    # tasks. Below, 'batch' now refers to flattened batch size, time * batch.
    if per_timestep_task:
      tasks = py_utils.with_dependencies(
          [
              # Checks that inputs and tasks have the same leading dimension
              # (time for 'TBC', batch for 'BTC').
              py_utils.assert_shape_match(inputs_shape[:1],
                                          tf.shape(tasks)[:1])
          ],
          tasks)
      tasks = tf.reshape(tasks, [-1])
      if p.data_format == 'TBC':
        inputs = tf.reshape(inputs, [1, -1, p.input_dim])
      else:
        inputs = tf.reshape(inputs, [-1, 1, p.input_dim])

    # Lookup all weights and biases
    # [batch] -> [batch, hidden * k] -> [batch, hidden, k]
    down_weights = tf.reshape(
        self.down_proj_w.EmbLookup(theta.down_proj_w, tasks),
        [-1, p.input_dim, p.bottleneck_dim])
    # [batch] -> [batch, k] -> [1, batch, k] if 'TBC' else [batch, 1, k]
    down_biases = tf.expand_dims(
        self.down_proj_b.EmbLookup(theta.down_proj_b, tasks), time_index)
    # [batch] -> [batch, k * hidden] -> [batch, k, hidden]
    up_weights = tf.reshape(
        self.up_proj_w.EmbLookup(theta.up_proj_w, tasks),
        [-1, p.bottleneck_dim, p.input_dim])
    # [batch] -> [batch, h] -> [1, batch, h] if 'TBC' else [batch, 1, h]
    up_biases = tf.expand_dims(
        self.up_proj_b.EmbLookup(theta.up_proj_b, tasks), time_index)

    # Layer norm -> down-projection -> non-linearity -> up-projection
    norm_inputs = self.layer_norm.FProp(theta.layer_norm, inputs)
    # If per_timestep_task, t = 1, b = time * batch.
    # Otherwise, t = time, b = batch.
    if p.data_format == 'TBC':
      down_projected = tf.einsum('tbh,bhk->tbk', norm_inputs, down_weights)
    else:
      down_projected = tf.einsum('bth,bhk->btk', norm_inputs, down_weights)
    down_projected += down_biases
    down_projected = tf.nn.relu(down_projected)
    if p.data_format == 'TBC':
      up_projected = tf.einsum('tbk,bkh->tbh', down_projected, up_weights)
    else:
      up_projected = tf.einsum('btk,bkh->bth', down_projected, up_weights)
    up_projected += up_biases
    # Residual connection around the adapter.
    output = inputs + up_projected

    # Unflatten output:
    #   for 'TBC': [1, time * batch, hidden] -> [time, batch, hidden]
    #   for 'BTC': [batch * time, 1, hidden] -> [batch, time, hidden]
    if per_timestep_task:
      output = tf.reshape(output, inputs_shape)
    return output
class MultitaskAdapterEinsumLayer(MultitaskAdapterBaseLayer):
"""MultitaskAdapterBaseLayer implemented with Einsum.
The embedding-based solution sometimes triggers b/175464137.
"""
@classmethod
def Params(cls):
  p = super().Params()
  # The einsum implementation only supports batch-major inputs; override the
  # base class's 'TBC' default.
  p.data_format = 'BTC'
  return p
def __init__(self, params):
  super().__init__(params)
  p = self.params
  # Only batch-major inputs are supported by the einsum formulation.
  assert p.data_format == 'BTC'
  params = p.layer_norm_tpl.Copy()
  params.input_dim = p.input_dim
  # NOTE(review): unlike MultitaskAdapterLayer, no explicit name is set on the
  # layer-norm params here — presumably CreateChild supplies one; confirm.
  self.CreateChild('layer_norm', params)
def _CreateLayerVariables(self):
super()._CreateLayerVariables()
p = self.params
down_w_pc = py_utils.WeightParams(
shape=[p.num_tasks, p.input_dim, p.bottleneck_dim],
init=p.params_init,
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
self.CreateVariable('down_w', down_w_pc)
down_b_pc = py_utils.WeightParams(
shape=[p.num_tasks, p.bottleneck_dim],
init=py_utils.WeightInit.Constant(0.),
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
self.CreateVariable('down_b', down_b_pc)
up_w_pc = py_utils.WeightParams(
shape=[p.num_tasks, p.bottleneck_dim, p.input_dim],
init=p.params_init,
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
self.CreateVariable('up_w', up_w_pc)
up_b_pc = py_utils.WeightParams(
shape=[p.num_tasks, p.input_dim],
init=py_utils.WeightInit.Constant(0.),
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
self.CreateVariable('up_b', up_b_pc)
def FProp(self, theta, inputs, tasks):
"""Fprop for multitask adapter.
Args:
theta: A NestedMap object containing weights' values of this layer and its
children layers.
inputs: A tensor containing the activations from the previous layer.
[batch, time, input_dim].
tasks: An int32 tensor containing the task ID for each input. [batch].
Returns:
A tensor containing the adapted activations with the same shape as inputs.
"""
p = self.params
inputs = self._CastToFPropDtype(inputs)
assert tasks.shape.ndims == 1
if p.clip_task_ids:
tasks = tf.clip_by_value(tasks, 0, p.num_tasks - 1)
# [batch, num_tasks].
tasks_onehot = tf.one_hot(tasks, p.num_tasks, axis=-1, dtype=inputs.dtype)
# Einsum axis names:
# b - batch
# t - time
# k - task
# i - input_dim
# n - bottleneck_dim
# [batch, input_dim, bottleneck_dim].
with tf.name_scope('down_w_einsum'):
down_w = tf.einsum('bk,kin->bin', tasks_onehot, theta.down_w)
# [batch, 1, bottleneck_dim].
with tf.name_scope('down_b_einsum'):
down_b = tf.einsum('bk,kn->bn', tasks_onehot, theta.down_b)[:, None, :]
# [batch, bottleneck_dim, input_dim].
with tf.name_scope('up_w_einsum'):
up_w = tf.einsum('bk,kni->bni', tasks_onehot, theta.up_w)
# [batch, 1, input_dim].
with tf.name_scope('up_b_einsum'):
up_b = tf.einsum('bk,ki->bi', tasks_onehot, theta.up_b)[:, None, :]
# Layer norm -> down-projection -> non-linearity -> up-projection
with tf.name_scope('layer_norm_feed'):
norm_inputs = self.layer_norm.FProp(theta.layer_norm, inputs)
# [batch, time, bottleneck_dim].
down_projected = tf.einsum('bti,bin->btn', norm_inputs, down_w) + down_b
# ReLU.
down_projected = tf.nn.relu(down_projected)
# [batch, time, input_dim].
up_projected = tf.einsum('btn,bni->bti', down_projected, up_w) + up_b
# Residual.
return inputs + up_projected
class CCTGatingNetwork(quant_utils.QuantizableLayer):
  """A gating network that is continous for training and discrete for eval.

  Based on the gating network from https://arxiv.org/abs/2002.07106.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('input_dim', 0, 'Depth of the input to the network.')
    p.Define('hidden_layer_dim', 0, 'Depth of the hidden layer outputs.')
    p.Define('num_outputs', 0, 'Number of scalar gate outputs.')
    p.Define('noise_std', 1.0, 'Standard deviation for gating noise.')
    p.Define('noise_warmup_steps', 1.0, 'Steps to full noise.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    # Noise std ramps linearly from 0 at step 0 to p.noise_std at
    # p.noise_warmup_steps.
    params = schedule.PolynomialSchedule.Params()
    params.start = (0, 0.0)
    params.limit = (p.noise_warmup_steps, p.noise_std)
    self.CreateChild('noise_std', params)
    # Two-layer feed-forward net: RELU hidden layer, linear output layer
    # producing one logit per gate.
    params = FeedForwardNet.Params()
    params.name = 'gating_layer'
    params.input_dim = p.input_dim
    params.activation = ['RELU', 'NONE']
    params.hidden_layer_dims = [p.hidden_layer_dim, p.num_outputs]
    self.CreateChild('gatingfflayer', params)

  def FProp(self, theta, inputs, paddings=None):
    """Computes gate values.

    During eval the gates are hard 0/1 (thresholded at logit 0); during
    training they are a continuous relaxation: sigmoid of the logits plus
    scheduled deterministic noise.

    Args:
      theta: A NestedMap of this layer's weights.
      inputs: Input tensor shaped [..., input_dim].
      paddings: Optional paddings, forwarded to the gating FFN.

    Returns:
      Gate values shaped [..., num_outputs].
    """
    p = self.params
    p_c = self.gatingfflayer.FProp(theta.gatingfflayer, inputs, paddings)
    if self.do_eval:
      # Discrete gating: 1 where the logit is non-negative, else 0.
      ones = tf.ones(tf.shape(p_c), py_utils.FPropDtype(p))
      zeros = tf.zeros(tf.shape(p_c), py_utils.FPropDtype(p))
      p_c = tf.where(
          tf.greater_equal(p_c, tf.constant(0.0, dtype=py_utils.FPropDtype(p))),
          ones, zeros)
    else:
      noise_std = self.noise_std.Value()
      noise = py_utils.DeterministicVN(p, tf.shape(p_c), std=noise_std)
      p_c = tf.nn.sigmoid(p_c + noise)
    return p_c

  @classmethod
  def FPropMeta(cls, p, inputs, paddings=None):
    py_utils.CheckShapes((inputs,))
    assert inputs[-1] == p.input_dim
    in_dim = inputs[-1]
    other_dims = inputs.num_elements() / in_dim
    # Approximate cost of both FFN layers. BUGFIX: the second term previously
    # overwrote (rather than accumulated onto) the first layer's flops.
    flops = 5 * other_dims * in_dim * p.hidden_layer_dim
    flops += 5 * other_dims * p.num_outputs * p.hidden_layer_dim
    out_shape = tshape.Shape(inputs[:-1] + [symbolic.ToStatic(p.num_outputs)])
    return py_utils.NestedMap(flops=flops, out_shapes=(out_shape,))
class CondScaleShiftFFNLayer(base_layer.BaseLayer):
  """Feature Modulation layer.

  Projects a conditioning input through an FFN and a final linear layer and
  splits the result into a scale tensor and a shift tensor.
  https://distill.pub/2018/feature-wise-transformations/
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('input_dim', 0, 'Depth of the input.')
    p.Define('output_dim', 0, 'Depth of the output.')
    p.Define('ffn', FeedForwardNet.Params(), 'Projection layer params')
    p.Define('scale_fn', 'NONE',
             'The activation function to use for scale output')
    p.Define('shift_fn', 'NONE',
             'The activation function to use for shift output')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    # The final FC layer emits both halves at once; tf.split in FProp
    # assigns the 1st half to scale and the 2nd half to shift.
    output_dim = p.output_dim * 2
    params_ffn = p.ffn.Copy().Set(
        input_dim=p.input_dim, name='{}_ffn'.format(p.name))
    params_fcout = FCLayer.Params().Copy().Set(
        input_dim=params_ffn.hidden_layer_dims[-1],
        output_dim=output_dim,
        activation='NONE',
        name='{}_fcout'.format(p.name))
    self.CreateChild('ffn', params_ffn)
    self.CreateChild('fcout', params_fcout)

  def FProp(self, theta, inputs, paddings=None):
    """Calculate scale shift and modify input.

    Args:
      theta: params.
      inputs: The input tensor. Shaped [..., input_dim].
      paddings: The input padding tensors.

    Returns:
      Output after calculating shift and scale (2 tensors).
      Shaped [..., output_dim].

    Raises:
      ValueError: if p.scale_fn or p.shift_fn names an unknown activation.
    """
    p = self.params
    ffn_output = self.ffn.FProp(theta.ffn, inputs, paddings)
    fcout_output = self.fcout.FProp(theta.fcout, ffn_output, paddings)
    scale_output, shift_output = tf.split(
        fcout_output, num_or_size_splits=2, axis=-1)

    def OpWrapper(name, tensor):
      """Applies the activation function `name` to `tensor`."""
      if activations.IsSupported(name):
        op = activations.GetFn(name)
      elif name == 'EXP':
        op = tf.exp
      elif name == 'NONE':
        op = tf.identity
      else:
        # BUGFIX: previously raised a bare ValueError() with no context.
        raise ValueError('Unsupported activation function: {}'.format(name))
      return op(tensor)

    scale_output = OpWrapper(p.scale_fn, scale_output)
    shift_output = OpWrapper(p.shift_fn, shift_output)
    return scale_output, shift_output
class StatisticalPoolingLayer(base_layer.BaseLayer):
  """A statistical pooling layer that perform sequence pooling.

  Convert a sequence of vectors into their mean and standard deviation vectors.
  The layer has no trainable parameters.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('has_stddev', True, 'Include standard deviation.')
    p.Define(
        'input_data_format', 'BTC',
        'String(enum) specifying the output data format of the encoder. '
        'Also used for output converters.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    assert p.input_data_format in {'BTC', 'TBC'}, 'Expect TBC or BTC inputs.'

  def FProp(self, inputs, paddings=None):
    """Pools a sequence into per-utterance mean (and stddev) vectors.

    NOTE(review): unlike most layers, this FProp takes no `theta` argument —
    consistent with the layer having no trainable parameters, but confirm
    callers invoke it with this signature.

    Args:
      inputs: [batch, time, dim] if 'BTC', else [time, batch, dim].
      paddings: optional 0/1 tensor, [batch, time] ('BTC') or [time, batch]
        ('TBC'); 1 marks padded positions.

    Returns:
      [batch, 2 * dim] mean and stddev concatenated if p.has_stddev,
      else [batch, dim] mean only.
    """
    # transform input features
    p = self.params
    if p.input_data_format == 'TBC':
      # Normalize to batch-major layout for the reductions below.
      inputs = tf.transpose(inputs, [1, 0, 2])
      if paddings is not None:
        paddings = tf.transpose(paddings, [1, 0])
    # process paddings & compute sequence lengths etc.
    (batch, time, dim) = inputs.shape
    if paddings is not None:
      padding_mask = 1 - paddings
    else:
      padding_mask = tf.ones([batch, time])
    # Per-example valid length, broadcast across the feature dim so it can
    # divide the summed features elementwise.
    seqlens = tf.reduce_sum(padding_mask, axis=1)
    seqlens = tf.repeat(tf.expand_dims(seqlens, axis=-1), repeats=dim, axis=-1)
    padding_mask = tf.expand_dims(padding_mask, axis=-1)
    # compute mean first (padded positions are zeroed so they do not
    # contribute to the sum; divide_no_nan guards fully-padded rows)
    inputs = inputs * padding_mask
    mean = tf.math.divide_no_nan(
        tf.math.reduce_sum(inputs, axis=1, keepdims=False), seqlens)
    # compute stddev if required
    if p.has_stddev:
      # Biased (population) standard deviation over the valid positions.
      values = tf.repeat(tf.expand_dims(mean, axis=1), repeats=time, axis=1)
      values = tf.math.square(inputs - values) * padding_mask
      values = tf.math.divide_no_nan(
          tf.math.reduce_sum(values, axis=1, keepdims=False), seqlens)
      stddev = tf.math.sqrt(values)
      return tf.concat([mean, stddev], axis=1)
    else:
      return mean
class MaskedLmDataAugmenter(base_layer.BaseLayer):
  """Performs data augmentation as according to the BERT paper.

  https://arxiv.org/pdf/1810.04805.pdf
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('vocab_size', 0, 'The total vocabulary size')
    p.Define(
        'mask_prob', 0.12,
        'Probability at which a token is replaced by the special '
        ' <MASK> token.')
    p.Define('random_prob', 0.015,
             'Probability at which a token is replaced by a random token.')
    p.Define('same_prob', 0.015,
             'Probability at which a token is replaced by itself.')
    p.Define('mask_token_id', -1, 'Id of the special <MASK> token.')
    p.Define('random_avoid_ids', [],
             'Ids to avoid when replacing with random token.')
    p.Define(
        'mlm_duration', 0, 'Duration for MLM task. mlm_task_weight '
        'decreased linearly to 0 at step mlm_duration. No decay when 0.')
    p.Define(
        'min_replace_prob_ratio', 0, 'The total_replacement_prob '
        'will decrease to total_replacement_prob * min_replace_prob_ratio '
        'at step mlm_duration.')
    return p

  def FProp(self, theta, inputs, paddings=None):
    """Applies data augmentation by randomly mask/replace tokens in inputs.

    NOTE(review): `paddings` defaults to None but is used unconditionally
    below (`1.0 - paddings`); callers appear to always pass it — confirm.

    Args:
      theta: A NestedMap object containing weights' values of this layer and its
        children layers.
      inputs: An int32 tensor of shape [batch, length].
      paddings: A 0/1 tensor of shape [batch, length].

    Returns:
      A pair <new_inputs, mask>:
      new_inputs: An int32 tensor of shape [batch, length]. The new token ids
        after data augmentation.
      mask: A 0/1 tensor. A "1" indicates the corresponding token at that
        position had undergone the data augmentation process.
    """
    p = self.params
    assert p.vocab_size > 0
    assert p.mask_token_id >= 0
    assert p.mask_prob + p.random_prob + p.same_prob < 1.0
    if p.mask_prob + p.random_prob == 0:
      assert p.same_prob > 0

    def _UniformSample(sample_p):
      # Returns a 0/1 tensor shaped like `inputs` with P(1) = sample_p.
      return tf.cast(
          tf.less(
              tf.random.uniform(tf.shape(inputs), 0, 1.0, seed=p.random_seed),
              sample_p), py_utils.FPropDtype(p))

    # Optionally anneal the replacement probability towards
    # total_replacement_prob * min_replace_prob_ratio over mlm_duration steps.
    if p.mlm_duration > 0:
      global_step = tf.minimum(
          tf.cast(py_utils.GetGlobalStep(), tf.float32), p.mlm_duration)
      replace_prob_ratio = 1.0 - 1.0 * global_step / p.mlm_duration
      replace_prob_ratio = tf.maximum(replace_prob_ratio,
                                      p.min_replace_prob_ratio)
    else:
      replace_prob_ratio = 1.0
    total_replacement_prob = p.mask_prob + p.random_prob + p.same_prob
    # valid_tokens == 1.0 if the corresponding position is a valid token.
    valid_tokens = tf.cast(1.0 - paddings, py_utils.FPropDtype(p))
    # replacement == 1.0 if the corresponding token is to be replaced by
    # something else (mask, random, self).
    replacement_pos = valid_tokens * _UniformSample(
        total_replacement_prob * replace_prob_ratio)
    # First sample the token positions to be masked out.
    # The conditional-probability trick below partitions replacement_pos
    # into disjoint mask/random/self position sets with the right marginals.
    remaining_prob = total_replacement_prob
    remaining_pos = replacement_pos
    mask_prob = p.mask_prob / remaining_prob
    # mask_pos == 1.0 if the corresponding token should be masked.
    mask_pos = remaining_pos * _UniformSample(mask_prob)
    # Next sample the token positions to be replaced by random tokens.
    remaining_prob -= p.mask_prob
    remaining_pos -= mask_pos
    random_prob = p.random_prob / remaining_prob
    random_pos = remaining_pos * _UniformSample(random_prob)
    # Lastly, token positions to be replaced by self.
    self_pos = remaining_pos - random_pos
    if p.random_avoid_ids:
      # Sample random replacement tokens from a categorical distribution
      # that is uniform over the vocab minus the avoided ids.
      categorical_probs = (
          tf.ones([1, p.vocab_size]) / (p.vocab_size - len(p.random_avoid_ids)))
      for x in p.random_avoid_ids:
        # Set categorical_probs[0, x] = 0.
        categorical_probs = tf.minimum(
            categorical_probs,
            tf.one_hot([x], p.vocab_size, on_value=1e-24, off_value=1.0))
      random_tokens = tf.reshape(
          tf.random.categorical(
              tf.math.log(categorical_probs),
              tf.size(inputs),
              seed=p.random_seed,
              dtype=inputs.dtype), tf.shape(inputs))
    else:
      random_tokens = tf.random.uniform(
          tf.shape(inputs),
          0,
          p.vocab_size,
          seed=p.random_seed,
          dtype=inputs.dtype)
    mask_tokens = tf.fill(tf.shape(inputs), p.mask_token_id)
    # The four position indicator sets are disjoint and cover every token
    # exactly once; the assert_equal below checks that invariant.
    no_replacement = tf.cast(1.0 - replacement_pos, dtype=inputs.dtype)
    replace_with_mask = tf.cast(mask_pos, dtype=inputs.dtype)
    replace_with_random = tf.cast(random_pos, dtype=inputs.dtype)
    replace_with_self = tf.cast(self_pos, dtype=inputs.dtype)
    sum_all = (
        no_replacement + replace_with_mask + replace_with_random +
        replace_with_self)
    augmented = py_utils.with_dependencies(
        [py_utils.assert_equal(sum_all, tf.fill(tf.shape(inputs), 1))],
        inputs * no_replacement + mask_tokens * replace_with_mask +
        random_tokens * replace_with_random + inputs * replace_with_self)
    return augmented, replacement_pos
|
#!/usr/bin/env python
#
# Wrapper script for starting the biopet-vcfstats JAR package
#
# This script is written for use with the Conda package manager and is copied
# from the peptide-shaker wrapper. Only the parameters are changed.
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
#
# This file was automatically generated by the sbt-bioconda plugin.
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
# Name of the JAR to launch; expected to live next to this script (or in
# the --exec_dir copy of the distribution).
jar_file = 'VcfStats-assembly-1.0.jar'
# JVM memory options applied when the caller supplies none and
# _JAVA_OPTIONS is unset (see jvm_opts below).
default_jvm_mem_opts = []
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Return the executable name of the Java interpreter.

    Prefers $JAVA_HOME/bin/java when it exists and is executable;
    otherwise falls back to plain 'java' resolved via PATH.
    """
    home = getenv('JAVA_HOME')
    if home:
        candidate = os.path.join(home, os.path.join('bin', 'java'))
        if access(candidate, X_OK):
            return candidate
    return 'java'
def jvm_opts(argv):
    """Construct list of Java arguments based on our argument list.

    The argument list passed in argv must not include the script name.

    Arguments are sorted into buckets:
      * ``-Xm...``          -> JVM memory options
      * ``-D...`` / ``-XX...`` -> JVM property / advanced options
      * ``--exec_dir=DIR``  -> target directory for the distribution; the
                               distribution is copied there on first use.
                               Consumed here, not passed to the application.
      * anything else       -> passed through to the application

    The return value is a 4-tuple of the form:
      (memory_options, prop_options, passthrough_options, exec_dir)
    where exec_dir is None unless --exec_dir= was supplied.
    (BUGFIX: the docstring previously claimed a 3-tuple.)
    """
    mem_opts = []
    prop_opts = []
    pass_args = []
    exec_dir = None

    for arg in argv:
        if arg.startswith('-D'):
            prop_opts.append(arg)
        elif arg.startswith('-XX'):
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            mem_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            exec_dir = arg.split('=')[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                # First use: copy the whole distribution (jar, lib,
                # resources) next to where it will be executed.
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir,
                                symlinks=False, ignore=None)
        else:
            pass_args.append(arg)

    # In the original shell script the test coded below read:
    #   if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
    # To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
    # in the second condition, so a null envar value counts as True!
    if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts

    return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
    """Launch the bundled JAR with JVM options derived from sys.argv.

    PeptideShaker updates files relative to the path of the jar file.
    In a multiuser setting, the option --exec_dir="exec_dir" can be used
    as the location for the distribution; if that directory does not yet
    exist, the jar file, lib, and resources are copied to it first.
    """
    java = java_executable()
    mem_opts, prop_opts, pass_args, exec_dir = jvm_opts(sys.argv[1:])
    jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])

    # A first passthrough argument that looks like a fully-qualified class
    # name ('eu...') switches from -jar launching to classpath launching.
    if pass_args and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'

    jar_path = os.path.join(jar_dir, jar_file)
    java_args = [java] + mem_opts + prop_opts + [jar_arg, jar_path] + pass_args

    sys.exit(subprocess.call(java_args))


if __name__ == '__main__':
    main()
|
import React from 'react';
import { graphql } from 'gatsby';
import SEO from '../../components/SEO';
import Layout from '../../layouts/index';
const Team = (props) => {
const teams = props.data.allMarkdownRemark.edges;
return (
<Layout bodyClass="page-teams">
<SEO title="Team" />
<div className="intro">
<div className="container">
<div className="row">
<div className="col-12">
<h1>Meet The Team</h1>
<p>
Our team of qualified accountants and financial consultants can help your business
at any stage of it’s growth.
</p>
</div>
</div>
</div>
</div>
<div className="container pb-6">
<div className="row">
{teams.map(edge => (
<div key={edge.node.frontmatter.path} className="col-12 col-md-6 mb-1">
<div className="team card-two">
<div className="card-header">
<div className="card-header-left">
{edge.node.frontmatter.image && (
<div className="card-image">
<img
className="img-fluid mb-2"
src={edge.node.frontmatter.image}
/>
</div>
)}
</div>
<div className="card-right">
<h2 className="card-title">{edge.node.frontmatter.title}</h2>
<ul className="card-meta">
<li>
<strong>{edge.node.frontmatter.jobtitle}</strong>
</li>
<li>
<a target="_blank" href={edge.node.frontmatter.linkedinurl}>
{edge.node.frontmatter.linkedinurl}
</a>
</li>
<li>
<a href={edge.node.frontmatter.email}>{edge.node.frontmatter.email}</a>
</li>
</ul>
</div>
</div>
<div
className="team-content"
dangerouslySetInnerHTML={{ __html: edge.node.html }}
/>
</div>
</div>
))}
</div>
</div>
</Layout>
);
};
// Gatsby page query: all markdown files under /team/, newest first.
// The result is passed to the Team component as `props.data`.
export const query = graphql`
  query TeamQuery {
    allMarkdownRemark(
      filter: { fileAbsolutePath: { regex: "/team/" } }
      sort: { fields: [frontmatter___date], order: DESC }
    ) {
      edges {
        node {
          html
          frontmatter {
            title
            path
            image
            jobtitle
            linkedinurl
            email
          }
        }
      }
    }
  }
`;

export default Team;
|
from ops import *
from utils import *
import time
from tensorflow.contrib.data import prefetch_to_device, shuffle_and_repeat, map_and_batch
import numpy as np
from glob import glob
class StarGAN(object) :
def __init__(self, sess, args):
    """Stores the TF session and copies all hyperparameters from args."""
    self.model_name = 'StarGAN'
    self.sess = sess
    self.checkpoint_dir = args.checkpoint_dir
    self.load_step = args.load_step
    self.sample_dir = args.sample_dir
    self.result_dir = args.result_dir
    self.log_dir = args.log_dir
    # Dataset is expected under ./dataset/<dataset_name>.
    self.dataset_name = args.dataset
    self.dataset_path = os.path.join('./dataset', self.dataset_name)
    self.augment_flag = args.augment_flag

    self.epoch = args.epoch
    self.iteration = args.iteration
    # Linear learning-rate decay starting at decay_epoch (see train()).
    self.decay_flag = args.decay_flag
    self.decay_epoch = args.decay_epoch

    self.gan_type = args.gan_type

    self.batch_size = args.batch_size
    self.print_freq = args.print_freq
    self.save_freq = args.save_freq

    self.init_lr = args.lr
    self.ch = args.ch

    # Attribute labels define the number of domains (c_dim).
    self.selected_attrs = args.selected_attrs
    self.custom_label = np.expand_dims(args.custom_label, axis=0)
    self.c_dim = len(self.selected_attrs)

    """ Weight """
    self.adv_weight = args.adv_weight
    self.rec_weight = args.rec_weight
    self.cls_weight = args.cls_weight
    self.ld = args.ld  # gradient-penalty coefficient

    """ Generator """
    self.n_res = args.n_res

    """ Discriminator """
    self.n_dis = args.n_dis
    self.n_critic = args.n_critic  # D updates per G update

    self.img_size = args.img_size
    self.img_ch = args.img_ch

    print()
    print("##### Information #####")
    print("# gan type : ", self.gan_type)
    print("# selected_attrs : ", self.selected_attrs)
    print("# dataset : ", self.dataset_name)
    print("# batch_size : ", self.batch_size)
    print("# epoch : ", self.epoch)
    print("# iteration per epoch : ", self.iteration)
    print()
    print("##### Generator #####")
    print("# residual blocks : ", self.n_res)
    print()
    print("##### Discriminator #####")
    print("# discriminator layer : ", self.n_dis)
    print("# the number of critic : ", self.n_critic)
##################################################################################
# Generator
##################################################################################
def generator(self, x_init, c, reuse=False, scope="generator"):
    """Builds the StarGAN generator graph.

    Args:
        x_init: input image tensor [batch, h, w, img_ch].
        c: target domain label tensor [batch, c_dim].
        reuse: reuse variables in `scope` (for additional graph copies).
        scope: variable scope name.

    Returns:
        Translated image tensor, same spatial shape as x_init, in [-1, 1]
        (tanh output).
    """
    channel = self.ch
    # Spatially replicate the label vector and concatenate it to the image
    # as extra input channels.
    c = tf.cast(tf.reshape(c, shape=[-1, 1, 1, c.shape[-1]]), tf.float32)
    c = tf.tile(c, [1, x_init.shape[1], x_init.shape[2], 1])
    x = tf.concat([x_init, c], axis=-1)

    with tf.variable_scope(scope, reuse=reuse) :
        x = conv(x, channel, kernel=7, stride=1, pad=3, use_bias=False, scope='conv')
        x = instance_norm(x, scope='ins_norm')
        x = relu(x)

        # Down-Sampling: two stride-2 convs, doubling channels each time.
        for i in range(2) :
            x = conv(x, channel*2, kernel=4, stride=2, pad=1, use_bias=False, scope='conv_'+str(i))
            x = instance_norm(x, scope='down_ins_norm_'+str(i))
            x = relu(x)

            channel = channel * 2

        # Bottleneck: n_res residual blocks at constant channel width.
        for i in range(self.n_res):
            x = resblock(x, channel, use_bias=False, scope='resblock_' + str(i))

        # Up-Sampling: two stride-2 deconvs, halving channels each time.
        for i in range(2) :
            x = deconv(x, channel//2, kernel=4, stride=2, use_bias=False, scope='deconv_'+str(i))
            x = instance_norm(x, scope='up_ins_norm'+str(i))
            x = relu(x)

            channel = channel // 2

        x = conv(x, channels=3, kernel=7, stride=1, pad=3, use_bias=False, scope='G_logit')
        x = tanh(x)

        return x
##################################################################################
# Discriminator
##################################################################################
def discriminator(self, x_init, reuse=False, scope="discriminator"):
    """Builds the discriminator graph.

    Args:
        x_init: input image tensor [batch, img_size, img_size, img_ch].
        reuse: reuse variables in `scope`.
        scope: variable scope name.

    Returns:
        (logit, c): a patch-level real/fake logit map and a per-image
        domain classification tensor [batch, c_dim].
    """
    with tf.variable_scope(scope, reuse=reuse) :
        channel = self.ch
        x = conv(x_init, channel, kernel=4, stride=2, pad=1, use_bias=True, scope='conv_0')
        x = lrelu(x, 0.01)

        # n_dis - 1 further stride-2 convs, doubling channels each time.
        for i in range(1, self.n_dis):
            x = conv(x, channel * 2, kernel=4, stride=2, pad=1, use_bias=True, scope='conv_' + str(i))
            x = lrelu(x, 0.01)

            channel = channel * 2

        # After n_dis stride-2 layers the feature map is
        # img_size / 2^n_dis on a side; a kernel of that size reduces the
        # classification head to a single spatial position.
        c_kernel = int(self.img_size / np.power(2, self.n_dis))

        logit = conv(x, channels=1, kernel=3, stride=1, pad=1, use_bias=False, scope='D_logit')
        c = conv(x, channels=self.c_dim, kernel=c_kernel, stride=1, use_bias=False, scope='D_label')
        c = tf.reshape(c, shape=[-1, self.c_dim])

        return logit, c
##################################################################################
# Model
##################################################################################
def gradient_panalty(self, real, fake, scope="discriminator"):
    """Computes the gradient penalty term for WGAN-GP / WGAN-LP / DRAGAN.

    NOTE(review): the method name is misspelled ('panalty') but kept as-is
    since it is part of the public interface.

    Args:
        real: batch of real images.
        fake: batch of generated images (unused for 'dragan').
        scope: discriminator variable scope, reused for the extra forward
            pass on the interpolated samples.

    Returns:
        Scalar penalty term (0 for gan types without a penalty).
    """
    if self.gan_type == 'dragan' :
        # DRAGAN perturbs real samples with noise scaled by the batch std.
        shape = tf.shape(real)
        eps = tf.random_uniform(shape=shape, minval=0., maxval=1.)
        x_mean, x_var = tf.nn.moments(real, axes=[0, 1, 2, 3])
        x_std = tf.sqrt(x_var)  # magnitude of noise decides the size of local region

        noise = 0.5 * x_std * eps  # delta in paper

        # Author suggested U[0,1] in original paper, but he admitted it is bug in github
        # (https://github.com/kodalinaveen3/DRAGAN). It should be two-sided.
        alpha = tf.random_uniform(shape=[shape[0], 1, 1, 1], minval=-1., maxval=1.)
        interpolated = tf.clip_by_value(real + alpha * noise, -1., 1.)  # x_hat should be in the space of X

    else :
        # WGAN-GP/LP interpolate linearly between real and fake samples.
        alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1], minval=0., maxval=1.)
        interpolated = alpha*real + (1. - alpha)*fake

    logit, _ = self.discriminator(interpolated, reuse=True, scope=scope)

    GP = 0
    grad = tf.gradients(logit, interpolated)[0]  # gradient of D(interpolated)
    grad_norm = tf.norm(flatten(grad), axis=1)  # l2 norm

    # WGAN - LP: one-sided penalty, only norms above 1 are penalized.
    if self.gan_type == 'wgan-lp' :
        GP = self.ld * tf.reduce_mean(tf.square(tf.maximum(0.0, grad_norm - 1.)))

    elif self.gan_type == 'wgan-gp' or self.gan_type == 'dragan':
        # Two-sided penalty towards unit gradient norm.
        GP = self.ld * tf.reduce_mean(tf.square(grad_norm - 1.))

    return GP
def build_model(self):
    """Builds the full graph: input pipelines, G/D, losses, optimizers,
    test/custom-image paths and summaries."""
    self.lr = tf.placeholder(tf.float32, name='learning_rate')

    """ Input Image"""
    Image_data_class = ImageData(load_size=self.img_size, channels=self.img_ch, data_path=self.dataset_path, selected_attrs=self.selected_attrs, augment_flag=self.augment_flag)
    Image_data_class.preprocess()

    train_dataset_num = len(Image_data_class.train_dataset)
    test_dataset_num = len(Image_data_class.test_dataset)

    train_dataset = tf.data.Dataset.from_tensor_slices((Image_data_class.train_dataset, Image_data_class.train_dataset_label, Image_data_class.train_dataset_fix_label))
    test_dataset = tf.data.Dataset.from_tensor_slices((Image_data_class.test_dataset, Image_data_class.test_dataset_label, Image_data_class.test_dataset_fix_label))

    gpu_device = '/gpu:0'
    # Shuffle over the whole dataset, batch with parallel decode, and
    # prefetch batches directly onto the GPU.
    train_dataset = train_dataset.\
        apply(shuffle_and_repeat(train_dataset_num)).\
        apply(map_and_batch(Image_data_class.image_processing, self.batch_size, num_parallel_batches=8, drop_remainder=True)).\
        apply(prefetch_to_device(gpu_device, self.batch_size))

    test_dataset = test_dataset.\
        apply(shuffle_and_repeat(test_dataset_num)).\
        apply(map_and_batch(Image_data_class.image_processing, self.batch_size, num_parallel_batches=8, drop_remainder=True)).\
        apply(prefetch_to_device(gpu_device, self.batch_size))

    train_dataset_iterator = train_dataset.make_one_shot_iterator()
    test_dataset_iterator = test_dataset.make_one_shot_iterator()

    self.x_real, label_org, label_fix_list = train_dataset_iterator.get_next()  # Input image / Original domain labels
    # Target labels are a random permutation of the batch's source labels.
    label_trg = tf.random_shuffle(label_org)  # Target domain labels
    label_fix_list = tf.transpose(label_fix_list, perm=[1, 0, 2])

    self.x_test, test_label_org, test_label_fix_list = test_dataset_iterator.get_next()  # Input image / Original domain labels
    test_label_fix_list = tf.transpose(test_label_fix_list, perm=[1, 0, 2])

    self.custom_image = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='custom_image')  # Custom Image
    custom_label_fix_list = tf.transpose(create_labels(self.custom_label, self.selected_attrs), perm=[1, 0, 2])

    """ Define Generator, Discriminator """
    x_fake = self.generator(self.x_real, label_trg)  # real a
    # Cycle reconstruction back to the original domain.
    x_recon = self.generator(x_fake, label_org, reuse=True)  # real b

    real_logit, real_cls = self.discriminator(self.x_real)
    fake_logit, fake_cls = self.discriminator(x_fake, reuse=True)

    """ Define Loss """
    # Gradient penalty only applies to WGAN variants and DRAGAN.
    if self.gan_type.__contains__('wgan') or self.gan_type == 'dragan' :
        GP = self.gradient_panalty(real=self.x_real, fake=x_fake)
    else :
        GP = 0

    g_adv_loss = generator_loss(loss_func=self.gan_type, fake=fake_logit)
    g_cls_loss = classification_loss(logit=fake_cls, label=label_trg)
    g_rec_loss = L1_loss(self.x_real, x_recon)

    d_adv_loss = discriminator_loss(loss_func=self.gan_type, real=real_logit, fake=fake_logit) + GP
    d_cls_loss = classification_loss(logit=real_cls, label=label_org)

    self.d_loss = self.adv_weight * d_adv_loss + self.cls_weight * d_cls_loss
    self.g_loss = self.adv_weight * g_adv_loss + self.cls_weight * g_cls_loss + self.rec_weight * g_rec_loss

    """ Result Image """
    # One generated image list per fixed target label.
    self.x_fake_list = tf.map_fn(lambda x : self.generator(self.x_real, x, reuse=True), label_fix_list, dtype=tf.float32)

    """ Test Image """
    self.x_test_fake_list = tf.map_fn(lambda x : self.generator(self.x_test, x, reuse=True), test_label_fix_list, dtype=tf.float32)
    self.custom_fake_image = tf.map_fn(lambda x : self.generator(self.custom_image, x, reuse=True), custom_label_fix_list, dtype=tf.float32)

    """ Training """
    t_vars = tf.trainable_variables()
    G_vars = [var for var in t_vars if 'generator' in var.name]
    D_vars = [var for var in t_vars if 'discriminator' in var.name]

    self.g_optimizer = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.g_loss, var_list=G_vars)
    self.d_optimizer = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.d_loss, var_list=D_vars)

    """" Summary """
    self.Generator_loss = tf.summary.scalar("Generator_loss", self.g_loss)
    self.Discriminator_loss = tf.summary.scalar("Discriminator_loss", self.d_loss)

    self.g_adv_loss = tf.summary.scalar("g_adv_loss", g_adv_loss)
    self.g_cls_loss = tf.summary.scalar("g_cls_loss", g_cls_loss)
    self.g_rec_loss = tf.summary.scalar("g_rec_loss", g_rec_loss)

    self.d_adv_loss = tf.summary.scalar("d_adv_loss", d_adv_loss)
    self.d_cls_loss = tf.summary.scalar("d_cls_loss", d_cls_loss)

    self.g_summary_loss = tf.summary.merge([self.Generator_loss, self.g_adv_loss, self.g_cls_loss, self.g_rec_loss])
    self.d_summary_loss = tf.summary.merge([self.Discriminator_loss, self.d_adv_loss, self.d_cls_loss])
def train(self):
    """Runs the training loop.

    Alternates discriminator and generator updates (one G update per
    n_critic D updates), periodically writes sample images, summaries
    and checkpoints, and resumes from the latest checkpoint if present.
    """
    # initialize all variables
    tf.global_variables_initializer().run()

    # saver to save model
    self.saver = tf.train.Saver(max_to_keep=None)

    # summary writer
    self.writer = tf.summary.FileWriter(self.log_dir + '/' + self.model_dir, self.sess.graph)

    # restore check-point if it exits
    could_load, checkpoint_counter = self.load(self.checkpoint_dir)
    if could_load:
        start_epoch = (int)(checkpoint_counter / self.iteration)
        start_batch_id = checkpoint_counter - start_epoch * self.iteration
        counter = checkpoint_counter
        print(" [*] Load SUCCESS")
    else:
        start_epoch = 0
        start_batch_id = 0
        counter = 1
        print(" [!] Load failed...")

    self.sample_dir = os.path.join(self.sample_dir, self.model_dir)
    check_folder(self.sample_dir)

    # loop for epoch
    start_time = time.time()
    past_g_loss = -1.
    lr = self.init_lr
    for epoch in range(start_epoch, self.epoch):
        if self.decay_flag :
            lr = self.init_lr if epoch < self.decay_epoch else self.init_lr * (self.epoch - epoch) / (self.epoch - self.decay_epoch)  # linear decay

        for idx in range(start_batch_id, self.iteration):
            train_feed_dict = {
                self.lr : lr
            }

            # Update D
            _, d_loss, summary_str = self.sess.run([self.d_optimizer, self.d_loss, self.d_summary_loss], feed_dict = train_feed_dict)
            self.writer.add_summary(summary_str, counter)

            # Update G once every n_critic D updates.
            g_loss = None
            if (counter - 1) % self.n_critic == 0 :
                real_images, fake_images, _, g_loss, summary_str = self.sess.run([self.x_real, self.x_fake_list, self.g_optimizer, self.g_loss, self.g_summary_loss], feed_dict = train_feed_dict)
                self.writer.add_summary(summary_str, counter)
                past_g_loss = g_loss

            # display training status
            counter += 1
            # BUGFIX: identity test `is None` instead of `== None`.
            if g_loss is None :
                g_loss = past_g_loss
            print("Epoch: [%2d] [%5d/%5d] time: %4.4f d_loss: %.8f, g_loss: %.8f" % (epoch, idx, self.iteration, time.time() - start_time, d_loss, g_loss))

            # NOTE(review): real_images/fake_images are only assigned on G
            # update steps; with n_critic > 1 a print_freq boundary that
            # precedes the first G step of a resumed run would hit an
            # unbound name. Unchanged here -- confirm n_critic/print_freq
            # configurations in use.
            if np.mod(idx+1, self.print_freq) == 0 :
                real_image = np.expand_dims(real_images[0], axis=0)
                fake_image = np.transpose(fake_images, axes=[1, 0, 2, 3, 4])[0]  # [bs, c_dim, h, w, ch]

                save_images(real_image, [1, 1],
                            './{}/real_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx+1))
                save_images(fake_image, [1, self.c_dim],
                            './{}/fake_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx+1))

            if np.mod(idx + 1, self.save_freq) == 0:
                self.save(self.checkpoint_dir, counter)

        # After an epoch, start_batch_id is set to zero
        # non-zero value is only for the first epoch after loading pre-trained model
        start_batch_id = 0

    # save model for final step
    self.save(self.checkpoint_dir, counter)
@property
def model_dir(self):
n_res = str(self.n_res) + 'resblock'
n_dis = str(self.n_dis) + 'dis'
return "{}_{}_{}_{}_{}".format(self.model_name, self.dataset_name,
self.gan_type,
n_res, n_dis)
def save(self, checkpoint_dir, step):
    """Saves a checkpoint under <checkpoint_dir>/<model_dir> at `step`."""
    target_dir = os.path.join(checkpoint_dir, self.model_dir)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    ckpt_prefix = os.path.join(target_dir, self.model_name + '.model')
    self.saver.save(self.sess, ckpt_prefix, global_step=step)
def load(self, checkpoint_dir):
    """Restores the latest (or load_step-pinned) checkpoint.

    Args:
        checkpoint_dir: base checkpoint directory; the model-specific
            subdirectory (self.model_dir) is appended internally.

    Returns:
        (True, counter) on success, where counter is the global step
        parsed from the checkpoint file name; (False, 0) when no
        checkpoint is found.
    """
    import re
    print(" [*] Reading checkpoints...")
    checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)

    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
        ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
        # A non-negative load_step pins a specific checkpoint instead of
        # the latest one.
        if self.load_step > -1: ckpt_name = "StarGAN.model-%d" % (self.load_step)
        print(ckpt_name)
        self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
        # BUGFIX: raw string avoids the invalid escape sequence '\d'
        # (DeprecationWarning/SyntaxWarning in modern Python). Extracts
        # the last run of digits in the name, i.e. the global step.
        counter = int(next(re.finditer(r"(\d+)(?!.*\d)", ckpt_name)).group(0))
        print(" [*] Success to read {}".format(ckpt_name))
        return True, counter
    else:
        print(" [*] Failed to find a checkpoint")
        return False, 0
def test(self):
    """Inference entry point: restores the checkpoint, translates the custom
    images under <dataset_path>/test plus a batch of test images, saves the
    outputs, and writes an index.html side-by-side comparison page."""
    tf.global_variables_initializer().run()
    test_path = os.path.join(self.dataset_path, 'test')
    check_folder(test_path)
    test_files = glob(os.path.join(test_path, '*.*'))

    self.saver = tf.train.Saver()
    could_load, checkpoint_counter = self.load(self.checkpoint_dir)
    self.result_dir = os.path.join(self.result_dir, self.model_dir)
    check_folder(self.result_dir)
    image_folder = os.path.join(self.result_dir, 'images')
    check_folder(image_folder)

    if could_load :
        print(" [*] Load SUCCESS")
    else :
        # Inference proceeds anyway, but with untrained weights.
        print(" [!] Load failed...")

    # write html for visual comparison
    index_path = os.path.join(self.result_dir, 'index.html')
    index = open(index_path, 'w')
    index.write("<html><body><table><tr>")
    index.write("<th>name</th><th>input</th><th>output</th></tr>")

    # Custom Image: translate each file on disk to every target domain.
    for sample_file in test_files:
        print("Processing image: " + sample_file)
        sample_image = np.asarray(load_test_data(sample_file, size=self.img_size))
        image_path = os.path.join(image_folder, '{}'.format(os.path.basename(sample_file)))

        fake_image = self.sess.run(self.custom_fake_image, feed_dict = {self.custom_image : sample_image})
        # Reorder to [bs, c_dim, h, w, ch] and take the first batch element.
        fake_image = np.transpose(fake_image, axes=[1, 0, 2, 3, 4])[0]
        save_images(fake_image, [1, self.c_dim], image_path)

        index.write("<td>%s</td>" % os.path.basename(image_path))
        # Paths in the HTML are made relative unless already absolute.
        index.write("<td><img src='%s' width='%d' height='%d'></td>" % (sample_file if os.path.isabs(sample_file) else (
            '../..' + os.path.sep + sample_file), self.img_size, self.img_size))
        index.write("<td><img src='%s' width='%d' height='%d'></td>" % (image_path if os.path.isabs(image_path) else (
            '../..' + os.path.sep + image_path), self.img_size * self.c_dim, self.img_size))
        index.write("</tr>")

    # CelebA: real test images next to their c_dim translations.
    real_images, fake_images = self.sess.run([self.x_test, self.x_test_fake_list])
    fake_images = np.transpose(fake_images, axes=[1, 0, 2, 3, 4])

    for i in range(len(real_images)) :
        print("{} / {}".format(i, len(real_images)))
        real_path = os.path.join(image_folder, 'real_{}.png'.format(i))
        fake_path = os.path.join(image_folder, 'fake_{}.png'.format(i))

        real_image = np.expand_dims(real_images[i], axis=0)
        fake_image = fake_images[i]
        save_images(real_image, [1, 1], real_path)
        save_images(fake_image, [1, self.c_dim], fake_path)

        index.write("<td>%s</td>" % os.path.basename(real_path))
        index.write("<td><img src='%s' width='%d' height='%d'></td>" % (real_path if os.path.isabs(real_path) else (
            '../..' + os.path.sep + real_path), self.img_size, self.img_size))
        index.write("<td><img src='%s' width='%d' height='%d'></td>" % (fake_path if os.path.isabs(fake_path) else (
            '../..' + os.path.sep + fake_path), self.img_size * self.c_dim, self.img_size))
        index.write("</tr>")
    index.close()
|
# -- encoding:utf-8 --
"""
Create by ibf on 2018/6/21
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# Configure fonts so the Chinese plot labels render correctly in matplotlib.
mpl.rcParams['font.sans-serif'] = [u'simHei']
mpl.rcParams['axes.unicode_minus'] = False

# 1. Generate the random sample data.
# Fixing the seed makes every run of the program draw identical samples.
np.random.seed(28)
n = 100
b_values = np.random.normal(loc=-1.0, scale=20.0, size=n)
c_values = np.random.normal(loc=0.0, scale=1.0, size=n)
print("b的均值:{}".format(np.mean(b_values)))
# # 随机数据可视化查看
# plt.figure(facecolor='w')
# plt.subplot(1, 2, 1)
# plt.hist(b_values, 1000, color='#FF0000')
# plt.subplot(1, 2, 2)
# plt.hist(c_values, 1000, color='#00FF00')
# plt.suptitle(u'随机数据可视化', fontsize=22)
# plt.show()
def calc_min_value_with_one_sample(b_values, c_values, max_iter=1000, tol=0.00001, alpha=0.01):
    """Gradient descent on f(x) = x^2 + b*x + c using only the first sample.

    :param b_values: per-sample b coefficients (only index 0 is used)
    :param c_values: per-sample c coefficients (only index 0 is used)
    :param max_iter: maximum number of iterations
    :param tol: convergence threshold on the change of f(x)
    :param alpha: gradient-descent learning rate
    """
    # Only the first sample participates in this variant.
    b = b_values[0]
    c = c_values[0]

    def objective(x):
        # f(x) = x^2 + b*x + c
        return x ** 2 + b * x + c

    def gradient(x):
        # f'(x) = 2x + b
        return 2 * x + b

    # Random starting point and its objective value.
    x = np.random.randint(low=-10, high=10)
    y = objective(x)
    print("当前参数为:")
    print("b={}".format(b))
    print("c={}".format(c))

    change = 1.0 + tol
    iterations = 0
    # Iterate until the objective stops changing or the budget runs out.
    while change > tol and iterations < max_iter:
        x = x - alpha * gradient(x)
        new_y = objective(x)
        change = np.abs(y - new_y)
        y = new_y
        iterations += 1
    print("最终更新的次数:{}, 最终的变化率:{}".format(iterations, change))
    print("最终结果为:{}---->{}".format(x, y))
def calc_min_value_with_ten_sample(n, b_values, c_values, max_iter=1000, tol=0.00001, alpha=0.01):
    """
    Gradient descent on the sum of ten quadratics f_i(x) = x^2 + b_i*x + c_i,
    printing the x and y = f(x) reached at convergence.
    :param n: number of samples (must be exactly 10)
    :param b_values: per-sample b coefficients
    :param c_values: per-sample c coefficients
    :param max_iter: maximum number of iterations
    :param tol: convergence threshold on the change of f(x)
    :param alpha: gradient-descent learning rate
    :return:
    """
    # This variant is hard-wired to exactly ten samples.
    assert n == 10 and len(b_values) == n and len(c_values) == n

    def f(x, b_values, c_values):
        """
        Objective: sum of the ten per-sample quadratics.
        (Replaces the ten copy-pasted sample_i terms; `sum` accumulates
        left-to-right exactly like the original chained addition.)
        """
        return sum(x ** 2 + b * x + c for b, c in zip(b_values, c_values))

    def h(x, b_values, c_values):
        """
        Derivative of the objective: sum of the ten per-sample 2x + b_i terms.
        """
        return sum(x * 2 + b for b in b_values)

    # Descent state: last observed change of f(x) and the iteration counter.
    step_change = 1.0 + tol
    step = 0

    # Random starting point and its objective value.
    current_x = np.random.randint(low=-10, high=10)
    current_y = f(current_x, b_values, c_values)
    print("当前参数为:")
    print("b_values={},b的均值为:{}".format(b_values, np.mean(b_values)))
    print("c_values={},c的均值为:{}".format(c_values, np.mean(c_values)))

    while step_change > tol and step < max_iter:
        # 1. Gradient at the current x.
        current_d_f = h(current_x, b_values, c_values)
        # 2. Gradient-descent update.
        current_x = current_x - alpha * current_d_f
        # 3. Objective at the new x.
        tmp_y = f(current_x, b_values, c_values)
        # 4. Record the change, bump the counter, keep the new y.
        step_change = np.abs(current_y - tmp_y)
        step += 1
        current_y = tmp_y
    print("最终更新的次数:{}, 最终的变化率:{}".format(step, step_change))
    print("最终结果为:{}---->{}".format(current_x, current_y))
def calc_min_value_with_n_sample(n, b_values, c_values, max_iter=1000, tol=0.00001, alpha=0.01, show_img=True):
    """
    Batch gradient descent (all n samples per update) to find the x minimising
    the average of f_i(x) = x^2 + b_i*x + c_i.
    :param n: number of samples
    :param b_values: per-sample b coefficients
    :param c_values: per-sample c coefficients
    :param max_iter: maximum number of iterations
    :param tol: convergence threshold on the change of f(x)
    :param alpha: gradient-descent learning rate
    :param show_img: whether to plot the change/loss curves at the end
    :return:
    """
    def f1(x, b, c):
        # Single-sample objective: x^2 + b*x + c
        return x ** 2 + b * x + c

    def f(x, b_values, c_values):
        """
        Average objective over all samples.
        :param x:
        :param b_values:
        :param c_values:
        :return:
        """
        result = 0
        for b, c in zip(b_values, c_values):
            # Average (rather than sum) over all (b, c) pairs to keep the
            # magnitude manageable for large sample counts.
            result += f1(x, b, c) / n
        return result

    def h1(x, b, c):
        # Single-sample derivative: 2x + b
        return x * 2 + b

    def h(x, b_values, c_values):
        """
        Average derivative over all samples.
        :param x:
        :param b_values:
        :param c_values:
        :return:
        """
        result = 0
        for b, c in zip(b_values, c_values):
            # Average the per-sample gradients for the same reason as in f.
            result += h1(x, b, c) / n
        return result

    # Descent state: last observed change of f(x) and the iteration counter.
    step_channge = 1.0 + tol
    step = 0

    # Random starting point and its objective value.
    current_x = np.random.randint(low=-10, high=10)
    current_y = f(current_x, b_values, c_values)
    print("当前参数为:")
    print("b_values={},b的均值为:{}".format(b_values, np.mean(b_values)))
    print("c_values={},c的均值为:{}".format(c_values, np.mean(c_values)))

    # Main loop; values are tracked per iteration for the optional plots.
    y_value_changes = []
    if show_img:
        y_value_changes.append(current_y)
    error_value_changes = []
    while step_channge > tol and step < max_iter:
        # 1. Gradient at the current x.
        current_d_f = h(current_x, b_values, c_values)
        # 2. Gradient-descent update.
        current_x = current_x - alpha * current_d_f
        # 3. Objective at the new x.
        tmp_y = f(current_x, b_values, c_values)
        # 4. Record the change, bump the counter, keep the new y.
        step_channge = np.abs(current_y - tmp_y)
        step += 1
        current_y = tmp_y
        # Collect values for visualisation.
        if show_img:
            y_value_changes.append(current_y)
            error_value_changes.append(step_channge)
    print("最终更新的次数:{}, 最终的变化率:{}".format(step, step_channge))
    print("最终结果为:{}---->{}".format(current_x, current_y))

    # Visualisation: per-iteration change (left) and loss value (right).
    if show_img:
        plt.figure(facecolor='w')
        plt.subplot(1, 2, 1)
        plt.plot(range(step), error_value_changes, 'r-')
        plt.xlabel('迭代次数')
        plt.ylabel('变换大小')
        plt.subplot(1, 2, 2)
        plt.plot(range(step + 1), y_value_changes, 'g-')
        plt.xlabel('迭代次数')
        plt.ylabel('损失函数值')
        plt.suptitle('MGD变换情况可视化')
        plt.show()
def calc_min_value_with_n_sample_sgd(n, b_values, c_values, max_iter=1000, tol=0.00001, alpha=0.01, show_img=True):
    """
    Stochastic gradient descent: one parameter update per sample (n updates
    per iteration), minimising the average of f_i(x) = x^2 + b_i*x + c_i.
    :param n: number of samples
    :param b_values: per-sample b coefficients
    :param c_values: per-sample c coefficients
    :param max_iter: maximum number of iterations (epochs)
    :param tol: convergence threshold on the change of f(x)
    :param alpha: gradient-descent learning rate
    :param show_img: whether to plot the change/loss curves at the end
    :return:
    """
    def f1(x, b, c):
        # Single-sample objective: x^2 + b*x + c
        return x ** 2 + b * x + c

    def f(x, b_values, c_values):
        """
        Average objective over all samples (used to monitor convergence).
        :param x:
        :param b_values:
        :param c_values:
        :return:
        """
        result = 0
        for b, c in zip(b_values, c_values):
            # Average (rather than sum) to keep the magnitude manageable.
            result += f1(x, b, c) / n
        return result

    def h1(x, b, c):
        # Single-sample derivative: 2x + b; SGD uses it one sample at a time.
        return x * 2 + b

    # Descent state: last observed change of f(x) and the epoch counter.
    step_channge = 1.0 + tol
    step = 0

    # Random starting point and its objective value.
    current_x = np.random.randint(low=-10, high=10)
    current_y = f(current_x, b_values, c_values)
    print("当前参数为:")
    print("b_values={},b的均值为:{}".format(b_values, np.mean(b_values)))
    print("c_values={},c的均值为:{}".format(c_values, np.mean(c_values)))

    # Main loop; change_numbers counts individual parameter updates.
    change_numbers = 0
    y_value_changes = []
    if show_img:
        y_value_changes.append(current_y)
    error_value_changes = []
    while step_channge > tol and step < max_iter:
        """
        在一个迭代次数中(Step),对m条数据进行遍历,每条样本更新一次模型参数
        """
        print(step)
        # Visit the samples in a fresh random order each epoch.
        random_index = np.random.permutation(n)
        for index in random_index:
            b = b_values[index]
            c = c_values[index]
            # 1. Gradient from this single sample only.
            current_d_f = h1(current_x, b, c)
            # 2. Parameter update.
            current_x = current_x - alpha * current_d_f
            # 3. Monitor the full-average objective at the new x.
            tmp_y = f(current_x, b_values, c_values)
            # 4. Record the change, keep the new y, count the update.
            step_channge = np.abs(current_y - tmp_y)
            current_y = tmp_y
            change_numbers += 1
            # Collect values for visualisation.
            if show_img:
                y_value_changes.append(current_y)
                error_value_changes.append(step_channge)
            # Early exit as soon as the change drops below tolerance.
            if step_channge < tol:
                break
        # One epoch (pass over the data) finished.
        step += 1
    print("最终迭代的次数:{}, 参数的更新次数:{}, 最终的变化率:{}".format(step, change_numbers, step_channge))
    print("最终结果为:{}---->{}".format(current_x, current_y))

    # Visualisation: per-update change (left) and loss value (right).
    if show_img:
        plt.figure(facecolor='w')
        plt.subplot(1, 2, 1)
        plt.plot(range(change_numbers), error_value_changes, 'r-')
        plt.xlabel('迭代次数')
        plt.ylabel('变换大小')
        plt.subplot(1, 2, 2)
        plt.plot(range(change_numbers + 1), y_value_changes, 'g-')
        plt.xlabel('迭代次数')
        plt.ylabel('损失函数值')
        plt.suptitle('SGD变换情况可视化')
        plt.show()
# Demo entry point: the earlier variants are kept for reference; only the
# SGD version runs by default.
# print("*" * 50)
# calc_min_value_with_one_sample(b_values, c_values)
# print("*" * 50)
# calc_min_value_with_ten_sample(n, b_values, c_values)
# print("*" * 50)
# calc_min_value_with_n_sample(n, b_values, c_values)
print("*" * 50)
calc_min_value_with_n_sample_sgd(n, b_values, c_values)
|
#include <stdio.h>

/* Prints the second character of each string in the array, one per line
 * ("a", "n", "o" for Carrot/Onion/Potato). */
int main() {
    int i;
    char *a[] = {"Carrot","Onion","Potato"};
    char **p = a;   /* p[i] is the i-th string */
    for(i=0; i<3; i++) {
        /* p[i]+1 points at the second character of the i-th string */
        printf("%c\n", *(p[i]+1));
    }
    return 0;
}
|
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from ..models import MyModel
class MyModelListViewTest(TestCase):
    """Tests for the MyModel list view at /mymodels/."""

    @classmethod
    def setUpClass(cls):
        # One shared fixture instance for every test in this class.
        super(MyModelListViewTest, cls).setUpClass()
        cls.instance = MyModel.objects.create(title="Test")

    @classmethod
    def tearDownClass(cls):
        # Remove the shared fixture after all tests have run.
        super(MyModelListViewTest, cls).tearDownClass()
        cls.instance.delete()

    def test_get(self):
        # The list view should be reachable and return HTTP 200.
        response = self.client.get('/mymodels/')
        self.assertEqual(
            response.status_code,
            200,
            "Response code is invalid",
        )
class MyModelDetailViewTest(TestCase):
    """Tests for the MyModel detail view at /mymodels/<pk>/."""

    @classmethod
    def setUpClass(cls):
        # One shared fixture instance for every test in this class.
        super(MyModelDetailViewTest, cls).setUpClass()
        cls.instance = MyModel.objects.create(title="Test")

    @classmethod
    def tearDownClass(cls):
        # Remove the shared fixture after all tests have run.
        super(MyModelDetailViewTest, cls).tearDownClass()
        cls.instance.delete()

    def test_get(self):
        # Bug fix: TestCase has no `pk` attribute; the detail URL must use
        # the primary key of the instance created in setUpClass.
        response = self.client.get('/mymodels/{}/'.format(self.instance.pk))
        self.assertEqual(
            response.status_code,
            200,
            "Response code is invalid",
        )
|
# Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helpers to use scoped values within async functions.
Similar to AsyncContext in that the scope "pauses" and "resumes"
as the scheduler pauses and resumes the execution of the function
the scoped values are defined/overridden within.
See tests/test_scoped_value.py for examples.
"""
import qcore
from . import contexts
_empty_context = qcore.empty_context
class AsyncScopedValue(object):
    """Holder for a mutable value whose current value can be temporarily
    overridden within an async scope via `override`."""

    def __init__(self, default):
        # The currently visible value.
        self._value = default

    def get(self):
        """Returns the current value."""
        return self._value

    def set(self, value):
        """Replaces the current value."""
        self._value = value

    def override(self, value):
        """Temporarily overrides the old value with the new one."""
        return _AsyncScopedValueOverrideContext(self, value)

    def __call__(self):
        """Same as get."""
        return self.get()

    def __str__(self):
        return "AsyncScopedValue({})".format(self._value)

    def __repr__(self):
        return "AsyncScopedValue({!r})".format(self._value)
class _AsyncScopedValueOverrideContext(contexts.AsyncContext):
    """Async context that installs an override into an AsyncScopedValue on
    resume and restores the previous value on pause."""

    def __init__(self, target, value):
        self._target = target
        self._value = value
        self._old_value = None

    def resume(self):
        # Stash the value that is current right now, then install ours.
        target = self._target
        self._old_value = target._value
        target._value = self._value

    def pause(self):
        # Put the stashed value back.
        self._target._value = self._old_value

    def __repr__(self):
        return "_AsyncScopedValueOverrideContext(target=%r, value=%r)" % (
            self._target,
            self._value,
        )
class _AsyncPropertyOverrideContext(contexts.AsyncContext):
    """Async context that temporarily overrides an arbitrary attribute on a
    target object, restoring the previous attribute value on pause."""

    def __init__(self, target, property_name, value):
        self._target = target
        self._property_name = property_name
        self._value = value
        self._old_value = None

    def resume(self):
        # Remember the attribute's current value, then install the override.
        self._old_value = getattr(self._target, self._property_name)
        setattr(self._target, self._property_name, self._value)

    def pause(self):
        # Restore the remembered attribute value.
        setattr(self._target, self._property_name, self._old_value)

    def __repr__(self):
        return (
            "_AsyncPropertyOverrideContext(target=%r, property_name=%r, value=%r)"
            % (self._target, self._property_name, self._value)
        )


# Public alias; the explicit globals() assignment keeps the name visible to
# tools that inspect the module namespace dynamically.
async_override = _AsyncPropertyOverrideContext
globals()["async_override"] = async_override
|
const Validator = require("validator");
const isEmpty = require("is-empty");
module.exports = function validatePurchaseLocationInput(data) {
let errors = {};
// Convert empty fields to an empty string so we can use validator functions
data.name = !isEmpty(data.name) ? data.name : "";
data.userID = !isEmpty(data.userID) ? data.userID : "";
data.globalID = !isEmpty(data.globalID) ? data.globalID : "";
// Name checks
if (Validator.isEmpty(data.name)) {
errors.name = "Name field is required";
}
return {
errors,
isValid: isEmpty(errors)
};
};
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
from compiler_gym.envs import CompilerEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_autophase_crc32_feature_vector(env: CompilerEnv):
    """Pin the entire 56-dimension Autophase feature vector for cBench crc32."""
    env.benchmark = "cBench-v0/crc32"
    env.reset()
    features = env.observation["Autophase"]

    # (feature name, expected value), in vector-index order.
    expected = [
        ("BBNumArgsHi", 0),
        ("BBNumArgsLo", 0),
        ("onePred", 16),
        ("onePredOneSuc", 12),
        ("onePredTwoSuc", 2),
        ("oneSuccessor", 16),
        ("twoPred", 8),
        ("twoPredOneSuc", 2),
        ("twoEach", 4),
        ("twoSuccessor", 8),
        ("morePreds", 0),
        ("BB03Phi", 0),
        ("BBHiPhi", 0),
        ("BBNoPhi", 29),
        ("BeginPhi", 0),
        ("BranchCount", 24),
        ("returnInt", 9),
        ("CriticalCount", 2),
        ("NumEdges", 32),
        ("const32Bit", 38),
        ("const64Bit", 21),
        ("numConstZeroes", 14),
        ("numConstOnes", 30),
        ("UncondBranches", 16),
        ("binaryConstArg", 13),
        ("NumAShrInst", 0),
        ("NumAddInst", 5),
        ("NumAllocaInst", 24),
        ("NumAndInst", 3),
        ("BlockMid", 3),
        ("BlockLow", 26),
        ("NumBitCastInst", 0),
        ("NumBrInst", 24),
        ("NumCallInst", 13),
        ("NumGetElementPtrInst", 5),
        ("NumICmpInst", 10),
        ("NumLShrInst", 3),
        ("NumLoadInst", 51),
        ("NumMulInst", 0),
        ("NumOrInst", 1),
        ("NumPHIInst", 0),
        ("NumRetInst", 5),
        ("NumSExtInst", 0),
        ("NumSelectInst", 0),
        ("NumShlInst", 0),
        ("NumStoreInst", 38),
        ("NumSubInst", 0),
        ("NumTruncInst", 1),
        ("NumXorInst", 8),
        ("NumZExtInst", 5),
        ("TotalBlocks", 29),
        ("TotalInsts", 196),
        ("TotalMemInst", 131),
        ("TotalFuncs", 13),
        ("ArgsPhi", 0),
        ("testUnary", 81),
    ]
    for index, (name, value) in enumerate(expected):
        assert features[index] == value, name
if __name__ == "__main__":
main()
|
define([
    "core/framework/Tundra",
    "core/framework/ITundraPlugin",
    "core/asset/AssetFactory",
    "plugins/avatar/asset/AvatarDescAsset",
], function(Tundra, ITundraPlugin, AssetFactory, AvatarDescAsset)
{
    /**
     * Tundra plugin that registers the RealXtend avatar description
     * (.avatar, XML-based) asset type with the framework's asset system.
     */
    var AvatarPlugin = ITundraPlugin.$extend({
        __init__ : function()
        {
            this.$super("AvatarPlugin", [ "Avatar" ]);
        },

        /// ITundraPlugin override
        pluginPropertyName : function()
        {
            return "avatar";
        },

        /// ITundraPlugin override
        initialize : function(options)
        {
            // Map the ".avatar" file suffix to XML avatar description assets.
            this.framework.asset.registerAssetFactory(new AssetFactory("RealXtendAvatarDescription", AvatarDescAsset, {
                ".avatar" : "xml"
            }));
        },

        /// ITundraPlugin override
        postInitialize : function()
        {
            // No post-initialization work required.
        }
    });

    Tundra.registerPlugin(new AvatarPlugin());

    return AvatarPlugin;
});
|
#ifndef DALI_TOOLKIT_TEXT_RENDERER_H
#define DALI_TOOLKIT_TEXT_RENDERER_H
/*
* Copyright (c) 2019 Samsung Electronics Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// EXTERNAL INCLUDES
#include <dali/public-api/actors/actor.h>
#include <dali/public-api/common/intrusive-ptr.h>
#include <dali/public-api/object/ref-object.h>
namespace Dali
{
namespace Toolkit
{
namespace Text
{
class Renderer;
typedef IntrusivePtr<Renderer> RendererPtr;
class ViewInterface;
/**
 * @brief Abstract base class for Text renderers.
 *
 * This is responsible for rendering the glyphs from a ViewInterface in the specified positions.
 * It is implemented by returning an Actor intended as the child of a UI control.
 */
class Renderer : public RefObject
{
public:

  /**
   * @brief Render the glyphs from a ViewInterface.
   *
   * @param[in] view The interface to a view.
   * @param[in] textControl handle to the text control
   * @param[in] animatablePropertyIndex textControl specific animatable property
   * @param[out] alignmentOffset Offset used to internally align the placement actor.
   * @param[in] depth The depth in the tree of the parent.
   * @return The Renderable actor used to position the text.
   */
  virtual Actor Render( ViewInterface& view,
                        Actor textControl, // fixed typo: was "textContol" (declaration-only rename; no ABI impact)
                        Property::Index animatablePropertyIndex,
                        float& alignmentOffset,
                        int depth ) = 0;

protected:

  /**
   * @brief Constructor.
   */
  Renderer();

  /**
   * @brief A reference counted object may only be deleted by calling Unreference().
   */
  virtual ~Renderer();

private:

  // Undefined copy constructor; Renderer is non-copyable.
  Renderer( const Renderer& handle );

  // Undefined assignment operator; Renderer is non-copyable.
  Renderer& operator=( const Renderer& handle );
};
} // namespace Text
} // namespace Toolkit
} // namespace Dali
#endif // DALI_TOOLKIT_TEXT_RENDERER_H
|
import os
from django.conf.global_settings import LANGUAGES as DJANGO_LANGUAGES
import dj_database_url
###################
# Django Settings #
###################
# Project root: two directories above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Pulled from the environment in production; the fallback is for local dev only.
SECRET_KEY = os.getenv("DJANGO_SECRET_KEY", "not-secret-at-all")
# DEBUG defaults to on; export DEBUG=0 to turn it off.
DEBUG = bool(int(os.getenv("DEBUG", 1)))
# Truthy when the test harness sets FAIL_INVALID_TEMPLATE_VARS.
TEST = os.getenv("FAIL_INVALID_TEMPLATE_VARS")
# Module-path prefix: empty when running standalone, test-app path otherwise.
PREFIX = "" if os.getenv("STANDALONE") else "tests.test_app."

ALLOWED_HOSTS = ["*"]

INSTALLED_APPS = [
    # Keep this above 'django.contrib.admin'
    "jazzmin",
    "django.contrib.admin",
    "django.contrib.admindocs",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "import_export",
    # Our apps
    "{}library.books.apps.BooksConfig".format(PREFIX),
    "{}library.loans.apps.LoansConfig".format(PREFIX),
]

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.locale.LocaleMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.admindocs.middleware.XViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]

ROOT_URLCONF = "{}library.urls".format(PREFIX)
WSGI_APPLICATION = "{}library.wsgi.application".format(PREFIX)

# Log INFO and above from all loggers to the console.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {"console": {"class": "logging.StreamHandler"}},
    "loggers": {"": {"handlers": ["console"], "level": "INFO"}},
}

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    }
]

# Database config comes from DATABASE_URL; falls back to a local SQLite file.
DATABASES = {
    "default": dj_database_url.config(
        env="DATABASE_URL",
        conn_max_age=500,
        default="sqlite:///{}".format(os.path.join(BASE_DIR, "db.sqlite3")),
    )
}

AUTH_PASSWORD_VALIDATORS = [
    {"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"},
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]

# Internationalisation / localisation.
LANGUAGE_CODE = "en"
TIME_ZONE = "Europe/London"
USE_I18N = True
LOCALE_PATHS = (os.path.join(BASE_DIR, "locale"),)
USE_L10N = True
USE_TZ = True
# English default
LANGUAGES = DJANGO_LANGUAGES

# Static and media file locations.
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "static")
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media")

# Local development only: debug toolbar and django-extensions.
if DEBUG and not TEST:
    os.environ.setdefault("WERKZEUG_DEBUG_PIN", "off")
    INSTALLED_APPS.extend(["debug_toolbar", "django_extensions"])
    MIDDLEWARE.insert(0, "debug_toolbar.middleware.DebugToolbarMiddleware")
    DEBUG_TOOLBAR_CONFIG = {"SHOW_TOOLBAR_CALLBACK": lambda _: False}

# Production only: serve compressed static files through WhiteNoise.
if not DEBUG and not TEST:
    MIDDLEWARE.insert(1, "whitenoise.middleware.WhiteNoiseMiddleware")
    STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
########################
# Third party settings #
########################
JAZZMIN_SETTINGS = {
# title of the window (Will default to current_admin_site.site_title if absent or None)
"site_title": "Library Admin",
# Title on the brand, and login screen (19 chars max) (defaults to current_admin_site.site_header if absent or None)
"site_header": "Library",
# square logo to use for your site, must be present in static files, used for favicon and brand on top left
"site_logo": "books/img/logo.png",
# Welcome text on the login screen
"welcome_sign": "Welcome to the library",
# Copyright on the footer
"copyright": "Acme Library Ltd",
# The model admin to search from the search bar, search bar omitted if excluded
"search_model": "auth.User",
# Field name on user model that contains avatar image
"user_avatar": None,
############
# Top Menu #
############
# Links to put along the top menu
"topmenu_links": [
# Url that gets reversed (Permissions can be added)
{"name": "Home", "url": "admin:index", "permissions": ["auth.view_user"]},
# external url that opens in a new window (Permissions can be added)
{"name": "Support", "url": "https://github.com/farridav/django-jazzmin/issues", "new_window": True},
# model admin to link to (Permissions checked against model)
{"model": "auth.User"},
# App with dropdown menu to all its models pages (Permissions checked against models)
{"app": "books"},
{"app": "loans"},
],
#############
# User Menu #
#############
# Additional links to include in the user menu on the top right ('app' url type is not allowed)
"usermenu_links": [
{"name": "Support", "url": "https://github.com/farridav/django-jazzmin/issues", "new_window": True},
{"model": "auth.user"},
],
#############
# Side Menu #
#############
# Whether to display the side menu
"show_sidebar": True,
    # Whether to auto-expand the menu
"navigation_expanded": True,
# Hide these apps when generating side menu e.g (auth)
"hide_apps": [],
# Hide these models when generating side menu (e.g auth.user)
"hide_models": [],
# List of apps to base side menu (app or model) ordering off of
"order_with_respect_to": ["Make Messages", "auth", "books", "books.author", "books.book", "loans"],
# Custom links to append to app groups, keyed on app name
"custom_links": {
"loans": [
{
"name": "Make Messages",
"url": "make_messages",
"icon": "fas fa-comments",
"permissions": ["loans.view_loan"],
},
{"name": "Custom View", "url": "admin:custom_view", "icon": "fas fa-box-open"},
]
},
# Custom icons for side menu apps/models See the link below
# https://fontawesome.com/icons?d=gallery&m=free&v=5.0.0,5.0.1,5.0.10,5.0.11,5.0.12,5.0.13,5.0.2,5.0.3,5.0.4,5.0.5,5.0.6,5.0.7,5.0.8,5.0.9,5.1.0,
# 5.1.1,5.2.0,5.3.0,5.3.1,5.4.0,5.4.1,5.4.2,5.13.0,5.12.0,5.11.2,5.11.1,5.10.0,5.9.0,5.8.2,5.8.1,5.7.2,5.7.1,5.7.0,5.6.3,5.5.0,5.4.2
# for the full list of 5.13.0 free icon classes
"icons": {
"auth": "fas fa-users-cog",
"auth.user": "fas fa-user",
"auth.Group": "fas fa-users",
"admin.LogEntry": "fas fa-file",
"books.Author": "fas fa-user",
"books.Book": "fas fa-book",
"books.Genre": "fas fa-photo-video",
"loans.BookLoan": "fas fa-book-open",
"loans.Library": "fas fa-book-reader",
},
# Icons that are used when one is not manually specified
"default_icon_parents": "fas fa-chevron-circle-right",
"default_icon_children": "fas fa-circle",
#################
# Related Modal #
#################
# Activate Bootstrap modal
"related_modal_active": False,
#############
# UI Tweaks #
#############
# Relative paths to custom CSS/JS scripts (must be present in static files)
"custom_css": None,
"custom_js": None,
# Whether to show the UI customizer on the sidebar
"show_ui_builder": True,
###############
# Change view #
###############
# Render out the change view as a single form, or in tabs, current options are
# - single
# - horizontal_tabs (default)
# - vertical_tabs
# - collapsible
# - carousel
"changeform_format": "horizontal_tabs",
# override change forms on a per modeladmin basis
"changeform_format_overrides": {"auth.user": "collapsible", "auth.group": "vertical_tabs"},
# Add a language dropdown into the admin
"language_chooser": True,
}
# On the hosted demo only, show the demo credentials on the login screen.
if not DEBUG and not TEST:
    JAZZMIN_SETTINGS["welcome_sign"] = "Username: test@test.com, Password: test (Data resets nightly)"

# Appearance tweaks for the jazzmin admin UI.
JAZZMIN_UI_TWEAKS = {
    # Small-text toggles for the main UI regions
    "navbar_small_text": False,
    "footer_small_text": False,
    "body_small_text": False,
    "brand_small_text": False,
    # Brand / accent / navbar colour classes
    "brand_colour": False,
    "accent": "accent-primary",
    "navbar": "navbar-white navbar-light",
    "no_navbar_border": False,
    # Layout behaviour
    "navbar_fixed": False,
    "layout_boxed": False,
    "footer_fixed": False,
    "sidebar_fixed": False,
    # Sidebar styling
    "sidebar": "sidebar-dark-primary",
    "sidebar_nav_small_text": False,
    "sidebar_disable_expand": False,
    "sidebar_nav_child_indent": False,
    "sidebar_nav_compact_style": False,
    "sidebar_nav_legacy_style": False,
    "sidebar_nav_flat_style": False,
    # Theme selection
    "theme": "default",
    "dark_mode_theme": None,
    # Bootstrap button classes used throughout the admin
    "button_classes": {
        "primary": "btn-outline-primary",
        "secondary": "btn-outline-secondary",
        "info": "btn-outline-info",
        "warning": "btn-outline-warning",
        "danger": "btn-outline-danger",
        "success": "btn-outline-success",
    },
}
################
# App settings #
################
|
"""
Copyright (c) rwightman
https://gist.github.com/rwightman/f2d3849281624be7c0f11c85c87c1598
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair, _quadruple
class MedianPool2d(nn.Module):
    """ Median pool (usable as median filter when stride=1) module.

    Args:
         kernel_size: size of pooling kernel, int or 2-tuple
         stride: pool stride, int or 2-tuple
         padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
         same: override padding and enforce same padding, boolean
    """

    def __init__(self, kernel_size=3, stride=1, padding=0, same=False):
        super(MedianPool2d, self).__init__()
        self.k = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _quadruple(padding)  # (left, right, top, bottom)
        self.same = same

    def _padding(self, x):
        # In "same" mode, derive the (l, r, t, b) padding that keeps the
        # output spatial size at ceil(input / stride); otherwise use the
        # padding given at construction time.
        if not self.same:
            return self.padding
        ih, iw = x.size()[2:]
        rem_h = ih % self.stride[0]
        rem_w = iw % self.stride[1]
        ph = max(self.k[0] - (self.stride[0] if rem_h == 0 else rem_h), 0)
        pw = max(self.k[1] - (self.stride[1] if rem_w == 0 else rem_w), 0)
        pl = pw // 2
        pt = ph // 2
        return (pl, pw - pl, pt, ph - pt)

    def forward(self, x):
        # Reflect-pad, gather k_h x k_w neighbourhoods via unfold, and take
        # the median over each flattened window. Built from differentiable
        # tensor ops so autograd works end-to-end.
        padded = F.pad(x, self._padding(x), mode='reflect')
        windows = padded.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1])
        flat = windows.contiguous().view(windows.size()[:4] + (-1,))
        return flat.median(dim=-1)[0]
|
import os

# Load the grammar definition that ships alongside this module; `text` holds
# the raw file contents as a module-level string.
with open(
    os.path.join(
        os.path.dirname(__file__),
        'text.grammar'
    )
) as f:
    text = f.read()
|
/*
* Generated by asn1c-0.9.29 (http://lionet.info/asn1c)
* From ASN.1 module "NR-RRC-Definitions"
* found in "/home/labadmin/hlal/rrc_15.3_asn.asn1"
* `asn1c -D ./15_3_rrc/ -fcompound-names -fno-include-deps -findirect-choice -gen-PER -no-gen-example`
*/
#include "CellGroupConfigRrc.h"
#include "MAC-CellGroupConfig.h"
#include "PhysicalCellGroupConfig.h"
#include "SpCellConfig.h"
#include "RLC-BearerConfig.h"
#include "SCellConfig.h"
/*
* This type is implemented using NativeEnumerated,
* so here we adjust the DEF accordingly.
*/
/*
 * Validate the SIZE(1..32) constraint on rlc-BearerToAddModList.
 * Auto-generated by asn1c; the logic must not be hand-modified.
 */
static int
memb_rlc_BearerToAddModList_constraint_1(const asn_TYPE_descriptor_t *td, const void *sptr,
			asn_app_constraint_failed_f *ctfailcb, void *app_key) {
	size_t size;
	if(!sptr) {
		ASN__CTFAIL(app_key, td, sptr,
			"%s: value not given (%s:%d)",
			td->name, __FILE__, __LINE__);
		return -1;
	}
	/* Determine the number of elements */
	size = _A_CSEQUENCE_FROM_VOID(sptr)->count;
	if((size >= 1 && size <= 32)) {
		/* Perform validation of the inner elements */
		return td->encoding_constraints.general_constraints(td, sptr, ctfailcb, app_key);
	} else {
		ASN__CTFAIL(app_key, td, sptr,
			"%s: constraint failed (%s:%d)",
			td->name, __FILE__, __LINE__);
		return -1;
	}
}
/*
 * Validate the SIZE(1..32) constraint on rlc-BearerToReleaseList.
 * Auto-generated by asn1c; the logic must not be hand-modified.
 */
static int
memb_rlc_BearerToReleaseList_constraint_1(const asn_TYPE_descriptor_t *td, const void *sptr,
			asn_app_constraint_failed_f *ctfailcb, void *app_key) {
	size_t size;
	if(!sptr) {
		ASN__CTFAIL(app_key, td, sptr,
			"%s: value not given (%s:%d)",
			td->name, __FILE__, __LINE__);
		return -1;
	}
	/* Determine the number of elements */
	size = _A_CSEQUENCE_FROM_VOID(sptr)->count;
	if((size >= 1 && size <= 32)) {
		/* Perform validation of the inner elements */
		return td->encoding_constraints.general_constraints(td, sptr, ctfailcb, app_key);
	} else {
		ASN__CTFAIL(app_key, td, sptr,
			"%s: constraint failed (%s:%d)",
			td->name, __FILE__, __LINE__);
		return -1;
	}
}
/*
 * Validate the SIZE(1..31) constraint on sCellToAddModList.
 * Auto-generated by asn1c; the logic must not be hand-modified.
 */
static int
memb_sCellToAddModList_constraint_1(const asn_TYPE_descriptor_t *td, const void *sptr,
			asn_app_constraint_failed_f *ctfailcb, void *app_key) {
	size_t size;
	if(!sptr) {
		ASN__CTFAIL(app_key, td, sptr,
			"%s: value not given (%s:%d)",
			td->name, __FILE__, __LINE__);
		return -1;
	}
	/* Determine the number of elements */
	size = _A_CSEQUENCE_FROM_VOID(sptr)->count;
	if((size >= 1 && size <= 31)) {
		/* Perform validation of the inner elements */
		return td->encoding_constraints.general_constraints(td, sptr, ctfailcb, app_key);
	} else {
		ASN__CTFAIL(app_key, td, sptr,
			"%s: constraint failed (%s:%d)",
			td->name, __FILE__, __LINE__);
		return -1;
	}
}
/*
 * Validate the SIZE(1..31) constraint on sCellToReleaseList.
 * Auto-generated by asn1c; the logic must not be hand-modified.
 */
static int
memb_sCellToReleaseList_constraint_1(const asn_TYPE_descriptor_t *td, const void *sptr,
			asn_app_constraint_failed_f *ctfailcb, void *app_key) {
	size_t size;
	if(!sptr) {
		ASN__CTFAIL(app_key, td, sptr,
			"%s: value not given (%s:%d)",
			td->name, __FILE__, __LINE__);
		return -1;
	}
	/* Determine the number of elements */
	size = _A_CSEQUENCE_FROM_VOID(sptr)->count;
	if((size >= 1 && size <= 31)) {
		/* Perform validation of the inner elements */
		return td->encoding_constraints.general_constraints(td, sptr, ctfailcb, app_key);
	} else {
		ASN__CTFAIL(app_key, td, sptr,
			"%s: constraint failed (%s:%d)",
			td->name, __FILE__, __LINE__);
		return -1;
	}
}
/*
 * OER/PER constraint descriptors, generated from the SIZE() bounds in the
 * NR-RRC-Definitions ASN.1 module.  The *_type_* entries constrain the list
 * types themselves; the *_memb_* entries constrain the SEQUENCE members and
 * are paired with the memb_*_constraint_1() checks above.
 */
static asn_oer_constraints_t asn_OER_type_rlc_BearerToAddModList_constr_3 CC_NOTUSED = {
	{ 0, 0 },
	-1	/* (SIZE(1..32)) */};
static asn_per_constraints_t asn_PER_type_rlc_BearerToAddModList_constr_3 CC_NOTUSED = {
	{ APC_UNCONSTRAINED,	-1, -1,  0,  0 },
	{ APC_CONSTRAINED,	 5,  5,  1,  32 }	/* (SIZE(1..32)) */,
	0, 0	/* No PER value map */
};
static asn_oer_constraints_t asn_OER_type_rlc_BearerToReleaseList_constr_5 CC_NOTUSED = {
	{ 0, 0 },
	-1	/* (SIZE(1..32)) */};
static asn_per_constraints_t asn_PER_type_rlc_BearerToReleaseList_constr_5 CC_NOTUSED = {
	{ APC_UNCONSTRAINED,	-1, -1,  0,  0 },
	{ APC_CONSTRAINED,	 5,  5,  1,  32 }	/* (SIZE(1..32)) */,
	0, 0	/* No PER value map */
};
static asn_oer_constraints_t asn_OER_type_sCellToAddModList_constr_10 CC_NOTUSED = {
	{ 0, 0 },
	-1	/* (SIZE(1..31)) */};
static asn_per_constraints_t asn_PER_type_sCellToAddModList_constr_10 CC_NOTUSED = {
	{ APC_UNCONSTRAINED,	-1, -1,  0,  0 },
	{ APC_CONSTRAINED,	 5,  5,  1,  31 }	/* (SIZE(1..31)) */,
	0, 0	/* No PER value map */
};
static asn_oer_constraints_t asn_OER_type_sCellToReleaseList_constr_12 CC_NOTUSED = {
	{ 0, 0 },
	-1	/* (SIZE(1..31)) */};
static asn_per_constraints_t asn_PER_type_sCellToReleaseList_constr_12 CC_NOTUSED = {
	{ APC_UNCONSTRAINED,	-1, -1,  0,  0 },
	{ APC_CONSTRAINED,	 5,  5,  1,  31 }	/* (SIZE(1..31)) */,
	0, 0	/* No PER value map */
};
static asn_oer_constraints_t asn_OER_type_reportUplinkTxDirectCurrent_v1530_constr_16 CC_NOTUSED = {
	{ 0, 0 },
	-1};
static asn_per_constraints_t asn_PER_type_reportUplinkTxDirectCurrent_v1530_constr_16 CC_NOTUSED = {
	{ APC_CONSTRAINED,	 0,  0,  0,  0 }	/* (0..0) */,
	{ APC_UNCONSTRAINED,	-1, -1,  0,  0 },
	0, 0	/* No PER value map */
};
static asn_oer_constraints_t asn_OER_memb_rlc_BearerToAddModList_constr_3 CC_NOTUSED = {
	{ 0, 0 },
	-1	/* (SIZE(1..32)) */};
static asn_per_constraints_t asn_PER_memb_rlc_BearerToAddModList_constr_3 CC_NOTUSED = {
	{ APC_UNCONSTRAINED,	-1, -1,  0,  0 },
	{ APC_CONSTRAINED,	 5,  5,  1,  32 }	/* (SIZE(1..32)) */,
	0, 0	/* No PER value map */
};
static asn_oer_constraints_t asn_OER_memb_rlc_BearerToReleaseList_constr_5 CC_NOTUSED = {
	{ 0, 0 },
	-1	/* (SIZE(1..32)) */};
static asn_per_constraints_t asn_PER_memb_rlc_BearerToReleaseList_constr_5 CC_NOTUSED = {
	{ APC_UNCONSTRAINED,	-1, -1,  0,  0 },
	{ APC_CONSTRAINED,	 5,  5,  1,  32 }	/* (SIZE(1..32)) */,
	0, 0	/* No PER value map */
};
static asn_oer_constraints_t asn_OER_memb_sCellToAddModList_constr_10 CC_NOTUSED = {
	{ 0, 0 },
	-1	/* (SIZE(1..31)) */};
static asn_per_constraints_t asn_PER_memb_sCellToAddModList_constr_10 CC_NOTUSED = {
	{ APC_UNCONSTRAINED,	-1, -1,  0,  0 },
	{ APC_CONSTRAINED,	 5,  5,  1,  31 }	/* (SIZE(1..31)) */,
	0, 0	/* No PER value map */
};
static asn_oer_constraints_t asn_OER_memb_sCellToReleaseList_constr_12 CC_NOTUSED = {
	{ 0, 0 },
	-1	/* (SIZE(1..31)) */};
static asn_per_constraints_t asn_PER_memb_sCellToReleaseList_constr_12 CC_NOTUSED = {
	{ APC_UNCONSTRAINED,	-1, -1,  0,  0 },
	{ APC_CONSTRAINED,	 5,  5,  1,  31 }	/* (SIZE(1..31)) */,
	0, 0	/* No PER value map */
};
/*
 * Per-list member tables, tag lists, SET OF specifics and type descriptors
 * for the four SEQUENCE OF members of CellGroupConfigRrc.  Generated by
 * asn1c; the numeric suffix on each name is the ASN.1 component index.
 */
static asn_TYPE_member_t asn_MBR_rlc_BearerToAddModList_3[] = {
	{ ATF_POINTER, 0, 0,
		(ASN_TAG_CLASS_UNIVERSAL | (16 << 2)),
		0,
		&asn_DEF_RLC_BearerConfig,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		""
		},
};
static const ber_tlv_tag_t asn_DEF_rlc_BearerToAddModList_tags_3[] = {
	(ASN_TAG_CLASS_CONTEXT | (1 << 2)),
	(ASN_TAG_CLASS_UNIVERSAL | (16 << 2))
};
static asn_SET_OF_specifics_t asn_SPC_rlc_BearerToAddModList_specs_3 = {
	sizeof(struct CellGroupConfigRrc__rlc_BearerToAddModList),
	offsetof(struct CellGroupConfigRrc__rlc_BearerToAddModList, _asn_ctx),
	0,	/* XER encoding is XMLDelimitedItemList */
};
static /* Use -fall-defs-global to expose */
asn_TYPE_descriptor_t asn_DEF_rlc_BearerToAddModList_3 = {
	"rlc-BearerToAddModList",
	"rlc-BearerToAddModList",
	&asn_OP_SEQUENCE_OF,
	asn_DEF_rlc_BearerToAddModList_tags_3,
	sizeof(asn_DEF_rlc_BearerToAddModList_tags_3)
		/sizeof(asn_DEF_rlc_BearerToAddModList_tags_3[0]) - 1, /* 1 */
	asn_DEF_rlc_BearerToAddModList_tags_3,	/* Same as above */
	sizeof(asn_DEF_rlc_BearerToAddModList_tags_3)
		/sizeof(asn_DEF_rlc_BearerToAddModList_tags_3[0]), /* 2 */
	{ &asn_OER_type_rlc_BearerToAddModList_constr_3, &asn_PER_type_rlc_BearerToAddModList_constr_3, SEQUENCE_OF_constraint },
	asn_MBR_rlc_BearerToAddModList_3,
	1,	/* Single element */
	&asn_SPC_rlc_BearerToAddModList_specs_3	/* Additional specs */
};
static asn_TYPE_member_t asn_MBR_rlc_BearerToReleaseList_5[] = {
	{ ATF_POINTER, 0, 0,
		(ASN_TAG_CLASS_UNIVERSAL | (2 << 2)),
		0,
		&asn_DEF_LogicalChannelIdentity,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		""
		},
};
static const ber_tlv_tag_t asn_DEF_rlc_BearerToReleaseList_tags_5[] = {
	(ASN_TAG_CLASS_CONTEXT | (2 << 2)),
	(ASN_TAG_CLASS_UNIVERSAL | (16 << 2))
};
static asn_SET_OF_specifics_t asn_SPC_rlc_BearerToReleaseList_specs_5 = {
	sizeof(struct CellGroupConfigRrc__rlc_BearerToReleaseList),
	offsetof(struct CellGroupConfigRrc__rlc_BearerToReleaseList, _asn_ctx),
	0,	/* XER encoding is XMLDelimitedItemList */
};
static /* Use -fall-defs-global to expose */
asn_TYPE_descriptor_t asn_DEF_rlc_BearerToReleaseList_5 = {
	"rlc-BearerToReleaseList",
	"rlc-BearerToReleaseList",
	&asn_OP_SEQUENCE_OF,
	asn_DEF_rlc_BearerToReleaseList_tags_5,
	sizeof(asn_DEF_rlc_BearerToReleaseList_tags_5)
		/sizeof(asn_DEF_rlc_BearerToReleaseList_tags_5[0]) - 1, /* 1 */
	asn_DEF_rlc_BearerToReleaseList_tags_5,	/* Same as above */
	sizeof(asn_DEF_rlc_BearerToReleaseList_tags_5)
		/sizeof(asn_DEF_rlc_BearerToReleaseList_tags_5[0]), /* 2 */
	{ &asn_OER_type_rlc_BearerToReleaseList_constr_5, &asn_PER_type_rlc_BearerToReleaseList_constr_5, SEQUENCE_OF_constraint },
	asn_MBR_rlc_BearerToReleaseList_5,
	1,	/* Single element */
	&asn_SPC_rlc_BearerToReleaseList_specs_5	/* Additional specs */
};
static asn_TYPE_member_t asn_MBR_sCellToAddModList_10[] = {
	{ ATF_POINTER, 0, 0,
		(ASN_TAG_CLASS_UNIVERSAL | (16 << 2)),
		0,
		&asn_DEF_SCellConfig,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		""
		},
};
static const ber_tlv_tag_t asn_DEF_sCellToAddModList_tags_10[] = {
	(ASN_TAG_CLASS_CONTEXT | (6 << 2)),
	(ASN_TAG_CLASS_UNIVERSAL | (16 << 2))
};
static asn_SET_OF_specifics_t asn_SPC_sCellToAddModList_specs_10 = {
	sizeof(struct CellGroupConfigRrc__sCellToAddModList),
	offsetof(struct CellGroupConfigRrc__sCellToAddModList, _asn_ctx),
	0,	/* XER encoding is XMLDelimitedItemList */
};
static /* Use -fall-defs-global to expose */
asn_TYPE_descriptor_t asn_DEF_sCellToAddModList_10 = {
	"sCellToAddModList",
	"sCellToAddModList",
	&asn_OP_SEQUENCE_OF,
	asn_DEF_sCellToAddModList_tags_10,
	sizeof(asn_DEF_sCellToAddModList_tags_10)
		/sizeof(asn_DEF_sCellToAddModList_tags_10[0]) - 1, /* 1 */
	asn_DEF_sCellToAddModList_tags_10,	/* Same as above */
	sizeof(asn_DEF_sCellToAddModList_tags_10)
		/sizeof(asn_DEF_sCellToAddModList_tags_10[0]), /* 2 */
	{ &asn_OER_type_sCellToAddModList_constr_10, &asn_PER_type_sCellToAddModList_constr_10, SEQUENCE_OF_constraint },
	asn_MBR_sCellToAddModList_10,
	1,	/* Single element */
	&asn_SPC_sCellToAddModList_specs_10	/* Additional specs */
};
static asn_TYPE_member_t asn_MBR_sCellToReleaseList_12[] = {
	{ ATF_POINTER, 0, 0,
		(ASN_TAG_CLASS_UNIVERSAL | (2 << 2)),
		0,
		&asn_DEF_SCellIndex,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		""
		},
};
static const ber_tlv_tag_t asn_DEF_sCellToReleaseList_tags_12[] = {
	(ASN_TAG_CLASS_CONTEXT | (7 << 2)),
	(ASN_TAG_CLASS_UNIVERSAL | (16 << 2))
};
static asn_SET_OF_specifics_t asn_SPC_sCellToReleaseList_specs_12 = {
	sizeof(struct CellGroupConfigRrc__sCellToReleaseList),
	offsetof(struct CellGroupConfigRrc__sCellToReleaseList, _asn_ctx),
	0,	/* XER encoding is XMLDelimitedItemList */
};
static /* Use -fall-defs-global to expose */
asn_TYPE_descriptor_t asn_DEF_sCellToReleaseList_12 = {
	"sCellToReleaseList",
	"sCellToReleaseList",
	&asn_OP_SEQUENCE_OF,
	asn_DEF_sCellToReleaseList_tags_12,
	sizeof(asn_DEF_sCellToReleaseList_tags_12)
		/sizeof(asn_DEF_sCellToReleaseList_tags_12[0]) - 1, /* 1 */
	asn_DEF_sCellToReleaseList_tags_12,	/* Same as above */
	sizeof(asn_DEF_sCellToReleaseList_tags_12)
		/sizeof(asn_DEF_sCellToReleaseList_tags_12[0]), /* 2 */
	{ &asn_OER_type_sCellToReleaseList_constr_12, &asn_PER_type_sCellToReleaseList_constr_12, SEQUENCE_OF_constraint },
	asn_MBR_sCellToReleaseList_12,
	1,	/* Single element */
	&asn_SPC_sCellToReleaseList_specs_12	/* Additional specs */
};
/*
 * Descriptors for the single-valued ENUMERATED reportUplinkTxDirectCurrent-v1530
 * and the extension container ext1 that carries it.  Generated by asn1c.
 */
static const asn_INTEGER_enum_map_t asn_MAP_reportUplinkTxDirectCurrent_v1530_value2enum_16[] = {
	{ 0,	4,	"true" }
};
static const unsigned int asn_MAP_reportUplinkTxDirectCurrent_v1530_enum2value_16[] = {
	0	/* true(0) */
};
static const asn_INTEGER_specifics_t asn_SPC_reportUplinkTxDirectCurrent_v1530_specs_16 = {
	asn_MAP_reportUplinkTxDirectCurrent_v1530_value2enum_16,	/* "tag" => N; sorted by tag */
	asn_MAP_reportUplinkTxDirectCurrent_v1530_enum2value_16,	/* N => "tag"; sorted by N */
	1,	/* Number of elements in the maps */
	0,	/* Enumeration is not extensible */
	1,	/* Strict enumeration */
	0,	/* Native long size */
	0
};
static const ber_tlv_tag_t asn_DEF_reportUplinkTxDirectCurrent_v1530_tags_16[] = {
	(ASN_TAG_CLASS_CONTEXT | (0 << 2)),
	(ASN_TAG_CLASS_UNIVERSAL | (10 << 2))
};
static /* Use -fall-defs-global to expose */
asn_TYPE_descriptor_t asn_DEF_reportUplinkTxDirectCurrent_v1530_16 = {
	"reportUplinkTxDirectCurrent-v1530",
	"reportUplinkTxDirectCurrent-v1530",
	&asn_OP_NativeEnumerated,
	asn_DEF_reportUplinkTxDirectCurrent_v1530_tags_16,
	sizeof(asn_DEF_reportUplinkTxDirectCurrent_v1530_tags_16)
		/sizeof(asn_DEF_reportUplinkTxDirectCurrent_v1530_tags_16[0]) - 1, /* 1 */
	asn_DEF_reportUplinkTxDirectCurrent_v1530_tags_16,	/* Same as above */
	sizeof(asn_DEF_reportUplinkTxDirectCurrent_v1530_tags_16)
		/sizeof(asn_DEF_reportUplinkTxDirectCurrent_v1530_tags_16[0]), /* 2 */
	{ &asn_OER_type_reportUplinkTxDirectCurrent_v1530_constr_16, &asn_PER_type_reportUplinkTxDirectCurrent_v1530_constr_16, NativeEnumerated_constraint },
	0, 0,	/* Defined elsewhere */
	&asn_SPC_reportUplinkTxDirectCurrent_v1530_specs_16	/* Additional specs */
};
static asn_TYPE_member_t asn_MBR_ext1_15[] = {
	{ ATF_POINTER, 1, offsetof(struct CellGroupConfigRrc__ext1, reportUplinkTxDirectCurrent_v1530),
		(ASN_TAG_CLASS_CONTEXT | (0 << 2)),
		-1,	/* IMPLICIT tag at current level */
		&asn_DEF_reportUplinkTxDirectCurrent_v1530_16,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		"reportUplinkTxDirectCurrent-v1530"
		},
};
static const int asn_MAP_ext1_oms_15[] = { 0 };
static const ber_tlv_tag_t asn_DEF_ext1_tags_15[] = {
	(ASN_TAG_CLASS_CONTEXT | (8 << 2)),
	(ASN_TAG_CLASS_UNIVERSAL | (16 << 2))
};
static const asn_TYPE_tag2member_t asn_MAP_ext1_tag2el_15[] = {
    { (ASN_TAG_CLASS_CONTEXT | (0 << 2)), 0, 0, 0 } /* reportUplinkTxDirectCurrent-v1530 */
};
static asn_SEQUENCE_specifics_t asn_SPC_ext1_specs_15 = {
	sizeof(struct CellGroupConfigRrc__ext1),
	offsetof(struct CellGroupConfigRrc__ext1, _asn_ctx),
	asn_MAP_ext1_tag2el_15,
	1,	/* Count of tags in the map */
	asn_MAP_ext1_oms_15,	/* Optional members */
	1, 0,	/* Root/Additions */
	-1,	/* First extension addition */
};
static /* Use -fall-defs-global to expose */
asn_TYPE_descriptor_t asn_DEF_ext1_15 = {
	"ext1",
	"ext1",
	&asn_OP_SEQUENCE,
	asn_DEF_ext1_tags_15,
	sizeof(asn_DEF_ext1_tags_15)
		/sizeof(asn_DEF_ext1_tags_15[0]) - 1, /* 1 */
	asn_DEF_ext1_tags_15,	/* Same as above */
	sizeof(asn_DEF_ext1_tags_15)
		/sizeof(asn_DEF_ext1_tags_15[0]), /* 2 */
	{ 0, 0, SEQUENCE_constraint },
	asn_MBR_ext1_15,
	1,	/* Elements count */
	&asn_SPC_ext1_specs_15	/* Additional specs */
};
/*
 * Top-level SEQUENCE member table and public type descriptor for
 * CellGroupConfigRrc.  Members 1..7 are optional roots, member 8 (ext1)
 * is the first extension addition.  Generated by asn1c.
 */
static asn_TYPE_member_t asn_MBR_CellGroupConfigRrc_1[] = {
	{ ATF_NOFLAGS, 0, offsetof(struct CellGroupConfigRrc, cellGroupId),
		(ASN_TAG_CLASS_CONTEXT | (0 << 2)),
		-1,	/* IMPLICIT tag at current level */
		&asn_DEF_CellGroupId,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		"cellGroupId"
		},
	{ ATF_POINTER, 8, offsetof(struct CellGroupConfigRrc, rlc_BearerToAddModList),
		(ASN_TAG_CLASS_CONTEXT | (1 << 2)),
		0,
		&asn_DEF_rlc_BearerToAddModList_3,
		0,
		{ &asn_OER_memb_rlc_BearerToAddModList_constr_3, &asn_PER_memb_rlc_BearerToAddModList_constr_3,  memb_rlc_BearerToAddModList_constraint_1 },
		0, 0, /* No default value */
		"rlc-BearerToAddModList"
		},
	{ ATF_POINTER, 7, offsetof(struct CellGroupConfigRrc, rlc_BearerToReleaseList),
		(ASN_TAG_CLASS_CONTEXT | (2 << 2)),
		0,
		&asn_DEF_rlc_BearerToReleaseList_5,
		0,
		{ &asn_OER_memb_rlc_BearerToReleaseList_constr_5, &asn_PER_memb_rlc_BearerToReleaseList_constr_5,  memb_rlc_BearerToReleaseList_constraint_1 },
		0, 0, /* No default value */
		"rlc-BearerToReleaseList"
		},
	{ ATF_POINTER, 6, offsetof(struct CellGroupConfigRrc, mac_CellGroupConfig),
		(ASN_TAG_CLASS_CONTEXT | (3 << 2)),
		-1,	/* IMPLICIT tag at current level */
		&asn_DEF_MAC_CellGroupConfig,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		"mac-CellGroupConfig"
		},
	{ ATF_POINTER, 5, offsetof(struct CellGroupConfigRrc, physicalCellGroupConfig),
		(ASN_TAG_CLASS_CONTEXT | (4 << 2)),
		-1,	/* IMPLICIT tag at current level */
		&asn_DEF_PhysicalCellGroupConfig,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		"physicalCellGroupConfig"
		},
	{ ATF_POINTER, 4, offsetof(struct CellGroupConfigRrc, spCellConfig),
		(ASN_TAG_CLASS_CONTEXT | (5 << 2)),
		-1,	/* IMPLICIT tag at current level */
		&asn_DEF_SpCellConfig,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		"spCellConfig"
		},
	{ ATF_POINTER, 3, offsetof(struct CellGroupConfigRrc, sCellToAddModList),
		(ASN_TAG_CLASS_CONTEXT | (6 << 2)),
		0,
		&asn_DEF_sCellToAddModList_10,
		0,
		{ &asn_OER_memb_sCellToAddModList_constr_10, &asn_PER_memb_sCellToAddModList_constr_10,  memb_sCellToAddModList_constraint_1 },
		0, 0, /* No default value */
		"sCellToAddModList"
		},
	{ ATF_POINTER, 2, offsetof(struct CellGroupConfigRrc, sCellToReleaseList),
		(ASN_TAG_CLASS_CONTEXT | (7 << 2)),
		0,
		&asn_DEF_sCellToReleaseList_12,
		0,
		{ &asn_OER_memb_sCellToReleaseList_constr_12, &asn_PER_memb_sCellToReleaseList_constr_12,  memb_sCellToReleaseList_constraint_1 },
		0, 0, /* No default value */
		"sCellToReleaseList"
		},
	{ ATF_POINTER, 1, offsetof(struct CellGroupConfigRrc, ext1),
		(ASN_TAG_CLASS_CONTEXT | (8 << 2)),
		0,
		&asn_DEF_ext1_15,
		0,
		{ 0, 0, 0 },
		0, 0, /* No default value */
		"ext1"
		},
};
static const int asn_MAP_CellGroupConfigRrc_oms_1[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
static const ber_tlv_tag_t asn_DEF_CellGroupConfigRrc_tags_1[] = {
	(ASN_TAG_CLASS_UNIVERSAL | (16 << 2))
};
static const asn_TYPE_tag2member_t asn_MAP_CellGroupConfigRrc_tag2el_1[] = {
    { (ASN_TAG_CLASS_CONTEXT | (0 << 2)), 0, 0, 0 }, /* cellGroupId */
    { (ASN_TAG_CLASS_CONTEXT | (1 << 2)), 1, 0, 0 }, /* rlc-BearerToAddModList */
    { (ASN_TAG_CLASS_CONTEXT | (2 << 2)), 2, 0, 0 }, /* rlc-BearerToReleaseList */
    { (ASN_TAG_CLASS_CONTEXT | (3 << 2)), 3, 0, 0 }, /* mac-CellGroupConfig */
    { (ASN_TAG_CLASS_CONTEXT | (4 << 2)), 4, 0, 0 }, /* physicalCellGroupConfig */
    { (ASN_TAG_CLASS_CONTEXT | (5 << 2)), 5, 0, 0 }, /* spCellConfig */
    { (ASN_TAG_CLASS_CONTEXT | (6 << 2)), 6, 0, 0 }, /* sCellToAddModList */
    { (ASN_TAG_CLASS_CONTEXT | (7 << 2)), 7, 0, 0 }, /* sCellToReleaseList */
    { (ASN_TAG_CLASS_CONTEXT | (8 << 2)), 8, 0, 0 } /* ext1 */
};
static asn_SEQUENCE_specifics_t asn_SPC_CellGroupConfigRrc_specs_1 = {
	sizeof(struct CellGroupConfigRrc),
	offsetof(struct CellGroupConfigRrc, _asn_ctx),
	asn_MAP_CellGroupConfigRrc_tag2el_1,
	9,	/* Count of tags in the map */
	asn_MAP_CellGroupConfigRrc_oms_1,	/* Optional members */
	7, 1,	/* Root/Additions */
	8,	/* First extension addition */
};
asn_TYPE_descriptor_t asn_DEF_CellGroupConfigRrc = {
	"CellGroupConfigRrc",
	"CellGroupConfigRrc",
	&asn_OP_SEQUENCE,
	asn_DEF_CellGroupConfigRrc_tags_1,
	sizeof(asn_DEF_CellGroupConfigRrc_tags_1)
		/sizeof(asn_DEF_CellGroupConfigRrc_tags_1[0]), /* 1 */
	asn_DEF_CellGroupConfigRrc_tags_1,	/* Same as above */
	sizeof(asn_DEF_CellGroupConfigRrc_tags_1)
		/sizeof(asn_DEF_CellGroupConfigRrc_tags_1[0]), /* 1 */
	{ 0, 0, SEQUENCE_constraint },
	asn_MBR_CellGroupConfigRrc_1,
	9,	/* Elements count */
	&asn_SPC_CellGroupConfigRrc_specs_1	/* Additional specs */
};
|
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
*** To edit the content of this header, modify the corresponding
*** source file (e.g. under external/kernel-headers/original/) then
*** run bionic/libc/kernel/tools/update_all.py
***
*** Any manual change here will be lost the next time this script will
*** be run. You've been warned!
***
****************************************************************************
****************************************************************************/
#ifndef __ASM_GENERIC_SIGNAL_DEFS_H
#define __ASM_GENERIC_SIGNAL_DEFS_H
/* The redundant guard spellings below are emitted by the bionic header
 * cleaner so every historical include-guard variant is defined. */
#define _ASM_GENERIC_SIGNAL_DEFS_H
#define _ASM_GENERIC_SIGNAL_DEFS_H_
#define _UAPI_ASM_GENERIC_SIGNAL_DEFS_H
#define _UAPI_ASM_GENERIC_SIGNAL_DEFS_H_
#define _UAPI__ASM_GENERIC_SIGNAL_DEFS_H
#define _UAPI__ASM_GENERIC_SIGNAL_DEFS_H_
#define __ASM_GENERIC_SIGNAL_DEFS_H_
#include <museum/7.1.2/bionic/libc/linux/compiler.h>
/* sigprocmask() `how` values. */
#ifndef SIG_BLOCK
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define SIG_BLOCK 0
#endif
#ifndef SIG_UNBLOCK
#define SIG_UNBLOCK 1
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#endif
#ifndef SIG_SETMASK
#define SIG_SETMASK 2
#endif
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#ifndef __ASSEMBLY__
/* Signal handler / restorer function-pointer types and the special
 * handler constants (default / ignore / error sentinel). */
typedef void __signalfn_t(int);
typedef __signalfn_t __user * __sighandler_t;
typedef void __restorefn_t(void);
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
typedef __restorefn_t __user * __sigrestore_t;
#define SIG_DFL ((__force __sighandler_t) 0)
#define SIG_IGN ((__force __sighandler_t) 1)
#define SIG_ERR ((__force __sighandler_t) - 1)
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#endif
#endif
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import threading
from queue import Queue
import requests
import tensorflow as tf
import model
# In[4]:
get_ipython().run_cell_magic(
"time",
"",
'tf.reset_default_graph()\nsess = tf.InteractiveSession()\nmodel.Model(3, 256, 64, 2, 0.0001)\nsess.run(tf.global_variables_initializer())\nsaver = tf.train.Saver(tf.global_variables())\nsaver.restore(sess, os.getcwd() + "/model-rnn-vector-huber.ckpt")',
)
# In[5]:
get_ipython().run_cell_magic(
"time",
"",
"tf.reset_default_graph()\ndef load_graph(frozen_graph_filename):\n with tf.gfile.GFile(frozen_graph_filename, \"rb\") as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(graph_def)\n return graph\n\ng=load_graph('frozen_model.pb')\nx = g.get_tensor_by_name('import/Placeholder:0')\ny = g.get_tensor_by_name('import/logits:0')\nsess = tf.InteractiveSession(graph=g)",
)
# ## Freeze model loaded more faster than dynamic model
# In[9]:
def run_parallel_in_threads(target, args_list):
    """Run ``target`` once per argument tuple, each call in its own thread.

    Parameters
    ----------
    target : callable
        Function executed in each worker thread.
    args_list : iterable of tuple
        One positional-argument tuple per thread.

    Returns
    -------
    list
        Non-None return values of the calls, in completion order.
    """
    results = Queue()

    def task_wrapper(*args):
        # Queue is thread-safe, so workers can push results concurrently.
        results.put(target(*args))

    threads = [threading.Thread(target=task_wrapper, args=args) for args in args_list]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    collected = []
    while not results.empty():
        collected.append(results.get())
    # Drop only None results.  The original used filter(None, ...), which also
    # silently discarded legitimate falsy results such as 0 or "".
    return [out for out in collected if out is not None]
def get_time(text, type_api, i):
    """Hit the serving endpoint and return [raw response repr, request index].

    ``type_api`` selects the route ("dynamic" or "static"); ``i`` is echoed
    back so callers can match responses to requests.
    """
    url = "http://192.168.0.102:8033/%s?text=%s" % (type_api, text)
    response = str(requests.get(url).content)
    return [response, i]
# # Stress test 20 requests concurrently on dynamic graph
# In[27]:
CONCURRENT = 20
threads = []
for i in range(CONCURRENT):
    threads.append(("Freeze model loaded more faster than dynamic model", "dynamic", i))
outputs = run_parallel_in_threads(get_time, threads)
total = 0
for i in outputs:
    # i[0] is str(bytes), e.g. "b'0.123'"; [2:-1] strips the b'...' wrapper.
    # assumes the server body is a bare float — TODO confirm response format.
    total += float(i[0][2:-1])
    print("thread %d, time taken %f s" % (i[1], float(i[0][2:-1])))
print("total time taken %f s, average time taken %f s" % (total, total / CONCURRENT))
# # Stress test 20 requests concurrently on static graph
# In[29]:
CONCURRENT = 20
threads = []
for i in range(CONCURRENT):
    threads.append(("Freeze model loaded more faster than dynamic model", "static", i))
outputs = run_parallel_in_threads(get_time, threads)
total = 0
for i in outputs:
    total += float(i[0][2:-1])
    print("thread %d, time taken %f s" % (i[1], float(i[0][2:-1])))
print("total time taken %f s, average time taken %f s" % (total, total / CONCURRENT))
# # Run 5 experiments on stress test 20 requests concurrently on dynamic graph
# In[34]:
total_experiments = 0
for _ in range(5):
    CONCURRENT = 20
    threads = []
    for i in range(CONCURRENT):
        threads.append(("Freeze model loaded more faster than dynamic model", "dynamic", i))
    outputs = run_parallel_in_threads(get_time, threads)
    total = 0
    for i in outputs:
        total += float(i[0][2:-1])
    total_experiments += total
print(
    "time taken to run experiments %f s, average %f s" % (total_experiments, total_experiments / 5)
)
# # Run 5 experiments on stress test 20 requests concurrently on static graph
# In[35]:
total_experiments = 0
for _ in range(5):
    CONCURRENT = 20
    threads = []
    for i in range(CONCURRENT):
        threads.append(("Freeze model loaded more faster than dynamic model", "static", i))
    outputs = run_parallel_in_threads(get_time, threads)
    total = 0
    for i in outputs:
        total += float(i[0][2:-1])
    total_experiments += total
print(
    "time taken to run experiments %f s, average %f s" % (total_experiments, total_experiments / 5)
)
# In[ ]:
|
/**
 * Map a numeric order state to its Vietnamese display label.
 *
 * @param {number|string} state - State code; coerced with Number().
 * @returns {string} Label for states 0-2; any other value falls back to
 *   the initial "Tạo sản phẩm" label, matching the switch default.
 */
export function convertState(state) {
  const labels = ['Tạo sản phẩm', 'Đã thanh toán', 'Đã giao hàng'];
  return labels[Number(state)] ?? 'Tạo sản phẩm';
}
|
import sys,os
sys.path.append("..")
import numpy as np
import tensorflow as tf
from example import hvd_distributed_classifier as bert_classifier
from bunch import Bunch
from data_generator import tokenization
from data_generator import hvd_distributed_tf_data_utils as tf_data_utils
from model_io import model_io
from example import feature_writer, write_to_tfrecords
import json
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import horovod.tensorflow as hvd
from optimizer import hvd_distributed_optimizer as optimizer
from porn_classification import classifier_processor
flags = tf.flags
FLAGS = flags.FLAGS
# Silence all but TF error-level C++ log output.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
## Required parameters
# NOTE(review): most help strings below are copy-pasted placeholders
# ("Input TF example files ...") and do not describe the individual flags;
# confirm intended semantics before relying on --help output.
flags.DEFINE_string(
    "eval_data_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")
flags.DEFINE_string(
    "output_file", None,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
    "config_file", None,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
    "init_checkpoint", None,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
    "result_file", None,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
    "vocab_file", None,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
    "label_id", None,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
    "max_length", 128,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
    "train_file", None,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
    "dev_file", None,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
    "model_output", None,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
    "gpu_id", None,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
    "epoch", 5,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
    "num_classes", 5,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
    "train_size", 1402171,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
    "batch_size", 32,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
    "model_type", None,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
    "if_shard", None,
    "Input TF example files (can be a glob or comma separated).")
flags.DEFINE_bool(
    "lower_case", True,
    "Input TF example files (can be a glob or comma separated).")
def main(_):
    """Horovod-distributed evaluation of a BERT classifier.

    Reads the model config/checkpoint from FLAGS, converts examples to a
    TFRecord file, runs one evaluation pass per worker and, on rank 0,
    pickles predictions to FLAGS.model_output/predict.pkl.
    """
    hvd.init()
    sess_config = tf.ConfigProto()
    # Pin each Horovod process to its own local GPU.
    sess_config.gpu_options.visible_device_list = str(hvd.local_rank())
    graph = tf.Graph()
    with graph.as_default():
        import json
        config = json.load(open(FLAGS.config_file, "r"))
        init_checkpoint = FLAGS.init_checkpoint
        config = Bunch(config)
        config.use_one_hot_embeddings = True
        config.scope = "bert"
        config.dropout_prob = 0.1
        config.label_type = "single_label"
        # if_shard == "1": shard the data, keep epochs; "0": shard epochs.
        # NOTE(review): any other value leaves train_size/epoch unbound and
        # crashes below — confirm whether that is intended.
        if FLAGS.if_shard == "0":
            train_size = FLAGS.train_size
            epoch = int(FLAGS.epoch / hvd.size())
        elif FLAGS.if_shard == "1":
            train_size = int(FLAGS.train_size/hvd.size())
            epoch = FLAGS.epoch
        tokenizer = tokenization.FullTokenizer(
            vocab_file=FLAGS.vocab_file,
            do_lower_case=FLAGS.lower_case)
        classifier_data_api = classifier_processor.EvaluationProcessor()
        classifier_data_api.get_labels(FLAGS.label_id)
        # NOTE(review): examples come from train_file but are written to
        # eval_data_file, which is then evaluated below — confirm intent.
        train_examples = classifier_data_api.get_train_examples(FLAGS.train_file)
        write_to_tfrecords.convert_classifier_examples_to_features(train_examples,
                                                                classifier_data_api.label2id,
                                                                FLAGS.max_length,
                                                                tokenizer,
                                                                FLAGS.eval_data_file)
        init_lr = 2e-5
        num_train_steps = int(
            train_size / FLAGS.batch_size * epoch)
        num_warmup_steps = int(num_train_steps * 0.1)
        num_storage_steps = int(train_size / FLAGS.batch_size)
        print(" model type {}".format(FLAGS.model_type))
        print(num_train_steps, num_warmup_steps, "=============")
        # Scale the learning rate down by the worker count (standard Horovod
        # practice is to scale up; this code divides instead).
        opt_config = Bunch({"init_lr":init_lr/hvd.size(),
                            "num_train_steps":num_train_steps,
                            "num_warmup_steps":num_warmup_steps})
        sess = tf.Session(config=sess_config)
        model_io_config = Bunch({"fix_lm":False})
        model_io_fn = model_io.ModelIO(model_io_config)
        optimizer_fn = optimizer.Optimizer(opt_config)
        num_classes = FLAGS.num_classes
        model_eval_fn = bert_classifier.classifier_model_fn_builder(config, num_classes, init_checkpoint,
                                                                reuse=False,
                                                                load_pretrained=True,
                                                                model_io_fn=model_io_fn,
                                                                optimizer_fn=optimizer_fn,
                                                                model_io_config=model_io_config,
                                                                opt_config=opt_config)
        def metric_fn(features, logits, loss):
            # Build per-batch eval tensors: accuracy, predictions, probs.
            print(logits.get_shape(), "===logits shape===")
            pred_label = tf.argmax(logits, axis=-1, output_type=tf.int32)
            prob = tf.nn.softmax(logits)
            # First assignment to `accuracy` is immediately overwritten; only
            # `correct` (the element-wise match mask) is reused.
            accuracy = correct = tf.equal(
                tf.cast(pred_label, tf.int32),
                tf.cast(features["label_ids"], tf.int32)
            )
            accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
            return {"accuracy":accuracy, "loss":loss, "pred_label":pred_label,
                    "label_ids":features["label_ids"],
                    "prob":prob}
        # Schema of the serialized tf.Examples produced above.
        name_to_features = {
            "input_ids":
                tf.FixedLenFeature([FLAGS.max_length], tf.int64),
            "input_mask":
                tf.FixedLenFeature([FLAGS.max_length], tf.int64),
            "segment_ids":
                tf.FixedLenFeature([FLAGS.max_length], tf.int64),
            "label_ids":
                tf.FixedLenFeature([], tf.int64),
        }
        def _decode_record(record, name_to_features):
            """Decodes a record to a TensorFlow example.
            """
            example = tf.parse_single_example(record, name_to_features)
            # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
            # So cast all int64 to int32.
            for name in list(example.keys()):
                t = example[name]
                if t.dtype == tf.int64:
                    t = tf.to_int32(t)
                example[name] = t
            return example
        params = Bunch({})
        params.epoch = FLAGS.epoch
        params.batch_size = FLAGS.batch_size
        eval_features = tf_data_utils.eval_input_fn(FLAGS.eval_data_file,
                                    _decode_record, name_to_features, params, if_shard=FLAGS.if_shard)
        [_, eval_loss, eval_per_example_loss, eval_logits] = model_eval_fn(eval_features, [], tf.estimator.ModeKeys.EVAL)
        result = metric_fn(eval_features, eval_logits, eval_loss)
        init_op = tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer())
        sess.run(init_op)
        # Broadcast rank-0 variables so all workers evaluate the same weights.
        sess.run(hvd.broadcast_global_variables(0))
        print("===horovod rank==={}".format(hvd.rank()))
        def eval_fn(result):
            # Drain the eval dataset, accumulating predictions and accuracy
            # until the input pipeline raises OutOfRangeError.
            i = 0
            total_accuracy = 0
            label, label_id, prob = [], [], []
            while True:
                try:
                    eval_result = sess.run(result)
                    total_accuracy += eval_result["accuracy"]
                    label_id.extend(eval_result["label_ids"])
                    label.extend(eval_result["pred_label"])
                    prob.extend(eval_result["prob"])
                    i += 1
                except tf.errors.OutOfRangeError:
                    print("End of dataset")
                    break
            # Macro/micro precision and recall are computed but not returned.
            macro_f1 = f1_score(label_id, label, average="macro")
            micro_f1 = f1_score(label_id, label, average="micro")
            macro_precision = precision_score(label_id, label, average="macro")
            micro_precision = precision_score(label_id, label, average="micro")
            macro_recall = recall_score(label_id, label, average="macro")
            micro_recall = recall_score(label_id, label, average="micro")
            accuracy = accuracy_score(label_id, label)
            print("test accuracy {} macro_f1 score {} micro_f1 {} accuracy {}".format(total_accuracy/ i,
                macro_f1, micro_f1, accuracy))
            return total_accuracy/ i, label_id, label, prob
        # Duplicate import kept verbatim (harmless; Python caches modules).
        import time
        import time
        start = time.time()
        acc, true_label, pred_label, prob = eval_fn(result)
        end = time.time()
        print("==total time {} numbers of devices {}".format(end - start, hvd.size()))
        # Only rank 0 persists predictions to avoid concurrent writes.
        if hvd.rank() == 0:
            import _pickle as pkl
            pkl.dump({"true_label":true_label,
                      "pred_label":pred_label,
                      "prob":prob},
                     open(FLAGS.model_output+"/predict.pkl", "wb"))
# Standard TensorFlow launcher: parses FLAGS, then invokes main().
if __name__ == "__main__":
    tf.app.run()
|
"""
m2g.utils.cloud_utils
~~~~~~~~~~~~~~~~~~~~~~
Contains utility functions for working on the cloud with AWS.
"""
# standard library imports
from configparser import ConfigParser
import os
import sys
# package imports
import boto3
def get_credentials():
    """Searches for and returns AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.

    First tries the ``default`` profile of ``~/.aws/credentials``; if that
    file is missing or unreadable, falls back to the ``AWS_ACCESS_KEY_ID``
    and ``AWS_SECRET_ACCESS_KEY`` environment variables.

    Returns
    -------
    tuple
        Two strings inside of a tuple, (Access_key, Secret_access_key)

    Raises
    ------
    AttributeError
        No AWS credentials are found
    """
    # TODO: add option to pass a profile name instead of "default".
    try:
        config = ConfigParser()
        config.read(os.getenv("HOME") + "/.aws/credentials")
        return (
            config.get("default", "aws_access_key_id"),
            config.get("default", "aws_secret_access_key"),
        )
    except Exception:
        # Credentials file missing/unparsable (or HOME unset): fall back
        # to environment variables.
        ACCESS = os.getenv("AWS_ACCESS_KEY_ID")
        SECRET = os.getenv("AWS_SECRET_ACCESS_KEY")
        # Bug fix: the original `if not ACCESS and SECRET` parsed as
        # `(not ACCESS) and SECRET`, so it failed to raise when only the
        # secret (or both values) was missing. Require BOTH to be present.
        if not (ACCESS and SECRET):
            raise AttributeError("No AWS credentials found.")
        return (ACCESS, SECRET)
def s3_client(service="s3"):
    """Create a boto3 client for the requested AWS service.

    Parameters
    ----------
    service : str
        Type of service.

    Returns
    -------
    boto3.client
        client with proper credentials.
    """
    try:
        access, secret = get_credentials()
    except AttributeError:
        # No explicit credentials found; let boto3 resolve them itself
        # (environment, shared config, IAM role, ...).
        return boto3.client(service)
    return boto3.client(
        service,
        aws_access_key_id=access,
        aws_secret_access_key=secret,
    )
def parse_path(s3_datapath):
    """Return bucket and prefix from a full s3 path.

    Parameters
    ----------
    s3_datapath : str
        path to a bucket.
        Should be of the form s3://bucket/prefix/.

    Returns
    -------
    tuple
        bucket and prefix.
    """
    # Drop the scheme ("s3:") by keeping everything after the "//" separator.
    without_scheme = str(s3_datapath).split("//")[1]
    # The first path component is the bucket; the remainder is the prefix.
    bucket, _, prefix = without_scheme.partition("/")
    bucket = bucket.strip("/")
    return bucket, prefix
def get_matching_s3_objects(bucket, prefix="", suffix=""):
    """
    Generate objects in an S3 bucket.

    Yields the keys of all objects in ``bucket`` whose key starts with
    ``prefix`` and ends with ``suffix``, transparently following S3's
    paginated (1000 keys per page) listing API.

    Parameters
    ----------
    bucket : str
        Name of the s3 bucket.
    prefix : str, optional
        Only fetch objects whose key starts with this prefix, by default ''
    suffix : str, optional
        Only fetch objects whose keys end with this suffix, by default ''
    """
    s3 = s3_client(service="s3")
    kwargs = {"Bucket": bucket}

    # If the prefix is a single string (not a tuple of strings), we can
    # do the filtering directly in the S3 API.
    if isinstance(prefix, str):
        kwargs["Prefix"] = prefix

    while True:
        # The S3 API response is a large blob of metadata.
        # 'Contents' contains information about the listed objects.
        resp = s3.list_objects_v2(**kwargs)
        try:
            contents = resp["Contents"]
        except KeyError:
            # No objects matched at all: warn and end the generator.
            print("No contents found. Check that both the path to your files on your s3 bucket and your aws credentials are correct.")
            return

        # Filter again client-side: needed when `prefix` is a tuple (which
        # the S3 API cannot filter on) and for the suffix in every case.
        for obj in contents:
            key = obj["Key"]
            if key.startswith(prefix) and key.endswith(suffix):
                yield key

        # The S3 API is paginated, returning up to 1000 keys at a time.
        # Pass the continuation token into the next response, until we
        # reach the final page (when this field is missing).
        try:
            kwargs["ContinuationToken"] = resp["NextContinuationToken"]
        except KeyError:
            break
def s3_get_data(bucket, remote, local, info="", force=False):
    """Given an s3 directory, copies files/subdirectories in that directory to local

    Parameters
    ----------
    bucket : str
        s3 bucket you are accessing data from
    remote : str
        The path to the data on your S3 bucket. The data will be
        downloaded to the provided bids_dir on your machine.
    local : list
        Local input directory where you want the files copied to and subject/session info [input, sub-#/ses-#]
    info : str, optional
        Relevant subject and session information in the form of sub-#/ses-#
    force : bool, optional
        Whether to overwrite the local directory containing the s3 files if it already exists, by default False
    """
    if info == "sub-":
        # No concrete subject given: mirror the whole remote directory.
        print("Subject not specified, comparing input folder to remote directory...")
    else:
        if os.path.exists(os.path.join(local, info)) and not force:
            if os.listdir(os.path.join(local, info)):
                # Data already present locally and force is off -- skip download.
                print(
                    f"Local directory: {os.path.join(local,info)} already exists. Not pulling s3 data. Delete contents to re-download data."
                )
                return  # TODO: make sure this doesn't append None a bunch of times to a list in a loop on this function

    # get client with credentials if they exist
    client = s3_client(service="s3")

    # check that bucket exists
    bkts = [bk["Name"] for bk in client.list_buckets()["Buckets"]]
    if bucket not in bkts:
        raise ValueError(
            "Error: could not locate bucket. Available buckets: " + ", ".join(bkts)
        )

    # Normalize to exactly one trailing slash so the key prefix matches.
    info = info.rstrip("/") + "/"
    bpath = get_matching_s3_objects(bucket, f"{remote}/{info}")

    # go through all folders inside of remote directory and download relevant files
    for obj in bpath:
        bdir, data = os.path.split(obj)
        # Recreate the remote layout under `local`, minus the remote root.
        localpath = os.path.join(local, bdir.replace(f"{remote}/", ""))
        # Make directory for data if it doesn't exist
        if not os.path.exists(localpath):
            os.makedirs(localpath)
        if not os.path.exists(f"{localpath}/{data}"):
            print(f"Downloading {bdir}/{data} from {bucket} s3 bucket...")
            # Download file
            client.download_file(bucket, f"{bdir}/{data}", f"{localpath}/{data}")
            if os.path.exists(f"{localpath}/{data}"):
                print("Success!")
            else:
                print("Error: File not downloaded")
        else:
            print(f"File {data} already exists at {localpath}/{data}")
def s3_push_data(bucket, remote, outDir, subject=None, session=None, creds=True):
    """Pushes all files in a given folder to a specified S3 bucket

    Parameters
    ----------
    bucket : str
        s3 bucket you are pushing files to
    remote : str
        The path to the directory on your S3 bucket containing the data used in the pipeline, the string in 'modifier' will be put after the
        first directory specified in the path as its own directory (/remote[0]/modifier/remote[1]/...)
    outDir : str
        Path of local directory being pushed to the s3 bucket
    subject : str
        subject we're pushing with
    session : str
        session we're pushing with
    creds : bool, optional
        Whether s3 credentials are being provided, may fail to push big files if False, by default True
    """
    # NOTE(review): `creds` is accepted but never read in this body --
    # confirm whether it is still needed by callers.
    # get client with credentials if they exist
    client = s3_client(service="s3")
    # check that bucket exists
    bkts = [bk["Name"] for bk in client.list_buckets()["Buckets"]]
    if bucket not in bkts:
        sys.exit(
            "Error: could not locate bucket. Available buckets: " + ", ".join(bkts)
        )
    # List all files and upload
    for root, _, files in os.walk(outDir):
        for file_ in files:
            if not "tmp/" in root:  # exclude things in the tmp/ folder
                # Only push files that belong to the requested subject/session.
                if f"sub-{subject}/ses-{session}" in root:
                    print(f"Uploading: {os.path.join(root, file_)}")
                    spath = root[root.find("sub-") :]  # remove everything before /sub-*
                    client.upload_file(
                        os.path.join(root, file_),
                        bucket,
                        f"{remote}/{os.path.join(spath,file_)}",
                        ExtraArgs={"ACL": "public-read"},
                    )
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from os.path import join as joinpath
import ffmpymedia
from tests import TEST_FILE_PATH
class TestMediaUse(unittest.TestCase):
    """User-story style integration tests for the ffmpymedia public API.

    Requires the sample media files under TEST_FILE_PATH to be present.
    """

    def test_compare_two_files(self):
        # User1 wants to compare two media files to see if their stream layouts are the same.
        # First he passes the same file to the API to see if they compare as the same
        filename1 = filename2 = joinpath(TEST_FILE_PATH, 'SIN001 Sinuca.mp4')
        file1 = ffmpymedia.MediaFile.parse_file(filename1)
        file2 = ffmpymedia.MediaFile.parse_file(filename2)
        self.assertTrue(file1 == file2)
        # Then he wants to be sure and see that the difference between the two files is {}
        self.assertEqual(ffmpymedia.MediaFile.parse_file(filename1).difference(ffmpymedia.MediaFile.parse_file(filename2)), {})
        # Then he decides to try two different files to be sure different files are treated differently
        filename3 = joinpath(TEST_FILE_PATH, 'COLB001 Color Bar.mp4')
        file3 = ffmpymedia.MediaFile.parse_file(filename3)
        self.assertFalse(file1 == file3)
        # As he is very curious, he then wants to see the difference between the files
        self.assertNotEqual(ffmpymedia.MediaFile.parse_file(filename1).difference(ffmpymedia.MediaFile.parse_file(filename3)), {})
        # After all these comparisons, he decided to take a look at the streams of each file.
        print(file1.__repr__())
        print(file2.__repr__())
        print(file3.__repr__())

    def test_media_analyser(self):
        # Developer1 wishes to test the MediaAnalyser API functionality.
        # With that in mind, he decides to try out all 4 API calls from this helper class.
        filename1 = filename2 = joinpath(TEST_FILE_PATH, 'SIN001 Sinuca.mp4')
        filename3 = joinpath(TEST_FILE_PATH, 'COLB001 Color Bar.mp4')
        # NOTE(review): template3 is identical to template1 and is never used
        # below; filename2/filename3 are also unused here -- confirm intent.
        template1 = ffmpymedia.MediaFileTemplate(**{'format_name': 'mov,mp4,m4a,3gp,3g2,mj2', 'duration': '12.0', 'metadata': None, 'start_time': '0.000000', 'streams': [{'type': 'video', 'height': '1080', 'bitrate': '2574', 'metadata': {'handler_name': 'VideoHandler'}, 'codec': 'h264', 'index': '0', 'disposition': {'lyrics': 0, 'default': 1, 'clean_effects': 0, 'karaoke': 0, 'hearing_impaired': 0, 'visual_impaired': 0, 'forced': 0, 'comment': 0, 'dub': 0, 'original': 0, 'attached_pic': 0}, 'codec_tag': '0x31637661', 'codec_tag_string': 'avc1', 'width': '1920', 'sample_aspect_ratio': '1:1', 'pixel_format': 'yuv420p', 'reported_frame_rate': '25', 'display_aspect_ratio': '16:9', 'container_time_base': '12800', 'average_frame_rate': '25', 'codec_time_base': '50', 'language': 'und', 'profile': 'High', 'codec_long_name': 'H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10'}], 'filename': '/home/flaviopontes/PycharmProjects/ffmpymedia/test_files/SIN001 Sinuca.mp4', 'bit_rate': '2577000'})
        template3 = ffmpymedia.MediaFileTemplate(**{'format_name': 'mov,mp4,m4a,3gp,3g2,mj2', 'duration': '12.0', 'metadata': None, 'start_time': '0.000000', 'streams': [{'type': 'video', 'height': '1080', 'bitrate': '2574', 'metadata': {'handler_name': 'VideoHandler'}, 'codec': 'h264', 'index': '0', 'disposition': {'lyrics': 0, 'default': 1, 'clean_effects': 0, 'karaoke': 0, 'hearing_impaired': 0, 'visual_impaired': 0, 'forced': 0, 'comment': 0, 'dub': 0, 'original': 0, 'attached_pic': 0}, 'codec_tag': '0x31637661', 'codec_tag_string': 'avc1', 'width': '1920', 'sample_aspect_ratio': '1:1', 'pixel_format': 'yuv420p', 'reported_frame_rate': '25', 'display_aspect_ratio': '16:9', 'container_time_base': '12800', 'average_frame_rate': '25', 'codec_time_base': '50', 'language': 'und', 'profile': 'High', 'codec_long_name': 'H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10'}], 'filename': '/home/flaviopontes/PycharmProjects/ffmpymedia/test_files/SIN001 Sinuca.mp4', 'bit_rate': '2577000'})
        self.assertFalse(ffmpymedia.MediaAnalyser.compare_media_file_with_template(filename1, template1))
|
#pragma once
#include <MetaNN/facilities/cont_metafuns/helpers.h>
namespace MetaNN::ValueSequential
{
// Contains_<Seq, val>: compile-time membership test -- `value` is true
// iff `val` appears among the elements of the value sequence `TValueSeq`.
template <typename TValueSeq, auto val>
struct Contains_;

template <template<auto...> class TValueCont, auto val, auto... vals>
struct Contains_<TValueCont<vals...>, val>
{
    // Fold expression over equality: true if any element equals `val`.
    constexpr static bool value = ((vals == val) || ...);
};

// Convenience variable template for Contains_.
template <typename TValueSeq, auto val>
constexpr static bool Contains = Contains_<TValueSeq, val>::value;

// Order===================================================================================
namespace NSOrder
{
    template <typename TIndexCont, typename TTypeCont>
    struct impl;

    // Compile-time lookup table: multiply inherits one
    // KVBinder<Int_<value>, Int_<index>> per element, then pulls every
    // `apply` overload into scope so overload resolution can map a value
    // back to its position in the sequence.
    template <template <auto...> typename TTypeCont, auto...TTypes, int...index>
    struct impl<Helper::IndexSequence<index...>, TTypeCont<TTypes...>>
        : Helper::KVBinder<Helper::Int_<TTypes>, Helper::Int_<index>> ...
    {
        using Helper::KVBinder<Helper::Int_<TTypes>, Helper::Int_<index>>::apply ...;
    };
}

// Order_<Con, Req>: `value` is the zero-based position of the value
// `TReq` within the value container `TCon`.
template <typename TCon, auto TReq>
struct Order_;

template <template <auto...> typename TCon, auto... TParams, auto TReq>
struct Order_<TCon<TParams...>, TReq>
{
    using IndexSeq = Helper::MakeIndexSequence<sizeof...(TParams)>;
    using LookUpTable = NSOrder::impl<IndexSeq, TCon<TParams...>>;
    using ReqType = Helper::Int_<TReq>;
    // Selecting the matching inherited apply() overload yields Int_<index>.
    using AimType = decltype(LookUpTable::apply((ReqType*)nullptr));
    constexpr static int value = AimType::value;
};

// Convenience variable template for Order_.
template <typename TCon, auto TReq>
constexpr static int Order = Order_<TCon, TReq>::value;
//=========================================================================================
}
|
"use strict";
const { expect } = require("chai");
const i18n = require("../../src/middleware/i18n");
describe("i18n class check", () => {
  // Tests for the i18n middleware, exercised both in HTTP mode (where it
  // decorates puzzle.http.res with a translator) and in CLI mode (where
  // translation happens directly on the middleware instance).
  let originalPuzzle = null;
  let cwd = "";

  before(() => {
    // Swap the real global puzzle for the middleware test double and run
    // from the toolkit starter app so locale files resolve correctly.
    originalPuzzle = global.puzzle;
    global.puzzle = require("./middlewarePuzzle");
    cwd = process.cwd();
    process.chdir(`${__dirname}/../_toolkit/starter`);
    delete puzzle.http.res.__;
  });

  after(() => {
    // Undo the global/cwd changes made in before().
    global.puzzle = originalPuzzle;
    process.chdir(cwd);
  });

  beforeEach(() => {
    // Each test starts with a clean module registry.
    puzzle.modules = {};
  });

  it("className should be i18n", () => {
    const pobj = new i18n();
    expect(pobj.className).to.be.a("string");
    expect(pobj.className).to.equal("i18n");
  });

  it("i18n should register the puzzle in http mode", () => {
    const pobj = new i18n();
    expect(puzzle.http.res.__).to.be.undefined;
    pobj.use(puzzle);
    expect(puzzle.modules.i18n).to.deep.equals(pobj);
    // In HTTP mode the translator must be attached to the response object.
    expect(puzzle.http.res.__).to.not.be.undefined;
    expect(puzzle.http.res.__("test")).to.equals("test");
    expect(puzzle.http.res.__("test_with_params", 1)).to.equals("test with params [1]");
  });

  it("i18n should register the puzzle in cli mode", () => {
    const pobj = new i18n();
    const httpBefore = puzzle.http;
    // Removing puzzle.http makes the middleware take the CLI code path.
    delete puzzle.http;
    pobj.use(puzzle);
    expect(puzzle.modules.i18n).to.deep.equals(pobj);
    expect(puzzle.http).to.be.undefined;
    expect(pobj.__("test")).to.equals("test");
    expect(pobj.__("test_with_params", 1)).to.equals("test with params [1]");
    expect(pobj._locale).to.equals(puzzle.config.i18n.defaultLocale);
    puzzle.http = httpBefore;
  });

  it("i18n in CLI mode should look after PUZZLE_LANG and register a valid lang", () => {
    const pobj = new i18n();
    const httpBefore = puzzle.http;
    delete puzzle.http;
    // "ro" is an available locale, so it must override the default.
    process.env["PUZZLE_LANG"] = "ro";
    pobj.use(puzzle);
    expect(pobj._locale).to.not.equals(puzzle.config.i18n.defaultLocale);
    expect(pobj._locale).to.equals("ro");
    delete process.env["PUZZLE_LANG"];
    puzzle.http = httpBefore;
  });

  it("i18n in CLI mode should look after PLANG and register a valid lang", () => {
    const pobj = new i18n();
    const httpBefore = puzzle.http;
    delete puzzle.http;
    // PLANG is the short-form environment variable alternative.
    process.env["PLANG"] = "ro";
    pobj.use(puzzle);
    expect(pobj._locale).to.not.equals(puzzle.config.i18n.defaultLocale);
    expect(pobj._locale).to.equals("ro");
    delete process.env["PLANG"];
    puzzle.http = httpBefore;
  });

  it("i18n in CLI mode should look after PUZZLE_LANG and do not register an invalid lang", () => {
    const pobj = new i18n();
    const httpBefore = puzzle.http;
    delete puzzle.http;
    // "de" is not an available locale, so the default ("en") must win.
    process.env["PUZZLE_LANG"] = "de";
    pobj.use(puzzle);
    expect(pobj._locale).to.not.equals("de");
    expect(pobj._locale).to.equals("en");
    delete process.env["PUZZLE_LANG"];
    puzzle.http = httpBefore;
  });

  it("i18n translate various label types", () => {
    const pobj = new i18n();
    pobj.use(puzzle);
    // Plain keys, parameterized keys and dotted (nested) keys.
    expect(pobj.__("test")).to.equals("test");
    expect(pobj.__("test_with_params", 1)).to.equals("test with params [1]");
    expect(pobj.__("sample.key")).to.equals("sample value");
    expect(pobj.__("drop.down")).to.equals("key");
    // Missing keys fall back to the key itself.
    expect(pobj.__("drop.up")).to.equals("drop.up");
    expect(pobj.__("drop.null")).to.equals("drop.null");
    // Non-string labels: Error yields its message, other objects their name.
    expect(pobj.__(new Error("this is a message"))).to.equals("this is a message");
    expect(pobj.__(new Object())).to.equals("Object");
  });

  it("i18n __n should return the same string as __", () => {
    const pobj = new i18n();
    pobj.use(puzzle);
    expect(pobj.__n("test")).to.equals(pobj.__("test"));
    expect(pobj.__n("test_with_params", 1)).to.equals(pobj.__("test_with_params", 1));
    expect(pobj.__n("sample.key")).to.equals(pobj.__("sample.key"));
    expect(pobj.__n("drop.down")).to.equals(pobj.__("drop.down"));
  });
});
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys

# ensure graphscope.proto preponderate over outside `proto` directory.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

# Import every generated protobuf/grpc module once so the generated code is
# loaded while the sys.path tweak above is in effect.
from . import attr_value_pb2
from . import coordinator_service_pb2
from . import coordinator_service_pb2_grpc
from . import data_types_pb2
from . import ddl_service_pb2
from . import ddl_service_pb2_grpc
from . import engine_service_pb2
from . import engine_service_pb2_grpc
from . import error_codes_pb2
from . import graph_def_pb2
from . import message_pb2
from . import op_def_pb2
from . import query_args_pb2
from . import types_pb2

# Delete the local names again: the modules stay importable as submodules
# (they remain in sys.modules) but do not clutter this package's namespace.
del attr_value_pb2
del coordinator_service_pb2
del coordinator_service_pb2_grpc
del data_types_pb2
del ddl_service_pb2
del ddl_service_pb2_grpc
del engine_service_pb2
del engine_service_pb2_grpc
del error_codes_pb2
del graph_def_pb2
del message_pb2
del op_def_pb2
del query_args_pb2
del types_pb2

# Remove the temporary path entry added above.
sys.path.pop(0)
|
from django.apps import AppConfig
class UsersConfig(AppConfig):
    """Django application configuration for the users app."""

    name = "django_test.users"  # dotted import path of the app package
    verbose_name = "Users"  # human-readable name (e.g. in the admin)

    def ready(self):
        """Override this to put in:
        Users system checks
        Users signal registration
        """
        # NOTE(review): this imports `users.signals` while the app is named
        # "django_test.users" -- confirm the import path is intentional.
        try:
            import users.signals  # noqa F401
        except ImportError:
            # Signals module is optional; ignore if it does not exist.
            pass
|