file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
test_framework.py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2014-2019 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from collections import deque
from enum import Enum
import logging
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
import time
from concurrent.futures import ThreadPoolExecutor
from .util import (
PortSeed,
GENESISTIME,
MAX_NODES,
assert_equal,
bitcoind_processes,
check_json_precision,
connect_nodes_bi,
connect_nodes,
copy_datadir,
disable_mocktime,
disconnect_nodes,
enable_coverage,
get_mocktime,
get_rpc_proxy,
initialize_datadir,
get_datadir_path,
log_filename,
p2p_port,
rpc_url,
set_cache_mocktime,
set_genesis_mocktime,
set_mocktime,
set_node_times,
satoshi_round,
_start_node,
_start_nodes,
_stop_node,
_stop_nodes,
sync_blocks,
sync_mempools,
sync_masternodes,
wait_for_bitcoind_start,
wait_to_sync)
from .authproxy import JSONRPCException
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework(object):
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the following methods:
- __init__()
- add_options()
- setup_chain()
- setup_network()
- run_test()
The main() method should not be overridden.
This class also contains various public and private helper methods."""
# Methods to override in subclass test scripts.
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = None
def add_options(self, parser):
pass
def setup_chain(self):
self.log.info("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean(self.options.tmpdir, self.num_nodes)
set_genesis_mocktime()
else:
self._initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
set_cache_mocktime()
def setup_network(self):
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self, stderr=None):
|
def run_test(self):
raise NotImplementedError
# Main function. This should not be overridden by the subclass test scripts.
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave dashds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop dashds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing dashd/dash-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
# Set up temp directory and start logging
if self.options.tmpdir:
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if not self.options.noshutdown:
self.log.info("Stopping nodes")
try:
if self.nodes:
self.stop_nodes()
except BaseException as e:
success = False
self.log.exception("Unexpected exception caught during shutdown")
else:
self.log.info("Note: dashds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = [self.options.tmpdir + "/test_framework.log"]
filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for fn in filenames:
try:
with open(fn, 'r') as f:
print("From" , fn, ":")
print("".join(deque(f, MAX_LINES_TO_PRINT)))
except OSError:
print("Opening file %s failed." % fn)
traceback.print_exc()
if success == TestStatus.PASSED:
self.log.info("Tests successful")
sys.exit(TEST_EXIT_PASSED)
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
sys.exit(TEST_EXIT_SKIPPED)
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
logging.shutdown()
sys.exit(TEST_EXIT_FAILED)
# Public helper methods. These can be accessed by the subclass test scripts.
def start_node(self, i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
return _start_node(i, dirname, extra_args, rpchost, timewait, binary, stderr)
def start_nodes(self, num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
return _start_nodes(num_nodes, dirname, extra_args, rpchost, timewait, binary, stderr)
def stop_node(self, num_node):
_stop_node(self.nodes[num_node], num_node)
def stop_nodes(self):
_stop_nodes(self.nodes)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt = '%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self, test_dir, num_nodes, cachedir, extra_args=None, stderr=None):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join(cachedir, "node" + str(i))):
shutil.rmtree(os.path.join(cachedir, "node" + str(i)))
# Create cache directories, run dashds:
set_genesis_mocktime()
for i in range(MAX_NODES):
datadir = initialize_datadir(cachedir, i)
args = [os.getenv("DASHD", "dashd"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0", "-mocktime="+str(GENESISTIME)]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
if extra_args is not None:
args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr)
self.log.debug("initialize_chain: dashd started, waiting for RPC to come up")
wait_for_bitcoind_start(bitcoind_processes[i], datadir, i)
self.log.debug("initialize_chain: RPC successfully started")
self.nodes = []
for i in range(MAX_NODES):
try:
self.nodes.append(get_rpc_proxy(rpc_url(get_datadir_path(cachedir, i), i), i))
except:
self.log.exception("Error connecting to node %d" % i)
sys.exit(1)
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
block_time = GENESISTIME
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 156
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename(cachedir, i, "debug.log"))
os.remove(log_filename(cachedir, i, "db.log"))
os.remove(log_filename(cachedir, i, "peers.dat"))
os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
for i in range(num_nodes):
from_dir = os.path.join(cachedir, "node" + str(i))
to_dir = os.path.join(test_dir, "node" + str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in dsah.conf
def _initialize_chain_clean(self, test_dir, num_nodes):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(num_nodes):
initialize_datadir(test_dir, i)
MASTERNODE_COLLATERAL = 1000
class MasternodeInfo:
def __init__(self, proTxHash, ownerAddr, votingAddr, pubKeyOperator, keyOperator, collateral_address, collateral_txid, collateral_vout):
self.proTxHash = proTxHash
self.ownerAddr = ownerAddr
self.votingAddr = votingAddr
self.pubKeyOperator = pubKeyOperator
self.keyOperator = keyOperator
self.collateral_address = collateral_address
self.collateral_txid = collateral_txid
self.collateral_vout = collateral_vout
class DashTestFramework(BitcoinTestFramework):
def __init__(self, num_nodes, masterodes_count, extra_args, fast_dip3_enforcement=False):
super().__init__()
self.mn_count = masterodes_count
self.num_nodes = num_nodes
self.mninfo = []
self.setup_clean_chain = True
self.is_network_split = False
# additional args
self.extra_args = extra_args
self.extra_args += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]
self.fast_dip3_enforcement = fast_dip3_enforcement
if fast_dip3_enforcement:
self.extra_args += ["-dip3params=30:50"]
def create_simple_node(self):
idx = len(self.nodes)
args = self.extra_args
self.nodes.append(self.start_node(idx, self.options.tmpdir, args))
for i in range(0, idx):
connect_nodes(self.nodes[i], idx)
def prepare_masternodes(self):
for idx in range(0, self.mn_count):
self.prepare_masternode(idx)
def prepare_masternode(self, idx):
bls = self.nodes[0].bls('generate')
address = self.nodes[0].getnewaddress()
txid = self.nodes[0].sendtoaddress(address, MASTERNODE_COLLATERAL)
txraw = self.nodes[0].getrawtransaction(txid, True)
collateral_vout = 0
for vout_idx in range(0, len(txraw["vout"])):
vout = txraw["vout"][vout_idx]
if vout["value"] == MASTERNODE_COLLATERAL:
collateral_vout = vout_idx
self.nodes[0].lockunspent(False, [{'txid': txid, 'vout': collateral_vout}])
# send to same address to reserve some funds for fees
self.nodes[0].sendtoaddress(address, 0.001)
ownerAddr = self.nodes[0].getnewaddress()
votingAddr = self.nodes[0].getnewaddress()
rewardsAddr = self.nodes[0].getnewaddress()
port = p2p_port(len(self.nodes) + idx)
if (idx % 2) == 0:
self.nodes[0].lockunspent(True, [{'txid': txid, 'vout': collateral_vout}])
proTxHash = self.nodes[0].protx('register_fund', address, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
else:
self.nodes[0].generate(1)
proTxHash = self.nodes[0].protx('register', txid, collateral_vout, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
self.nodes[0].generate(1)
self.mninfo.append(MasternodeInfo(proTxHash, ownerAddr, votingAddr, bls['public'], bls['secret'], address, txid, collateral_vout))
self.sync_all()
def remove_mastermode(self, idx):
mn = self.mninfo[idx]
rawtx = self.nodes[0].createrawtransaction([{"txid": mn.collateral_txid, "vout": mn.collateral_vout}], {self.nodes[0].getnewaddress(): 999.9999})
rawtx = self.nodes[0].signrawtransaction(rawtx)
self.nodes[0].sendrawtransaction(rawtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
self.mninfo.remove(mn)
def prepare_datadirs(self):
# stop faucet node so that we can copy the datadir
self.stop_node(0)
start_idx = len(self.nodes)
for idx in range(0, self.mn_count):
copy_datadir(0, idx + start_idx, self.options.tmpdir)
# restart faucet node
self.nodes[0] = self.start_node(0, self.options.tmpdir, self.extra_args)
def start_masternodes(self):
start_idx = len(self.nodes)
for idx in range(0, self.mn_count):
self.nodes.append(None)
executor = ThreadPoolExecutor(max_workers=20)
def do_start(idx):
args = ['-masternode=1',
'-masternodeblsprivkey=%s' % self.mninfo[idx].keyOperator] + self.extra_args
node = self.start_node(idx + start_idx, self.options.tmpdir, args)
self.mninfo[idx].nodeIdx = idx + start_idx
self.mninfo[idx].node = node
self.nodes[idx + start_idx] = node
wait_to_sync(node, True)
def do_connect(idx):
for i in range(0, idx + 1):
connect_nodes(self.nodes[idx + start_idx], i)
jobs = []
# start up nodes in parallel
for idx in range(0, self.mn_count):
jobs.append(executor.submit(do_start, idx))
# wait for all nodes to start up
for job in jobs:
job.result()
jobs.clear()
# connect nodes in parallel
for idx in range(0, self.mn_count):
jobs.append(executor.submit(do_connect, idx))
# wait for all nodes to connect
for job in jobs:
job.result()
jobs.clear()
sync_masternodes(self.nodes, True)
executor.shutdown()
def setup_network(self):
self.nodes = []
# create faucet node for collateral and transactions
self.nodes.append(self.start_node(0, self.options.tmpdir, self.extra_args))
required_balance = MASTERNODE_COLLATERAL * self.mn_count + 1
while self.nodes[0].getbalance() < required_balance:
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
# create connected simple nodes
for i in range(0, self.num_nodes - self.mn_count - 1):
self.create_simple_node()
sync_masternodes(self.nodes, True)
# activate DIP3
if not self.fast_dip3_enforcement:
while self.nodes[0].getblockcount() < 500:
self.nodes[0].generate(10)
self.sync_all()
# create masternodes
self.prepare_masternodes()
self.prepare_datadirs()
self.start_masternodes()
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
# sync nodes
self.sync_all()
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
mn_info = self.nodes[0].masternodelist("status")
assert (len(mn_info) == self.mn_count)
for status in mn_info.values():
assert (status == 'ENABLED')
def create_raw_tx(self, node_from, node_to, amount, min_inputs, max_inputs):
assert (min_inputs <= max_inputs)
# fill inputs
inputs = []
balances = node_from.listunspent()
in_amount = 0.0
last_amount = 0.0
for tx in balances:
if len(inputs) < min_inputs:
input = {}
input["txid"] = tx['txid']
input['vout'] = tx['vout']
in_amount += float(tx['amount'])
inputs.append(input)
elif in_amount > amount:
break
elif len(inputs) < max_inputs:
input = {}
input["txid"] = tx['txid']
input['vout'] = tx['vout']
in_amount += float(tx['amount'])
inputs.append(input)
else:
input = {}
input["txid"] = tx['txid']
input['vout'] = tx['vout']
in_amount -= last_amount
in_amount += float(tx['amount'])
inputs[-1] = input
last_amount = float(tx['amount'])
assert (len(inputs) >= min_inputs)
assert (len(inputs) <= max_inputs)
assert (in_amount >= amount)
# fill outputs
receiver_address = node_to.getnewaddress()
change_address = node_from.getnewaddress()
fee = 0.001
outputs = {}
outputs[receiver_address] = satoshi_round(amount)
outputs[change_address] = satoshi_round(in_amount - amount - fee)
rawtx = node_from.createrawtransaction(inputs, outputs)
ret = node_from.signrawtransaction(rawtx)
decoded = node_from.decoderawtransaction(ret['hex'])
ret = {**decoded, **ret}
return ret
def wait_for_instantlock(self, txid, node):
# wait for instantsend locks
start = time.time()
locked = False
while True:
try:
is_tx = node.getrawtransaction(txid, True)
if is_tx['instantlock']:
locked = True
break
except:
# TX not received yet?
pass
if time.time() > start + 10:
break
time.sleep(0.5)
return locked
def wait_for_sporks_same(self, timeout=30):
st = time.time()
while time.time() < st + timeout:
if self.check_sporks_same():
return
time.sleep(0.5)
raise AssertionError("wait_for_sporks_same timed out")
def check_sporks_same(self):
sporks = self.nodes[0].spork('show')
for node in self.nodes[1:]:
sporks2 = node.spork('show')
if sporks != sporks2:
return False
return True
def wait_for_quorum_phase(self, phase, check_received_messages, check_received_messages_count, timeout=30):
t = time.time()
while time.time() - t < timeout:
all_ok = True
for mn in self.mninfo:
s = mn.node.quorum("dkgstatus")["session"]
if "llmq_5_60" not in s:
all_ok = False
break
s = s["llmq_5_60"]
if "phase" not in s:
all_ok = False
break
if s["phase"] != phase:
all_ok = False
break
if check_received_messages is not None:
if s[check_received_messages] < check_received_messages_count:
all_ok = False
break
if all_ok:
return
time.sleep(0.1)
raise AssertionError("wait_for_quorum_phase timed out")
def wait_for_quorum_commitment(self, timeout = 15):
t = time.time()
while time.time() - t < timeout:
all_ok = True
for node in self.nodes:
s = node.quorum("dkgstatus")
if "minableCommitments" not in s:
all_ok = False
break
s = s["minableCommitments"]
if "llmq_5_60" not in s:
all_ok = False
break
if all_ok:
return
time.sleep(0.1)
raise AssertionError("wait_for_quorum_commitment timed out")
def mine_quorum(self, expected_contributions=5, expected_complaints=0, expected_justifications=0, expected_commitments=5):
quorums = self.nodes[0].quorum("list")
# move forward to next DKG
skip_count = 24 - (self.nodes[0].getblockcount() % 24)
if skip_count != 0:
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(skip_count)
sync_blocks(self.nodes)
# Make sure all reached phase 1 (init)
self.wait_for_quorum_phase(1, None, 0)
# Give nodes some time to connect to neighbors
time.sleep(2)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(2)
sync_blocks(self.nodes)
# Make sure all reached phase 2 (contribute) and received all contributions
self.wait_for_quorum_phase(2, "receivedContributions", expected_contributions)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(2)
sync_blocks(self.nodes)
# Make sure all reached phase 3 (complain) and received all complaints
self.wait_for_quorum_phase(3, "receivedComplaints", expected_complaints)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(2)
sync_blocks(self.nodes)
# Make sure all reached phase 4 (justify)
self.wait_for_quorum_phase(4, "receivedJustifications", expected_justifications)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(2)
sync_blocks(self.nodes)
# Make sure all reached phase 5 (commit)
self.wait_for_quorum_phase(5, "receivedPrematureCommitments", expected_commitments)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(2)
sync_blocks(self.nodes)
# Make sure all reached phase 6 (mining)
self.wait_for_quorum_phase(6, None, 0)
# Wait for final commitment
self.wait_for_quorum_commitment()
# mine the final commitment
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
while quorums == self.nodes[0].quorum("list"):
time.sleep(2)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].generate(1)
sync_blocks(self.nodes)
new_quorum = self.nodes[0].quorum("list", 1)["llmq_5_60"][0]
# Mine 8 (SIGN_HEIGHT_OFFSET) more blocks to make sure that the new quorum gets eligable for signing sessions
self.nodes[0].generate(8)
sync_blocks(self.nodes)
return new_quorum
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class ComparisonTestFramework(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "dashd"),
help="dashd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "dashd"),
help="dashd binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']]*self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.nodes = self.start_nodes(
self.num_nodes, self.options.tmpdir, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
| extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.nodes = _start_nodes(self.num_nodes, self.options.tmpdir, extra_args, stderr=stderr) | identifier_body |
mod.rs | pub mod build;
pub mod config;
pub mod dev;
pub mod generate;
pub mod init;
pub mod kv;
pub mod login;
pub mod logout;
pub mod preview;
pub mod publish;
pub mod r2;
pub mod route;
pub mod secret;
pub mod subdomain;
pub mod tail;
pub mod whoami;
pub mod exec {
pub use super::build::build;
pub use super::config::configure;
pub use super::dev::dev;
pub use super::generate::generate;
pub use super::init::init;
pub use super::kv::kv_bulk;
pub use super::kv::kv_key;
pub use super::kv::kv_namespace;
pub use super::login::login;
pub use super::logout::logout;
pub use super::preview::preview;
pub use super::publish::publish;
pub use super::r2::r2_bucket;
pub use super::route::route;
pub use super::secret::secret;
pub use super::subdomain::subdomain;
pub use super::tail::tail;
pub use super::whoami::whoami;
}
use std::net::IpAddr;
use std::path::PathBuf;
use std::str::FromStr;
use crate::commands::dev::Protocol;
use crate::commands::tail::websocket::TailFormat;
use crate::preview::HttpMethod;
use crate::settings::toml::migrations::{
DurableObjectsMigration, Migration, MigrationTag, Migrations, RenameClass, TransferClass,
};
use crate::settings::toml::TargetType;
use clap::AppSettings;
use structopt::StructOpt;
use url::Url;
#[derive(Debug, Clone, StructOpt)]
#[structopt(
name = "wrangler",
author = "The Wrangler Team <wrangler@cloudflare.com>",
setting = AppSettings::ArgRequiredElseHelp,
setting = AppSettings::DeriveDisplayOrder,
setting = AppSettings::VersionlessSubcommands,
)]
pub struct Cli {
/// Toggle verbose output (when applicable)
#[structopt(long, global = true)]
pub verbose: bool,
/// Path to configuration file.
#[structopt(long, short = "c", default_value = "wrangler.toml", global = true)]
pub config: PathBuf,
/// Environment to perform a command on.
#[structopt(name = "env", long, short = "e", global = true)]
pub environment: Option<String>,
#[structopt(subcommand)]
pub command: Command,
}
#[derive(Debug, Clone, StructOpt)]
pub enum | {
/// Interact with your Workers KV Namespaces
#[structopt(name = "kv:namespace", setting = AppSettings::SubcommandRequiredElseHelp)]
KvNamespace(kv::KvNamespace),
/// Individually manage Workers KV key-value pairs
#[structopt(name = "kv:key", setting = AppSettings::SubcommandRequiredElseHelp)]
KvKey(kv::KvKey),
/// Interact with multiple Workers KV key-value pairs at once
#[structopt(name = "kv:bulk", setting = AppSettings::SubcommandRequiredElseHelp)]
KvBulk(kv::KvBulk),
/// Interact with your Workers R2 Buckets
#[structopt(setting = AppSettings::SubcommandRequiredElseHelp)]
R2(r2::R2),
/// List or delete worker routes.
#[structopt(name = "route", setting = AppSettings::SubcommandRequiredElseHelp)]
Route(route::Route),
/// Generate a secret that can be referenced in the worker script
#[structopt(name = "secret", setting = AppSettings::SubcommandRequiredElseHelp)]
Secret(secret::Secret),
/// Generate a new worker project
Generate {
/// The name of your worker!
#[structopt(index = 1, default_value = "worker")]
name: String,
/// A link to a GitHub template! Defaults to https://github.com/cloudflare/worker-template
#[structopt(index = 2)]
template: Option<String>,
/// The type of project you want generated
#[structopt(name = "type", long, short = "t")]
target_type: Option<TargetType>,
/// Initializes a Workers Sites project. Overrides 'type' and 'template'
#[structopt(long, short = "s")]
site: bool,
},
/// Create a wrangler.toml for an existing project
Init {
/// The name of your worker!
#[structopt(index = 1)]
name: Option<String>,
/// The type of project you want generated
#[structopt(name = "type", long, short = "t")]
target_type: Option<TargetType>,
/// Initializes a Workers Sites project. Overrides `type` and `template`
#[structopt(long, short = "s")]
site: bool,
},
/// Build your worker
Build,
/// Preview your code temporarily on cloudflareworkers.com
Preview {
/// Type of request to preview your worker with (get, post)
#[structopt(index = 1, default_value = "get")]
method: HttpMethod,
/// URL to open in the worker preview
#[structopt(short = "u", long, default_value = "https://example.com")]
url: Url,
/// Body string to post to your preview worker request
#[structopt(index = 2)]
body: Option<String>,
/// Watch your project for changes and update the preview automagically
#[structopt(long)]
watch: bool,
/// Don't open the browser on preview
#[structopt(long)]
headless: bool,
},
/// Start a local server for developing your worker
Dev {
/// Host to forward requests to, defaults to the zone of project or to
/// tutorial.cloudflareworkers.com if unauthenticated.
#[structopt(long, short = "h")]
host: Option<String>,
/// IP to listen on. Defaults to 127.0.0.1
#[structopt(long, short = "i")]
ip: Option<IpAddr>,
/// Port to listen on. Defaults to 8787
#[structopt(long, short = "p")]
port: Option<u16>,
/// Sets the protocol on which the wrangler dev listens, by default this is http
/// but can be set to https
#[structopt(name = "local-protocol")]
local_protocol: Option<Protocol>,
/// Sets the protocol on which requests are sent to the host, by default this is https
/// but can be set to http
#[structopt(name = "upstream-protocol")]
upstream_protocol: Option<Protocol>,
/// Inspect the worker using Chrome DevTools
#[structopt(long)]
inspect: bool,
/// Run wrangler dev unauthenticated
#[structopt(long)]
unauthenticated: bool,
},
/// Publish your worker to the orange cloud
#[structopt(name = "publish")]
Publish {
/// [deprecated] alias of wrangler publish
#[structopt(long, hidden = true)]
release: bool,
#[structopt(possible_value = "json")]
output: Option<String>,
#[structopt(flatten)]
migration: AdhocMigration,
},
/// Authenticate Wrangler with a Cloudflare API Token or Global API Key
#[structopt(name = "config")]
Config {
/// Use an email and global API key for authentication.
/// This is not recommended; use API tokens (the default) if possible
#[structopt(name = "api-key", long)]
api_key: bool,
/// Do not verify provided credentials before writing out Wrangler config file
#[structopt(name = "no-verify", long)]
no_verify: bool,
},
/// Configure your workers.dev subdomain
#[structopt(name = "subdomain")]
Subdomain {
/// The subdomain on workers.dev you'd like to reserve
#[structopt(name = "name", index = 1)]
name: Option<String>,
},
/// Retrieve your user info and test your auth config
#[structopt(name = "whoami")]
Whoami,
/// View a stream of logs from a published worker
#[structopt(name = "tail")]
Tail {
/// Name of the worker to tail
#[structopt(index = 1)]
name: Option<String>,
/// Output format for log messages
#[structopt(long, short = "f", default_value = "json", possible_values = &["json", "pretty"])]
format: TailFormat,
/// Stops the tail after receiving the first log (useful for testing)
#[structopt(long)]
once: bool,
/// Adds a sampling rate (0.01 for 1%)
#[structopt(long = "sampling-rate", default_value = "1")]
sampling_rate: f64,
/// Filter by invocation status
#[structopt(long, possible_values = &["ok", "error", "canceled"])]
status: Vec<String>,
/// Filter by HTTP method
#[structopt(long)]
method: Vec<String>,
/// Filter by HTTP header
#[structopt(long)]
header: Vec<String>,
/// Filter by IP address ("self" to filter your own IP address)
#[structopt(long = "ip-address", parse(try_from_str = parse_ip_address))]
ip_address: Vec<String>,
/// Filter by a text match in console.log messages
#[structopt(long)]
search: Option<String>,
/// Set the URL to forward log messages
#[structopt(hidden = true)]
url: Option<Url>,
/// Deprecated, no longer used.
#[structopt(hidden = true, long = "port", short = "p")]
tunnel_port: Option<u16>,
/// Deprecated, no longer used.
#[structopt(hidden = true, long = "metrics")]
metrics_port: Option<u16>,
},
/// Authenticate wrangler with your Cloudflare username and password
#[structopt(name = "login")]
Login {
/// Allows to choose set of scopes
#[structopt(name = "scopes", long, possible_values = login::SCOPES_LIST.as_ref())]
scopes: Vec<String>,
/// List all scopes
#[structopt(name = "scopes-list", long)]
scopes_list: bool,
},
/// Logout from your current authentication method and remove any configuration files.
/// It does not logout if you have authenticated wrangler through environment variables.
#[structopt(name = "logout")]
Logout,
}
#[derive(Debug, Clone, StructOpt)]
pub struct AdhocMigration {
/// Allow durable objects to be created from a class in your script
#[structopt(name = "new-class", long, number_of_values = 1)]
new_class: Vec<String>,
/// Delete all durable objects associated with a class in your script
#[structopt(name = "delete-class", long, number_of_values = 1)]
delete_class: Vec<String>,
/// Rename a durable object class
#[structopt(name = "rename-class", long, number_of_values = 2, value_names(&["from class", "to class"]))]
rename_class: Vec<String>,
/// Transfer all durable objects associated with a class in another script to a class in
/// this script
#[structopt(name = "transfer-class", long, number_of_values = 3, value_names(&["from script", "from class", "to class"]))]
transfer_class: Vec<String>,
/// Specify the existing migration tag for the script.
#[structopt(name = "old-tag", long)]
old_tag: Option<String>,
/// Specify the new migration tag for the script
#[structopt(name = "new-tag", long)]
new_tag: Option<String>,
}
impl AdhocMigration {
pub fn into_migrations(self) -> Option<Migrations> {
let migration = DurableObjectsMigration {
new_classes: self.new_class,
deleted_classes: self.delete_class,
renamed_classes: self
.rename_class
.chunks_exact(2)
.map(|chunk| {
let (from, to) = if let [from, to] = chunk {
(from.clone(), to.clone())
} else {
unreachable!("Chunks exact returned a slice with a length not equal to 2")
};
RenameClass { from, to }
})
.collect(),
transferred_classes: self
.transfer_class
.chunks_exact(3)
.map(|chunk| {
let (from_script, from, to) = if let [from_script, from, to] = chunk {
(from_script.clone(), from.clone(), to.clone())
} else {
unreachable!("Chunks exact returned a slice with a length not equal to 3")
};
TransferClass {
from,
from_script,
to,
}
})
.collect(),
};
let is_migration_empty = migration.new_classes.is_empty()
&& migration.deleted_classes.is_empty()
&& migration.renamed_classes.is_empty()
&& migration.transferred_classes.is_empty();
if !is_migration_empty || self.old_tag.is_some() || self.new_tag.is_some() {
let migration = if !is_migration_empty {
Some(Migration {
durable_objects: migration,
})
} else {
None
};
Some(Migrations::Adhoc {
script_tag: MigrationTag::Unknown,
provided_old_tag: self.old_tag,
new_tag: self.new_tag,
migration,
})
} else {
None
}
}
}
fn parse_ip_address(input: &str) -> Result<String, anyhow::Error> {
match input {
"self" => Ok(String::from("self")),
address => match IpAddr::from_str(address) {
Ok(_) => Ok(address.to_owned()),
Err(err) => anyhow::bail!("{}: {}", err, input),
},
}
}
#[cfg(test)]
mod tests {
use super::*;
fn rename_class(tag: &str) -> RenameClass {
RenameClass {
from: format!("renameFrom{}", tag),
to: format!("renameTo{}", tag),
}
}
fn transfer_class(tag: &str) -> TransferClass {
TransferClass {
from: format!("transferFromClass{}", tag),
from_script: format!("transferFromScript{}", tag),
to: format!("transferToClass{}", tag),
}
}
#[test]
fn adhoc_migration_parsing() {
let command = Cli::from_iter(&[
"wrangler",
"publish",
"--old-tag",
"oldTag",
"--new-tag",
"newTag",
"--new-class",
"newA",
"--new-class",
"newB",
"--delete-class",
"deleteA",
"--delete-class",
"deleteB",
"--rename-class",
"renameFromA",
"renameToA",
"--rename-class",
"renameFromB",
"renameToB",
"--transfer-class",
"transferFromScriptA",
"transferFromClassA",
"transferToClassA",
"--transfer-class",
"transferFromScriptB",
"transferFromClassB",
"transferToClassB",
])
.command;
if let Command::Publish { migration, .. } = command {
assert_eq!(
migration.into_migrations(),
Some(Migrations::Adhoc {
script_tag: MigrationTag::Unknown,
provided_old_tag: Some(String::from("oldTag")),
new_tag: Some(String::from("newTag")),
migration: Some(Migration {
durable_objects: DurableObjectsMigration {
new_classes: vec![String::from("newA"), String::from("newB")],
deleted_classes: vec![String::from("deleteA"), String::from("deleteB")],
renamed_classes: vec![rename_class("A"), rename_class("B")],
transferred_classes: vec![transfer_class("A"), transfer_class("B")],
}
})
})
);
} else {
assert!(false, "Unkown command {:?}", command)
}
}
}
| Command | identifier_name |
mod.rs | pub mod build;
pub mod config;
pub mod dev;
pub mod generate;
pub mod init;
pub mod kv;
pub mod login;
pub mod logout;
pub mod preview;
pub mod publish;
pub mod r2;
pub mod route;
pub mod secret;
pub mod subdomain;
pub mod tail;
pub mod whoami;
pub mod exec {
pub use super::build::build;
pub use super::config::configure;
pub use super::dev::dev;
pub use super::generate::generate;
pub use super::init::init;
pub use super::kv::kv_bulk;
pub use super::kv::kv_key;
pub use super::kv::kv_namespace;
pub use super::login::login;
pub use super::logout::logout;
pub use super::preview::preview;
pub use super::publish::publish;
pub use super::r2::r2_bucket;
pub use super::route::route;
pub use super::secret::secret;
pub use super::subdomain::subdomain;
pub use super::tail::tail;
pub use super::whoami::whoami;
}
use std::net::IpAddr;
use std::path::PathBuf;
use std::str::FromStr;
use crate::commands::dev::Protocol;
use crate::commands::tail::websocket::TailFormat;
use crate::preview::HttpMethod;
use crate::settings::toml::migrations::{
DurableObjectsMigration, Migration, MigrationTag, Migrations, RenameClass, TransferClass,
};
use crate::settings::toml::TargetType;
use clap::AppSettings;
use structopt::StructOpt;
use url::Url;
#[derive(Debug, Clone, StructOpt)]
#[structopt(
name = "wrangler",
author = "The Wrangler Team <wrangler@cloudflare.com>",
setting = AppSettings::ArgRequiredElseHelp,
setting = AppSettings::DeriveDisplayOrder,
setting = AppSettings::VersionlessSubcommands,
)]
pub struct Cli {
/// Toggle verbose output (when applicable)
#[structopt(long, global = true)]
pub verbose: bool,
/// Path to configuration file.
#[structopt(long, short = "c", default_value = "wrangler.toml", global = true)]
pub config: PathBuf,
/// Environment to perform a command on.
#[structopt(name = "env", long, short = "e", global = true)]
pub environment: Option<String>,
#[structopt(subcommand)]
pub command: Command,
}
#[derive(Debug, Clone, StructOpt)]
pub enum Command {
/// Interact with your Workers KV Namespaces
#[structopt(name = "kv:namespace", setting = AppSettings::SubcommandRequiredElseHelp)]
KvNamespace(kv::KvNamespace),
/// Individually manage Workers KV key-value pairs
#[structopt(name = "kv:key", setting = AppSettings::SubcommandRequiredElseHelp)]
KvKey(kv::KvKey),
/// Interact with multiple Workers KV key-value pairs at once
#[structopt(name = "kv:bulk", setting = AppSettings::SubcommandRequiredElseHelp)]
KvBulk(kv::KvBulk),
/// Interact with your Workers R2 Buckets
#[structopt(setting = AppSettings::SubcommandRequiredElseHelp)]
R2(r2::R2),
/// List or delete worker routes.
#[structopt(name = "route", setting = AppSettings::SubcommandRequiredElseHelp)]
Route(route::Route),
/// Generate a secret that can be referenced in the worker script
#[structopt(name = "secret", setting = AppSettings::SubcommandRequiredElseHelp)]
Secret(secret::Secret),
/// Generate a new worker project
Generate {
/// The name of your worker!
#[structopt(index = 1, default_value = "worker")]
name: String,
/// A link to a GitHub template! Defaults to https://github.com/cloudflare/worker-template
#[structopt(index = 2)]
template: Option<String>,
/// The type of project you want generated
#[structopt(name = "type", long, short = "t")]
target_type: Option<TargetType>,
/// Initializes a Workers Sites project. Overrides 'type' and 'template'
#[structopt(long, short = "s")]
site: bool,
},
/// Create a wrangler.toml for an existing project
Init {
/// The name of your worker!
#[structopt(index = 1)]
name: Option<String>,
/// The type of project you want generated
#[structopt(name = "type", long, short = "t")]
target_type: Option<TargetType>,
/// Initializes a Workers Sites project. Overrides `type` and `template`
#[structopt(long, short = "s")]
site: bool,
},
/// Build your worker
Build,
/// Preview your code temporarily on cloudflareworkers.com
Preview {
/// Type of request to preview your worker with (get, post)
#[structopt(index = 1, default_value = "get")]
method: HttpMethod,
/// URL to open in the worker preview
#[structopt(short = "u", long, default_value = "https://example.com")]
url: Url,
/// Body string to post to your preview worker request
#[structopt(index = 2)]
body: Option<String>,
/// Watch your project for changes and update the preview automagically
#[structopt(long)]
watch: bool,
/// Don't open the browser on preview
#[structopt(long)]
headless: bool,
},
/// Start a local server for developing your worker
Dev {
/// Host to forward requests to, defaults to the zone of project or to
/// tutorial.cloudflareworkers.com if unauthenticated.
#[structopt(long, short = "h")]
host: Option<String>,
/// IP to listen on. Defaults to 127.0.0.1
#[structopt(long, short = "i")]
ip: Option<IpAddr>,
/// Port to listen on. Defaults to 8787
#[structopt(long, short = "p")]
port: Option<u16>,
/// Sets the protocol on which the wrangler dev listens, by default this is http
/// but can be set to https
#[structopt(name = "local-protocol")]
local_protocol: Option<Protocol>,
/// Sets the protocol on which requests are sent to the host, by default this is https
/// but can be set to http
#[structopt(name = "upstream-protocol")]
upstream_protocol: Option<Protocol>,
/// Inspect the worker using Chrome DevTools
#[structopt(long)]
inspect: bool,
/// Run wrangler dev unauthenticated
#[structopt(long)]
unauthenticated: bool,
},
/// Publish your worker to the orange cloud
#[structopt(name = "publish")]
Publish {
/// [deprecated] alias of wrangler publish
#[structopt(long, hidden = true)]
release: bool,
#[structopt(possible_value = "json")]
output: Option<String>,
#[structopt(flatten)]
migration: AdhocMigration,
},
/// Authenticate Wrangler with a Cloudflare API Token or Global API Key
#[structopt(name = "config")]
Config {
/// Use an email and global API key for authentication.
/// This is not recommended; use API tokens (the default) if possible
#[structopt(name = "api-key", long)]
api_key: bool,
/// Do not verify provided credentials before writing out Wrangler config file
#[structopt(name = "no-verify", long)]
no_verify: bool,
},
/// Configure your workers.dev subdomain
#[structopt(name = "subdomain")]
Subdomain {
/// The subdomain on workers.dev you'd like to reserve
#[structopt(name = "name", index = 1)]
name: Option<String>,
},
/// Retrieve your user info and test your auth config
#[structopt(name = "whoami")]
Whoami,
/// View a stream of logs from a published worker
#[structopt(name = "tail")]
Tail {
/// Name of the worker to tail
#[structopt(index = 1)]
name: Option<String>,
/// Output format for log messages
#[structopt(long, short = "f", default_value = "json", possible_values = &["json", "pretty"])]
format: TailFormat,
/// Stops the tail after receiving the first log (useful for testing)
#[structopt(long)]
once: bool,
/// Adds a sampling rate (0.01 for 1%)
#[structopt(long = "sampling-rate", default_value = "1")]
sampling_rate: f64,
/// Filter by invocation status
#[structopt(long, possible_values = &["ok", "error", "canceled"])]
status: Vec<String>,
/// Filter by HTTP method
#[structopt(long)]
method: Vec<String>,
/// Filter by HTTP header
#[structopt(long)]
header: Vec<String>,
/// Filter by IP address ("self" to filter your own IP address)
#[structopt(long = "ip-address", parse(try_from_str = parse_ip_address))]
ip_address: Vec<String>,
/// Filter by a text match in console.log messages
#[structopt(long)]
search: Option<String>,
/// Set the URL to forward log messages
#[structopt(hidden = true)]
url: Option<Url>,
/// Deprecated, no longer used.
#[structopt(hidden = true, long = "port", short = "p")]
tunnel_port: Option<u16>,
/// Deprecated, no longer used.
#[structopt(hidden = true, long = "metrics")]
metrics_port: Option<u16>,
},
/// Authenticate wrangler with your Cloudflare username and password
#[structopt(name = "login")]
Login {
/// Allows to choose set of scopes
#[structopt(name = "scopes", long, possible_values = login::SCOPES_LIST.as_ref())]
scopes: Vec<String>,
/// List all scopes
#[structopt(name = "scopes-list", long)]
scopes_list: bool,
},
/// Logout from your current authentication method and remove any configuration files.
/// It does not logout if you have authenticated wrangler through environment variables.
#[structopt(name = "logout")]
Logout,
}
#[derive(Debug, Clone, StructOpt)]
pub struct AdhocMigration {
/// Allow durable objects to be created from a class in your script
#[structopt(name = "new-class", long, number_of_values = 1)]
new_class: Vec<String>,
/// Delete all durable objects associated with a class in your script
#[structopt(name = "delete-class", long, number_of_values = 1)]
delete_class: Vec<String>,
/// Rename a durable object class
#[structopt(name = "rename-class", long, number_of_values = 2, value_names(&["from class", "to class"]))]
rename_class: Vec<String>,
/// Transfer all durable objects associated with a class in another script to a class in
/// this script
#[structopt(name = "transfer-class", long, number_of_values = 3, value_names(&["from script", "from class", "to class"]))]
transfer_class: Vec<String>,
/// Specify the existing migration tag for the script.
#[structopt(name = "old-tag", long)]
old_tag: Option<String>,
/// Specify the new migration tag for the script
#[structopt(name = "new-tag", long)]
new_tag: Option<String>,
}
impl AdhocMigration {
pub fn into_migrations(self) -> Option<Migrations> {
let migration = DurableObjectsMigration {
new_classes: self.new_class,
deleted_classes: self.delete_class,
renamed_classes: self
.rename_class
.chunks_exact(2)
.map(|chunk| {
let (from, to) = if let [from, to] = chunk {
(from.clone(), to.clone())
} else {
unreachable!("Chunks exact returned a slice with a length not equal to 2")
};
RenameClass { from, to }
})
.collect(),
transferred_classes: self
.transfer_class
.chunks_exact(3)
.map(|chunk| {
let (from_script, from, to) = if let [from_script, from, to] = chunk {
(from_script.clone(), from.clone(), to.clone())
} else {
unreachable!("Chunks exact returned a slice with a length not equal to 3")
};
TransferClass {
from,
from_script,
to,
}
})
.collect(),
};
let is_migration_empty = migration.new_classes.is_empty()
&& migration.deleted_classes.is_empty()
&& migration.renamed_classes.is_empty()
&& migration.transferred_classes.is_empty();
if !is_migration_empty || self.old_tag.is_some() || self.new_tag.is_some() {
let migration = if !is_migration_empty {
Some(Migration {
durable_objects: migration,
})
} else {
None
};
Some(Migrations::Adhoc {
script_tag: MigrationTag::Unknown,
provided_old_tag: self.old_tag,
new_tag: self.new_tag,
migration,
})
} else {
None
}
}
}
fn parse_ip_address(input: &str) -> Result<String, anyhow::Error> |
#[cfg(test)]
mod tests {
use super::*;
fn rename_class(tag: &str) -> RenameClass {
RenameClass {
from: format!("renameFrom{}", tag),
to: format!("renameTo{}", tag),
}
}
fn transfer_class(tag: &str) -> TransferClass {
TransferClass {
from: format!("transferFromClass{}", tag),
from_script: format!("transferFromScript{}", tag),
to: format!("transferToClass{}", tag),
}
}
#[test]
fn adhoc_migration_parsing() {
let command = Cli::from_iter(&[
"wrangler",
"publish",
"--old-tag",
"oldTag",
"--new-tag",
"newTag",
"--new-class",
"newA",
"--new-class",
"newB",
"--delete-class",
"deleteA",
"--delete-class",
"deleteB",
"--rename-class",
"renameFromA",
"renameToA",
"--rename-class",
"renameFromB",
"renameToB",
"--transfer-class",
"transferFromScriptA",
"transferFromClassA",
"transferToClassA",
"--transfer-class",
"transferFromScriptB",
"transferFromClassB",
"transferToClassB",
])
.command;
if let Command::Publish { migration, .. } = command {
assert_eq!(
migration.into_migrations(),
Some(Migrations::Adhoc {
script_tag: MigrationTag::Unknown,
provided_old_tag: Some(String::from("oldTag")),
new_tag: Some(String::from("newTag")),
migration: Some(Migration {
durable_objects: DurableObjectsMigration {
new_classes: vec![String::from("newA"), String::from("newB")],
deleted_classes: vec![String::from("deleteA"), String::from("deleteB")],
renamed_classes: vec![rename_class("A"), rename_class("B")],
transferred_classes: vec![transfer_class("A"), transfer_class("B")],
}
})
})
);
} else {
assert!(false, "Unkown command {:?}", command)
}
}
}
| {
match input {
"self" => Ok(String::from("self")),
address => match IpAddr::from_str(address) {
Ok(_) => Ok(address.to_owned()),
Err(err) => anyhow::bail!("{}: {}", err, input),
},
}
} | identifier_body |
mod.rs | pub mod build;
pub mod config;
pub mod dev;
pub mod generate;
pub mod init;
pub mod kv;
pub mod login;
pub mod logout;
pub mod preview;
pub mod publish;
pub mod r2;
pub mod route;
pub mod secret; | pub mod exec {
pub use super::build::build;
pub use super::config::configure;
pub use super::dev::dev;
pub use super::generate::generate;
pub use super::init::init;
pub use super::kv::kv_bulk;
pub use super::kv::kv_key;
pub use super::kv::kv_namespace;
pub use super::login::login;
pub use super::logout::logout;
pub use super::preview::preview;
pub use super::publish::publish;
pub use super::r2::r2_bucket;
pub use super::route::route;
pub use super::secret::secret;
pub use super::subdomain::subdomain;
pub use super::tail::tail;
pub use super::whoami::whoami;
}
use std::net::IpAddr;
use std::path::PathBuf;
use std::str::FromStr;
use crate::commands::dev::Protocol;
use crate::commands::tail::websocket::TailFormat;
use crate::preview::HttpMethod;
use crate::settings::toml::migrations::{
DurableObjectsMigration, Migration, MigrationTag, Migrations, RenameClass, TransferClass,
};
use crate::settings::toml::TargetType;
use clap::AppSettings;
use structopt::StructOpt;
use url::Url;
#[derive(Debug, Clone, StructOpt)]
#[structopt(
name = "wrangler",
author = "The Wrangler Team <wrangler@cloudflare.com>",
setting = AppSettings::ArgRequiredElseHelp,
setting = AppSettings::DeriveDisplayOrder,
setting = AppSettings::VersionlessSubcommands,
)]
pub struct Cli {
/// Toggle verbose output (when applicable)
#[structopt(long, global = true)]
pub verbose: bool,
/// Path to configuration file.
#[structopt(long, short = "c", default_value = "wrangler.toml", global = true)]
pub config: PathBuf,
/// Environment to perform a command on.
#[structopt(name = "env", long, short = "e", global = true)]
pub environment: Option<String>,
#[structopt(subcommand)]
pub command: Command,
}
#[derive(Debug, Clone, StructOpt)]
pub enum Command {
/// Interact with your Workers KV Namespaces
#[structopt(name = "kv:namespace", setting = AppSettings::SubcommandRequiredElseHelp)]
KvNamespace(kv::KvNamespace),
/// Individually manage Workers KV key-value pairs
#[structopt(name = "kv:key", setting = AppSettings::SubcommandRequiredElseHelp)]
KvKey(kv::KvKey),
/// Interact with multiple Workers KV key-value pairs at once
#[structopt(name = "kv:bulk", setting = AppSettings::SubcommandRequiredElseHelp)]
KvBulk(kv::KvBulk),
/// Interact with your Workers R2 Buckets
#[structopt(setting = AppSettings::SubcommandRequiredElseHelp)]
R2(r2::R2),
/// List or delete worker routes.
#[structopt(name = "route", setting = AppSettings::SubcommandRequiredElseHelp)]
Route(route::Route),
/// Generate a secret that can be referenced in the worker script
#[structopt(name = "secret", setting = AppSettings::SubcommandRequiredElseHelp)]
Secret(secret::Secret),
/// Generate a new worker project
Generate {
/// The name of your worker!
#[structopt(index = 1, default_value = "worker")]
name: String,
/// A link to a GitHub template! Defaults to https://github.com/cloudflare/worker-template
#[structopt(index = 2)]
template: Option<String>,
/// The type of project you want generated
#[structopt(name = "type", long, short = "t")]
target_type: Option<TargetType>,
/// Initializes a Workers Sites project. Overrides 'type' and 'template'
#[structopt(long, short = "s")]
site: bool,
},
/// Create a wrangler.toml for an existing project
Init {
/// The name of your worker!
#[structopt(index = 1)]
name: Option<String>,
/// The type of project you want generated
#[structopt(name = "type", long, short = "t")]
target_type: Option<TargetType>,
/// Initializes a Workers Sites project. Overrides `type` and `template`
#[structopt(long, short = "s")]
site: bool,
},
/// Build your worker
Build,
/// Preview your code temporarily on cloudflareworkers.com
Preview {
/// Type of request to preview your worker with (get, post)
#[structopt(index = 1, default_value = "get")]
method: HttpMethod,
/// URL to open in the worker preview
#[structopt(short = "u", long, default_value = "https://example.com")]
url: Url,
/// Body string to post to your preview worker request
#[structopt(index = 2)]
body: Option<String>,
/// Watch your project for changes and update the preview automagically
#[structopt(long)]
watch: bool,
/// Don't open the browser on preview
#[structopt(long)]
headless: bool,
},
/// Start a local server for developing your worker
Dev {
/// Host to forward requests to, defaults to the zone of project or to
/// tutorial.cloudflareworkers.com if unauthenticated.
#[structopt(long, short = "h")]
host: Option<String>,
/// IP to listen on. Defaults to 127.0.0.1
#[structopt(long, short = "i")]
ip: Option<IpAddr>,
/// Port to listen on. Defaults to 8787
#[structopt(long, short = "p")]
port: Option<u16>,
/// Sets the protocol on which the wrangler dev listens, by default this is http
/// but can be set to https
#[structopt(name = "local-protocol")]
local_protocol: Option<Protocol>,
/// Sets the protocol on which requests are sent to the host, by default this is https
/// but can be set to http
#[structopt(name = "upstream-protocol")]
upstream_protocol: Option<Protocol>,
/// Inspect the worker using Chrome DevTools
#[structopt(long)]
inspect: bool,
/// Run wrangler dev unauthenticated
#[structopt(long)]
unauthenticated: bool,
},
/// Publish your worker to the orange cloud
#[structopt(name = "publish")]
Publish {
/// [deprecated] alias of wrangler publish
#[structopt(long, hidden = true)]
release: bool,
#[structopt(possible_value = "json")]
output: Option<String>,
#[structopt(flatten)]
migration: AdhocMigration,
},
/// Authenticate Wrangler with a Cloudflare API Token or Global API Key
#[structopt(name = "config")]
Config {
/// Use an email and global API key for authentication.
/// This is not recommended; use API tokens (the default) if possible
#[structopt(name = "api-key", long)]
api_key: bool,
/// Do not verify provided credentials before writing out Wrangler config file
#[structopt(name = "no-verify", long)]
no_verify: bool,
},
/// Configure your workers.dev subdomain
#[structopt(name = "subdomain")]
Subdomain {
/// The subdomain on workers.dev you'd like to reserve
#[structopt(name = "name", index = 1)]
name: Option<String>,
},
/// Retrieve your user info and test your auth config
#[structopt(name = "whoami")]
Whoami,
/// View a stream of logs from a published worker
#[structopt(name = "tail")]
Tail {
/// Name of the worker to tail
#[structopt(index = 1)]
name: Option<String>,
/// Output format for log messages
#[structopt(long, short = "f", default_value = "json", possible_values = &["json", "pretty"])]
format: TailFormat,
/// Stops the tail after receiving the first log (useful for testing)
#[structopt(long)]
once: bool,
/// Adds a sampling rate (0.01 for 1%)
#[structopt(long = "sampling-rate", default_value = "1")]
sampling_rate: f64,
/// Filter by invocation status
#[structopt(long, possible_values = &["ok", "error", "canceled"])]
status: Vec<String>,
/// Filter by HTTP method
#[structopt(long)]
method: Vec<String>,
/// Filter by HTTP header
#[structopt(long)]
header: Vec<String>,
/// Filter by IP address ("self" to filter your own IP address)
#[structopt(long = "ip-address", parse(try_from_str = parse_ip_address))]
ip_address: Vec<String>,
/// Filter by a text match in console.log messages
#[structopt(long)]
search: Option<String>,
/// Set the URL to forward log messages
#[structopt(hidden = true)]
url: Option<Url>,
/// Deprecated, no longer used.
#[structopt(hidden = true, long = "port", short = "p")]
tunnel_port: Option<u16>,
/// Deprecated, no longer used.
#[structopt(hidden = true, long = "metrics")]
metrics_port: Option<u16>,
},
/// Authenticate wrangler with your Cloudflare username and password
#[structopt(name = "login")]
Login {
/// Allows to choose set of scopes
#[structopt(name = "scopes", long, possible_values = login::SCOPES_LIST.as_ref())]
scopes: Vec<String>,
/// List all scopes
#[structopt(name = "scopes-list", long)]
scopes_list: bool,
},
/// Logout from your current authentication method and remove any configuration files.
/// It does not logout if you have authenticated wrangler through environment variables.
#[structopt(name = "logout")]
Logout,
}
#[derive(Debug, Clone, StructOpt)]
pub struct AdhocMigration {
/// Allow durable objects to be created from a class in your script
#[structopt(name = "new-class", long, number_of_values = 1)]
new_class: Vec<String>,
/// Delete all durable objects associated with a class in your script
#[structopt(name = "delete-class", long, number_of_values = 1)]
delete_class: Vec<String>,
/// Rename a durable object class
#[structopt(name = "rename-class", long, number_of_values = 2, value_names(&["from class", "to class"]))]
rename_class: Vec<String>,
/// Transfer all durable objects associated with a class in another script to a class in
/// this script
#[structopt(name = "transfer-class", long, number_of_values = 3, value_names(&["from script", "from class", "to class"]))]
transfer_class: Vec<String>,
/// Specify the existing migration tag for the script.
#[structopt(name = "old-tag", long)]
old_tag: Option<String>,
/// Specify the new migration tag for the script
#[structopt(name = "new-tag", long)]
new_tag: Option<String>,
}
impl AdhocMigration {
pub fn into_migrations(self) -> Option<Migrations> {
let migration = DurableObjectsMigration {
new_classes: self.new_class,
deleted_classes: self.delete_class,
renamed_classes: self
.rename_class
.chunks_exact(2)
.map(|chunk| {
let (from, to) = if let [from, to] = chunk {
(from.clone(), to.clone())
} else {
unreachable!("Chunks exact returned a slice with a length not equal to 2")
};
RenameClass { from, to }
})
.collect(),
transferred_classes: self
.transfer_class
.chunks_exact(3)
.map(|chunk| {
let (from_script, from, to) = if let [from_script, from, to] = chunk {
(from_script.clone(), from.clone(), to.clone())
} else {
unreachable!("Chunks exact returned a slice with a length not equal to 3")
};
TransferClass {
from,
from_script,
to,
}
})
.collect(),
};
let is_migration_empty = migration.new_classes.is_empty()
&& migration.deleted_classes.is_empty()
&& migration.renamed_classes.is_empty()
&& migration.transferred_classes.is_empty();
if !is_migration_empty || self.old_tag.is_some() || self.new_tag.is_some() {
let migration = if !is_migration_empty {
Some(Migration {
durable_objects: migration,
})
} else {
None
};
Some(Migrations::Adhoc {
script_tag: MigrationTag::Unknown,
provided_old_tag: self.old_tag,
new_tag: self.new_tag,
migration,
})
} else {
None
}
}
}
fn parse_ip_address(input: &str) -> Result<String, anyhow::Error> {
match input {
"self" => Ok(String::from("self")),
address => match IpAddr::from_str(address) {
Ok(_) => Ok(address.to_owned()),
Err(err) => anyhow::bail!("{}: {}", err, input),
},
}
}
#[cfg(test)]
mod tests {
use super::*;
fn rename_class(tag: &str) -> RenameClass {
RenameClass {
from: format!("renameFrom{}", tag),
to: format!("renameTo{}", tag),
}
}
fn transfer_class(tag: &str) -> TransferClass {
TransferClass {
from: format!("transferFromClass{}", tag),
from_script: format!("transferFromScript{}", tag),
to: format!("transferToClass{}", tag),
}
}
#[test]
fn adhoc_migration_parsing() {
let command = Cli::from_iter(&[
"wrangler",
"publish",
"--old-tag",
"oldTag",
"--new-tag",
"newTag",
"--new-class",
"newA",
"--new-class",
"newB",
"--delete-class",
"deleteA",
"--delete-class",
"deleteB",
"--rename-class",
"renameFromA",
"renameToA",
"--rename-class",
"renameFromB",
"renameToB",
"--transfer-class",
"transferFromScriptA",
"transferFromClassA",
"transferToClassA",
"--transfer-class",
"transferFromScriptB",
"transferFromClassB",
"transferToClassB",
])
.command;
if let Command::Publish { migration, .. } = command {
assert_eq!(
migration.into_migrations(),
Some(Migrations::Adhoc {
script_tag: MigrationTag::Unknown,
provided_old_tag: Some(String::from("oldTag")),
new_tag: Some(String::from("newTag")),
migration: Some(Migration {
durable_objects: DurableObjectsMigration {
new_classes: vec![String::from("newA"), String::from("newB")],
deleted_classes: vec![String::from("deleteA"), String::from("deleteB")],
renamed_classes: vec![rename_class("A"), rename_class("B")],
transferred_classes: vec![transfer_class("A"), transfer_class("B")],
}
})
})
);
} else {
assert!(false, "Unkown command {:?}", command)
}
}
} | pub mod subdomain;
pub mod tail;
pub mod whoami;
| random_line_split |
models.py | """showCrime.dailyIncid.models.py:
updated 23 Mar 17: include GIS,
updated 20 Jun 17: combine dateTime, add source
updated 17 Oct 17: incorporate dailyLog incident data
updated 14 Jul 18: add OPDBeats, CensusTracts
updated 8 Aug 19: add new CrimeCatCDMatch, PC2CC
updated 15 Aug 19: add new OCUpdate for audit
capture socrata's :updated_at meta variable
BoxID instead of JSON files
DailyParse instead of JSON files
"""
__author__ = "rik@electronicArtifacts.com"
__version__ = "0.41"
from django.contrib.gis.db import models
from django.contrib.postgres.fields import JSONField
## Stick global variables in models
DateFormat = '%Y-%m-%d'
TimeFormat = '%H:%M:%S'
MinDateStr = '2007-01-01'
MaxDateStr = '2017-12-31'
class OakCrime(models.Model):
# non-NULL fields
idx = models.AutoField(primary_key=True)
opd_rd = models.CharField(max_length=10,db_index=True)
oidx = models.IntegerField()
# socrataDT: last time incident updated with socrata data; null if source is PatrolLog only
socrataDT = models.DateTimeField(null=True)
cdateTime = models.DateTimeField(db_index=True)
# list of all source_date in chron order, separated by +
source = models.CharField(max_length=500)
# NULL ok fields
ctype = models.CharField(max_length=100,blank=True,null=True)
desc = models.CharField(max_length=200,blank=True,null=True)
# beat from OPD (vs. geobeat determined by geo query)
beat = models.CharField(max_length=20,blank=True,null=True)
addr = models.CharField(max_length=100,blank=True,null=True)
xlng = models.FloatField(null=True)
ylat = models.FloatField(null=True)
# Defaults to SRID=4326 (aka WGS84)
# units are in degrees of longitude and latitude
point = models.PointField(null=True)
ucr = models.CharField(max_length=5,blank=True,null=True)
statute = models.CharField(max_length=50,blank=True,null=True)
crimeCat = models.CharField(max_length=50,blank=True,null=True,db_index=True)
lastModDateTime = models.DateTimeField(auto_now=True)
# derived geo attributes
zip = models.CharField(max_length=5,blank=True,null=True)
# beat as determined by geo query (vs. beat from OPD)
# 2do
geobeat = models.CharField(max_length=3,blank=True,null=True)
# full geoid = 06001423300, name=4233, tracttce=423300
# (CA-AlamedaCty-specific) longest census tract name < 10 char, eg "4062.01"
ctractGeoID = models.CharField(max_length=11,blank=True,null=True)
# Precincts_AlamedaCounty range from 200100-880600
precinct = models.IntegerField(blank=True,null=True)
## dailyLog fields
dlogData = models.NullBooleanField(blank=True,null=True) # indicating data from dailyLog
lossList = models.CharField(max_length=200,blank=True,null=True) # list of lost items
gswP = models.NullBooleanField(blank=True,null=True) # gun shot wound
weapon = models.CharField(max_length=50,blank=True,null=True)
callout = models.CharField(max_length=50,blank=True,null=True) # 'yes:' + reg
ncustody = models.IntegerField(blank=True,null=True)
nsuspect = models.IntegerField(blank=True,null=True)
nvictim = models.IntegerField(blank=True,null=True)
nhospital = models.IntegerField(blank=True,null=True)
roList = models.CharField(max_length=200,blank=True,null=True)
pcList = models.CharField(max_length=200,blank=True,null=True)
def __unicode__(self):
return '%d:%s' % (self.idx,self.opd_rd)
class OCUpdate(models.Model):
# audit trail of changes made to dailyIncid after initial posting
idx = models.AutoField(primary_key=True)
# 2do: replace with reference to foreign OakCrime instance
opd_rd = models.CharField(max_length=10,db_index=True)
oidx = models.IntegerField(default=0)
newSrc = models.CharField(max_length=50)
# cf harvestSocrata
# modifiableFields = ('cdateTime', 'ctype', 'desc', 'beat', 'addr','point', 'crimeCat')
fieldName = models.CharField(max_length=20)
prevVal = models.CharField(max_length=200)
newVal = models.CharField(max_length=200)
# HACK null=True required for legacy testing OCUpdate
prevSocDT = models.DateTimeField(null=True)
newSocDT = models.DateTimeField(null=True)
lastModDateTime = models.DateTimeField(auto_now=True)
class CrimeCat(models.Model):
idx = models.AutoField(primary_key=True)
crimeCat = models.CharField(max_length=50)
class CrimeCatMatch(models.Model):
# Rules for matching crime type and/or description --> CrimeCat
MatchTypes = ( ('cd', 'CrimeType+Desc'), ('c', 'CrimeType'), ('d', 'Desc'),)
idx = models.AutoField(primary_key=True)
matchType = models.CharField(max_length=2,choices=MatchTypes)
ctype = models.CharField(max_length=100,db_index=True)
desc = models.CharField(max_length=100,db_index=True)
crimeCat = models.CharField(max_length=50)
class PC2CC(models.Model):
# Penal Code -> CrimeCat
idx = models.AutoField(primary_key=True)
pc = models.CharField(max_length=30)
crimeCat = models.CharField(max_length=100)
class TargetPlace(models.Model):
'''specific places to be selected for crimes nearby
'''
placeType = models.CharField(max_length=20)
ylat = models.FloatField()
xlng = models.FloatField()
name = models.CharField(max_length=254)
desc = models.CharField(max_length=254)
def __unicode__(self):
return '%s' % (self.desc)
# 170329
# python manage.py ogrinspect /Data/sharedData/c4a_oakland/OAK_data/maps_oakland/tl_2010_06_zcta510/tl_2010_06_zcta510.shp Zip5Geo --multi --mapping
class Zip5Geo(models.Model):
statefp10 = models.CharField(max_length=2)
zcta5ce10 = models.CharField(max_length=5)
geoid10 = models.CharField(max_length=7)
classfp10 = models.CharField(max_length=2)
mtfcc10 = models.CharField(max_length=5)
funcstat10 = models.CharField(max_length=1)
aland10 = models.FloatField()
awater10 = models.FloatField()
intptlat10 = models.CharField(max_length=11)
intptlon10 = models.CharField(max_length=12)
partflg10 = models.CharField(max_length=1)
geom = models.MultiPolygonField(srid=4326)
def __str__(self): # __unicode__ on Python 2
return 'zcta5ce10: %s' % self.zcta5ce10
# Auto-generated `LayerMapping` dictionary for Zip5Geo model
Zip5Geozip5geo_mapping = {
'statefp10' : 'STATEFP10',
'zcta5ce10' : 'ZCTA5CE10',
'geoid10' : 'GEOID10',
'classfp10' : 'CLASSFP10',
'mtfcc10' : 'MTFCC10',
'funcstat10' : 'FUNCSTAT10',
'aland10' : 'ALAND10',
'awater10' : 'AWATER10',
'intptlat10' : 'INTPTLAT10',
'intptlon10' : 'INTPTLON10',
'partflg10' : 'PARTFLG10',
'geom' : 'MULTIPOLYGON',
}
# 180712
# python manage.py ogrinspect /Data/c4a-Data/OAK_data/maps_oakland/beats-shp OPDBeatMap --srid=4326 --mapping --multi
class OPDBeatMap(models.Model):
name = models.CharField(max_length=254)
# descriptio = models.CharField(max_length=254)
# timestamp = models.CharField(max_length=254)
# begin = models.CharField(max_length=254)
# end = models.CharField(max_length=254)
# altitudemo = models.CharField(max_length=254)
# tessellate = models.BigIntegerField() # constant=-1
# extrude = models.BigIntegerField() # constant=-1
# visibility = models.BigIntegerField() # constant=-1
# draworder = models.CharField(max_length=254)
# icon = models.CharField(max_length=254)
# name_1 = models.CharField(max_length=254)
objectid = models.CharField(max_length=254)
cp_beat = models.CharField(max_length=254)
pol_beat = models.CharField(max_length=254)
pol_dist = models.CharField(max_length=254)
pol_sect = models.CharField(max_length=254)
beatid = models.CharField(max_length=254)
# action = models.CharField(max_length=254) # constant="P"
# agency = models.CharField(max_length=254) # constant="OP"
# message = models.CharField(max_length=254) # constant="0"
# sourcethm = models.CharField(max_length=254) # constant="Pb"
acres = models.CharField(max_length=254)
shape_area = models.CharField(max_length=254)
shape_len = models.CharField(max_length=254)
geom = models.MultiPolygonField(srid=4326)
# Auto-generated `LayerMapping` dictionary for OPDBeatMap model
OPDBeatmap_mapping = {
'name': 'Name',
# 'descriptio': 'descriptio',
# 'timestamp': 'timestamp',
# 'begin': 'begin',
# 'end': 'end',
# 'altitudemo': 'altitudeMo',
# 'tessellate': 'tessellate',
# 'extrude': 'extrude',
# 'visibility': 'visibility',
# 'draworder': 'drawOrder',
# 'icon': 'icon',
# 'name_1': 'Name_1',
# 'objectid': 'OBJECTID',
'cp_beat': 'CP_BEAT',
'pol_beat': 'POL_BEAT',
'pol_dist': 'POL_DIST',
'pol_sect': 'POL_SECT',
'beatid': 'ID',
# 'action': 'ACTION',
# 'agency': 'AGENCY',
# 'message': 'MESSAGE',
# 'sourcethm': 'SOURCETHM',
'acres': 'ACRES',
'shape_area': 'SHAPE_AREA',
'shape_len': 'SHAPE_LEN',
'geom': 'MULTIPOLYGON',
}
# 180712
# python manage.py ogrinspect /Data/c4a-Data/OAK_data/maps_oakland/cb_2015_06_tract_500k/cb_2015_06_tract_500k.shp CensusTract --srid=4269 --mapping --multi
class CensusTract(models.Model):
statefp = models.CharField(max_length=2)
countyfp = models.CharField(max_length=3)
tractce = models.CharField(max_length=6)
affgeoid = models.CharField(max_length=20)
geoid = models.CharField(max_length=11)
name = models.CharField(max_length=100)
lsad = models.CharField(max_length=2)
aland = models.BigIntegerField()
awater = models.BigIntegerField()
geom = models.MultiPolygonField(srid=4269)
# Auto-generated `LayerMapping` dictionary for CensusTract model
Censustract_mapping = {
'statefp': 'STATEFP',
'countyfp': 'COUNTYFP',
'tractce': 'TRACTCE',
'affgeoid': 'AFFGEOID',
'geoid': 'GEOID',
'name': 'NAME',
'lsad': 'LSAD',
'aland': 'ALAND',
'awater': 'AWATER',
'geom': 'MULTIPOLYGON',
}
# 190817
# support BoxID info ala boxIDTbl = {'root': {'id': OPDPatrolFolderID,'kids': []} }
class BoxID(models.Model):
|
# capture results of daily log file's parse
# cf. collectDailyLogs()
class DailyParse(models.Model):
idx = models.AutoField(primary_key=True)
boxobj = models.ForeignKey('boxid',on_delete=models.CASCADE)
froot = models.CharField(max_length=100,db_index=True)
parseDT = models.DateTimeField(null=True)
parseOrder = models.IntegerField(default=0)
opd_rd = models.CharField(max_length=10,db_index=True)
incidDT = models.DateTimeField(null=True)
parseDict = JSONField(default=dict)
| idx = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
boxidx = models.BigIntegerField(db_index=True)
boxModDT = models.DateTimeField()
# boxType = models.CharField(max_length=10) # folder,file
kids = models.ManyToManyField('self', symmetrical=False,related_name='parent')
froot = models.CharField(max_length=100,db_index=True)
harvestDT = models.DateTimeField(null=True)
parseDT = models.DateTimeField(null=True) | identifier_body |
models.py | """showCrime.dailyIncid.models.py:
updated 23 Mar 17: include GIS,
updated 20 Jun 17: combine dateTime, add source
updated 17 Oct 17: incorporate dailyLog incident data
updated 14 Jul 18: add OPDBeats, CensusTracts
updated 8 Aug 19: add new CrimeCatCDMatch, PC2CC
updated 15 Aug 19: add new OCUpdate for audit
capture socrata's :updated_at meta variable
BoxID instead of JSON files
DailyParse instead of JSON files
"""
__author__ = "rik@electronicArtifacts.com"
__version__ = "0.41"
from django.contrib.gis.db import models
from django.contrib.postgres.fields import JSONField
## Stick global variables in models
DateFormat = '%Y-%m-%d'
TimeFormat = '%H:%M:%S'
MinDateStr = '2007-01-01'
MaxDateStr = '2017-12-31'
class OakCrime(models.Model):
# non-NULL fields
idx = models.AutoField(primary_key=True)
opd_rd = models.CharField(max_length=10,db_index=True)
oidx = models.IntegerField()
# socrataDT: last time incident updated with socrata data; null if source is PatrolLog only
socrataDT = models.DateTimeField(null=True)
cdateTime = models.DateTimeField(db_index=True)
# list of all source_date in chron order, separated by +
source = models.CharField(max_length=500)
# NULL ok fields
ctype = models.CharField(max_length=100,blank=True,null=True)
desc = models.CharField(max_length=200,blank=True,null=True)
# beat from OPD (vs. geobeat determined by geo query)
beat = models.CharField(max_length=20,blank=True,null=True)
addr = models.CharField(max_length=100,blank=True,null=True)
xlng = models.FloatField(null=True)
ylat = models.FloatField(null=True)
# Defaults to SRID=4326 (aka WGS84)
# units are in degrees of longitude and latitude
point = models.PointField(null=True)
ucr = models.CharField(max_length=5,blank=True,null=True)
statute = models.CharField(max_length=50,blank=True,null=True)
crimeCat = models.CharField(max_length=50,blank=True,null=True,db_index=True)
lastModDateTime = models.DateTimeField(auto_now=True)
# derived geo attributes
zip = models.CharField(max_length=5,blank=True,null=True)
# beat as determined by geo query (vs. beat from OPD)
# 2do
geobeat = models.CharField(max_length=3,blank=True,null=True)
# full geoid = 06001423300, name=4233, tracttce=423300
# (CA-AlamedaCty-specific) longest census tract name < 10 char, eg "4062.01"
ctractGeoID = models.CharField(max_length=11,blank=True,null=True)
# Precincts_AlamedaCounty range from 200100-880600
precinct = models.IntegerField(blank=True,null=True)
## dailyLog fields
dlogData = models.NullBooleanField(blank=True,null=True) # indicating data from dailyLog
lossList = models.CharField(max_length=200,blank=True,null=True) # list of lost items
gswP = models.NullBooleanField(blank=True,null=True) # gun shot wound
weapon = models.CharField(max_length=50,blank=True,null=True)
callout = models.CharField(max_length=50,blank=True,null=True) # 'yes:' + reg
ncustody = models.IntegerField(blank=True,null=True)
nsuspect = models.IntegerField(blank=True,null=True)
nvictim = models.IntegerField(blank=True,null=True)
nhospital = models.IntegerField(blank=True,null=True)
roList = models.CharField(max_length=200,blank=True,null=True)
pcList = models.CharField(max_length=200,blank=True,null=True)
def __unicode__(self):
return '%d:%s' % (self.idx,self.opd_rd)
class OCUpdate(models.Model):
# audit trail of changes made to dailyIncid after initial posting
idx = models.AutoField(primary_key=True)
# 2do: replace with reference to foreign OakCrime instance
opd_rd = models.CharField(max_length=10,db_index=True)
oidx = models.IntegerField(default=0)
newSrc = models.CharField(max_length=50)
# cf harvestSocrata
# modifiableFields = ('cdateTime', 'ctype', 'desc', 'beat', 'addr','point', 'crimeCat')
fieldName = models.CharField(max_length=20)
prevVal = models.CharField(max_length=200)
newVal = models.CharField(max_length=200)
# HACK null=True required for legacy testing OCUpdate
prevSocDT = models.DateTimeField(null=True)
newSocDT = models.DateTimeField(null=True)
lastModDateTime = models.DateTimeField(auto_now=True)
class CrimeCat(models.Model):
idx = models.AutoField(primary_key=True)
crimeCat = models.CharField(max_length=50)
class CrimeCatMatch(models.Model):
# Rules for matching crime type and/or description --> CrimeCat
MatchTypes = ( ('cd', 'CrimeType+Desc'), ('c', 'CrimeType'), ('d', 'Desc'),)
idx = models.AutoField(primary_key=True)
matchType = models.CharField(max_length=2,choices=MatchTypes)
ctype = models.CharField(max_length=100,db_index=True)
desc = models.CharField(max_length=100,db_index=True)
crimeCat = models.CharField(max_length=50)
class PC2CC(models.Model):
# Penal Code -> CrimeCat
idx = models.AutoField(primary_key=True)
pc = models.CharField(max_length=30)
crimeCat = models.CharField(max_length=100)
class TargetPlace(models.Model):
'''specific places to be selected for crimes nearby
'''
placeType = models.CharField(max_length=20)
ylat = models.FloatField()
xlng = models.FloatField()
name = models.CharField(max_length=254)
desc = models.CharField(max_length=254)
def | (self):
return '%s' % (self.desc)
# 170329
# python manage.py ogrinspect /Data/sharedData/c4a_oakland/OAK_data/maps_oakland/tl_2010_06_zcta510/tl_2010_06_zcta510.shp Zip5Geo --multi --mapping
class Zip5Geo(models.Model):
statefp10 = models.CharField(max_length=2)
zcta5ce10 = models.CharField(max_length=5)
geoid10 = models.CharField(max_length=7)
classfp10 = models.CharField(max_length=2)
mtfcc10 = models.CharField(max_length=5)
funcstat10 = models.CharField(max_length=1)
aland10 = models.FloatField()
awater10 = models.FloatField()
intptlat10 = models.CharField(max_length=11)
intptlon10 = models.CharField(max_length=12)
partflg10 = models.CharField(max_length=1)
geom = models.MultiPolygonField(srid=4326)
def __str__(self): # __unicode__ on Python 2
return 'zcta5ce10: %s' % self.zcta5ce10
# Auto-generated `LayerMapping` dictionary for Zip5Geo model
Zip5Geozip5geo_mapping = {
'statefp10' : 'STATEFP10',
'zcta5ce10' : 'ZCTA5CE10',
'geoid10' : 'GEOID10',
'classfp10' : 'CLASSFP10',
'mtfcc10' : 'MTFCC10',
'funcstat10' : 'FUNCSTAT10',
'aland10' : 'ALAND10',
'awater10' : 'AWATER10',
'intptlat10' : 'INTPTLAT10',
'intptlon10' : 'INTPTLON10',
'partflg10' : 'PARTFLG10',
'geom' : 'MULTIPOLYGON',
}
# 180712
# python manage.py ogrinspect /Data/c4a-Data/OAK_data/maps_oakland/beats-shp OPDBeatMap --srid=4326 --mapping --multi
class OPDBeatMap(models.Model):
name = models.CharField(max_length=254)
# descriptio = models.CharField(max_length=254)
# timestamp = models.CharField(max_length=254)
# begin = models.CharField(max_length=254)
# end = models.CharField(max_length=254)
# altitudemo = models.CharField(max_length=254)
# tessellate = models.BigIntegerField() # constant=-1
# extrude = models.BigIntegerField() # constant=-1
# visibility = models.BigIntegerField() # constant=-1
# draworder = models.CharField(max_length=254)
# icon = models.CharField(max_length=254)
# name_1 = models.CharField(max_length=254)
objectid = models.CharField(max_length=254)
cp_beat = models.CharField(max_length=254)
pol_beat = models.CharField(max_length=254)
pol_dist = models.CharField(max_length=254)
pol_sect = models.CharField(max_length=254)
beatid = models.CharField(max_length=254)
# action = models.CharField(max_length=254) # constant="P"
# agency = models.CharField(max_length=254) # constant="OP"
# message = models.CharField(max_length=254) # constant="0"
# sourcethm = models.CharField(max_length=254) # constant="Pb"
acres = models.CharField(max_length=254)
shape_area = models.CharField(max_length=254)
shape_len = models.CharField(max_length=254)
geom = models.MultiPolygonField(srid=4326)
# Auto-generated `LayerMapping` dictionary for OPDBeatMap model
OPDBeatmap_mapping = {
'name': 'Name',
# 'descriptio': 'descriptio',
# 'timestamp': 'timestamp',
# 'begin': 'begin',
# 'end': 'end',
# 'altitudemo': 'altitudeMo',
# 'tessellate': 'tessellate',
# 'extrude': 'extrude',
# 'visibility': 'visibility',
# 'draworder': 'drawOrder',
# 'icon': 'icon',
# 'name_1': 'Name_1',
# 'objectid': 'OBJECTID',
'cp_beat': 'CP_BEAT',
'pol_beat': 'POL_BEAT',
'pol_dist': 'POL_DIST',
'pol_sect': 'POL_SECT',
'beatid': 'ID',
# 'action': 'ACTION',
# 'agency': 'AGENCY',
# 'message': 'MESSAGE',
# 'sourcethm': 'SOURCETHM',
'acres': 'ACRES',
'shape_area': 'SHAPE_AREA',
'shape_len': 'SHAPE_LEN',
'geom': 'MULTIPOLYGON',
}
# 180712
# python manage.py ogrinspect /Data/c4a-Data/OAK_data/maps_oakland/cb_2015_06_tract_500k/cb_2015_06_tract_500k.shp CensusTract --srid=4269 --mapping --multi
class CensusTract(models.Model):
statefp = models.CharField(max_length=2)
countyfp = models.CharField(max_length=3)
tractce = models.CharField(max_length=6)
affgeoid = models.CharField(max_length=20)
geoid = models.CharField(max_length=11)
name = models.CharField(max_length=100)
lsad = models.CharField(max_length=2)
aland = models.BigIntegerField()
awater = models.BigIntegerField()
geom = models.MultiPolygonField(srid=4269)
# Auto-generated `LayerMapping` dictionary for CensusTract model
Censustract_mapping = {
'statefp': 'STATEFP',
'countyfp': 'COUNTYFP',
'tractce': 'TRACTCE',
'affgeoid': 'AFFGEOID',
'geoid': 'GEOID',
'name': 'NAME',
'lsad': 'LSAD',
'aland': 'ALAND',
'awater': 'AWATER',
'geom': 'MULTIPOLYGON',
}
# 190817
# support BoxID info ala boxIDTbl = {'root': {'id': OPDPatrolFolderID,'kids': []} }
class BoxID(models.Model):
idx = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
boxidx = models.BigIntegerField(db_index=True)
boxModDT = models.DateTimeField()
# boxType = models.CharField(max_length=10) # folder,file
kids = models.ManyToManyField('self', symmetrical=False,related_name='parent')
froot = models.CharField(max_length=100,db_index=True)
harvestDT = models.DateTimeField(null=True)
parseDT = models.DateTimeField(null=True)
# capture results of daily log file's parse
# cf. collectDailyLogs()
class DailyParse(models.Model):
idx = models.AutoField(primary_key=True)
boxobj = models.ForeignKey('boxid',on_delete=models.CASCADE)
froot = models.CharField(max_length=100,db_index=True)
parseDT = models.DateTimeField(null=True)
parseOrder = models.IntegerField(default=0)
opd_rd = models.CharField(max_length=10,db_index=True)
incidDT = models.DateTimeField(null=True)
parseDict = JSONField(default=dict)
| __unicode__ | identifier_name |
models.py | """showCrime.dailyIncid.models.py:
updated 23 Mar 17: include GIS,
updated 20 Jun 17: combine dateTime, add source
updated 17 Oct 17: incorporate dailyLog incident data
updated 14 Jul 18: add OPDBeats, CensusTracts
updated 8 Aug 19: add new CrimeCatCDMatch, PC2CC
updated 15 Aug 19: add new OCUpdate for audit
capture socrata's :updated_at meta variable
BoxID instead of JSON files
DailyParse instead of JSON files
"""
__author__ = "rik@electronicArtifacts.com"
__version__ = "0.41"
from django.contrib.gis.db import models
from django.contrib.postgres.fields import JSONField
## Stick global variables in models
DateFormat = '%Y-%m-%d'
TimeFormat = '%H:%M:%S'
MinDateStr = '2007-01-01'
MaxDateStr = '2017-12-31'
class OakCrime(models.Model):
# non-NULL fields
idx = models.AutoField(primary_key=True)
opd_rd = models.CharField(max_length=10,db_index=True)
oidx = models.IntegerField()
# socrataDT: last time incident updated with socrata data; null if source is PatrolLog only
socrataDT = models.DateTimeField(null=True)
cdateTime = models.DateTimeField(db_index=True)
# list of all source_date in chron order, separated by +
source = models.CharField(max_length=500)
# NULL ok fields
ctype = models.CharField(max_length=100,blank=True,null=True)
desc = models.CharField(max_length=200,blank=True,null=True)
# beat from OPD (vs. geobeat determined by geo query)
beat = models.CharField(max_length=20,blank=True,null=True)
addr = models.CharField(max_length=100,blank=True,null=True)
xlng = models.FloatField(null=True)
ylat = models.FloatField(null=True)
# Defaults to SRID=4326 (aka WGS84)
# units are in degrees of longitude and latitude
point = models.PointField(null=True)
ucr = models.CharField(max_length=5,blank=True,null=True)
statute = models.CharField(max_length=50,blank=True,null=True)
crimeCat = models.CharField(max_length=50,blank=True,null=True,db_index=True)
lastModDateTime = models.DateTimeField(auto_now=True)
# derived geo attributes
zip = models.CharField(max_length=5,blank=True,null=True)
# beat as determined by geo query (vs. beat from OPD)
# 2do
geobeat = models.CharField(max_length=3,blank=True,null=True)
# full geoid = 06001423300, name=4233, tracttce=423300
# (CA-AlamedaCty-specific) longest census tract name < 10 char, eg "4062.01"
ctractGeoID = models.CharField(max_length=11,blank=True,null=True)
# Precincts_AlamedaCounty range from 200100-880600
precinct = models.IntegerField(blank=True,null=True)
## dailyLog fields
dlogData = models.NullBooleanField(blank=True,null=True) # indicating data from dailyLog
lossList = models.CharField(max_length=200,blank=True,null=True) # list of lost items
gswP = models.NullBooleanField(blank=True,null=True) # gun shot wound
weapon = models.CharField(max_length=50,blank=True,null=True)
callout = models.CharField(max_length=50,blank=True,null=True) # 'yes:' + reg
ncustody = models.IntegerField(blank=True,null=True) | nsuspect = models.IntegerField(blank=True,null=True)
nvictim = models.IntegerField(blank=True,null=True)
nhospital = models.IntegerField(blank=True,null=True)
roList = models.CharField(max_length=200,blank=True,null=True)
pcList = models.CharField(max_length=200,blank=True,null=True)
def __unicode__(self):
return '%d:%s' % (self.idx,self.opd_rd)
class OCUpdate(models.Model):
# audit trail of changes made to dailyIncid after initial posting
idx = models.AutoField(primary_key=True)
# 2do: replace with reference to foreign OakCrime instance
opd_rd = models.CharField(max_length=10,db_index=True)
oidx = models.IntegerField(default=0)
newSrc = models.CharField(max_length=50)
# cf harvestSocrata
# modifiableFields = ('cdateTime', 'ctype', 'desc', 'beat', 'addr','point', 'crimeCat')
fieldName = models.CharField(max_length=20)
prevVal = models.CharField(max_length=200)
newVal = models.CharField(max_length=200)
# HACK null=True required for legacy testing OCUpdate
prevSocDT = models.DateTimeField(null=True)
newSocDT = models.DateTimeField(null=True)
lastModDateTime = models.DateTimeField(auto_now=True)
class CrimeCat(models.Model):
idx = models.AutoField(primary_key=True)
crimeCat = models.CharField(max_length=50)
class CrimeCatMatch(models.Model):
# Rules for matching crime type and/or description --> CrimeCat
MatchTypes = ( ('cd', 'CrimeType+Desc'), ('c', 'CrimeType'), ('d', 'Desc'),)
idx = models.AutoField(primary_key=True)
matchType = models.CharField(max_length=2,choices=MatchTypes)
ctype = models.CharField(max_length=100,db_index=True)
desc = models.CharField(max_length=100,db_index=True)
crimeCat = models.CharField(max_length=50)
class PC2CC(models.Model):
# Penal Code -> CrimeCat
idx = models.AutoField(primary_key=True)
pc = models.CharField(max_length=30)
crimeCat = models.CharField(max_length=100)
class TargetPlace(models.Model):
'''specific places to be selected for crimes nearby
'''
placeType = models.CharField(max_length=20)
ylat = models.FloatField()
xlng = models.FloatField()
name = models.CharField(max_length=254)
desc = models.CharField(max_length=254)
def __unicode__(self):
return '%s' % (self.desc)
# 170329
# python manage.py ogrinspect /Data/sharedData/c4a_oakland/OAK_data/maps_oakland/tl_2010_06_zcta510/tl_2010_06_zcta510.shp Zip5Geo --multi --mapping
class Zip5Geo(models.Model):
statefp10 = models.CharField(max_length=2)
zcta5ce10 = models.CharField(max_length=5)
geoid10 = models.CharField(max_length=7)
classfp10 = models.CharField(max_length=2)
mtfcc10 = models.CharField(max_length=5)
funcstat10 = models.CharField(max_length=1)
aland10 = models.FloatField()
awater10 = models.FloatField()
intptlat10 = models.CharField(max_length=11)
intptlon10 = models.CharField(max_length=12)
partflg10 = models.CharField(max_length=1)
geom = models.MultiPolygonField(srid=4326)
def __str__(self): # __unicode__ on Python 2
return 'zcta5ce10: %s' % self.zcta5ce10
# Auto-generated `LayerMapping` dictionary for Zip5Geo model
Zip5Geozip5geo_mapping = {
'statefp10' : 'STATEFP10',
'zcta5ce10' : 'ZCTA5CE10',
'geoid10' : 'GEOID10',
'classfp10' : 'CLASSFP10',
'mtfcc10' : 'MTFCC10',
'funcstat10' : 'FUNCSTAT10',
'aland10' : 'ALAND10',
'awater10' : 'AWATER10',
'intptlat10' : 'INTPTLAT10',
'intptlon10' : 'INTPTLON10',
'partflg10' : 'PARTFLG10',
'geom' : 'MULTIPOLYGON',
}
# 180712
# python manage.py ogrinspect /Data/c4a-Data/OAK_data/maps_oakland/beats-shp OPDBeatMap --srid=4326 --mapping --multi
class OPDBeatMap(models.Model):
name = models.CharField(max_length=254)
# descriptio = models.CharField(max_length=254)
# timestamp = models.CharField(max_length=254)
# begin = models.CharField(max_length=254)
# end = models.CharField(max_length=254)
# altitudemo = models.CharField(max_length=254)
# tessellate = models.BigIntegerField() # constant=-1
# extrude = models.BigIntegerField() # constant=-1
# visibility = models.BigIntegerField() # constant=-1
# draworder = models.CharField(max_length=254)
# icon = models.CharField(max_length=254)
# name_1 = models.CharField(max_length=254)
objectid = models.CharField(max_length=254)
cp_beat = models.CharField(max_length=254)
pol_beat = models.CharField(max_length=254)
pol_dist = models.CharField(max_length=254)
pol_sect = models.CharField(max_length=254)
beatid = models.CharField(max_length=254)
# action = models.CharField(max_length=254) # constant="P"
# agency = models.CharField(max_length=254) # constant="OP"
# message = models.CharField(max_length=254) # constant="0"
# sourcethm = models.CharField(max_length=254) # constant="Pb"
acres = models.CharField(max_length=254)
shape_area = models.CharField(max_length=254)
shape_len = models.CharField(max_length=254)
geom = models.MultiPolygonField(srid=4326)
# Auto-generated `LayerMapping` dictionary for OPDBeatMap model
OPDBeatmap_mapping = {
'name': 'Name',
# 'descriptio': 'descriptio',
# 'timestamp': 'timestamp',
# 'begin': 'begin',
# 'end': 'end',
# 'altitudemo': 'altitudeMo',
# 'tessellate': 'tessellate',
# 'extrude': 'extrude',
# 'visibility': 'visibility',
# 'draworder': 'drawOrder',
# 'icon': 'icon',
# 'name_1': 'Name_1',
# 'objectid': 'OBJECTID',
'cp_beat': 'CP_BEAT',
'pol_beat': 'POL_BEAT',
'pol_dist': 'POL_DIST',
'pol_sect': 'POL_SECT',
'beatid': 'ID',
# 'action': 'ACTION',
# 'agency': 'AGENCY',
# 'message': 'MESSAGE',
# 'sourcethm': 'SOURCETHM',
'acres': 'ACRES',
'shape_area': 'SHAPE_AREA',
'shape_len': 'SHAPE_LEN',
'geom': 'MULTIPOLYGON',
}
# 180712
# python manage.py ogrinspect /Data/c4a-Data/OAK_data/maps_oakland/cb_2015_06_tract_500k/cb_2015_06_tract_500k.shp CensusTract --srid=4269 --mapping --multi
class CensusTract(models.Model):
statefp = models.CharField(max_length=2)
countyfp = models.CharField(max_length=3)
tractce = models.CharField(max_length=6)
affgeoid = models.CharField(max_length=20)
geoid = models.CharField(max_length=11)
name = models.CharField(max_length=100)
lsad = models.CharField(max_length=2)
aland = models.BigIntegerField()
awater = models.BigIntegerField()
geom = models.MultiPolygonField(srid=4269)
# Auto-generated `LayerMapping` dictionary for CensusTract model
Censustract_mapping = {
'statefp': 'STATEFP',
'countyfp': 'COUNTYFP',
'tractce': 'TRACTCE',
'affgeoid': 'AFFGEOID',
'geoid': 'GEOID',
'name': 'NAME',
'lsad': 'LSAD',
'aland': 'ALAND',
'awater': 'AWATER',
'geom': 'MULTIPOLYGON',
}
# 190817
# support BoxID info ala boxIDTbl = {'root': {'id': OPDPatrolFolderID,'kids': []} }
class BoxID(models.Model):
idx = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
boxidx = models.BigIntegerField(db_index=True)
boxModDT = models.DateTimeField()
# boxType = models.CharField(max_length=10) # folder,file
kids = models.ManyToManyField('self', symmetrical=False,related_name='parent')
froot = models.CharField(max_length=100,db_index=True)
harvestDT = models.DateTimeField(null=True)
parseDT = models.DateTimeField(null=True)
# capture results of daily log file's parse
# cf. collectDailyLogs()
class DailyParse(models.Model):
idx = models.AutoField(primary_key=True)
boxobj = models.ForeignKey('boxid',on_delete=models.CASCADE)
froot = models.CharField(max_length=100,db_index=True)
parseDT = models.DateTimeField(null=True)
parseOrder = models.IntegerField(default=0)
opd_rd = models.CharField(max_length=10,db_index=True)
incidDT = models.DateTimeField(null=True)
parseDict = JSONField(default=dict) | random_line_split | |
test.rs | // Utilities for creating test cases.
use core::marker::PhantomData;
use std::time::{Duration, Instant};
use std::vec::Vec;
use common::errors::*;
use crate::random::Rng;
/*
To verify no timing leaks:
- Run any many different input sizes.
- Verify compiler optimizations aren't making looping over many iterations too fast
- Speed is linear w.r.t. num iterations.
- We can even experimentally determine if algorithms are linear, quadratic, etc.
- Compare average time between trials and make sure variance is lower than some constant.
TODO: Add test cases to verify that this can fail sometimes.
*/
#[derive(Default)]
pub struct TimingLeakTestCase<T> {
pub data: T,
pub size: usize,
}
/// Creates new test cases for the timing test.
pub trait TimingLeakTestCaseGenerator {
type Data;
/// Should generate a new test case and write it into 'out'.
/// - If any heap memory is involved, the new test case should re-use as
/// much of the memory buffers already in 'out' as possible to avoid any
/// memory location/cache related noise.
///
/// Returns true if a new test case was generated or false if we ran out of
/// test cases.
fn next_test_case(&mut self, out: &mut TimingLeakTestCase<Self::Data>) -> bool;
}
/// Function which runs some code being profiled once in the calling thread.
///
/// To avoid computations being pruned, this function should return the final
/// 'result' of the computation or some value that can only be determined after
/// the computation is done.
pub trait TimingLeakTestCaseRunner<T, R> = Fn(&T) -> Result<R>;
/// Test wrapper for executing some caller provided code to ensure that it
/// 'probably' executes in constant time regardless of which values are passed
/// to it.
///
/// We expect that the code should vary in execution time with the size of the
/// input data (number of bytes processed) but not with the contents of those
/// bytes.
///
/// This performs testing purely through black box methods so we can't necessary
/// fully gurantee that cache misses or branch mispredictions won't compromise
/// the security of the function.
///
/// The caller provides two things:
/// 1. A TimingLeakTestCaseGenerator which should create a wide range of inputs
/// to the code under test. There should be sufficient test cases to attempt to
/// make the code under test perform very quickly or slowly.
///
/// 2. A TimingLeakTestCaseRunner which takes a test case as input and runs the
/// code being profiled.
///
/// NOTE: This can not track any work performed on other threads.
pub struct TimingLeakTest<R, Gen, Runner> {
test_case_generator: Gen,
test_case_runner: Runner,
options: TimingLeakTestOptions,
r: PhantomData<R>,
}
pub struct TimingLeakTestOptions {
/// Number of separate sets of iterations we will run run the test case
/// runner for a single test case.
///
/// This must be a value that is at least 1. Setting a value > 1 will
/// 'measure' a single test case multiple times and will discard rounds that
/// are outliers (currently only the fasest round is considered).
pub num_rounds: usize,
/// Number of times the test case runner will be executed in a single round.
///
/// The total number of times it will be run is 'num_test_cases * num_rounds
/// * num_iterations'.
///
/// TODO: Automatically figure this out based on a target run time.
pub num_iterations: usize,
}
impl TimingLeakTest<(), (), ()> {
pub fn new_generator() -> TimingLeakTestBinaryGenericTestCaseGenerator {
TimingLeakTestBinaryGenericTestCaseGenerator::default()
}
}
impl<R, Gen: TimingLeakTestCaseGenerator, Runner: TimingLeakTestCaseRunner<Gen::Data, R>>
TimingLeakTest<R, Gen, Runner>
where
Gen::Data: Default,
{
pub fn new(
test_case_generator: Gen,
test_case_runner: Runner,
options: TimingLeakTestOptions,
) -> Self {
Self {
test_case_generator,
test_case_runner,
options,
r: PhantomData,
}
}
#[must_use]
pub fn run(&mut self) -> Result<()> {
let mut cycle_tracker = perf::CPUCycleTracker::create()?;
// Check how long it takes for us to just get the number of cycles executed.
let cycles_noise_floor = {
let mut floor = 0;
let mut last_cycles = cycle_tracker.total_cycles()?;
for _ in 0..10 {
let next_cycles = cycle_tracker.total_cycles()?;
floor = core::cmp::max(floor, (next_cycles - last_cycles));
last_cycles = next_cycles;
}
floor
};
let mut test_case_index = 0;
let mut time_stats = StatisticsTracker::new();
let mut cycle_stats = StatisticsTracker::new();
let mut test_case = TimingLeakTestCase::default();
while self.test_case_generator.next_test_case(&mut test_case) {
let mut case_time_stats = StatisticsTracker::new();
let mut case_cycle_stats = StatisticsTracker::new();
for _ in 0..self.options.num_rounds {
let start = Instant::now();
let start_cycles = cycle_tracker.total_cycles()?;
for _ in 0..self.options.num_iterations {
self.runner_wrap(&test_case.data);
}
let end_cycles = cycle_tracker.total_cycles()?;
let end = Instant::now();
let duration = end.duration_since(start);
let cycle_duration = end_cycles - start_cycles;
case_time_stats.update(duration);
case_cycle_stats.update(cycle_duration);
if cycle_duration < 100 * cycles_noise_floor {
return Err(format_err!(
"Cycle duration of {} too small relative to noise floor {}",
cycle_duration,
cycles_noise_floor
));
}
// If this is true, then most likely the test code was optimized out by the
// compiler.
if duration < Duration::from_millis(2) {
return Err(format_err!(
"Extremely short round execution time: {:?}",
duration
));
}
// println!(
// "Test case {}: {:?} : {}",
// test_case_index, duration, cycle_duration
// );
}
time_stats.update(case_time_stats.min.unwrap());
cycle_stats.update(case_cycle_stats.min.unwrap());
test_case_index += 1;
}
// TODO: Check that the min cycles time is much larger than the
let cycle_range = {
((cycle_stats.max.unwrap() - cycle_stats.min.unwrap()) as f64)
/ (cycle_stats.min.unwrap() as f64)
* 100.0
};
let time_range = {
let min = time_stats.min.unwrap().as_secs_f64();
let max = time_stats.max.unwrap().as_secs_f64();
(max - min) / min * 100.0
};
println!(
"- Fastest round: {:?} ({} cycles)",
time_stats.min.unwrap(),
cycle_stats.min.unwrap()
);
println!(
"- Fastest iteration: {:?}",
time_stats.min.unwrap() / (self.options.num_iterations as u32)
);
println!("- Cycles range: {:0.2}%", cycle_range);
println!("- Time range: {:0.2}%", time_range);
// Must have < 1% deviation across different test inputs.
if cycle_range > 1.0 {
return Err(format_err!(
"Cycle range between test cases too large: {:0.2}% > 1%",
cycle_range
));
}
Ok(())
}
/// Wrapper around the runner which can't be inlined to prevent
/// optimizations.
#[inline(never)]
fn runner_wrap(&self, test_case: &Gen::Data) -> Result<R> {
(self.test_case_runner)(test_case)
}
}
#[derive(Default)]
pub struct TimingLeakTestGenericTestCase {
inputs: Vec<Vec<u8>>,
}
impl TimingLeakTestGenericTestCase {
pub fn get_input(&self, idx: usize) -> &[u8] {
&self.inputs[idx]
}
}
#[derive(Default)]
pub struct TimingLeakTestBinaryGenericTestCaseGenerator {
inputs: Vec<Vec<Vec<u8>>>,
last_position: Option<Vec<usize>>,
}
impl TimingLeakTestBinaryGenericTestCaseGenerator {
pub fn add_input(&mut self, values: Vec<Vec<u8>>) -> usize {
let idx = self.inputs.len();
self.inputs.push(values);
idx
}
fn next_position(&self) -> Option<Vec<usize>> {
for i in &self.inputs {
assert!(i.len() > 0);
}
let mut cur = match self.last_position.clone() {
Some(v) => v,
None => return Some(vec![0; self.inputs.len()]),
};
for i in (0..cur.len()).rev() {
cur[i] += 1;
if cur[i] == self.inputs[i].len() {
cur[i] = 0;
} else {
return Some(cur);
}
}
None
}
}
impl TimingLeakTestCaseGenerator for TimingLeakTestBinaryGenericTestCaseGenerator {
type Data = TimingLeakTestGenericTestCase;
fn next_test_case(
&mut self,
out: &mut TimingLeakTestCase<TimingLeakTestGenericTestCase>,
) -> bool {
let pos = match self.next_position() {
Some(v) => v,
None => return false,
};
out.data.inputs.resize(pos.len(), vec![]);
for i in 0..pos.len() {
out.data.inputs[i].clear();
out.data.inputs[i].extend_from_slice(&self.inputs[i][pos[i]]);
}
self.last_position = Some(pos);
true
}
}
/// Generates synthetic data buffers whichare likely to trigger different edge
/// cases and time complexities in code that is sensitive (in terms of # of bits
/// set, magnitude, ...) to the value of the data passed it.
pub fn typical_boundary_buffers(length: usize) -> Vec<Vec<u8>> |
pub struct StatisticsTracker<T> {
min: Option<T>,
max: Option<T>,
}
impl<T: Ord + Copy> StatisticsTracker<T> {
pub fn new() -> Self {
Self {
min: None,
max: None,
}
}
pub fn update(&mut self, value: T) {
self.min = Some(match self.min {
Some(v) => core::cmp::min(v, value),
None => value,
});
self.max = Some(match self.max {
Some(v) => core::cmp::max(v, value),
None => value,
});
}
}
pub struct RollingMean {
sum: u64,
n: usize,
}
| {
let mut out = vec![];
// All zeros.
out.push(vec![0u8; length]);
// All 0xFF.
out.push(vec![0xFFu8; length]);
// First byte set to value #1
out.push({
let mut v = vec![0u8; length];
v[0] = 0xAB;
v
});
// First byte set to value #2
out.push({
let mut v = vec![0u8; length];
v[0] = 0xCD;
v
});
if length > 1 {
// Last byte set to value #1
out.push({
let mut v = vec![0u8; length];
*v.last_mut().unwrap() = 0x20;
v
});
// Last byte set to value #2
out.push({
let mut v = vec![0u8; length];
*v.last_mut().unwrap() = 0x03;
v
});
}
if length > 2 {
// Even bytes set.
out.push({
let mut v = vec![0u8; length];
for i in (0..v.len()).step_by(2) {
v[i] = i as u8
}
v
});
// Odd bytes set
out.push({
let mut v = vec![0u8; length];
for i in (1..v.len()).step_by(2) {
v[i] = i as u8
}
v
});
let mid_idx = length / 2;
// First half set
out.push({
let mut v = vec![0u8; length];
for i in 0..mid_idx {
v[i] = i as u8
}
v
});
// Second half set
out.push({
let mut v = vec![0u8; length];
for i in mid_idx..length {
v[i] = i as u8
}
v
});
}
let mut rng = crate::random::MersenneTwisterRng::mt19937();
rng.seed_u32(1234);
// A few random buffers.
for _ in 0..3 {
out.push({
let mut v = vec![0u8; length];
rng.generate_bytes(&mut v);
v
});
}
out
} | identifier_body |
test.rs | // Utilities for creating test cases.
use core::marker::PhantomData;
use std::time::{Duration, Instant};
use std::vec::Vec;
use common::errors::*;
use crate::random::Rng;
/*
To verify no timing leaks:
- Run any many different input sizes.
- Verify compiler optimizations aren't making looping over many iterations too fast
- Speed is linear w.r.t. num iterations.
- We can even experimentally determine if algorithms are linear, quadratic, etc.
- Compare average time between trials and make sure variance is lower than some constant.
TODO: Add test cases to verify that this can fail sometimes.
*/
#[derive(Default)]
pub struct TimingLeakTestCase<T> {
pub data: T,
pub size: usize,
}
/// Creates new test cases for the timing test.
pub trait TimingLeakTestCaseGenerator {
type Data;
/// Should generate a new test case and write it into 'out'.
/// - If any heap memory is involved, the new test case should re-use as
/// much of the memory buffers already in 'out' as possible to avoid any
/// memory location/cache related noise.
///
/// Returns true if a new test case was generated or false if we ran out of
/// test cases.
fn next_test_case(&mut self, out: &mut TimingLeakTestCase<Self::Data>) -> bool;
}
/// Function which runs some code being profiled once in the calling thread.
///
/// To avoid computations being pruned, this function should return the final
/// 'result' of the computation or some value that can only be determined after
/// the computation is done.
pub trait TimingLeakTestCaseRunner<T, R> = Fn(&T) -> Result<R>;
/// Test wrapper for executing some caller provided code to ensure that it
/// 'probably' executes in constant time regardless of which values are passed
/// to it.
///
/// We expect that the code should vary in execution time with the size of the
/// input data (number of bytes processed) but not with the contents of those
/// bytes.
///
/// This performs testing purely through black box methods so we can't necessary
/// fully gurantee that cache misses or branch mispredictions won't compromise
/// the security of the function.
///
/// The caller provides two things:
/// 1. A TimingLeakTestCaseGenerator which should create a wide range of inputs
/// to the code under test. There should be sufficient test cases to attempt to
/// make the code under test perform very quickly or slowly.
/// | /// code being profiled.
///
/// NOTE: This can not track any work performed on other threads.
pub struct TimingLeakTest<R, Gen, Runner> {
test_case_generator: Gen,
test_case_runner: Runner,
options: TimingLeakTestOptions,
r: PhantomData<R>,
}
pub struct TimingLeakTestOptions {
/// Number of separate sets of iterations we will run run the test case
/// runner for a single test case.
///
/// This must be a value that is at least 1. Setting a value > 1 will
/// 'measure' a single test case multiple times and will discard rounds that
/// are outliers (currently only the fasest round is considered).
pub num_rounds: usize,
/// Number of times the test case runner will be executed in a single round.
///
/// The total number of times it will be run is 'num_test_cases * num_rounds
/// * num_iterations'.
///
/// TODO: Automatically figure this out based on a target run time.
pub num_iterations: usize,
}
impl TimingLeakTest<(), (), ()> {
pub fn new_generator() -> TimingLeakTestBinaryGenericTestCaseGenerator {
TimingLeakTestBinaryGenericTestCaseGenerator::default()
}
}
impl<R, Gen: TimingLeakTestCaseGenerator, Runner: TimingLeakTestCaseRunner<Gen::Data, R>>
TimingLeakTest<R, Gen, Runner>
where
Gen::Data: Default,
{
pub fn new(
test_case_generator: Gen,
test_case_runner: Runner,
options: TimingLeakTestOptions,
) -> Self {
Self {
test_case_generator,
test_case_runner,
options,
r: PhantomData,
}
}
#[must_use]
pub fn run(&mut self) -> Result<()> {
let mut cycle_tracker = perf::CPUCycleTracker::create()?;
// Check how long it takes for us to just get the number of cycles executed.
let cycles_noise_floor = {
let mut floor = 0;
let mut last_cycles = cycle_tracker.total_cycles()?;
for _ in 0..10 {
let next_cycles = cycle_tracker.total_cycles()?;
floor = core::cmp::max(floor, (next_cycles - last_cycles));
last_cycles = next_cycles;
}
floor
};
let mut test_case_index = 0;
let mut time_stats = StatisticsTracker::new();
let mut cycle_stats = StatisticsTracker::new();
let mut test_case = TimingLeakTestCase::default();
while self.test_case_generator.next_test_case(&mut test_case) {
let mut case_time_stats = StatisticsTracker::new();
let mut case_cycle_stats = StatisticsTracker::new();
for _ in 0..self.options.num_rounds {
let start = Instant::now();
let start_cycles = cycle_tracker.total_cycles()?;
for _ in 0..self.options.num_iterations {
self.runner_wrap(&test_case.data);
}
let end_cycles = cycle_tracker.total_cycles()?;
let end = Instant::now();
let duration = end.duration_since(start);
let cycle_duration = end_cycles - start_cycles;
case_time_stats.update(duration);
case_cycle_stats.update(cycle_duration);
if cycle_duration < 100 * cycles_noise_floor {
return Err(format_err!(
"Cycle duration of {} too small relative to noise floor {}",
cycle_duration,
cycles_noise_floor
));
}
// If this is true, then most likely the test code was optimized out by the
// compiler.
if duration < Duration::from_millis(2) {
return Err(format_err!(
"Extremely short round execution time: {:?}",
duration
));
}
// println!(
// "Test case {}: {:?} : {}",
// test_case_index, duration, cycle_duration
// );
}
time_stats.update(case_time_stats.min.unwrap());
cycle_stats.update(case_cycle_stats.min.unwrap());
test_case_index += 1;
}
// TODO: Check that the min cycles time is much larger than the
let cycle_range = {
((cycle_stats.max.unwrap() - cycle_stats.min.unwrap()) as f64)
/ (cycle_stats.min.unwrap() as f64)
* 100.0
};
let time_range = {
let min = time_stats.min.unwrap().as_secs_f64();
let max = time_stats.max.unwrap().as_secs_f64();
(max - min) / min * 100.0
};
println!(
"- Fastest round: {:?} ({} cycles)",
time_stats.min.unwrap(),
cycle_stats.min.unwrap()
);
println!(
"- Fastest iteration: {:?}",
time_stats.min.unwrap() / (self.options.num_iterations as u32)
);
println!("- Cycles range: {:0.2}%", cycle_range);
println!("- Time range: {:0.2}%", time_range);
// Must have < 1% deviation across different test inputs.
if cycle_range > 1.0 {
return Err(format_err!(
"Cycle range between test cases too large: {:0.2}% > 1%",
cycle_range
));
}
Ok(())
}
/// Wrapper around the runner which can't be inlined to prevent
/// optimizations.
#[inline(never)]
fn runner_wrap(&self, test_case: &Gen::Data) -> Result<R> {
(self.test_case_runner)(test_case)
}
}
#[derive(Default)]
pub struct TimingLeakTestGenericTestCase {
inputs: Vec<Vec<u8>>,
}
impl TimingLeakTestGenericTestCase {
pub fn get_input(&self, idx: usize) -> &[u8] {
&self.inputs[idx]
}
}
#[derive(Default)]
pub struct TimingLeakTestBinaryGenericTestCaseGenerator {
inputs: Vec<Vec<Vec<u8>>>,
last_position: Option<Vec<usize>>,
}
impl TimingLeakTestBinaryGenericTestCaseGenerator {
pub fn add_input(&mut self, values: Vec<Vec<u8>>) -> usize {
let idx = self.inputs.len();
self.inputs.push(values);
idx
}
fn next_position(&self) -> Option<Vec<usize>> {
for i in &self.inputs {
assert!(i.len() > 0);
}
let mut cur = match self.last_position.clone() {
Some(v) => v,
None => return Some(vec![0; self.inputs.len()]),
};
for i in (0..cur.len()).rev() {
cur[i] += 1;
if cur[i] == self.inputs[i].len() {
cur[i] = 0;
} else {
return Some(cur);
}
}
None
}
}
impl TimingLeakTestCaseGenerator for TimingLeakTestBinaryGenericTestCaseGenerator {
type Data = TimingLeakTestGenericTestCase;
fn next_test_case(
&mut self,
out: &mut TimingLeakTestCase<TimingLeakTestGenericTestCase>,
) -> bool {
let pos = match self.next_position() {
Some(v) => v,
None => return false,
};
out.data.inputs.resize(pos.len(), vec![]);
for i in 0..pos.len() {
out.data.inputs[i].clear();
out.data.inputs[i].extend_from_slice(&self.inputs[i][pos[i]]);
}
self.last_position = Some(pos);
true
}
}
/// Generates synthetic data buffers whichare likely to trigger different edge
/// cases and time complexities in code that is sensitive (in terms of # of bits
/// set, magnitude, ...) to the value of the data passed it.
pub fn typical_boundary_buffers(length: usize) -> Vec<Vec<u8>> {
let mut out = vec![];
// All zeros.
out.push(vec![0u8; length]);
// All 0xFF.
out.push(vec![0xFFu8; length]);
// First byte set to value #1
out.push({
let mut v = vec![0u8; length];
v[0] = 0xAB;
v
});
// First byte set to value #2
out.push({
let mut v = vec![0u8; length];
v[0] = 0xCD;
v
});
if length > 1 {
// Last byte set to value #1
out.push({
let mut v = vec![0u8; length];
*v.last_mut().unwrap() = 0x20;
v
});
// Last byte set to value #2
out.push({
let mut v = vec![0u8; length];
*v.last_mut().unwrap() = 0x03;
v
});
}
if length > 2 {
// Even bytes set.
out.push({
let mut v = vec![0u8; length];
for i in (0..v.len()).step_by(2) {
v[i] = i as u8
}
v
});
// Odd bytes set
out.push({
let mut v = vec![0u8; length];
for i in (1..v.len()).step_by(2) {
v[i] = i as u8
}
v
});
let mid_idx = length / 2;
// First half set
out.push({
let mut v = vec![0u8; length];
for i in 0..mid_idx {
v[i] = i as u8
}
v
});
// Second half set
out.push({
let mut v = vec![0u8; length];
for i in mid_idx..length {
v[i] = i as u8
}
v
});
}
let mut rng = crate::random::MersenneTwisterRng::mt19937();
rng.seed_u32(1234);
// A few random buffers.
for _ in 0..3 {
out.push({
let mut v = vec![0u8; length];
rng.generate_bytes(&mut v);
v
});
}
out
}
pub struct StatisticsTracker<T> {
min: Option<T>,
max: Option<T>,
}
impl<T: Ord + Copy> StatisticsTracker<T> {
pub fn new() -> Self {
Self {
min: None,
max: None,
}
}
pub fn update(&mut self, value: T) {
self.min = Some(match self.min {
Some(v) => core::cmp::min(v, value),
None => value,
});
self.max = Some(match self.max {
Some(v) => core::cmp::max(v, value),
None => value,
});
}
}
pub struct RollingMean {
sum: u64,
n: usize,
} | /// 2. A TimingLeakTestCaseRunner which takes a test case as input and runs the | random_line_split |
test.rs | // Utilities for creating test cases.
use core::marker::PhantomData;
use std::time::{Duration, Instant};
use std::vec::Vec;
use common::errors::*;
use crate::random::Rng;
/*
To verify no timing leaks:
- Run any many different input sizes.
- Verify compiler optimizations aren't making looping over many iterations too fast
- Speed is linear w.r.t. num iterations.
- We can even experimentally determine if algorithms are linear, quadratic, etc.
- Compare average time between trials and make sure variance is lower than some constant.
TODO: Add test cases to verify that this can fail sometimes.
*/
#[derive(Default)]
pub struct TimingLeakTestCase<T> {
pub data: T,
pub size: usize,
}
/// Creates new test cases for the timing test.
pub trait TimingLeakTestCaseGenerator {
type Data;
/// Should generate a new test case and write it into 'out'.
/// - If any heap memory is involved, the new test case should re-use as
/// much of the memory buffers already in 'out' as possible to avoid any
/// memory location/cache related noise.
///
/// Returns true if a new test case was generated or false if we ran out of
/// test cases.
fn next_test_case(&mut self, out: &mut TimingLeakTestCase<Self::Data>) -> bool;
}
/// Function which runs some code being profiled once in the calling thread.
///
/// To avoid computations being pruned, this function should return the final
/// 'result' of the computation or some value that can only be determined after
/// the computation is done.
pub trait TimingLeakTestCaseRunner<T, R> = Fn(&T) -> Result<R>;
/// Test wrapper for executing some caller provided code to ensure that it
/// 'probably' executes in constant time regardless of which values are passed
/// to it.
///
/// We expect that the code should vary in execution time with the size of the
/// input data (number of bytes processed) but not with the contents of those
/// bytes.
///
/// This performs testing purely through black box methods so we can't necessary
/// fully gurantee that cache misses or branch mispredictions won't compromise
/// the security of the function.
///
/// The caller provides two things:
/// 1. A TimingLeakTestCaseGenerator which should create a wide range of inputs
/// to the code under test. There should be sufficient test cases to attempt to
/// make the code under test perform very quickly or slowly.
///
/// 2. A TimingLeakTestCaseRunner which takes a test case as input and runs the
/// code being profiled.
///
/// NOTE: This can not track any work performed on other threads.
pub struct TimingLeakTest<R, Gen, Runner> {
test_case_generator: Gen,
test_case_runner: Runner,
options: TimingLeakTestOptions,
r: PhantomData<R>,
}
pub struct TimingLeakTestOptions {
/// Number of separate sets of iterations we will run run the test case
/// runner for a single test case.
///
/// This must be a value that is at least 1. Setting a value > 1 will
/// 'measure' a single test case multiple times and will discard rounds that
/// are outliers (currently only the fasest round is considered).
pub num_rounds: usize,
/// Number of times the test case runner will be executed in a single round.
///
/// The total number of times it will be run is 'num_test_cases * num_rounds
/// * num_iterations'.
///
/// TODO: Automatically figure this out based on a target run time.
pub num_iterations: usize,
}
impl TimingLeakTest<(), (), ()> {
pub fn new_generator() -> TimingLeakTestBinaryGenericTestCaseGenerator {
TimingLeakTestBinaryGenericTestCaseGenerator::default()
}
}
impl<R, Gen: TimingLeakTestCaseGenerator, Runner: TimingLeakTestCaseRunner<Gen::Data, R>>
TimingLeakTest<R, Gen, Runner>
where
Gen::Data: Default,
{
pub fn new(
test_case_generator: Gen,
test_case_runner: Runner,
options: TimingLeakTestOptions,
) -> Self {
Self {
test_case_generator,
test_case_runner,
options,
r: PhantomData,
}
}
#[must_use]
pub fn run(&mut self) -> Result<()> {
let mut cycle_tracker = perf::CPUCycleTracker::create()?;
// Check how long it takes for us to just get the number of cycles executed.
let cycles_noise_floor = {
let mut floor = 0;
let mut last_cycles = cycle_tracker.total_cycles()?;
for _ in 0..10 {
let next_cycles = cycle_tracker.total_cycles()?;
floor = core::cmp::max(floor, (next_cycles - last_cycles));
last_cycles = next_cycles;
}
floor
};
let mut test_case_index = 0;
let mut time_stats = StatisticsTracker::new();
let mut cycle_stats = StatisticsTracker::new();
let mut test_case = TimingLeakTestCase::default();
while self.test_case_generator.next_test_case(&mut test_case) {
let mut case_time_stats = StatisticsTracker::new();
let mut case_cycle_stats = StatisticsTracker::new();
for _ in 0..self.options.num_rounds {
let start = Instant::now();
let start_cycles = cycle_tracker.total_cycles()?;
for _ in 0..self.options.num_iterations {
self.runner_wrap(&test_case.data);
}
let end_cycles = cycle_tracker.total_cycles()?;
let end = Instant::now();
let duration = end.duration_since(start);
let cycle_duration = end_cycles - start_cycles;
case_time_stats.update(duration);
case_cycle_stats.update(cycle_duration);
if cycle_duration < 100 * cycles_noise_floor {
return Err(format_err!(
"Cycle duration of {} too small relative to noise floor {}",
cycle_duration,
cycles_noise_floor
));
}
// If this is true, then most likely the test code was optimized out by the
// compiler.
if duration < Duration::from_millis(2) {
return Err(format_err!(
"Extremely short round execution time: {:?}",
duration
));
}
// println!(
// "Test case {}: {:?} : {}",
// test_case_index, duration, cycle_duration
// );
}
time_stats.update(case_time_stats.min.unwrap());
cycle_stats.update(case_cycle_stats.min.unwrap());
test_case_index += 1;
}
// TODO: Check that the min cycles time is much larger than the
let cycle_range = {
((cycle_stats.max.unwrap() - cycle_stats.min.unwrap()) as f64)
/ (cycle_stats.min.unwrap() as f64)
* 100.0
};
let time_range = {
let min = time_stats.min.unwrap().as_secs_f64();
let max = time_stats.max.unwrap().as_secs_f64();
(max - min) / min * 100.0
};
println!(
"- Fastest round: {:?} ({} cycles)",
time_stats.min.unwrap(),
cycle_stats.min.unwrap()
);
println!(
"- Fastest iteration: {:?}",
time_stats.min.unwrap() / (self.options.num_iterations as u32)
);
println!("- Cycles range: {:0.2}%", cycle_range);
println!("- Time range: {:0.2}%", time_range);
// Must have < 1% deviation across different test inputs.
if cycle_range > 1.0 {
return Err(format_err!(
"Cycle range between test cases too large: {:0.2}% > 1%",
cycle_range
));
}
Ok(())
}
/// Wrapper around the runner which can't be inlined to prevent
/// optimizations.
#[inline(never)]
fn runner_wrap(&self, test_case: &Gen::Data) -> Result<R> {
(self.test_case_runner)(test_case)
}
}
#[derive(Default)]
pub struct TimingLeakTestGenericTestCase {
inputs: Vec<Vec<u8>>,
}
impl TimingLeakTestGenericTestCase {
pub fn get_input(&self, idx: usize) -> &[u8] {
&self.inputs[idx]
}
}
#[derive(Default)]
pub struct TimingLeakTestBinaryGenericTestCaseGenerator {
inputs: Vec<Vec<Vec<u8>>>,
last_position: Option<Vec<usize>>,
}
impl TimingLeakTestBinaryGenericTestCaseGenerator {
pub fn add_input(&mut self, values: Vec<Vec<u8>>) -> usize {
let idx = self.inputs.len();
self.inputs.push(values);
idx
}
fn next_position(&self) -> Option<Vec<usize>> {
for i in &self.inputs {
assert!(i.len() > 0);
}
let mut cur = match self.last_position.clone() {
Some(v) => v,
None => return Some(vec![0; self.inputs.len()]),
};
for i in (0..cur.len()).rev() {
cur[i] += 1;
if cur[i] == self.inputs[i].len() {
cur[i] = 0;
} else {
return Some(cur);
}
}
None
}
}
impl TimingLeakTestCaseGenerator for TimingLeakTestBinaryGenericTestCaseGenerator {
type Data = TimingLeakTestGenericTestCase;
fn next_test_case(
&mut self,
out: &mut TimingLeakTestCase<TimingLeakTestGenericTestCase>,
) -> bool {
let pos = match self.next_position() {
Some(v) => v,
None => return false,
};
out.data.inputs.resize(pos.len(), vec![]);
for i in 0..pos.len() {
out.data.inputs[i].clear();
out.data.inputs[i].extend_from_slice(&self.inputs[i][pos[i]]);
}
self.last_position = Some(pos);
true
}
}
/// Generates synthetic data buffers whichare likely to trigger different edge
/// cases and time complexities in code that is sensitive (in terms of # of bits
/// set, magnitude, ...) to the value of the data passed it.
pub fn typical_boundary_buffers(length: usize) -> Vec<Vec<u8>> {
let mut out = vec![];
// All zeros.
out.push(vec![0u8; length]);
// All 0xFF.
out.push(vec![0xFFu8; length]);
// First byte set to value #1
out.push({
let mut v = vec![0u8; length];
v[0] = 0xAB;
v
});
// First byte set to value #2
out.push({
let mut v = vec![0u8; length];
v[0] = 0xCD;
v
});
if length > 1 {
// Last byte set to value #1
out.push({
let mut v = vec![0u8; length];
*v.last_mut().unwrap() = 0x20;
v
});
// Last byte set to value #2
out.push({
let mut v = vec![0u8; length];
*v.last_mut().unwrap() = 0x03;
v
});
}
if length > 2 {
// Even bytes set.
out.push({
let mut v = vec![0u8; length];
for i in (0..v.len()).step_by(2) {
v[i] = i as u8
}
v
});
// Odd bytes set
out.push({
let mut v = vec![0u8; length];
for i in (1..v.len()).step_by(2) {
v[i] = i as u8
}
v
});
let mid_idx = length / 2;
// First half set
out.push({
let mut v = vec![0u8; length];
for i in 0..mid_idx {
v[i] = i as u8
}
v
});
// Second half set
out.push({
let mut v = vec![0u8; length];
for i in mid_idx..length {
v[i] = i as u8
}
v
});
}
let mut rng = crate::random::MersenneTwisterRng::mt19937();
rng.seed_u32(1234);
// A few random buffers.
for _ in 0..3 {
out.push({
let mut v = vec![0u8; length];
rng.generate_bytes(&mut v);
v
});
}
out
}
pub struct StatisticsTracker<T> {
min: Option<T>,
max: Option<T>,
}
impl<T: Ord + Copy> StatisticsTracker<T> {
pub fn new() -> Self {
Self {
min: None,
max: None,
}
}
pub fn update(&mut self, value: T) {
self.min = Some(match self.min {
Some(v) => core::cmp::min(v, value),
None => value,
});
self.max = Some(match self.max {
Some(v) => core::cmp::max(v, value),
None => value,
});
}
}
pub struct | {
sum: u64,
n: usize,
}
| RollingMean | identifier_name |
registry-manager.ts | /*
* Copyright 2014-2019 Guy Bedford (http://guybedford.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import rimraf = require('rimraf');
import { Semver, SemverRange } from 'sver';
import fs = require('graceful-fs');
import path = require('path');
import mkdirp = require('mkdirp');
import { PackageName, ExactPackage, PackageConfig, ProcessedPackageConfig, parsePackageName,
processPackageConfig, overridePackageConfig, serializePackageConfig } from './package';
import { readJSON, JspmError, JspmUserError, sha256, md5, encodeInvalidFileChars, bold, isWindows,
winSepRegEx, highlight, underline, hasProperties } from '../utils/common';
import { readJSONStyled, writeJSONStyled, defaultStyle } from '../config/config-file';
import Cache from '../utils/cache';
import globalConfig from '../config/global-config-file';
import { Logger, input, confirm } from '../project';
import { resolveSource, downloadSource } from '../install/source';
import FetchClass, { Fetch, GetCredentials, Credentials } from './fetch';
import { convertCJSPackage, convertCJSConfig } from '../compile/cjs-convert';
import { runBinaryBuild } from './binary-build';
import { Readable } from 'stream';
const VerifyState = {
NOT_INSTALLED: 0,
INVALID: 1,
HASH_VALID: 2,
VERIFIED_VALID: 3
};
export interface LookupData {
meta: any,
redirect?: string,
versions?: {
[name: string]: {
resolved: Resolved | void,
meta?: any
}
}
}
export interface Resolved {
version?: string,
source?: string,
override?: PackageConfig,
deprecated?: string
}
export interface SourceInfo {
source: string,
opts: any
}
export interface PublishOptions {
tag?: string;
access?: string;
otp?: string;
}
export interface RegistryEndpoint {
configure?: () => Promise<void>;
dispose: () => Promise<void>;
auth: (url: URL, method: string, credentials: Credentials, unauthorizedHeaders?: Record<string, string>) => Promise<void | boolean>;
lookup: (pkgName: string, versionRange: SemverRange, lookup: LookupData) => Promise<void | boolean>;
resolve?: (pkgName: string, version: string, lookup: LookupData) => Promise<void | boolean>;
publish?: (packagePath: string, pjson: any, tarStream: Readable, options: PublishOptions) => Promise<void>;
}
export interface RegistryEndpointConstructable {
new(utils: EndpointUtils, config: any): RegistryEndpoint;
}
export interface EndpointUtils {
encodeVersion: (version: string) => string;
JspmUserError: typeof JspmUserError;
log: Logger;
input: input;
confirm: confirm;
bold: typeof bold;
highlight: typeof highlight;
underline: typeof underline;
globalConfig: typeof globalConfig;
fetch: Fetch;
getCredentials: GetCredentials;
}
export interface Registry {
handler: RegistryEndpointConstructable | string;
config: any;
}
export interface ConstructorOptions {
cacheDir: string,
timeouts: {
resolve: number,
download: number
},
defaultRegistry: string,
log: Logger,
input: input,
confirm: confirm,
Cache: typeof Cache,
userInput: boolean,
offline: boolean,
preferOffline: boolean,
strictSSL: boolean,
fetch: FetchClass,
registries: {[name: string]: Registry}
}
export default class RegistryManager {
userInput: boolean;
offline: boolean;
preferOffline: boolean;
timeouts: { resolve: number, download: number };
cacheDir: string;
defaultRegistry: string;
// getEndpoint: (string) => { endpoint: RegistryEndpoint, cache: Cache };
cache: Cache;
verifiedCache: {
[hash: string]: number
};
endpoints: Map<string,{ endpoint: RegistryEndpoint, cache: Cache }>;
util: EndpointUtils;
instanceId: number;
strictSSL: boolean;
fetch: FetchClass;
registries: {[name: string]: Registry};
constructor ({ cacheDir, timeouts, Cache, userInput, offline, preferOffline, strictSSL, defaultRegistry, log, input, confirm, fetch, registries }: ConstructorOptions) {
this.userInput = userInput;
this.offline = offline;
this.preferOffline = preferOffline;
this.cacheDir = cacheDir;
this.strictSSL = strictSSL;
this.timeouts = timeouts;
this.defaultRegistry = defaultRegistry;
this.instanceId = Math.round(Math.random() * 10**10);
this.registries = registries;
this.util = {
encodeVersion: encodeInvalidFileChars,
JspmUserError,
log,
input,
confirm,
bold,
highlight,
underline,
globalConfig,
fetch: fetch.fetch.bind(fetch),
getCredentials: fetch.getCredentials.bind(fetch)
};
this.fetch = fetch;
// note which installs have been verified in this session
// so we only have to perform verification once per package
this.verifiedCache = {};
this.endpoints = new Map<string, { endpoint: RegistryEndpoint, cache: Cache }>();
this.cache = new Cache(path.resolve(cacheDir, 'pcfg'));
mkdirp.sync(path.resolve(cacheDir, 'packages'));
}
loadEndpoints() {
Object.keys(this.registries).forEach((registryName) => {
if (registryName === 'jspm')
return;
try {
this.getEndpoint(registryName);
}
catch (err) {
if (err && err.code === 'REGISTRY_NOT_FOUND')
this.util.log.warn(err.message.substr(err.message.indexOf('\n')).trim());
else
throw err;
}
});
}
getEndpoint (name) {
let endpointEntry = this.endpoints.get(name);
if (endpointEntry)
return endpointEntry;
// config returned by config get is a new object owned by us
const registry = this.registries[name];
const config = registry.config;
if (config.strictSSL !== 'boolean')
config.strictSSL = this.strictSSL;
config.timeout = this.timeouts.resolve;
config.userInput = this.userInput;
config.offline = this.offline;
config.preferOffline = this.preferOffline;
let EndpointConstructor: RegistryEndpointConstructable;
if (typeof registry.handler === "string") {
try {
EndpointConstructor = require(registry.handler) as RegistryEndpointConstructable;
}
catch (e) {
if (e && e.code === 'MODULE_NOT_FOUND') {
if (e.message && e.message.indexOf(registry.handler) !== -1) {
this.util.log.warn(`Registry module '${registry.handler}' not found loading package ${bold(name)}.
This may be from a previous jspm version and can be removed with ${bold(`jspm config --unset registries.${name}`)}.`);
return;
}
else {
throw new JspmError(`Error loading registry ${bold(name)} from module '${registry.handler}'.`, 'REGISTRY_LOAD_ERROR', e);
}
}
else {
throw e;
}
}
} else {
EndpointConstructor = registry.handler;
}
const endpoint = new EndpointConstructor(this.util, config);
const cache = new Cache(path.resolve(this.cacheDir, 'registry_cache', name));
endpointEntry = { endpoint, cache };
this.endpoints.set(name, endpointEntry);
return endpointEntry;
}
dispose () {
return Promise.all(Array.from(this.endpoints.values()).map(entry => entry.endpoint.dispose ? entry.endpoint.dispose() : undefined));
}
async configure (registryName: string) {
const { endpoint } = this.getEndpoint(registryName);
if (!endpoint.configure)
throw new JspmUserError(`The ${registryName} registry doesn't have any configuration hook.`);
await endpoint.configure();
}
async auth (url: URL, method: string, credentials: Credentials, unauthorizedHeaders?: Record<string, string>): Promise<string> {
for (let [registry, { endpoint }] of this.endpoints.entries()) {
if (!endpoint.auth)
continue;
if (await endpoint.auth(url, method, credentials, unauthorizedHeaders))
return registry;
}
return undefined;
}
async resolve (pkg: PackageName, override: ProcessedPackageConfig | void, edge = false): Promise<{
pkg: ExactPackage,
target: PackageName,
source: string,
override: ProcessedPackageConfig | void,
deprecated: string
}> {
let registry = pkg.registry || this.defaultRegistry;
let { endpoint, cache } = this.getEndpoint(registry);
let resolveRange = new SemverRange(pkg.version || '*');
let lookup: LookupData, resolvedVersion: string, redirect: string, resolved: Resolved;
try {
// loop over any redirects
while (true) {
lookup = await cache.getUnlocked(pkg.name, this.timeouts.resolve);
if (resolveRange.isExact) {
resolvedVersion = resolveRange.version.toString();
lookup = lookup || { versions: {}, meta: {} };
}
else if (lookup && (this.offline || this.preferOffline)) {
if (lookup.redirect) {
redirect = lookup.redirect;
}
else {
let versionList = Object.keys(lookup.versions);
resolvedVersion = resolveRange.bestMatch(versionList, edge);
if (resolvedVersion === undefined && edge === false)
resolvedVersion = resolveRange.bestMatch(versionList, true);
if (resolvedVersion !== undefined)
resolvedVersion = resolvedVersion.toString();
}
}
if (resolvedVersion === undefined && redirect === undefined) {
// no resolution available offline
if (this.offline)
return;
const unlock = await cache.lock(pkg.name, this.timeouts.resolve);
try {
// cache could have been written while we were getting the lock, although don't bother rechecking resolved as small benefit
lookup = await cache.get(pkg.name) || { versions: {}, meta: {} };
const logEnd = this.util.log.taskStart(`Looking up ${this.util.highlight(pkg.name)}`);
let changed;
try {
changed = await endpoint.lookup(pkg.name, resolveRange, lookup);
}
finally {
logEnd();
}
logEnd();
if (changed && hasProperties(lookup.versions))
cache.setUnlock(pkg.name, lookup).catch(() => {});
else
unlock().catch(() => {});
}
catch (e) {
unlock().catch(() => {});
throw e;
}
}
if (lookup.redirect)
redirect = lookup.redirect;
if (redirect) {
var redirects = redirects || [];
redirects.push(redirect);
if (redirects.indexOf(redirect) !== redirects.length - 1)
throw new JspmUserError(`Circular redirect during lookup - ${redirects.join(' -> ')}.`);
// loop while redirecting
let redirectPkg = parsePackageName(redirect);
pkg = redirectPkg;
({ endpoint, cache } = this.getEndpoint(registry = pkg.registry));
}
else {
if (resolvedVersion === undefined) {
const versionList = Object.keys(lookup.versions);
resolvedVersion = resolveRange.bestMatch(versionList, edge);
if (resolvedVersion === undefined && edge === false)
resolvedVersion = resolveRange.bestMatch(versionList, true);
if (resolvedVersion !== undefined)
resolvedVersion = resolvedVersion.toString();
}
// 404
if (!resolvedVersion)
return;
let version = lookup.versions[resolvedVersion];
if ((this.preferOffline || this.offline) && version && version.resolved) {
resolved = version.resolved;
}
else {
if (this.offline)
return;
// this could result in a cache change... but it's local so we don't lock before we start
const logEnd = this.util.log.taskStart(`Resolving ${this.util.highlight(`${pkg.name}@${resolvedVersion}`)}`);
let changed;
try {
changed = await endpoint.resolve(pkg.name, resolvedVersion, lookup);
}
finally {
logEnd();
}
if (changed) {
version = lookup.versions[resolvedVersion];
if (!version)
return;
resolved = <Resolved>version.resolved;
// cache update individual resolve
(async () => {
const unlock = await cache.lock(pkg.name, this.timeouts.resolve);
await cache.set(pkg.name, lookup);
return unlock();
})().catch(() => {});
}
else if (!version) {
return;
}
else {
resolved = <Resolved>version.resolved;
}
if (!resolved)
throw new Error(`jspm registry endpoint for ${bold(registry)} did not properly resolve ${highlight(pkg.name)}.`);
}
break;
}
}
}
catch (e) {
if (redirects)
e.redirects = redirects;
throw e;
}
let resolvedOverride;
if (resolved.override) {
resolvedOverride = processPackageConfig(resolved.override, true, override && override.registry);
if (override)
({ config: override } = overridePackageConfig(resolvedOverride, override));
else
override = resolvedOverride;
}
return {
pkg: <ExactPackage>{
registry,
name: pkg.name,
version: resolved.version || resolvedVersion,
semver: new Semver(resolvedVersion)
},
target: redirects ? <PackageName>{
registry,
name: pkg.name,
version: pkg.version
} : pkg,
source: resolved.source,
override,
deprecated: resolved.deprecated
};
}
async resolveSource (source: string, packagePath: string, projectPath: string): Promise<string> {
if (source.startsWith('link:') || source.startsWith('file:') || source.startsWith('git+file:')) {
let sourceProtocol = source.substr(0, source[0] === 'g' ? 9 : 5);
let sourcePath = path.resolve(source.substr(source[0] === 'g' ? 9 : 5));
// relative file path installs that are not for the top-level project are relative to their package real path
if (packagePath !== process.cwd()) {
if ((isWindows && (source[0] === '/' || source[0] === '\\')) ||
sourcePath[0] === '.' && (sourcePath[1] === '/' || sourcePath[1] === '\\' || (
sourcePath[1] === '.' && (sourcePath[2] === '/' || sourcePath[2] === '\\')))) {
const realPackagePath = await new Promise<string>((resolve, reject) => fs.realpath(packagePath, (err, realpath) => err ? reject(err) : resolve(realpath)));
sourcePath = path.resolve(realPackagePath, sourcePath);
}
}
// if a file: install and it is a directory then it is a link: install
if (source.startsWith('file:')) {
try {
const stats = fs.statSync(sourcePath);
if (stats.isDirectory())
sourceProtocol = 'link:';
}
catch (e) {
if (e && e.code === 'ENOENT')
throw new JspmUserError(`Path ${sourcePath} is not a valid file or directory.`);
throw e;
}
}
sourcePath = path.relative(projectPath, sourcePath) + '/';
if (isWindows)
sourcePath = sourcePath.replace(winSepRegEx, '/');
source = sourceProtocol + sourcePath;
}
if (this.offline)
return source;
return resolveSource(this.util.log, this.fetch, source, this.timeouts.resolve);
}
async verifyInstallDir (dir: string, verifyHash: string, fullVerification: boolean): Promise<number> {
const cachedState = this.verifiedCache[verifyHash];
if (cachedState !== undefined && (cachedState !== VerifyState.HASH_VALID || !fullVerification))
return cachedState;
const installFile = path.resolve(dir, '.jspm');
const jspmJson = await readJSON(installFile);
if (!jspmJson)
return this.verifiedCache[verifyHash] = VerifyState.NOT_INSTALLED;
if (typeof jspmJson.mtime !== 'number' || jspmJson.hash !== verifyHash)
return this.verifiedCache[verifyHash] = VerifyState.INVALID;
// if not doing full verification for perf, stop here
if (!fullVerification)
return this.verifiedCache[verifyHash] = VerifyState.HASH_VALID;
// mtime check (skipping .jspm file)
let failure = false;
await dirWalk(dir, async (filePath, stats) => {
if (filePath === installFile)
return;
if (stats.mtimeMs > jspmJson.mtime) {
failure = true;
return true;
}
});
return this.verifiedCache[verifyHash] = failure ? VerifyState.INVALID : VerifyState.VERIFIED_VALID;
/*
let fileHashes = await Promise.all(fileList.map(getFileHash));
let installedDirHash = sha256(fileHashes.sort().join(''));
// hash match -> update the mtime in the install file so we dont check next time
if (installedDirHash === dirHash) {
await new Promise((resolve, reject) => {
fs.writeFile(installFile, mtime + '\n' + hash + '\n' + dirHash, err => err ? reject(err) : resolve())
});
return true;
}*/
}
// on verification failure, we remove the directory and redownload
// moving to a tmp location can be done during the verificationFailure call, to diff and determine route forward
// if deciding to checkout, "ensureInstall" operation is cancelled by returning true
// build support will be added to build into a newly prefixed folder, with build as a boolean argument
async ensureInstall (source: string, override: ProcessedPackageConfig | void, verificationFailure: (dir: string) => Promise<boolean>, fullVerification: boolean = false): Promise<{
config: ProcessedPackageConfig,
override: ProcessedPackageConfig | void,
dir: string,
hash: string,
changed: boolean
}> {
let sourceHash = sha256(source);
var { config = undefined, hash = undefined }: { config: ProcessedPackageConfig, hash: string }
= await this.cache.getUnlocked(sourceHash, this.timeouts.download) || {};
if (config) {
config = processPackageConfig(<any>config);
if (override) {
({ config, override } = overridePackageConfig(config, override));
hash = sourceHash + (override ? md5(JSON.stringify(override)) : '');
}
convertCJSConfig(config);
var dir = path.join(this.cacheDir, 'packages', hash);
const verifyState = await this.verifyInstallDir(dir, hash, fullVerification);
if (verifyState > VerifyState.INVALID)
return { config, override, dir, hash, changed: false };
else if (verifyState !== VerifyState.NOT_INSTALLED && await verificationFailure(dir))
return;
}
if (this.offline)
throw new JspmUserError(`Package is not available for offline install.`);
let unlock = await this.cache.lock(sourceHash, this.timeouts.download);
try {
// could have been a write while we were getting the lock
if (!config) {
var { config = undefined, hash = undefined }: {
config: ProcessedPackageConfig,
hash: string
} = await this.cache.get(sourceHash) || {};
if (config) {
config = processPackageConfig(<any>config);
if (override) {
({ config, override } = overridePackageConfig(config, override));
hash = sourceHash + (override ? md5(JSON.stringify(override)) : '');
}
convertCJSConfig(config);
var dir = path.join(this.cacheDir, 'packages', hash);
const verifyState = await this.verifyInstallDir(dir, hash, fullVerification);
if (verifyState > VerifyState.INVALID)
return { config, override, dir, hash, changed: false };
else if (verifyState !== VerifyState.NOT_INSTALLED && await verificationFailure(dir))
return;
}
}
// if we dont know the config then we dont know the canonical override (and hence hash)
// so we download to a temporary folder first
if (!config)
dir = path.join(this.cacheDir, 'tmp', sha256(Math.random().toString()));
await new Promise((resolve, reject) => rimraf(dir, err => err ? reject(err) : resolve()));
await new Promise((resolve, reject) => mkdirp(dir, err => err ? reject(err) : resolve()));
if (this.offline)
throw new JspmUserError(`Source ${source} is not available offline.`);
// if source is linked, can return the linked dir directly
await downloadSource(this.util.log, this.fetch, source, dir, this.timeouts.download);
const logEnd = this.util.log.taskStart('Finalizing ' + highlight(source));
try {
let pjsonPath = path.resolve(dir, 'package.json');
let { json: pjson, style } = await readJSONStyled(pjsonPath);
if (!pjson)
pjson = {};
if (!config) {
let pjsonConfig = processPackageConfig(pjson);
const serializedConfig = serializePackageConfig(pjsonConfig, this.defaultRegistry);
if (override)
({ config, override } = overridePackageConfig(pjsonConfig, override));
else
config = pjsonConfig;
convertCJSConfig(config);
hash = sourceHash + (override ? md5(JSON.stringify(override)) : '');
await Promise.all([
this.cache.set(sourceHash, { config: serializedConfig, hash }),
// move the tmp folder to the known hash now
(async () => {
const toDir = path.join(this.cacheDir, 'packages', hash);
await new Promise((resolve, reject) => rimraf(toDir, err => err ? reject(err) : resolve()));
await new Promise((resolve, reject) => {
fs.rename(dir, dir = toDir, err => err ? reject(err) : resolve());
});
})()
]);
pjsonPath = path.resolve(dir, 'package.json');
}
await writeJSONStyled(pjsonPath, Object.assign(pjson, serializePackageConfig(config)), style || defaultStyle);
await runBinaryBuild(this.util.log, dir, pjson.name, pjson.scripts);
// run package conversion
// (on any subfolder containing a "type": "commonjs")
await convertCJSPackage(this.util.log, dir, config.name, config, this.defaultRegistry);
var mtime = await new Promise((resolve, reject) => fs.stat(pjsonPath, (err, stats) => err ? reject(err) : resolve(stats.mtimeMs)));
// todo: diffs for invalid?
// const fileHashes = await calculateFileHashes(dir);
// will be useful for avoiding mistaken mtime bumps when viewing
await new Promise((resolve, reject) => {
fs.writeFile(path.join(dir, '.jspm'), JSON.stringify({ mtime, hash }), err => err ? reject(err) : resolve())
});
this.verifiedCache[hash] = VerifyState.VERIFIED_VALID;
return { config, override, dir, hash, changed: true };
}
finally {
logEnd();
}
}
finally {
unlock();
}
}
async publish (packagePath: string, registry: string, pjson: any, tarStream: Readable, opts: PublishOptions) {
const { endpoint } = this.getEndpoint(registry);
if (!endpoint.publish)
throw new JspmUserError(`Registry ${highlight(pjson.registry)} does not support publishing.`);
const logEnd = this.util.log.taskStart(`Publishing ${this.util.highlight(`${registry}:${pjson.name}@${pjson.version}`)}`);
try {
await endpoint.publish(packagePath, pjson, tarStream, opts);
}
finally {
logEnd();
}
}
}
function dirWalk (dir: string, visit: (filePath: string, stats, files?: string[]) => void | boolean | Promise<void | boolean>) {
return new Promise((resolve, reject) => {
let errored = false;
let cnt = 0;
visitFileOrDir(path.resolve(dir));
function handleError (err) {
if (!errored) {
errored = true;
reject(err);
}
}
function | (fileOrDir) {
cnt++;
fs.stat(fileOrDir, async (err, stats) => {
if (err || errored)
return handleError(err);
try {
if (await visit(fileOrDir, stats))
return resolve();
}
catch (err) {
return handleError(err);
}
if (stats.isDirectory()) {
fs.readdir(fileOrDir, (err, paths) => {
if (err || errored)
return handleError(err);
cnt--;
if (paths.length === 0 && !errored && cnt === 0)
return resolve();
paths.forEach(fileOrDirPath => visitFileOrDir(path.resolve(fileOrDir, fileOrDirPath)));
});
}
else if (!errored && --cnt === 0) {
resolve();
}
});
}
});
} | visitFileOrDir | identifier_name |
registry-manager.ts | /*
* Copyright 2014-2019 Guy Bedford (http://guybedford.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import rimraf = require('rimraf');
import { Semver, SemverRange } from 'sver';
import fs = require('graceful-fs');
import path = require('path');
import mkdirp = require('mkdirp');
import { PackageName, ExactPackage, PackageConfig, ProcessedPackageConfig, parsePackageName,
processPackageConfig, overridePackageConfig, serializePackageConfig } from './package';
import { readJSON, JspmError, JspmUserError, sha256, md5, encodeInvalidFileChars, bold, isWindows,
winSepRegEx, highlight, underline, hasProperties } from '../utils/common';
import { readJSONStyled, writeJSONStyled, defaultStyle } from '../config/config-file';
import Cache from '../utils/cache';
import globalConfig from '../config/global-config-file';
import { Logger, input, confirm } from '../project';
import { resolveSource, downloadSource } from '../install/source';
import FetchClass, { Fetch, GetCredentials, Credentials } from './fetch';
import { convertCJSPackage, convertCJSConfig } from '../compile/cjs-convert';
import { runBinaryBuild } from './binary-build';
import { Readable } from 'stream';
const VerifyState = {
NOT_INSTALLED: 0,
INVALID: 1,
HASH_VALID: 2,
VERIFIED_VALID: 3
};
export interface LookupData {
meta: any,
redirect?: string,
versions?: {
[name: string]: {
resolved: Resolved | void,
meta?: any
}
}
}
export interface Resolved {
version?: string,
source?: string,
override?: PackageConfig,
deprecated?: string
}
export interface SourceInfo {
source: string,
opts: any
}
export interface PublishOptions {
tag?: string;
access?: string;
otp?: string;
}
export interface RegistryEndpoint {
configure?: () => Promise<void>;
dispose: () => Promise<void>;
auth: (url: URL, method: string, credentials: Credentials, unauthorizedHeaders?: Record<string, string>) => Promise<void | boolean>;
lookup: (pkgName: string, versionRange: SemverRange, lookup: LookupData) => Promise<void | boolean>;
resolve?: (pkgName: string, version: string, lookup: LookupData) => Promise<void | boolean>;
publish?: (packagePath: string, pjson: any, tarStream: Readable, options: PublishOptions) => Promise<void>;
}
export interface RegistryEndpointConstructable {
new(utils: EndpointUtils, config: any): RegistryEndpoint;
}
export interface EndpointUtils {
encodeVersion: (version: string) => string;
JspmUserError: typeof JspmUserError;
log: Logger;
input: input;
confirm: confirm;
bold: typeof bold;
highlight: typeof highlight;
underline: typeof underline;
globalConfig: typeof globalConfig;
fetch: Fetch;
getCredentials: GetCredentials;
}
export interface Registry {
handler: RegistryEndpointConstructable | string;
config: any;
}
export interface ConstructorOptions {
cacheDir: string,
timeouts: {
resolve: number,
download: number
},
defaultRegistry: string,
log: Logger,
input: input,
confirm: confirm,
Cache: typeof Cache,
userInput: boolean,
offline: boolean,
preferOffline: boolean,
strictSSL: boolean,
fetch: FetchClass,
registries: {[name: string]: Registry}
}
export default class RegistryManager {
userInput: boolean;
offline: boolean;
preferOffline: boolean;
timeouts: { resolve: number, download: number };
cacheDir: string;
defaultRegistry: string;
// getEndpoint: (string) => { endpoint: RegistryEndpoint, cache: Cache };
cache: Cache;
verifiedCache: {
[hash: string]: number
};
endpoints: Map<string,{ endpoint: RegistryEndpoint, cache: Cache }>;
util: EndpointUtils;
instanceId: number;
strictSSL: boolean;
fetch: FetchClass;
registries: {[name: string]: Registry};
constructor ({ cacheDir, timeouts, Cache, userInput, offline, preferOffline, strictSSL, defaultRegistry, log, input, confirm, fetch, registries }: ConstructorOptions) {
this.userInput = userInput;
this.offline = offline;
this.preferOffline = preferOffline;
this.cacheDir = cacheDir;
this.strictSSL = strictSSL;
this.timeouts = timeouts;
this.defaultRegistry = defaultRegistry;
this.instanceId = Math.round(Math.random() * 10**10);
this.registries = registries;
this.util = {
encodeVersion: encodeInvalidFileChars,
JspmUserError,
log,
input,
confirm,
bold,
highlight,
underline,
globalConfig,
fetch: fetch.fetch.bind(fetch),
getCredentials: fetch.getCredentials.bind(fetch)
};
this.fetch = fetch;
// note which installs have been verified in this session
// so we only have to perform verification once per package
this.verifiedCache = {};
this.endpoints = new Map<string, { endpoint: RegistryEndpoint, cache: Cache }>();
this.cache = new Cache(path.resolve(cacheDir, 'pcfg'));
mkdirp.sync(path.resolve(cacheDir, 'packages'));
}
loadEndpoints() {
Object.keys(this.registries).forEach((registryName) => {
if (registryName === 'jspm')
return;
try {
this.getEndpoint(registryName);
}
catch (err) {
if (err && err.code === 'REGISTRY_NOT_FOUND')
this.util.log.warn(err.message.substr(err.message.indexOf('\n')).trim());
else
throw err;
}
});
}
getEndpoint (name) {
let endpointEntry = this.endpoints.get(name);
if (endpointEntry)
return endpointEntry;
// config returned by config get is a new object owned by us
const registry = this.registries[name];
const config = registry.config;
if (config.strictSSL !== 'boolean')
config.strictSSL = this.strictSSL;
config.timeout = this.timeouts.resolve;
config.userInput = this.userInput;
config.offline = this.offline;
config.preferOffline = this.preferOffline;
let EndpointConstructor: RegistryEndpointConstructable;
if (typeof registry.handler === "string") {
try {
EndpointConstructor = require(registry.handler) as RegistryEndpointConstructable;
}
catch (e) {
if (e && e.code === 'MODULE_NOT_FOUND') {
if (e.message && e.message.indexOf(registry.handler) !== -1) {
this.util.log.warn(`Registry module '${registry.handler}' not found loading package ${bold(name)}.
This may be from a previous jspm version and can be removed with ${bold(`jspm config --unset registries.${name}`)}.`);
return;
}
else {
throw new JspmError(`Error loading registry ${bold(name)} from module '${registry.handler}'.`, 'REGISTRY_LOAD_ERROR', e);
}
}
else {
throw e;
}
}
} else {
EndpointConstructor = registry.handler;
}
const endpoint = new EndpointConstructor(this.util, config);
const cache = new Cache(path.resolve(this.cacheDir, 'registry_cache', name));
endpointEntry = { endpoint, cache };
this.endpoints.set(name, endpointEntry);
return endpointEntry;
}
dispose () {
return Promise.all(Array.from(this.endpoints.values()).map(entry => entry.endpoint.dispose ? entry.endpoint.dispose() : undefined));
}
async configure (registryName: string) {
const { endpoint } = this.getEndpoint(registryName);
if (!endpoint.configure)
throw new JspmUserError(`The ${registryName} registry doesn't have any configuration hook.`);
await endpoint.configure();
}
async auth (url: URL, method: string, credentials: Credentials, unauthorizedHeaders?: Record<string, string>): Promise<string> {
for (let [registry, { endpoint }] of this.endpoints.entries()) {
if (!endpoint.auth)
continue;
if (await endpoint.auth(url, method, credentials, unauthorizedHeaders))
return registry;
}
return undefined;
}
async resolve (pkg: PackageName, override: ProcessedPackageConfig | void, edge = false): Promise<{
pkg: ExactPackage,
target: PackageName,
source: string,
override: ProcessedPackageConfig | void,
deprecated: string
}> {
let registry = pkg.registry || this.defaultRegistry;
let { endpoint, cache } = this.getEndpoint(registry);
let resolveRange = new SemverRange(pkg.version || '*');
let lookup: LookupData, resolvedVersion: string, redirect: string, resolved: Resolved;
try {
// loop over any redirects
while (true) {
lookup = await cache.getUnlocked(pkg.name, this.timeouts.resolve);
if (resolveRange.isExact) {
resolvedVersion = resolveRange.version.toString();
lookup = lookup || { versions: {}, meta: {} };
}
else if (lookup && (this.offline || this.preferOffline)) {
if (lookup.redirect) {
redirect = lookup.redirect;
}
else {
let versionList = Object.keys(lookup.versions);
resolvedVersion = resolveRange.bestMatch(versionList, edge);
if (resolvedVersion === undefined && edge === false)
resolvedVersion = resolveRange.bestMatch(versionList, true);
if (resolvedVersion !== undefined)
resolvedVersion = resolvedVersion.toString();
}
}
if (resolvedVersion === undefined && redirect === undefined) {
// no resolution available offline
if (this.offline)
return;
const unlock = await cache.lock(pkg.name, this.timeouts.resolve);
try {
// cache could have been written while we were getting the lock, although don't bother rechecking resolved as small benefit
lookup = await cache.get(pkg.name) || { versions: {}, meta: {} };
const logEnd = this.util.log.taskStart(`Looking up ${this.util.highlight(pkg.name)}`);
let changed;
try {
changed = await endpoint.lookup(pkg.name, resolveRange, lookup);
}
finally {
logEnd();
}
logEnd();
if (changed && hasProperties(lookup.versions))
cache.setUnlock(pkg.name, lookup).catch(() => {});
else
unlock().catch(() => {});
}
catch (e) {
unlock().catch(() => {});
throw e;
}
}
if (lookup.redirect)
redirect = lookup.redirect;
if (redirect) {
var redirects = redirects || [];
redirects.push(redirect);
if (redirects.indexOf(redirect) !== redirects.length - 1)
throw new JspmUserError(`Circular redirect during lookup - ${redirects.join(' -> ')}.`);
// loop while redirecting
let redirectPkg = parsePackageName(redirect);
pkg = redirectPkg;
({ endpoint, cache } = this.getEndpoint(registry = pkg.registry));
}
else {
if (resolvedVersion === undefined) {
const versionList = Object.keys(lookup.versions);
resolvedVersion = resolveRange.bestMatch(versionList, edge);
if (resolvedVersion === undefined && edge === false)
resolvedVersion = resolveRange.bestMatch(versionList, true);
if (resolvedVersion !== undefined)
resolvedVersion = resolvedVersion.toString();
}
// 404
if (!resolvedVersion)
return;
let version = lookup.versions[resolvedVersion];
if ((this.preferOffline || this.offline) && version && version.resolved) {
resolved = version.resolved;
}
else {
if (this.offline)
return;
// this could result in a cache change... but it's local so we don't lock before we start
const logEnd = this.util.log.taskStart(`Resolving ${this.util.highlight(`${pkg.name}@${resolvedVersion}`)}`);
let changed;
try {
changed = await endpoint.resolve(pkg.name, resolvedVersion, lookup);
}
finally {
logEnd();
}
if (changed) {
version = lookup.versions[resolvedVersion];
if (!version)
return;
resolved = <Resolved>version.resolved;
// cache update individual resolve
(async () => {
const unlock = await cache.lock(pkg.name, this.timeouts.resolve);
await cache.set(pkg.name, lookup);
return unlock();
})().catch(() => {});
}
else if (!version) {
return;
}
else {
resolved = <Resolved>version.resolved;
}
if (!resolved)
throw new Error(`jspm registry endpoint for ${bold(registry)} did not properly resolve ${highlight(pkg.name)}.`);
}
break;
}
}
}
catch (e) {
if (redirects)
e.redirects = redirects;
throw e;
}
let resolvedOverride;
if (resolved.override) {
resolvedOverride = processPackageConfig(resolved.override, true, override && override.registry);
if (override)
({ config: override } = overridePackageConfig(resolvedOverride, override));
else
override = resolvedOverride;
}
return {
pkg: <ExactPackage>{
registry,
name: pkg.name,
version: resolved.version || resolvedVersion,
semver: new Semver(resolvedVersion)
},
target: redirects ? <PackageName>{
registry,
name: pkg.name,
version: pkg.version
} : pkg,
source: resolved.source,
override,
deprecated: resolved.deprecated
};
}
async resolveSource (source: string, packagePath: string, projectPath: string): Promise<string> {
if (source.startsWith('link:') || source.startsWith('file:') || source.startsWith('git+file:')) {
let sourceProtocol = source.substr(0, source[0] === 'g' ? 9 : 5);
let sourcePath = path.resolve(source.substr(source[0] === 'g' ? 9 : 5));
| sourcePath[0] === '.' && (sourcePath[1] === '/' || sourcePath[1] === '\\' || (
sourcePath[1] === '.' && (sourcePath[2] === '/' || sourcePath[2] === '\\')))) {
const realPackagePath = await new Promise<string>((resolve, reject) => fs.realpath(packagePath, (err, realpath) => err ? reject(err) : resolve(realpath)));
sourcePath = path.resolve(realPackagePath, sourcePath);
}
}
// if a file: install and it is a directory then it is a link: install
if (source.startsWith('file:')) {
try {
const stats = fs.statSync(sourcePath);
if (stats.isDirectory())
sourceProtocol = 'link:';
}
catch (e) {
if (e && e.code === 'ENOENT')
throw new JspmUserError(`Path ${sourcePath} is not a valid file or directory.`);
throw e;
}
}
sourcePath = path.relative(projectPath, sourcePath) + '/';
if (isWindows)
sourcePath = sourcePath.replace(winSepRegEx, '/');
source = sourceProtocol + sourcePath;
}
if (this.offline)
return source;
return resolveSource(this.util.log, this.fetch, source, this.timeouts.resolve);
}
async verifyInstallDir (dir: string, verifyHash: string, fullVerification: boolean): Promise<number> {
const cachedState = this.verifiedCache[verifyHash];
if (cachedState !== undefined && (cachedState !== VerifyState.HASH_VALID || !fullVerification))
return cachedState;
const installFile = path.resolve(dir, '.jspm');
const jspmJson = await readJSON(installFile);
if (!jspmJson)
return this.verifiedCache[verifyHash] = VerifyState.NOT_INSTALLED;
if (typeof jspmJson.mtime !== 'number' || jspmJson.hash !== verifyHash)
return this.verifiedCache[verifyHash] = VerifyState.INVALID;
// if not doing full verification for perf, stop here
if (!fullVerification)
return this.verifiedCache[verifyHash] = VerifyState.HASH_VALID;
// mtime check (skipping .jspm file)
let failure = false;
await dirWalk(dir, async (filePath, stats) => {
if (filePath === installFile)
return;
if (stats.mtimeMs > jspmJson.mtime) {
failure = true;
return true;
}
});
return this.verifiedCache[verifyHash] = failure ? VerifyState.INVALID : VerifyState.VERIFIED_VALID;
/*
let fileHashes = await Promise.all(fileList.map(getFileHash));
let installedDirHash = sha256(fileHashes.sort().join(''));
// hash match -> update the mtime in the install file so we dont check next time
if (installedDirHash === dirHash) {
await new Promise((resolve, reject) => {
fs.writeFile(installFile, mtime + '\n' + hash + '\n' + dirHash, err => err ? reject(err) : resolve())
});
return true;
}*/
}
// on verification failure, we remove the directory and redownload
// moving to a tmp location can be done during the verificationFailure call, to diff and determine route forward
// if deciding to checkout, "ensureInstall" operation is cancelled by returning true
// build support will be added to build into a newly prefixed folder, with build as a boolean argument
async ensureInstall (source: string, override: ProcessedPackageConfig | void, verificationFailure: (dir: string) => Promise<boolean>, fullVerification: boolean = false): Promise<{
config: ProcessedPackageConfig,
override: ProcessedPackageConfig | void,
dir: string,
hash: string,
changed: boolean
}> {
let sourceHash = sha256(source);
var { config = undefined, hash = undefined }: { config: ProcessedPackageConfig, hash: string }
= await this.cache.getUnlocked(sourceHash, this.timeouts.download) || {};
if (config) {
config = processPackageConfig(<any>config);
if (override) {
({ config, override } = overridePackageConfig(config, override));
hash = sourceHash + (override ? md5(JSON.stringify(override)) : '');
}
convertCJSConfig(config);
var dir = path.join(this.cacheDir, 'packages', hash);
const verifyState = await this.verifyInstallDir(dir, hash, fullVerification);
if (verifyState > VerifyState.INVALID)
return { config, override, dir, hash, changed: false };
else if (verifyState !== VerifyState.NOT_INSTALLED && await verificationFailure(dir))
return;
}
if (this.offline)
throw new JspmUserError(`Package is not available for offline install.`);
let unlock = await this.cache.lock(sourceHash, this.timeouts.download);
try {
// could have been a write while we were getting the lock
if (!config) {
var { config = undefined, hash = undefined }: {
config: ProcessedPackageConfig,
hash: string
} = await this.cache.get(sourceHash) || {};
if (config) {
config = processPackageConfig(<any>config);
if (override) {
({ config, override } = overridePackageConfig(config, override));
hash = sourceHash + (override ? md5(JSON.stringify(override)) : '');
}
convertCJSConfig(config);
var dir = path.join(this.cacheDir, 'packages', hash);
const verifyState = await this.verifyInstallDir(dir, hash, fullVerification);
if (verifyState > VerifyState.INVALID)
return { config, override, dir, hash, changed: false };
else if (verifyState !== VerifyState.NOT_INSTALLED && await verificationFailure(dir))
return;
}
}
// if we dont know the config then we dont know the canonical override (and hence hash)
// so we download to a temporary folder first
if (!config)
dir = path.join(this.cacheDir, 'tmp', sha256(Math.random().toString()));
await new Promise((resolve, reject) => rimraf(dir, err => err ? reject(err) : resolve()));
await new Promise((resolve, reject) => mkdirp(dir, err => err ? reject(err) : resolve()));
if (this.offline)
throw new JspmUserError(`Source ${source} is not available offline.`);
// if source is linked, can return the linked dir directly
await downloadSource(this.util.log, this.fetch, source, dir, this.timeouts.download);
const logEnd = this.util.log.taskStart('Finalizing ' + highlight(source));
try {
let pjsonPath = path.resolve(dir, 'package.json');
let { json: pjson, style } = await readJSONStyled(pjsonPath);
if (!pjson)
pjson = {};
if (!config) {
let pjsonConfig = processPackageConfig(pjson);
const serializedConfig = serializePackageConfig(pjsonConfig, this.defaultRegistry);
if (override)
({ config, override } = overridePackageConfig(pjsonConfig, override));
else
config = pjsonConfig;
convertCJSConfig(config);
hash = sourceHash + (override ? md5(JSON.stringify(override)) : '');
await Promise.all([
this.cache.set(sourceHash, { config: serializedConfig, hash }),
// move the tmp folder to the known hash now
(async () => {
const toDir = path.join(this.cacheDir, 'packages', hash);
await new Promise((resolve, reject) => rimraf(toDir, err => err ? reject(err) : resolve()));
await new Promise((resolve, reject) => {
fs.rename(dir, dir = toDir, err => err ? reject(err) : resolve());
});
})()
]);
pjsonPath = path.resolve(dir, 'package.json');
}
await writeJSONStyled(pjsonPath, Object.assign(pjson, serializePackageConfig(config)), style || defaultStyle);
await runBinaryBuild(this.util.log, dir, pjson.name, pjson.scripts);
// run package conversion
// (on any subfolder containing a "type": "commonjs")
await convertCJSPackage(this.util.log, dir, config.name, config, this.defaultRegistry);
var mtime = await new Promise((resolve, reject) => fs.stat(pjsonPath, (err, stats) => err ? reject(err) : resolve(stats.mtimeMs)));
// todo: diffs for invalid?
// const fileHashes = await calculateFileHashes(dir);
// will be useful for avoiding mistaken mtime bumps when viewing
await new Promise((resolve, reject) => {
fs.writeFile(path.join(dir, '.jspm'), JSON.stringify({ mtime, hash }), err => err ? reject(err) : resolve())
});
this.verifiedCache[hash] = VerifyState.VERIFIED_VALID;
return { config, override, dir, hash, changed: true };
}
finally {
logEnd();
}
}
finally {
unlock();
}
}
async publish (packagePath: string, registry: string, pjson: any, tarStream: Readable, opts: PublishOptions) {
const { endpoint } = this.getEndpoint(registry);
if (!endpoint.publish)
throw new JspmUserError(`Registry ${highlight(pjson.registry)} does not support publishing.`);
const logEnd = this.util.log.taskStart(`Publishing ${this.util.highlight(`${registry}:${pjson.name}@${pjson.version}`)}`);
try {
await endpoint.publish(packagePath, pjson, tarStream, opts);
}
finally {
logEnd();
}
}
}
function dirWalk (dir: string, visit: (filePath: string, stats, files?: string[]) => void | boolean | Promise<void | boolean>) {
return new Promise((resolve, reject) => {
let errored = false;
let cnt = 0;
visitFileOrDir(path.resolve(dir));
function handleError (err) {
if (!errored) {
errored = true;
reject(err);
}
}
function visitFileOrDir (fileOrDir) {
cnt++;
fs.stat(fileOrDir, async (err, stats) => {
if (err || errored)
return handleError(err);
try {
if (await visit(fileOrDir, stats))
return resolve();
}
catch (err) {
return handleError(err);
}
if (stats.isDirectory()) {
fs.readdir(fileOrDir, (err, paths) => {
if (err || errored)
return handleError(err);
cnt--;
if (paths.length === 0 && !errored && cnt === 0)
return resolve();
paths.forEach(fileOrDirPath => visitFileOrDir(path.resolve(fileOrDir, fileOrDirPath)));
});
}
else if (!errored && --cnt === 0) {
resolve();
}
});
}
});
} | // relative file path installs that are not for the top-level project are relative to their package real path
if (packagePath !== process.cwd()) {
if ((isWindows && (source[0] === '/' || source[0] === '\\')) || | random_line_split |
registry-manager.ts | /*
* Copyright 2014-2019 Guy Bedford (http://guybedford.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import rimraf = require('rimraf');
import { Semver, SemverRange } from 'sver';
import fs = require('graceful-fs');
import path = require('path');
import mkdirp = require('mkdirp');
import { PackageName, ExactPackage, PackageConfig, ProcessedPackageConfig, parsePackageName,
processPackageConfig, overridePackageConfig, serializePackageConfig } from './package';
import { readJSON, JspmError, JspmUserError, sha256, md5, encodeInvalidFileChars, bold, isWindows,
winSepRegEx, highlight, underline, hasProperties } from '../utils/common';
import { readJSONStyled, writeJSONStyled, defaultStyle } from '../config/config-file';
import Cache from '../utils/cache';
import globalConfig from '../config/global-config-file';
import { Logger, input, confirm } from '../project';
import { resolveSource, downloadSource } from '../install/source';
import FetchClass, { Fetch, GetCredentials, Credentials } from './fetch';
import { convertCJSPackage, convertCJSConfig } from '../compile/cjs-convert';
import { runBinaryBuild } from './binary-build';
import { Readable } from 'stream';
const VerifyState = {
NOT_INSTALLED: 0,
INVALID: 1,
HASH_VALID: 2,
VERIFIED_VALID: 3
};
export interface LookupData {
meta: any,
redirect?: string,
versions?: {
[name: string]: {
resolved: Resolved | void,
meta?: any
}
}
}
export interface Resolved {
version?: string,
source?: string,
override?: PackageConfig,
deprecated?: string
}
export interface SourceInfo {
source: string,
opts: any
}
export interface PublishOptions {
tag?: string;
access?: string;
otp?: string;
}
export interface RegistryEndpoint {
configure?: () => Promise<void>;
dispose: () => Promise<void>;
auth: (url: URL, method: string, credentials: Credentials, unauthorizedHeaders?: Record<string, string>) => Promise<void | boolean>;
lookup: (pkgName: string, versionRange: SemverRange, lookup: LookupData) => Promise<void | boolean>;
resolve?: (pkgName: string, version: string, lookup: LookupData) => Promise<void | boolean>;
publish?: (packagePath: string, pjson: any, tarStream: Readable, options: PublishOptions) => Promise<void>;
}
export interface RegistryEndpointConstructable {
new(utils: EndpointUtils, config: any): RegistryEndpoint;
}
export interface EndpointUtils {
encodeVersion: (version: string) => string;
JspmUserError: typeof JspmUserError;
log: Logger;
input: input;
confirm: confirm;
bold: typeof bold;
highlight: typeof highlight;
underline: typeof underline;
globalConfig: typeof globalConfig;
fetch: Fetch;
getCredentials: GetCredentials;
}
export interface Registry {
handler: RegistryEndpointConstructable | string;
config: any;
}
export interface ConstructorOptions {
cacheDir: string,
timeouts: {
resolve: number,
download: number
},
defaultRegistry: string,
log: Logger,
input: input,
confirm: confirm,
Cache: typeof Cache,
userInput: boolean,
offline: boolean,
preferOffline: boolean,
strictSSL: boolean,
fetch: FetchClass,
registries: {[name: string]: Registry}
}
export default class RegistryManager {
userInput: boolean;
offline: boolean;
preferOffline: boolean;
timeouts: { resolve: number, download: number };
cacheDir: string;
defaultRegistry: string;
// getEndpoint: (string) => { endpoint: RegistryEndpoint, cache: Cache };
cache: Cache;
verifiedCache: {
[hash: string]: number
};
endpoints: Map<string,{ endpoint: RegistryEndpoint, cache: Cache }>;
util: EndpointUtils;
instanceId: number;
strictSSL: boolean;
fetch: FetchClass;
registries: {[name: string]: Registry};
constructor ({ cacheDir, timeouts, Cache, userInput, offline, preferOffline, strictSSL, defaultRegistry, log, input, confirm, fetch, registries }: ConstructorOptions) {
this.userInput = userInput;
this.offline = offline;
this.preferOffline = preferOffline;
this.cacheDir = cacheDir;
this.strictSSL = strictSSL;
this.timeouts = timeouts;
this.defaultRegistry = defaultRegistry;
this.instanceId = Math.round(Math.random() * 10**10);
this.registries = registries;
this.util = {
encodeVersion: encodeInvalidFileChars,
JspmUserError,
log,
input,
confirm,
bold,
highlight,
underline,
globalConfig,
fetch: fetch.fetch.bind(fetch),
getCredentials: fetch.getCredentials.bind(fetch)
};
this.fetch = fetch;
// note which installs have been verified in this session
// so we only have to perform verification once per package
this.verifiedCache = {};
this.endpoints = new Map<string, { endpoint: RegistryEndpoint, cache: Cache }>();
this.cache = new Cache(path.resolve(cacheDir, 'pcfg'));
mkdirp.sync(path.resolve(cacheDir, 'packages'));
}
loadEndpoints() {
Object.keys(this.registries).forEach((registryName) => {
if (registryName === 'jspm')
return;
try {
this.getEndpoint(registryName);
}
catch (err) {
if (err && err.code === 'REGISTRY_NOT_FOUND')
this.util.log.warn(err.message.substr(err.message.indexOf('\n')).trim());
else
throw err;
}
});
}
getEndpoint (name) {
let endpointEntry = this.endpoints.get(name);
if (endpointEntry)
return endpointEntry;
// config returned by config get is a new object owned by us
const registry = this.registries[name];
const config = registry.config;
if (config.strictSSL !== 'boolean')
config.strictSSL = this.strictSSL;
config.timeout = this.timeouts.resolve;
config.userInput = this.userInput;
config.offline = this.offline;
config.preferOffline = this.preferOffline;
let EndpointConstructor: RegistryEndpointConstructable;
if (typeof registry.handler === "string") {
try {
EndpointConstructor = require(registry.handler) as RegistryEndpointConstructable;
}
catch (e) {
if (e && e.code === 'MODULE_NOT_FOUND') {
if (e.message && e.message.indexOf(registry.handler) !== -1) {
this.util.log.warn(`Registry module '${registry.handler}' not found loading package ${bold(name)}.
This may be from a previous jspm version and can be removed with ${bold(`jspm config --unset registries.${name}`)}.`);
return;
}
else {
throw new JspmError(`Error loading registry ${bold(name)} from module '${registry.handler}'.`, 'REGISTRY_LOAD_ERROR', e);
}
}
else {
throw e;
}
}
} else {
EndpointConstructor = registry.handler;
}
const endpoint = new EndpointConstructor(this.util, config);
const cache = new Cache(path.resolve(this.cacheDir, 'registry_cache', name));
endpointEntry = { endpoint, cache };
this.endpoints.set(name, endpointEntry);
return endpointEntry;
}
dispose () {
return Promise.all(Array.from(this.endpoints.values()).map(entry => entry.endpoint.dispose ? entry.endpoint.dispose() : undefined));
}
async configure (registryName: string) |
async auth (url: URL, method: string, credentials: Credentials, unauthorizedHeaders?: Record<string, string>): Promise<string> {
for (let [registry, { endpoint }] of this.endpoints.entries()) {
if (!endpoint.auth)
continue;
if (await endpoint.auth(url, method, credentials, unauthorizedHeaders))
return registry;
}
return undefined;
}
async resolve (pkg: PackageName, override: ProcessedPackageConfig | void, edge = false): Promise<{
pkg: ExactPackage,
target: PackageName,
source: string,
override: ProcessedPackageConfig | void,
deprecated: string
}> {
let registry = pkg.registry || this.defaultRegistry;
let { endpoint, cache } = this.getEndpoint(registry);
let resolveRange = new SemverRange(pkg.version || '*');
let lookup: LookupData, resolvedVersion: string, redirect: string, resolved: Resolved;
try {
// loop over any redirects
while (true) {
lookup = await cache.getUnlocked(pkg.name, this.timeouts.resolve);
if (resolveRange.isExact) {
resolvedVersion = resolveRange.version.toString();
lookup = lookup || { versions: {}, meta: {} };
}
else if (lookup && (this.offline || this.preferOffline)) {
if (lookup.redirect) {
redirect = lookup.redirect;
}
else {
let versionList = Object.keys(lookup.versions);
resolvedVersion = resolveRange.bestMatch(versionList, edge);
if (resolvedVersion === undefined && edge === false)
resolvedVersion = resolveRange.bestMatch(versionList, true);
if (resolvedVersion !== undefined)
resolvedVersion = resolvedVersion.toString();
}
}
if (resolvedVersion === undefined && redirect === undefined) {
// no resolution available offline
if (this.offline)
return;
const unlock = await cache.lock(pkg.name, this.timeouts.resolve);
try {
// cache could have been written while we were getting the lock, although don't bother rechecking resolved as small benefit
lookup = await cache.get(pkg.name) || { versions: {}, meta: {} };
const logEnd = this.util.log.taskStart(`Looking up ${this.util.highlight(pkg.name)}`);
let changed;
try {
changed = await endpoint.lookup(pkg.name, resolveRange, lookup);
}
finally {
logEnd();
}
logEnd();
if (changed && hasProperties(lookup.versions))
cache.setUnlock(pkg.name, lookup).catch(() => {});
else
unlock().catch(() => {});
}
catch (e) {
unlock().catch(() => {});
throw e;
}
}
if (lookup.redirect)
redirect = lookup.redirect;
if (redirect) {
var redirects = redirects || [];
redirects.push(redirect);
if (redirects.indexOf(redirect) !== redirects.length - 1)
throw new JspmUserError(`Circular redirect during lookup - ${redirects.join(' -> ')}.`);
// loop while redirecting
let redirectPkg = parsePackageName(redirect);
pkg = redirectPkg;
({ endpoint, cache } = this.getEndpoint(registry = pkg.registry));
}
else {
if (resolvedVersion === undefined) {
const versionList = Object.keys(lookup.versions);
resolvedVersion = resolveRange.bestMatch(versionList, edge);
if (resolvedVersion === undefined && edge === false)
resolvedVersion = resolveRange.bestMatch(versionList, true);
if (resolvedVersion !== undefined)
resolvedVersion = resolvedVersion.toString();
}
// 404
if (!resolvedVersion)
return;
let version = lookup.versions[resolvedVersion];
if ((this.preferOffline || this.offline) && version && version.resolved) {
resolved = version.resolved;
}
else {
if (this.offline)
return;
// this could result in a cache change... but it's local so we don't lock before we start
const logEnd = this.util.log.taskStart(`Resolving ${this.util.highlight(`${pkg.name}@${resolvedVersion}`)}`);
let changed;
try {
changed = await endpoint.resolve(pkg.name, resolvedVersion, lookup);
}
finally {
logEnd();
}
if (changed) {
version = lookup.versions[resolvedVersion];
if (!version)
return;
resolved = <Resolved>version.resolved;
// cache update individual resolve
(async () => {
const unlock = await cache.lock(pkg.name, this.timeouts.resolve);
await cache.set(pkg.name, lookup);
return unlock();
})().catch(() => {});
}
else if (!version) {
return;
}
else {
resolved = <Resolved>version.resolved;
}
if (!resolved)
throw new Error(`jspm registry endpoint for ${bold(registry)} did not properly resolve ${highlight(pkg.name)}.`);
}
break;
}
}
}
catch (e) {
if (redirects)
e.redirects = redirects;
throw e;
}
let resolvedOverride;
if (resolved.override) {
resolvedOverride = processPackageConfig(resolved.override, true, override && override.registry);
if (override)
({ config: override } = overridePackageConfig(resolvedOverride, override));
else
override = resolvedOverride;
}
return {
pkg: <ExactPackage>{
registry,
name: pkg.name,
version: resolved.version || resolvedVersion,
semver: new Semver(resolvedVersion)
},
target: redirects ? <PackageName>{
registry,
name: pkg.name,
version: pkg.version
} : pkg,
source: resolved.source,
override,
deprecated: resolved.deprecated
};
}
async resolveSource (source: string, packagePath: string, projectPath: string): Promise<string> {
if (source.startsWith('link:') || source.startsWith('file:') || source.startsWith('git+file:')) {
let sourceProtocol = source.substr(0, source[0] === 'g' ? 9 : 5);
let sourcePath = path.resolve(source.substr(source[0] === 'g' ? 9 : 5));
// relative file path installs that are not for the top-level project are relative to their package real path
if (packagePath !== process.cwd()) {
if ((isWindows && (source[0] === '/' || source[0] === '\\')) ||
sourcePath[0] === '.' && (sourcePath[1] === '/' || sourcePath[1] === '\\' || (
sourcePath[1] === '.' && (sourcePath[2] === '/' || sourcePath[2] === '\\')))) {
const realPackagePath = await new Promise<string>((resolve, reject) => fs.realpath(packagePath, (err, realpath) => err ? reject(err) : resolve(realpath)));
sourcePath = path.resolve(realPackagePath, sourcePath);
}
}
// if a file: install and it is a directory then it is a link: install
if (source.startsWith('file:')) {
try {
const stats = fs.statSync(sourcePath);
if (stats.isDirectory())
sourceProtocol = 'link:';
}
catch (e) {
if (e && e.code === 'ENOENT')
throw new JspmUserError(`Path ${sourcePath} is not a valid file or directory.`);
throw e;
}
}
sourcePath = path.relative(projectPath, sourcePath) + '/';
if (isWindows)
sourcePath = sourcePath.replace(winSepRegEx, '/');
source = sourceProtocol + sourcePath;
}
if (this.offline)
return source;
return resolveSource(this.util.log, this.fetch, source, this.timeouts.resolve);
}
async verifyInstallDir (dir: string, verifyHash: string, fullVerification: boolean): Promise<number> {
const cachedState = this.verifiedCache[verifyHash];
if (cachedState !== undefined && (cachedState !== VerifyState.HASH_VALID || !fullVerification))
return cachedState;
const installFile = path.resolve(dir, '.jspm');
const jspmJson = await readJSON(installFile);
if (!jspmJson)
return this.verifiedCache[verifyHash] = VerifyState.NOT_INSTALLED;
if (typeof jspmJson.mtime !== 'number' || jspmJson.hash !== verifyHash)
return this.verifiedCache[verifyHash] = VerifyState.INVALID;
// if not doing full verification for perf, stop here
if (!fullVerification)
return this.verifiedCache[verifyHash] = VerifyState.HASH_VALID;
// mtime check (skipping .jspm file)
let failure = false;
await dirWalk(dir, async (filePath, stats) => {
if (filePath === installFile)
return;
if (stats.mtimeMs > jspmJson.mtime) {
failure = true;
return true;
}
});
return this.verifiedCache[verifyHash] = failure ? VerifyState.INVALID : VerifyState.VERIFIED_VALID;
/*
let fileHashes = await Promise.all(fileList.map(getFileHash));
let installedDirHash = sha256(fileHashes.sort().join(''));
// hash match -> update the mtime in the install file so we dont check next time
if (installedDirHash === dirHash) {
await new Promise((resolve, reject) => {
fs.writeFile(installFile, mtime + '\n' + hash + '\n' + dirHash, err => err ? reject(err) : resolve())
});
return true;
}*/
}
// on verification failure, we remove the directory and redownload
// moving to a tmp location can be done during the verificationFailure call, to diff and determine route forward
// if deciding to checkout, "ensureInstall" operation is cancelled by returning true
// build support will be added to build into a newly prefixed folder, with build as a boolean argument
async ensureInstall (source: string, override: ProcessedPackageConfig | void, verificationFailure: (dir: string) => Promise<boolean>, fullVerification: boolean = false): Promise<{
config: ProcessedPackageConfig,
override: ProcessedPackageConfig | void,
dir: string,
hash: string,
changed: boolean
}> {
let sourceHash = sha256(source);
var { config = undefined, hash = undefined }: { config: ProcessedPackageConfig, hash: string }
= await this.cache.getUnlocked(sourceHash, this.timeouts.download) || {};
if (config) {
config = processPackageConfig(<any>config);
if (override) {
({ config, override } = overridePackageConfig(config, override));
hash = sourceHash + (override ? md5(JSON.stringify(override)) : '');
}
convertCJSConfig(config);
var dir = path.join(this.cacheDir, 'packages', hash);
const verifyState = await this.verifyInstallDir(dir, hash, fullVerification);
if (verifyState > VerifyState.INVALID)
return { config, override, dir, hash, changed: false };
else if (verifyState !== VerifyState.NOT_INSTALLED && await verificationFailure(dir))
return;
}
if (this.offline)
throw new JspmUserError(`Package is not available for offline install.`);
let unlock = await this.cache.lock(sourceHash, this.timeouts.download);
try {
// could have been a write while we were getting the lock
if (!config) {
var { config = undefined, hash = undefined }: {
config: ProcessedPackageConfig,
hash: string
} = await this.cache.get(sourceHash) || {};
if (config) {
config = processPackageConfig(<any>config);
if (override) {
({ config, override } = overridePackageConfig(config, override));
hash = sourceHash + (override ? md5(JSON.stringify(override)) : '');
}
convertCJSConfig(config);
var dir = path.join(this.cacheDir, 'packages', hash);
const verifyState = await this.verifyInstallDir(dir, hash, fullVerification);
if (verifyState > VerifyState.INVALID)
return { config, override, dir, hash, changed: false };
else if (verifyState !== VerifyState.NOT_INSTALLED && await verificationFailure(dir))
return;
}
}
// if we dont know the config then we dont know the canonical override (and hence hash)
// so we download to a temporary folder first
if (!config)
dir = path.join(this.cacheDir, 'tmp', sha256(Math.random().toString()));
await new Promise((resolve, reject) => rimraf(dir, err => err ? reject(err) : resolve()));
await new Promise((resolve, reject) => mkdirp(dir, err => err ? reject(err) : resolve()));
if (this.offline)
throw new JspmUserError(`Source ${source} is not available offline.`);
// if source is linked, can return the linked dir directly
await downloadSource(this.util.log, this.fetch, source, dir, this.timeouts.download);
const logEnd = this.util.log.taskStart('Finalizing ' + highlight(source));
try {
let pjsonPath = path.resolve(dir, 'package.json');
let { json: pjson, style } = await readJSONStyled(pjsonPath);
if (!pjson)
pjson = {};
if (!config) {
let pjsonConfig = processPackageConfig(pjson);
const serializedConfig = serializePackageConfig(pjsonConfig, this.defaultRegistry);
if (override)
({ config, override } = overridePackageConfig(pjsonConfig, override));
else
config = pjsonConfig;
convertCJSConfig(config);
hash = sourceHash + (override ? md5(JSON.stringify(override)) : '');
await Promise.all([
this.cache.set(sourceHash, { config: serializedConfig, hash }),
// move the tmp folder to the known hash now
(async () => {
const toDir = path.join(this.cacheDir, 'packages', hash);
await new Promise((resolve, reject) => rimraf(toDir, err => err ? reject(err) : resolve()));
await new Promise((resolve, reject) => {
fs.rename(dir, dir = toDir, err => err ? reject(err) : resolve());
});
})()
]);
pjsonPath = path.resolve(dir, 'package.json');
}
await writeJSONStyled(pjsonPath, Object.assign(pjson, serializePackageConfig(config)), style || defaultStyle);
await runBinaryBuild(this.util.log, dir, pjson.name, pjson.scripts);
// run package conversion
// (on any subfolder containing a "type": "commonjs")
await convertCJSPackage(this.util.log, dir, config.name, config, this.defaultRegistry);
var mtime = await new Promise((resolve, reject) => fs.stat(pjsonPath, (err, stats) => err ? reject(err) : resolve(stats.mtimeMs)));
// todo: diffs for invalid?
// const fileHashes = await calculateFileHashes(dir);
// will be useful for avoiding mistaken mtime bumps when viewing
await new Promise((resolve, reject) => {
fs.writeFile(path.join(dir, '.jspm'), JSON.stringify({ mtime, hash }), err => err ? reject(err) : resolve())
});
this.verifiedCache[hash] = VerifyState.VERIFIED_VALID;
return { config, override, dir, hash, changed: true };
}
finally {
logEnd();
}
}
finally {
unlock();
}
}
async publish (packagePath: string, registry: string, pjson: any, tarStream: Readable, opts: PublishOptions) {
const { endpoint } = this.getEndpoint(registry);
if (!endpoint.publish)
throw new JspmUserError(`Registry ${highlight(pjson.registry)} does not support publishing.`);
const logEnd = this.util.log.taskStart(`Publishing ${this.util.highlight(`${registry}:${pjson.name}@${pjson.version}`)}`);
try {
await endpoint.publish(packagePath, pjson, tarStream, opts);
}
finally {
logEnd();
}
}
}
function dirWalk (dir: string, visit: (filePath: string, stats, files?: string[]) => void | boolean | Promise<void | boolean>) {
return new Promise((resolve, reject) => {
let errored = false;
let cnt = 0;
visitFileOrDir(path.resolve(dir));
function handleError (err) {
if (!errored) {
errored = true;
reject(err);
}
}
function visitFileOrDir (fileOrDir) {
cnt++;
fs.stat(fileOrDir, async (err, stats) => {
if (err || errored)
return handleError(err);
try {
if (await visit(fileOrDir, stats))
return resolve();
}
catch (err) {
return handleError(err);
}
if (stats.isDirectory()) {
fs.readdir(fileOrDir, (err, paths) => {
if (err || errored)
return handleError(err);
cnt--;
if (paths.length === 0 && !errored && cnt === 0)
return resolve();
paths.forEach(fileOrDirPath => visitFileOrDir(path.resolve(fileOrDir, fileOrDirPath)));
});
}
else if (!errored && --cnt === 0) {
resolve();
}
});
}
});
} | {
const { endpoint } = this.getEndpoint(registryName);
if (!endpoint.configure)
throw new JspmUserError(`The ${registryName} registry doesn't have any configuration hook.`);
await endpoint.configure();
} | identifier_body |
client.go | package main
import (
"encoding/json"
"fmt"
"net/url"
"os"
"sync"
"sync/atomic"
"time"
"github.com/gorilla/websocket"
"github.com/xing/beetle/consul"
)
// ClientOptions consist of the id by which the client identifies itself with
// the server, the overall configuration and pointer to a ConsulClient.
type ClientOptions struct {
Id string
Config *Config
ConsulClient *consul.Client
}
// RedisSystem holds the switch protocol state for each system name.
type RedisSystem struct {
system string
currentMaster *RedisShim
currentToken string
client *ClientState
}
// ClientState holds the client state.
type ClientState struct {
opts *ClientOptions
mutex sync.Mutex
ws *websocket.Conn
input chan MsgBody
writerDone chan struct{}
readerDone chan struct{}
configChanges chan consul.Env
redisSystems map[string]*RedisSystem
}
// GetConfig returns the client configuration in a thread safe way.
func (s *ClientState) GetConfig() *Config {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.opts.Config
}
// GetConfig returns the client configuration in a thread safe way.
func (s *RedisSystem) GetConfig() *Config {
return s.client.GetConfig()
}
// SetConfig sets replaces the current config with a new one in athread safe way
// and returns the old config.
func (s *ClientState) SetConfig(config *Config) *Config {
s.mutex.Lock()
defer s.mutex.Unlock()
oldconfig := s.opts.Config
s.opts.Config = config
return oldconfig
}
// ServerUrl constructs the webesocker URL to contact the server.
func (s *ClientState) ServerUrl() string {
config := s.GetConfig()
addr := fmt.Sprintf("%s:%d", config.Server, config.Port)
u := url.URL{Scheme: "ws", Host: addr, Path: "/configuration"}
return u.String()
}
// Connect establishes a webscket connection to the server.
func (s *ClientState) Connect() (err error) {
url := s.ServerUrl()
// copy default dialer to avoid race conditions
dialer := *websocket.DefaultDialer
dialer.HandshakeTimeout = time.Duration(s.opts.Config.DialTimeout) * time.Second
logInfo("connecting to %s, timeout: %s", url, dialer.HandshakeTimeout)
s.ws, _, err = dialer.Dial(url, nil)
if err != nil {
logError("could not establish web socket connection")
return
}
logInfo("established web socket connection")
return
}
// Close sends a Close message to the server and closed the connection.
func (s *ClientState) Close() {
defer s.ws.Close()
s.ws.SetWriteDeadline(time.Now().Add(WEBSOCKET_CLOSE_TIMEOUT))
err := s.ws.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
if err != nil {
logError("writing websocket close failed: %s", err)
}
}
// Send a message to the server.
func (s *ClientState) send(msg MsgBody) error {
b, err := json.Marshal(msg)
if err != nil {
logError("could not marshal message: %s", err)
return err
}
logDebug("sending message")
s.ws.SetWriteDeadline(time.Now().Add(WEBSOCKET_WRITE_TIMEOUT))
err = s.ws.WriteMessage(websocket.TextMessage, b)
if err != nil {
logError("could not send message: %s", err)
return err
}
logDebug("sent: %s", string(b))
return nil
}
// SendHeartBeat sends a heartbeat message to the server.
func (s *ClientState) SendHeartBeat() error {
return s.send(MsgBody{Name: HEARTBEAT, Id: s.opts.Id})
}
// Ping sends a PING message to the server.
func (s *ClientState) Ping(pingMsg MsgBody) error {
logInfo("Received ping message")
rs := s.RegisterSystem(pingMsg.System)
if rs.RedeemToken(pingMsg.Token) {
return s.SendPong(rs)
}
return nil
}
// RedeemToken checks the validity of the given token.
func (s *RedisSystem) RedeemToken(token string) bool {
if s.currentToken == "" || token > s.currentToken {
s.currentToken = token
}
tokenValid := token >= s.currentToken
if !tokenValid {
logInfo("invalid token: %s is not greater or equal to %s", token, s.currentToken)
}
return tokenValid
}
// SendPong sends a PONG message to the server.
func (s *ClientState) SendPong(rs *RedisSystem) error {
return s.send(MsgBody{System: rs.system, Name: PONG, Id: s.opts.Id, Token: rs.currentToken})
}
// SendClientInvalidated sends a CLIENT_INVALIDATED message to the server.
func (s *ClientState) SendClientInvalidated(rs *RedisSystem) error {
return s.send(MsgBody{System: rs.system, Name: CLIENT_INVALIDATED, Id: s.opts.Id, Token: rs.currentToken})
}
// SendClientStarted sends a CLIENT_STARTED message to the server.
func (s *ClientState) SendClientStarted() error {
return s.send(MsgBody{Name: CLIENT_STARTED, Id: s.opts.Id})
}
// NewMaster modifies the client state by setting the current master to a new
// one.
func (s *RedisSystem) NewMaster(server string) {
logInfo("setting new master: %s", server)
s.currentMaster = NewRedisShim(server)
}
// UpdateMasterFile writes the known masters information to the redis master file.
func (s *ClientState) UpdateMasterFile() {
path := s.GetConfig().RedisMasterFile
systems := make(map[string]string, 0)
for system, rs := range s.redisSystems {
if rs.currentMaster == nil {
systems[system] = ""
} else {
systems[system] = rs.currentMaster.server
}
}
content := MarshalMasterFileContent(systems)
WriteRedisMasterFile(path, content)
}
// DetermineInitialMasters tries to read the current masters from disk
// and establish the system name to redis shim mapping.
func (s *ClientState) | () {
path := s.GetConfig().RedisMasterFile
if !MasterFileExists(path) {
s.UpdateMasterFile()
return
}
masters := RedisMastersFromMasterFile(path)
invalidSystems := make([]string, 0)
for system, server := range masters {
rs := s.RegisterSystem(system)
if server != "" {
rs.NewMaster(server)
}
if rs.currentMaster == nil || !rs.currentMaster.IsMaster() {
invalidSystems = append(invalidSystems, system)
rs.currentMaster = nil
}
}
if len(invalidSystems) > 0 {
logInfo("clearing systems from master file %s", invalidSystems)
s.UpdateMasterFile()
}
}
func (s *ClientState) RegisterSystem(system string) *RedisSystem {
rs := s.redisSystems[system]
if rs == nil {
rs = &RedisSystem{system: system}
s.redisSystems[system] = rs
}
return rs
}
// Invalidate sets the current master for the given system to nil, removes the
// corresponding line from the the redis master file and sends a
// CLIENT_INVALIDATED message to the server, provided the token sent with the
// message is valid.
func (s *ClientState) Invalidate(msg MsgBody) error {
rs := s.RegisterSystem(msg.System)
if rs.RedeemToken(msg.Token) && (rs.currentMaster == nil || rs.currentMaster.Role() != MASTER) {
rs.currentMaster = nil
logInfo("Removing invalidated system '%s' from redis master file", msg.System)
s.UpdateMasterFile()
logInfo("Sending client_invalidated message with id '%s' and token '%s'", s.opts.Id, rs.currentToken)
return s.SendClientInvalidated(rs)
}
return nil
}
// Reconfigure updates the redis mater file on disk, provided the token sent
// with the message is valid.
func (s *ClientState) Reconfigure(msg MsgBody) error {
logInfo("Received reconfigure message with server '%s' and token '%s'", msg.Server, msg.Token)
rs := s.RegisterSystem(msg.System)
if !rs.RedeemToken(msg.Token) {
logInfo("Received invalid or outdated token: '%s'", msg.Token)
}
if rs.currentMaster == nil || rs.currentMaster.server != msg.Server {
rs.NewMaster(msg.Server)
s.UpdateMasterFile()
}
return nil
}
// Reader reads messages from the server and forwards them on an internal
// channel to the Writer, which acts as a message dispatcher. It exits when
// reading results in an error or when the server closes the socket.
func (s *ClientState) Reader() {
defer func() { s.readerDone <- struct{}{} }()
for !interrupted {
select {
case <-s.writerDone:
return
default:
}
logDebug("reading message")
s.ws.SetReadDeadline(time.Now().Add(WEBSOCKET_READ_TIMEOUT))
msgType, bytes, err := s.ws.ReadMessage()
atomic.AddInt64(&processed, 1)
if err != nil || msgType != websocket.TextMessage {
logError("error reading from server socket: %s", err)
return
}
logDebug("received: %s", string(bytes))
var body MsgBody
err = json.Unmarshal(bytes, &body)
if err != nil {
logError("reader: could not parse msg: %s", err)
return
}
s.input <- body
}
}
// Writer reads messages from an internal channel and dispatches them. It
// periodically sends a HEARTBEAT message to the server. It if receives a config
// change message, it replaces the current config with the new one. If the
// config change implies that the server URL has changed, it exits, relying on
// the outer loop to restart the client.
func (s *ClientState) Writer() {
ticker := time.NewTicker(1 * time.Second)
defer s.Close()
defer ticker.Stop()
defer func() { s.writerDone <- struct{}{} }()
i := 0
var err error
for !interrupted {
select {
case msg := <-s.input:
err = s.Dispatch(msg)
case <-ticker.C:
i = (i + 1) % s.GetConfig().ClientHeartbeat
if i == 0 {
err = s.SendHeartBeat()
}
case <-s.readerDone:
return
case env := <-s.configChanges:
if env != nil {
newconfig := buildConfig(env)
oldconfig := s.SetConfig(newconfig)
logInfo("updated server config from consul: %s", s.GetConfig())
if newconfig.RedisMasterFile != oldconfig.RedisMasterFile {
if err := os.Rename(oldconfig.RedisMasterFile, newconfig.RedisMasterFile); err != nil {
logError("could not rename redis master file to: %s", newconfig.RedisMasterFile)
}
}
if newconfig.ServerUrl() != oldconfig.ServerUrl() {
logInfo("restarting client because server url has changed: %s", newconfig.ServerUrl())
return
}
}
}
if err != nil {
return
}
}
}
// Dispatch dispatches matches rceived from the server to appropriate methods.
func (s *ClientState) Dispatch(msg MsgBody) error {
logDebug("dispatcher received: %+v", msg)
switch msg.Name {
case RECONFIGURE:
return s.Reconfigure(msg)
case PING:
return s.Ping(msg)
case INVALIDATE:
return s.Invalidate(msg)
default:
logError("unexpected message: %s", msg.Name)
}
return nil
}
// Run establishes a websocket connection to the server, starts reader and
// writer routines and a consul watcher for config changes. It exits when the
// writer exits.
func (s *ClientState) Run() error {
s.DetermineInitialMasters()
defer s.closeRedisConnections()
if err := s.Connect(); err != nil {
return err
}
if err := VerifyMasterFileString(s.GetConfig().RedisMasterFile); err != nil {
return err
}
if err := s.SendClientStarted(); err != nil {
return err
}
if s.opts.ConsulClient != nil {
var err error
s.configChanges, err = s.opts.ConsulClient.WatchConfig()
if err != nil {
return err
}
} else {
s.configChanges = make(chan consul.Env)
}
go s.Reader()
s.Writer()
return nil
}
func (s *ClientState) closeRedisConnections() {
for _, rs := range s.redisSystems {
if rs.currentMaster != nil {
rs.currentMaster.Close()
}
}
}
// RunConfigurationClient keeps a client running until the process receives an
// INT or a TERM signal.
func RunConfigurationClient(o ClientOptions) error {
logInfo("client started with options: %+v\n", o)
for !interrupted {
state := &ClientState{
opts: &o,
readerDone: make(chan struct{}, 1),
writerDone: make(chan struct{}, 1),
redisSystems: make(map[string]*RedisSystem, 0),
}
state.input = make(chan MsgBody, 1000)
err := state.Run()
if err != nil {
logError("client exited prematurely: %s", err)
if !interrupted {
// TODO: exponential backoff with jitter.
time.Sleep(1 * time.Second)
}
}
}
logInfo("client terminated")
return nil
}
| DetermineInitialMasters | identifier_name |
client.go | package main
import (
"encoding/json"
"fmt"
"net/url"
"os"
"sync"
"sync/atomic"
"time"
"github.com/gorilla/websocket"
"github.com/xing/beetle/consul"
)
// ClientOptions consist of the id by which the client identifies itself with
// the server, the overall configuration and pointer to a ConsulClient.
type ClientOptions struct {
Id string
Config *Config
ConsulClient *consul.Client
}
// RedisSystem holds the switch protocol state for each system name.
type RedisSystem struct {
system string
currentMaster *RedisShim
currentToken string
client *ClientState
}
// ClientState holds the client state.
type ClientState struct {
opts *ClientOptions
mutex sync.Mutex
ws *websocket.Conn
input chan MsgBody
writerDone chan struct{}
readerDone chan struct{}
configChanges chan consul.Env
redisSystems map[string]*RedisSystem
}
// GetConfig returns the client configuration in a thread safe way.
func (s *ClientState) GetConfig() *Config {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.opts.Config
}
// GetConfig returns the client configuration in a thread safe way.
func (s *RedisSystem) GetConfig() *Config {
return s.client.GetConfig()
}
// SetConfig sets replaces the current config with a new one in athread safe way
// and returns the old config.
func (s *ClientState) SetConfig(config *Config) *Config {
s.mutex.Lock()
defer s.mutex.Unlock()
oldconfig := s.opts.Config
s.opts.Config = config
return oldconfig
}
// ServerUrl constructs the webesocker URL to contact the server.
func (s *ClientState) ServerUrl() string {
config := s.GetConfig()
addr := fmt.Sprintf("%s:%d", config.Server, config.Port)
u := url.URL{Scheme: "ws", Host: addr, Path: "/configuration"}
return u.String()
}
// Connect establishes a webscket connection to the server.
func (s *ClientState) Connect() (err error) {
url := s.ServerUrl()
// copy default dialer to avoid race conditions
dialer := *websocket.DefaultDialer
dialer.HandshakeTimeout = time.Duration(s.opts.Config.DialTimeout) * time.Second
logInfo("connecting to %s, timeout: %s", url, dialer.HandshakeTimeout)
s.ws, _, err = dialer.Dial(url, nil)
if err != nil {
logError("could not establish web socket connection")
return
}
logInfo("established web socket connection")
return
}
// Close sends a Close message to the server and closed the connection.
func (s *ClientState) Close() {
defer s.ws.Close()
s.ws.SetWriteDeadline(time.Now().Add(WEBSOCKET_CLOSE_TIMEOUT))
err := s.ws.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
if err != nil {
logError("writing websocket close failed: %s", err)
}
}
// Send a message to the server.
func (s *ClientState) send(msg MsgBody) error {
b, err := json.Marshal(msg)
if err != nil {
logError("could not marshal message: %s", err)
return err
}
logDebug("sending message")
s.ws.SetWriteDeadline(time.Now().Add(WEBSOCKET_WRITE_TIMEOUT))
err = s.ws.WriteMessage(websocket.TextMessage, b)
if err != nil {
logError("could not send message: %s", err)
return err
}
logDebug("sent: %s", string(b))
return nil
}
// SendHeartBeat sends a heartbeat message to the server.
func (s *ClientState) SendHeartBeat() error {
return s.send(MsgBody{Name: HEARTBEAT, Id: s.opts.Id})
}
// Ping sends a PING message to the server.
func (s *ClientState) Ping(pingMsg MsgBody) error {
logInfo("Received ping message")
rs := s.RegisterSystem(pingMsg.System)
if rs.RedeemToken(pingMsg.Token) |
return nil
}
// RedeemToken checks the validity of the given token.
func (s *RedisSystem) RedeemToken(token string) bool {
if s.currentToken == "" || token > s.currentToken {
s.currentToken = token
}
tokenValid := token >= s.currentToken
if !tokenValid {
logInfo("invalid token: %s is not greater or equal to %s", token, s.currentToken)
}
return tokenValid
}
// SendPong sends a PONG message to the server.
func (s *ClientState) SendPong(rs *RedisSystem) error {
return s.send(MsgBody{System: rs.system, Name: PONG, Id: s.opts.Id, Token: rs.currentToken})
}
// SendClientInvalidated sends a CLIENT_INVALIDATED message to the server.
func (s *ClientState) SendClientInvalidated(rs *RedisSystem) error {
return s.send(MsgBody{System: rs.system, Name: CLIENT_INVALIDATED, Id: s.opts.Id, Token: rs.currentToken})
}
// SendClientStarted sends a CLIENT_STARTED message to the server.
func (s *ClientState) SendClientStarted() error {
return s.send(MsgBody{Name: CLIENT_STARTED, Id: s.opts.Id})
}
// NewMaster modifies the client state by setting the current master to a new
// one.
func (s *RedisSystem) NewMaster(server string) {
logInfo("setting new master: %s", server)
s.currentMaster = NewRedisShim(server)
}
// UpdateMasterFile writes the known masters information to the redis master file.
func (s *ClientState) UpdateMasterFile() {
path := s.GetConfig().RedisMasterFile
systems := make(map[string]string, 0)
for system, rs := range s.redisSystems {
if rs.currentMaster == nil {
systems[system] = ""
} else {
systems[system] = rs.currentMaster.server
}
}
content := MarshalMasterFileContent(systems)
WriteRedisMasterFile(path, content)
}
// DetermineInitialMasters tries to read the current masters from disk
// and establish the system name to redis shim mapping.
func (s *ClientState) DetermineInitialMasters() {
path := s.GetConfig().RedisMasterFile
if !MasterFileExists(path) {
s.UpdateMasterFile()
return
}
masters := RedisMastersFromMasterFile(path)
invalidSystems := make([]string, 0)
for system, server := range masters {
rs := s.RegisterSystem(system)
if server != "" {
rs.NewMaster(server)
}
if rs.currentMaster == nil || !rs.currentMaster.IsMaster() {
invalidSystems = append(invalidSystems, system)
rs.currentMaster = nil
}
}
if len(invalidSystems) > 0 {
logInfo("clearing systems from master file %s", invalidSystems)
s.UpdateMasterFile()
}
}
func (s *ClientState) RegisterSystem(system string) *RedisSystem {
rs := s.redisSystems[system]
if rs == nil {
rs = &RedisSystem{system: system}
s.redisSystems[system] = rs
}
return rs
}
// Invalidate sets the current master for the given system to nil, removes the
// corresponding line from the the redis master file and sends a
// CLIENT_INVALIDATED message to the server, provided the token sent with the
// message is valid.
func (s *ClientState) Invalidate(msg MsgBody) error {
rs := s.RegisterSystem(msg.System)
if rs.RedeemToken(msg.Token) && (rs.currentMaster == nil || rs.currentMaster.Role() != MASTER) {
rs.currentMaster = nil
logInfo("Removing invalidated system '%s' from redis master file", msg.System)
s.UpdateMasterFile()
logInfo("Sending client_invalidated message with id '%s' and token '%s'", s.opts.Id, rs.currentToken)
return s.SendClientInvalidated(rs)
}
return nil
}
// Reconfigure updates the redis mater file on disk, provided the token sent
// with the message is valid.
func (s *ClientState) Reconfigure(msg MsgBody) error {
logInfo("Received reconfigure message with server '%s' and token '%s'", msg.Server, msg.Token)
rs := s.RegisterSystem(msg.System)
if !rs.RedeemToken(msg.Token) {
logInfo("Received invalid or outdated token: '%s'", msg.Token)
}
if rs.currentMaster == nil || rs.currentMaster.server != msg.Server {
rs.NewMaster(msg.Server)
s.UpdateMasterFile()
}
return nil
}
// Reader reads messages from the server and forwards them on an internal
// channel to the Writer, which acts as a message dispatcher. It exits when
// reading results in an error or when the server closes the socket.
func (s *ClientState) Reader() {
defer func() { s.readerDone <- struct{}{} }()
for !interrupted {
select {
case <-s.writerDone:
return
default:
}
logDebug("reading message")
s.ws.SetReadDeadline(time.Now().Add(WEBSOCKET_READ_TIMEOUT))
msgType, bytes, err := s.ws.ReadMessage()
atomic.AddInt64(&processed, 1)
if err != nil || msgType != websocket.TextMessage {
logError("error reading from server socket: %s", err)
return
}
logDebug("received: %s", string(bytes))
var body MsgBody
err = json.Unmarshal(bytes, &body)
if err != nil {
logError("reader: could not parse msg: %s", err)
return
}
s.input <- body
}
}
// Writer reads messages from an internal channel and dispatches them. It
// periodically sends a HEARTBEAT message to the server. It if receives a config
// change message, it replaces the current config with the new one. If the
// config change implies that the server URL has changed, it exits, relying on
// the outer loop to restart the client.
func (s *ClientState) Writer() {
ticker := time.NewTicker(1 * time.Second)
defer s.Close()
defer ticker.Stop()
defer func() { s.writerDone <- struct{}{} }()
i := 0
var err error
for !interrupted {
select {
case msg := <-s.input:
err = s.Dispatch(msg)
case <-ticker.C:
i = (i + 1) % s.GetConfig().ClientHeartbeat
if i == 0 {
err = s.SendHeartBeat()
}
case <-s.readerDone:
return
case env := <-s.configChanges:
if env != nil {
newconfig := buildConfig(env)
oldconfig := s.SetConfig(newconfig)
logInfo("updated server config from consul: %s", s.GetConfig())
if newconfig.RedisMasterFile != oldconfig.RedisMasterFile {
if err := os.Rename(oldconfig.RedisMasterFile, newconfig.RedisMasterFile); err != nil {
logError("could not rename redis master file to: %s", newconfig.RedisMasterFile)
}
}
if newconfig.ServerUrl() != oldconfig.ServerUrl() {
logInfo("restarting client because server url has changed: %s", newconfig.ServerUrl())
return
}
}
}
if err != nil {
return
}
}
}
// Dispatch dispatches matches rceived from the server to appropriate methods.
func (s *ClientState) Dispatch(msg MsgBody) error {
logDebug("dispatcher received: %+v", msg)
switch msg.Name {
case RECONFIGURE:
return s.Reconfigure(msg)
case PING:
return s.Ping(msg)
case INVALIDATE:
return s.Invalidate(msg)
default:
logError("unexpected message: %s", msg.Name)
}
return nil
}
// Run establishes a websocket connection to the server, starts reader and
// writer routines and a consul watcher for config changes. It exits when the
// writer exits.
func (s *ClientState) Run() error {
s.DetermineInitialMasters()
defer s.closeRedisConnections()
if err := s.Connect(); err != nil {
return err
}
if err := VerifyMasterFileString(s.GetConfig().RedisMasterFile); err != nil {
return err
}
if err := s.SendClientStarted(); err != nil {
return err
}
if s.opts.ConsulClient != nil {
var err error
s.configChanges, err = s.opts.ConsulClient.WatchConfig()
if err != nil {
return err
}
} else {
s.configChanges = make(chan consul.Env)
}
go s.Reader()
s.Writer()
return nil
}
func (s *ClientState) closeRedisConnections() {
for _, rs := range s.redisSystems {
if rs.currentMaster != nil {
rs.currentMaster.Close()
}
}
}
// RunConfigurationClient keeps a client running until the process receives an
// INT or a TERM signal.
func RunConfigurationClient(o ClientOptions) error {
logInfo("client started with options: %+v\n", o)
for !interrupted {
state := &ClientState{
opts: &o,
readerDone: make(chan struct{}, 1),
writerDone: make(chan struct{}, 1),
redisSystems: make(map[string]*RedisSystem, 0),
}
state.input = make(chan MsgBody, 1000)
err := state.Run()
if err != nil {
logError("client exited prematurely: %s", err)
if !interrupted {
// TODO: exponential backoff with jitter.
time.Sleep(1 * time.Second)
}
}
}
logInfo("client terminated")
return nil
}
| {
return s.SendPong(rs)
} | conditional_block |
client.go | package main
import (
"encoding/json"
"fmt"
"net/url"
"os"
"sync"
"sync/atomic"
"time"
"github.com/gorilla/websocket"
"github.com/xing/beetle/consul"
)
// ClientOptions consist of the id by which the client identifies itself with
// the server, the overall configuration and pointer to a ConsulClient.
type ClientOptions struct {
Id string
Config *Config
ConsulClient *consul.Client
}
// RedisSystem holds the switch protocol state for each system name.
type RedisSystem struct {
system string
currentMaster *RedisShim
currentToken string
client *ClientState
}
// ClientState holds the client state.
type ClientState struct {
opts *ClientOptions
mutex sync.Mutex
ws *websocket.Conn
input chan MsgBody
writerDone chan struct{}
readerDone chan struct{}
configChanges chan consul.Env
redisSystems map[string]*RedisSystem
}
// GetConfig returns the client configuration in a thread safe way.
func (s *ClientState) GetConfig() *Config {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.opts.Config
}
// GetConfig returns the client configuration in a thread safe way.
func (s *RedisSystem) GetConfig() *Config {
return s.client.GetConfig()
}
// SetConfig sets replaces the current config with a new one in athread safe way
// and returns the old config.
func (s *ClientState) SetConfig(config *Config) *Config {
s.mutex.Lock()
defer s.mutex.Unlock()
oldconfig := s.opts.Config
s.opts.Config = config
return oldconfig
}
// ServerUrl constructs the webesocker URL to contact the server.
func (s *ClientState) ServerUrl() string {
config := s.GetConfig()
addr := fmt.Sprintf("%s:%d", config.Server, config.Port)
u := url.URL{Scheme: "ws", Host: addr, Path: "/configuration"}
return u.String()
}
// Connect establishes a webscket connection to the server.
func (s *ClientState) Connect() (err error) {
url := s.ServerUrl()
// copy default dialer to avoid race conditions
dialer := *websocket.DefaultDialer
dialer.HandshakeTimeout = time.Duration(s.opts.Config.DialTimeout) * time.Second
logInfo("connecting to %s, timeout: %s", url, dialer.HandshakeTimeout)
s.ws, _, err = dialer.Dial(url, nil)
if err != nil {
logError("could not establish web socket connection")
return
}
logInfo("established web socket connection")
return
}
// Close sends a Close message to the server and closed the connection.
func (s *ClientState) Close() {
defer s.ws.Close()
s.ws.SetWriteDeadline(time.Now().Add(WEBSOCKET_CLOSE_TIMEOUT))
err := s.ws.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
if err != nil {
logError("writing websocket close failed: %s", err)
}
}
// Send a message to the server.
func (s *ClientState) send(msg MsgBody) error {
b, err := json.Marshal(msg)
if err != nil {
logError("could not marshal message: %s", err)
return err
}
logDebug("sending message")
s.ws.SetWriteDeadline(time.Now().Add(WEBSOCKET_WRITE_TIMEOUT))
err = s.ws.WriteMessage(websocket.TextMessage, b)
if err != nil {
logError("could not send message: %s", err)
return err
}
logDebug("sent: %s", string(b))
return nil
}
// SendHeartBeat sends a heartbeat message to the server.
func (s *ClientState) SendHeartBeat() error {
return s.send(MsgBody{Name: HEARTBEAT, Id: s.opts.Id})
}
// Ping sends a PING message to the server.
func (s *ClientState) Ping(pingMsg MsgBody) error {
logInfo("Received ping message")
rs := s.RegisterSystem(pingMsg.System)
if rs.RedeemToken(pingMsg.Token) {
return s.SendPong(rs)
}
return nil
}
// RedeemToken checks the validity of the given token.
func (s *RedisSystem) RedeemToken(token string) bool {
if s.currentToken == "" || token > s.currentToken {
s.currentToken = token
}
tokenValid := token >= s.currentToken
if !tokenValid {
logInfo("invalid token: %s is not greater or equal to %s", token, s.currentToken)
}
return tokenValid
}
// SendPong sends a PONG message to the server.
func (s *ClientState) SendPong(rs *RedisSystem) error {
return s.send(MsgBody{System: rs.system, Name: PONG, Id: s.opts.Id, Token: rs.currentToken})
}
// SendClientInvalidated sends a CLIENT_INVALIDATED message to the server.
func (s *ClientState) SendClientInvalidated(rs *RedisSystem) error {
return s.send(MsgBody{System: rs.system, Name: CLIENT_INVALIDATED, Id: s.opts.Id, Token: rs.currentToken})
}
// SendClientStarted sends a CLIENT_STARTED message to the server.
func (s *ClientState) SendClientStarted() error {
return s.send(MsgBody{Name: CLIENT_STARTED, Id: s.opts.Id})
}
// NewMaster modifies the client state by setting the current master to a new
// one.
func (s *RedisSystem) NewMaster(server string) {
logInfo("setting new master: %s", server)
s.currentMaster = NewRedisShim(server)
}
// UpdateMasterFile writes the known masters information to the redis master file.
func (s *ClientState) UpdateMasterFile() {
path := s.GetConfig().RedisMasterFile
systems := make(map[string]string, 0)
for system, rs := range s.redisSystems {
if rs.currentMaster == nil {
systems[system] = ""
} else {
systems[system] = rs.currentMaster.server
}
}
content := MarshalMasterFileContent(systems)
WriteRedisMasterFile(path, content)
}
// DetermineInitialMasters tries to read the current masters from disk
// and establish the system name to redis shim mapping.
func (s *ClientState) DetermineInitialMasters() {
path := s.GetConfig().RedisMasterFile
if !MasterFileExists(path) {
s.UpdateMasterFile()
return
}
masters := RedisMastersFromMasterFile(path)
invalidSystems := make([]string, 0)
for system, server := range masters {
rs := s.RegisterSystem(system)
if server != "" {
rs.NewMaster(server)
}
if rs.currentMaster == nil || !rs.currentMaster.IsMaster() {
invalidSystems = append(invalidSystems, system)
rs.currentMaster = nil
}
}
if len(invalidSystems) > 0 {
logInfo("clearing systems from master file %s", invalidSystems)
s.UpdateMasterFile()
}
}
func (s *ClientState) RegisterSystem(system string) *RedisSystem {
rs := s.redisSystems[system]
if rs == nil {
rs = &RedisSystem{system: system}
s.redisSystems[system] = rs
}
return rs
}
// Invalidate sets the current master for the given system to nil, removes the
// corresponding line from the the redis master file and sends a
// CLIENT_INVALIDATED message to the server, provided the token sent with the
// message is valid.
func (s *ClientState) Invalidate(msg MsgBody) error {
rs := s.RegisterSystem(msg.System)
if rs.RedeemToken(msg.Token) && (rs.currentMaster == nil || rs.currentMaster.Role() != MASTER) {
rs.currentMaster = nil
logInfo("Removing invalidated system '%s' from redis master file", msg.System)
s.UpdateMasterFile()
logInfo("Sending client_invalidated message with id '%s' and token '%s'", s.opts.Id, rs.currentToken)
return s.SendClientInvalidated(rs)
}
return nil
}
// Reconfigure updates the redis mater file on disk, provided the token sent
// with the message is valid.
func (s *ClientState) Reconfigure(msg MsgBody) error {
logInfo("Received reconfigure message with server '%s' and token '%s'", msg.Server, msg.Token)
rs := s.RegisterSystem(msg.System)
if !rs.RedeemToken(msg.Token) {
logInfo("Received invalid or outdated token: '%s'", msg.Token)
}
if rs.currentMaster == nil || rs.currentMaster.server != msg.Server {
rs.NewMaster(msg.Server)
s.UpdateMasterFile()
}
return nil
}
// Reader reads messages from the server and forwards them on an internal
// channel to the Writer, which acts as a message dispatcher. It exits when
// reading results in an error or when the server closes the socket.
func (s *ClientState) Reader() {
defer func() { s.readerDone <- struct{}{} }()
for !interrupted {
select {
case <-s.writerDone:
return
default:
}
logDebug("reading message")
s.ws.SetReadDeadline(time.Now().Add(WEBSOCKET_READ_TIMEOUT))
msgType, bytes, err := s.ws.ReadMessage()
atomic.AddInt64(&processed, 1)
if err != nil || msgType != websocket.TextMessage {
logError("error reading from server socket: %s", err)
return
}
logDebug("received: %s", string(bytes))
var body MsgBody
err = json.Unmarshal(bytes, &body)
if err != nil {
logError("reader: could not parse msg: %s", err)
return
}
s.input <- body
}
}
// Writer reads messages from an internal channel and dispatches them. It
// periodically sends a HEARTBEAT message to the server. It if receives a config
// change message, it replaces the current config with the new one. If the
// config change implies that the server URL has changed, it exits, relying on
// the outer loop to restart the client.
func (s *ClientState) Writer() {
ticker := time.NewTicker(1 * time.Second)
defer s.Close()
defer ticker.Stop()
defer func() { s.writerDone <- struct{}{} }()
i := 0
var err error
for !interrupted {
select {
case msg := <-s.input:
err = s.Dispatch(msg)
case <-ticker.C:
i = (i + 1) % s.GetConfig().ClientHeartbeat
if i == 0 {
err = s.SendHeartBeat()
}
case <-s.readerDone:
return
case env := <-s.configChanges:
if env != nil {
newconfig := buildConfig(env)
oldconfig := s.SetConfig(newconfig)
logInfo("updated server config from consul: %s", s.GetConfig())
if newconfig.RedisMasterFile != oldconfig.RedisMasterFile {
if err := os.Rename(oldconfig.RedisMasterFile, newconfig.RedisMasterFile); err != nil {
logError("could not rename redis master file to: %s", newconfig.RedisMasterFile)
}
}
if newconfig.ServerUrl() != oldconfig.ServerUrl() {
logInfo("restarting client because server url has changed: %s", newconfig.ServerUrl())
return
}
}
}
if err != nil {
return
}
}
}
// Dispatch dispatches matches rceived from the server to appropriate methods.
func (s *ClientState) Dispatch(msg MsgBody) error {
logDebug("dispatcher received: %+v", msg)
switch msg.Name {
case RECONFIGURE:
return s.Reconfigure(msg)
case PING:
return s.Ping(msg)
case INVALIDATE:
return s.Invalidate(msg)
default:
logError("unexpected message: %s", msg.Name)
}
return nil
}
// Run establishes a websocket connection to the server, starts reader and
// writer routines and a consul watcher for config changes. It exits when the
// writer exits.
func (s *ClientState) Run() error {
s.DetermineInitialMasters()
defer s.closeRedisConnections()
if err := s.Connect(); err != nil {
return err
}
if err := VerifyMasterFileString(s.GetConfig().RedisMasterFile); err != nil {
return err
}
if err := s.SendClientStarted(); err != nil {
return err
}
if s.opts.ConsulClient != nil {
var err error
s.configChanges, err = s.opts.ConsulClient.WatchConfig()
if err != nil {
return err
}
} else {
s.configChanges = make(chan consul.Env)
}
go s.Reader()
s.Writer()
return nil
}
func (s *ClientState) closeRedisConnections() {
for _, rs := range s.redisSystems {
if rs.currentMaster != nil {
rs.currentMaster.Close()
}
}
}
// RunConfigurationClient keeps a client running until the process receives an
// INT or a TERM signal.
func RunConfigurationClient(o ClientOptions) error {
logInfo("client started with options: %+v\n", o)
for !interrupted {
state := &ClientState{
opts: &o,
readerDone: make(chan struct{}, 1),
writerDone: make(chan struct{}, 1),
redisSystems: make(map[string]*RedisSystem, 0),
}
state.input = make(chan MsgBody, 1000)
err := state.Run()
if err != nil {
logError("client exited prematurely: %s", err)
if !interrupted {
// TODO: exponential backoff with jitter.
time.Sleep(1 * time.Second)
} | }
logInfo("client terminated")
return nil
} | } | random_line_split |
client.go | package main
import (
"encoding/json"
"fmt"
"net/url"
"os"
"sync"
"sync/atomic"
"time"
"github.com/gorilla/websocket"
"github.com/xing/beetle/consul"
)
// ClientOptions consist of the id by which the client identifies itself with
// the server, the overall configuration and pointer to a ConsulClient.
type ClientOptions struct {
Id string
Config *Config
ConsulClient *consul.Client
}
// RedisSystem holds the switch protocol state for each system name.
type RedisSystem struct {
system string
currentMaster *RedisShim
currentToken string
client *ClientState
}
// ClientState holds the client state.
type ClientState struct {
opts *ClientOptions
mutex sync.Mutex
ws *websocket.Conn
input chan MsgBody
writerDone chan struct{}
readerDone chan struct{}
configChanges chan consul.Env
redisSystems map[string]*RedisSystem
}
// GetConfig returns the client configuration in a thread safe way.
func (s *ClientState) GetConfig() *Config {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.opts.Config
}
// GetConfig returns the client configuration in a thread safe way.
func (s *RedisSystem) GetConfig() *Config {
return s.client.GetConfig()
}
// SetConfig sets replaces the current config with a new one in athread safe way
// and returns the old config.
func (s *ClientState) SetConfig(config *Config) *Config {
s.mutex.Lock()
defer s.mutex.Unlock()
oldconfig := s.opts.Config
s.opts.Config = config
return oldconfig
}
// ServerUrl constructs the webesocker URL to contact the server.
func (s *ClientState) ServerUrl() string {
config := s.GetConfig()
addr := fmt.Sprintf("%s:%d", config.Server, config.Port)
u := url.URL{Scheme: "ws", Host: addr, Path: "/configuration"}
return u.String()
}
// Connect establishes a webscket connection to the server.
func (s *ClientState) Connect() (err error) {
url := s.ServerUrl()
// copy default dialer to avoid race conditions
dialer := *websocket.DefaultDialer
dialer.HandshakeTimeout = time.Duration(s.opts.Config.DialTimeout) * time.Second
logInfo("connecting to %s, timeout: %s", url, dialer.HandshakeTimeout)
s.ws, _, err = dialer.Dial(url, nil)
if err != nil {
logError("could not establish web socket connection")
return
}
logInfo("established web socket connection")
return
}
// Close sends a Close message to the server and closed the connection.
func (s *ClientState) Close() {
defer s.ws.Close()
s.ws.SetWriteDeadline(time.Now().Add(WEBSOCKET_CLOSE_TIMEOUT))
err := s.ws.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
if err != nil {
logError("writing websocket close failed: %s", err)
}
}
// Send a message to the server.
func (s *ClientState) send(msg MsgBody) error {
b, err := json.Marshal(msg)
if err != nil {
logError("could not marshal message: %s", err)
return err
}
logDebug("sending message")
s.ws.SetWriteDeadline(time.Now().Add(WEBSOCKET_WRITE_TIMEOUT))
err = s.ws.WriteMessage(websocket.TextMessage, b)
if err != nil {
logError("could not send message: %s", err)
return err
}
logDebug("sent: %s", string(b))
return nil
}
// SendHeartBeat sends a heartbeat message to the server.
func (s *ClientState) SendHeartBeat() error {
return s.send(MsgBody{Name: HEARTBEAT, Id: s.opts.Id})
}
// Ping sends a PING message to the server.
func (s *ClientState) Ping(pingMsg MsgBody) error {
logInfo("Received ping message")
rs := s.RegisterSystem(pingMsg.System)
if rs.RedeemToken(pingMsg.Token) {
return s.SendPong(rs)
}
return nil
}
// RedeemToken checks the validity of the given token.
func (s *RedisSystem) RedeemToken(token string) bool {
if s.currentToken == "" || token > s.currentToken {
s.currentToken = token
}
tokenValid := token >= s.currentToken
if !tokenValid {
logInfo("invalid token: %s is not greater or equal to %s", token, s.currentToken)
}
return tokenValid
}
// SendPong sends a PONG message to the server.
func (s *ClientState) SendPong(rs *RedisSystem) error {
return s.send(MsgBody{System: rs.system, Name: PONG, Id: s.opts.Id, Token: rs.currentToken})
}
// SendClientInvalidated sends a CLIENT_INVALIDATED message to the server.
func (s *ClientState) SendClientInvalidated(rs *RedisSystem) error {
return s.send(MsgBody{System: rs.system, Name: CLIENT_INVALIDATED, Id: s.opts.Id, Token: rs.currentToken})
}
// SendClientStarted sends a CLIENT_STARTED message to the server.
func (s *ClientState) SendClientStarted() error {
return s.send(MsgBody{Name: CLIENT_STARTED, Id: s.opts.Id})
}
// NewMaster modifies the client state by setting the current master to a new
// one.
func (s *RedisSystem) NewMaster(server string) {
logInfo("setting new master: %s", server)
s.currentMaster = NewRedisShim(server)
}
// UpdateMasterFile writes the known masters information to the redis master file.
func (s *ClientState) UpdateMasterFile() {
path := s.GetConfig().RedisMasterFile
systems := make(map[string]string, 0)
for system, rs := range s.redisSystems {
if rs.currentMaster == nil {
systems[system] = ""
} else {
systems[system] = rs.currentMaster.server
}
}
content := MarshalMasterFileContent(systems)
WriteRedisMasterFile(path, content)
}
// DetermineInitialMasters tries to read the current masters from disk
// and establish the system name to redis shim mapping.
func (s *ClientState) DetermineInitialMasters() {
path := s.GetConfig().RedisMasterFile
if !MasterFileExists(path) {
s.UpdateMasterFile()
return
}
masters := RedisMastersFromMasterFile(path)
invalidSystems := make([]string, 0)
for system, server := range masters {
rs := s.RegisterSystem(system)
if server != "" {
rs.NewMaster(server)
}
if rs.currentMaster == nil || !rs.currentMaster.IsMaster() {
invalidSystems = append(invalidSystems, system)
rs.currentMaster = nil
}
}
if len(invalidSystems) > 0 {
logInfo("clearing systems from master file %s", invalidSystems)
s.UpdateMasterFile()
}
}
func (s *ClientState) RegisterSystem(system string) *RedisSystem |
// Invalidate sets the current master for the given system to nil, removes the
// corresponding line from the the redis master file and sends a
// CLIENT_INVALIDATED message to the server, provided the token sent with the
// message is valid.
func (s *ClientState) Invalidate(msg MsgBody) error {
rs := s.RegisterSystem(msg.System)
if rs.RedeemToken(msg.Token) && (rs.currentMaster == nil || rs.currentMaster.Role() != MASTER) {
rs.currentMaster = nil
logInfo("Removing invalidated system '%s' from redis master file", msg.System)
s.UpdateMasterFile()
logInfo("Sending client_invalidated message with id '%s' and token '%s'", s.opts.Id, rs.currentToken)
return s.SendClientInvalidated(rs)
}
return nil
}
// Reconfigure updates the redis mater file on disk, provided the token sent
// with the message is valid.
func (s *ClientState) Reconfigure(msg MsgBody) error {
logInfo("Received reconfigure message with server '%s' and token '%s'", msg.Server, msg.Token)
rs := s.RegisterSystem(msg.System)
if !rs.RedeemToken(msg.Token) {
logInfo("Received invalid or outdated token: '%s'", msg.Token)
}
if rs.currentMaster == nil || rs.currentMaster.server != msg.Server {
rs.NewMaster(msg.Server)
s.UpdateMasterFile()
}
return nil
}
// Reader reads messages from the server and forwards them on an internal
// channel to the Writer, which acts as a message dispatcher. It exits when
// reading results in an error or when the server closes the socket.
func (s *ClientState) Reader() {
defer func() { s.readerDone <- struct{}{} }()
for !interrupted {
select {
case <-s.writerDone:
return
default:
}
logDebug("reading message")
s.ws.SetReadDeadline(time.Now().Add(WEBSOCKET_READ_TIMEOUT))
msgType, bytes, err := s.ws.ReadMessage()
atomic.AddInt64(&processed, 1)
if err != nil || msgType != websocket.TextMessage {
logError("error reading from server socket: %s", err)
return
}
logDebug("received: %s", string(bytes))
var body MsgBody
err = json.Unmarshal(bytes, &body)
if err != nil {
logError("reader: could not parse msg: %s", err)
return
}
s.input <- body
}
}
// Writer reads messages from an internal channel and dispatches them. It
// periodically sends a HEARTBEAT message to the server. It if receives a config
// change message, it replaces the current config with the new one. If the
// config change implies that the server URL has changed, it exits, relying on
// the outer loop to restart the client.
func (s *ClientState) Writer() {
ticker := time.NewTicker(1 * time.Second)
defer s.Close()
defer ticker.Stop()
defer func() { s.writerDone <- struct{}{} }()
i := 0
var err error
for !interrupted {
select {
case msg := <-s.input:
err = s.Dispatch(msg)
case <-ticker.C:
i = (i + 1) % s.GetConfig().ClientHeartbeat
if i == 0 {
err = s.SendHeartBeat()
}
case <-s.readerDone:
return
case env := <-s.configChanges:
if env != nil {
newconfig := buildConfig(env)
oldconfig := s.SetConfig(newconfig)
logInfo("updated server config from consul: %s", s.GetConfig())
if newconfig.RedisMasterFile != oldconfig.RedisMasterFile {
if err := os.Rename(oldconfig.RedisMasterFile, newconfig.RedisMasterFile); err != nil {
logError("could not rename redis master file to: %s", newconfig.RedisMasterFile)
}
}
if newconfig.ServerUrl() != oldconfig.ServerUrl() {
logInfo("restarting client because server url has changed: %s", newconfig.ServerUrl())
return
}
}
}
if err != nil {
return
}
}
}
// Dispatch dispatches matches rceived from the server to appropriate methods.
func (s *ClientState) Dispatch(msg MsgBody) error {
logDebug("dispatcher received: %+v", msg)
switch msg.Name {
case RECONFIGURE:
return s.Reconfigure(msg)
case PING:
return s.Ping(msg)
case INVALIDATE:
return s.Invalidate(msg)
default:
logError("unexpected message: %s", msg.Name)
}
return nil
}
// Run establishes a websocket connection to the server, starts reader and
// writer routines and a consul watcher for config changes. It exits when the
// writer exits.
func (s *ClientState) Run() error {
s.DetermineInitialMasters()
defer s.closeRedisConnections()
if err := s.Connect(); err != nil {
return err
}
if err := VerifyMasterFileString(s.GetConfig().RedisMasterFile); err != nil {
return err
}
if err := s.SendClientStarted(); err != nil {
return err
}
if s.opts.ConsulClient != nil {
var err error
s.configChanges, err = s.opts.ConsulClient.WatchConfig()
if err != nil {
return err
}
} else {
s.configChanges = make(chan consul.Env)
}
go s.Reader()
s.Writer()
return nil
}
func (s *ClientState) closeRedisConnections() {
for _, rs := range s.redisSystems {
if rs.currentMaster != nil {
rs.currentMaster.Close()
}
}
}
// RunConfigurationClient keeps a client running until the process receives an
// INT or a TERM signal.
func RunConfigurationClient(o ClientOptions) error {
logInfo("client started with options: %+v\n", o)
for !interrupted {
state := &ClientState{
opts: &o,
readerDone: make(chan struct{}, 1),
writerDone: make(chan struct{}, 1),
redisSystems: make(map[string]*RedisSystem, 0),
}
state.input = make(chan MsgBody, 1000)
err := state.Run()
if err != nil {
logError("client exited prematurely: %s", err)
if !interrupted {
// TODO: exponential backoff with jitter.
time.Sleep(1 * time.Second)
}
}
}
logInfo("client terminated")
return nil
}
| {
rs := s.redisSystems[system]
if rs == nil {
rs = &RedisSystem{system: system}
s.redisSystems[system] = rs
}
return rs
} | identifier_body |
indexer.pb.go | // Code generated by protoc-gen-go.
// source: indexer.proto
// DO NOT EDIT!
/*
Package indexer is a generated protocol buffer package.
It is generated from these files:
indexer.proto
It has these top-level messages:
ResolveRequest
ResolveResponse
Matcher
ValuesRequest
ValuesResponse
*/
package indexer
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type MatcherType int32
const (
MatcherType_Equal MatcherType = 0
MatcherType_NotEqual MatcherType = 1
MatcherType_RegexMatch MatcherType = 2
MatcherType_RegexNoMatch MatcherType = 3
)
var MatcherType_name = map[int32]string{
0: "Equal",
1: "NotEqual",
2: "RegexMatch",
3: "RegexNoMatch",
}
var MatcherType_value = map[string]int32{
"Equal": 0,
"NotEqual": 1,
"RegexMatch": 2,
"RegexNoMatch": 3,
}
func (x MatcherType) String() string {
return proto.EnumName(MatcherType_name, int32(x))
}
func (MatcherType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type ResolveRequest struct {
Matchers []*Matcher `protobuf:"bytes,1,rep,name=matchers" json:"matchers,omitempty"`
Partition int32 `protobuf:"varint,2,opt,name=partition" json:"partition,omitempty"`
}
func (m *ResolveRequest) Reset() { *m = ResolveRequest{} }
func (m *ResolveRequest) String() string { return proto.CompactTextString(m) }
func (*ResolveRequest) ProtoMessage() {}
func (*ResolveRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *ResolveRequest) GetMatchers() []*Matcher {
if m != nil {
return m.Matchers
}
return nil
}
func (m *ResolveRequest) GetPartition() int32 {
if m != nil {
return m.Partition
}
return 0
}
type ResolveResponse struct {
Ids []string `protobuf:"bytes,1,rep,name=ids" json:"ids,omitempty"`
}
func (m *ResolveResponse) Reset() { *m = ResolveResponse{} }
func (m *ResolveResponse) String() string { return proto.CompactTextString(m) }
func (*ResolveResponse) ProtoMessage() {}
func (*ResolveResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *ResolveResponse) GetIds() []string {
if m != nil {
return m.Ids
}
return nil
}
type Matcher struct {
Type MatcherType `protobuf:"varint,1,opt,name=type,enum=indexer.MatcherType" json:"type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
}
func (m *Matcher) Reset() { *m = Matcher{} }
func (m *Matcher) String() string { return proto.CompactTextString(m) }
func (*Matcher) ProtoMessage() {}
func (*Matcher) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Matcher) GetType() MatcherType {
if m != nil {
return m.Type
}
return MatcherType_Equal
}
func (m *Matcher) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Matcher) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
type ValuesRequest struct {
Field string `protobuf:"bytes,1,opt,name=field" json:"field,omitempty"`
Partition int32 `protobuf:"varint,2,opt,name=partition" json:"partition,omitempty"`
}
func (m *ValuesRequest) Reset() { *m = ValuesRequest{} }
func (m *ValuesRequest) String() string { return proto.CompactTextString(m) }
func (*ValuesRequest) ProtoMessage() {}
func (*ValuesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *ValuesRequest) GetField() string {
if m != nil {
return m.Field
}
return ""
}
func (m *ValuesRequest) GetPartition() int32 {
if m != nil {
return m.Partition
}
return 0
}
type ValuesResponse struct {
Values []string `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
}
func (m *ValuesResponse) Reset() { *m = ValuesResponse{} }
func (m *ValuesResponse) String() string { return proto.CompactTextString(m) }
func (*ValuesResponse) ProtoMessage() {}
func (*ValuesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ValuesResponse) GetValues() []string {
if m != nil {
return m.Values
}
return nil
}
func init() {
proto.RegisterType((*ResolveRequest)(nil), "indexer.ResolveRequest")
proto.RegisterType((*ResolveResponse)(nil), "indexer.ResolveResponse")
proto.RegisterType((*Matcher)(nil), "indexer.Matcher")
proto.RegisterType((*ValuesRequest)(nil), "indexer.ValuesRequest")
proto.RegisterType((*ValuesResponse)(nil), "indexer.ValuesResponse")
proto.RegisterEnum("indexer.MatcherType", MatcherType_name, MatcherType_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Resolver service
type ResolverClient interface {
Resolve(ctx context.Context, in *ResolveRequest, opts ...grpc.CallOption) (*ResolveResponse, error)
Values(ctx context.Context, in *ValuesRequest, opts ...grpc.CallOption) (*ValuesResponse, error)
}
type resolverClient struct {
cc *grpc.ClientConn
}
func NewResolverClient(cc *grpc.ClientConn) ResolverClient {
return &resolverClient{cc}
}
func (c *resolverClient) Resolve(ctx context.Context, in *ResolveRequest, opts ...grpc.CallOption) (*ResolveResponse, error) {
out := new(ResolveResponse)
err := grpc.Invoke(ctx, "/indexer.Resolver/Resolve", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *resolverClient) Values(ctx context.Context, in *ValuesRequest, opts ...grpc.CallOption) (*ValuesResponse, error) {
out := new(ValuesResponse)
err := grpc.Invoke(ctx, "/indexer.Resolver/Values", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Resolver service
type ResolverServer interface {
Resolve(context.Context, *ResolveRequest) (*ResolveResponse, error)
Values(context.Context, *ValuesRequest) (*ValuesResponse, error)
}
func RegisterResolverServer(s *grpc.Server, srv ResolverServer) {
s.RegisterService(&_Resolver_serviceDesc, srv)
}
func _Resolver_Resolve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ResolveRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ResolverServer).Resolve(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/indexer.Resolver/Resolve",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResolverServer).Resolve(ctx, req.(*ResolveRequest))
}
return interceptor(ctx, in, info, handler)
}
| in := new(ValuesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ResolverServer).Values(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/indexer.Resolver/Values",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResolverServer).Values(ctx, req.(*ValuesRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Resolver_serviceDesc = grpc.ServiceDesc{
ServiceName: "indexer.Resolver",
HandlerType: (*ResolverServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Resolve",
Handler: _Resolver_Resolve_Handler,
},
{
MethodName: "Values",
Handler: _Resolver_Values_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "indexer.proto",
}
func init() { proto.RegisterFile("indexer.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 326 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x52, 0x41, 0x4f, 0xf2, 0x40,
0x14, 0x64, 0x29, 0x05, 0xfa, 0x80, 0x7e, 0xcd, 0x0b, 0x81, 0x86, 0x7c, 0x07, 0x52, 0x2f, 0x8d,
0x31, 0x1c, 0xf0, 0x68, 0xe2, 0xc5, 0x98, 0x78, 0x91, 0xc3, 0xc6, 0x78, 0xd2, 0x43, 0x95, 0xa7,
0x36, 0x29, 0xdd, 0xd2, 0x5d, 0x08, 0xfc, 0x02, 0xff, 0xb6, 0x61, 0x77, 0x5b, 0x45, 0x0e, 0xde,
0x66, 0x66, 0x67, 0xe7, 0xed, 0xbc, 0x16, 0x06, 0x69, 0xbe, 0xa4, 0x1d, 0x95, 0xb3, 0xa2, 0x14,
0x4a, 0x60, 0xc7, 0xd2, 0xe8, 0x09, 0x7c, 0x4e, 0x52, 0x64, 0x5b, 0xe2, 0xb4, 0xde, 0x90, 0x54,
0x78, 0x01, 0xdd, 0x55, 0xa2, 0x5e, 0x3f, 0xa8, 0x94, 0x21, 0x9b, 0x3a, 0x71, 0x6f, 0x1e, 0xcc,
0xaa, 0xcb, 0xf7, 0xe6, 0x80, 0xd7, 0x0e, 0xfc, 0x0f, 0x5e, 0x91, 0x94, 0x2a, 0x55, 0xa9, 0xc8,
0xc3, 0xe6, 0x94, 0xc5, 0x2e, 0xff, 0x16, 0xa2, 0x33, 0xf8, 0x57, 0xa7, 0xcb, 0x42, 0xe4, 0x92,
0x30, 0x00, 0x27, 0x5d, 0x9a, 0x64, 0x8f, 0x1f, 0x60, 0xf4, 0x0c, 0x1d, 0x9b, 0x8b, 0x31, 0xb4,
0xd4, 0xbe, 0xa0, 0x90, 0x4d, 0x59, 0xec, 0xcf, 0x87, 0xbf, 0xe7, 0x3e, 0xec, 0x0b, 0xe2, 0xda,
0x81, 0x08, 0xad, 0x3c, 0x59, 0x91, 0x1e, 0xe9, 0x71, 0x8d, 0x71, 0x08, 0xee, 0x36, 0xc9, 0x36,
0x14, 0x3a, 0x5a, 0x34, 0x24, 0xba, 0x81, 0xc1, 0xe3, 0x01, 0xc8, 0xaa, 0xe0, 0x10, 0xdc, 0xb7,
0x94, 0xb2, 0xa5, 0x9e, 0xe2, 0x71, 0x43, 0xfe, 0x28, 0x12, 0x83, 0x5f, 0x85, 0xd8, 0x1e, 0x23,
0x68, 0xeb, 0xfc, 0xaa, 0x8a, 0x65, 0xe7, 0x77, 0xd0, 0xfb, 0xf1, 0x5a, 0xf4, 0xc0, 0xbd, 0x5d,
0x6f, 0x92, 0x2c, 0x68, 0x60, 0x1f, 0xba, 0x0b, 0xa1, 0x0c, 0x63, 0xe8, 0x03, 0x70, 0x7a, 0xa7,
0x9d, 0x36, 0x07, 0x4d, 0x0c, 0xa0, 0xaf, 0xf9, 0x42, 0x18, 0xc5, 0x99, 0x7f, 0x32, 0xe8, 0xda,
0xed, 0x95, 0x78, 0x0d, 0x1d, 0x8b, 0x71, 0x5c, 0xaf, 0xe5, 0xf8, 0xcb, 0x4d, 0xc2, 0xd3, 0x03,
0xf3, 0xd8, 0xa8, 0x81, 0x57, 0xd0, 0x36, 0x05, 0x70, 0x54, 0xbb, 0x8e, 0xd6, 0x32, 0x19, 0x9f,
0xe8, 0xd5, 0xe5, 0x97, 0xb6, 0xfe, 0x69, 0x2e, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x79, 0xff,
0x98, 0x48, 0x45, 0x02, 0x00, 0x00,
} | func _Resolver_Values_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | random_line_split |
indexer.pb.go | // Code generated by protoc-gen-go.
// source: indexer.proto
// DO NOT EDIT!
/*
Package indexer is a generated protocol buffer package.
It is generated from these files:
indexer.proto
It has these top-level messages:
ResolveRequest
ResolveResponse
Matcher
ValuesRequest
ValuesResponse
*/
package indexer
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type MatcherType int32
const (
MatcherType_Equal MatcherType = 0
MatcherType_NotEqual MatcherType = 1
MatcherType_RegexMatch MatcherType = 2
MatcherType_RegexNoMatch MatcherType = 3
)
var MatcherType_name = map[int32]string{
0: "Equal",
1: "NotEqual",
2: "RegexMatch",
3: "RegexNoMatch",
}
var MatcherType_value = map[string]int32{
"Equal": 0,
"NotEqual": 1,
"RegexMatch": 2,
"RegexNoMatch": 3,
}
func (x MatcherType) String() string {
return proto.EnumName(MatcherType_name, int32(x))
}
func (MatcherType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type ResolveRequest struct {
Matchers []*Matcher `protobuf:"bytes,1,rep,name=matchers" json:"matchers,omitempty"`
Partition int32 `protobuf:"varint,2,opt,name=partition" json:"partition,omitempty"`
}
func (m *ResolveRequest) Reset() { *m = ResolveRequest{} }
func (m *ResolveRequest) String() string { return proto.CompactTextString(m) }
func (*ResolveRequest) ProtoMessage() {}
func (*ResolveRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *ResolveRequest) GetMatchers() []*Matcher {
if m != nil {
return m.Matchers
}
return nil
}
func (m *ResolveRequest) GetPartition() int32 {
if m != nil {
return m.Partition
}
return 0
}
type ResolveResponse struct {
Ids []string `protobuf:"bytes,1,rep,name=ids" json:"ids,omitempty"`
}
func (m *ResolveResponse) Reset() { *m = ResolveResponse{} }
func (m *ResolveResponse) String() string { return proto.CompactTextString(m) }
func (*ResolveResponse) ProtoMessage() {}
func (*ResolveResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *ResolveResponse) GetIds() []string {
if m != nil {
return m.Ids
}
return nil
}
type Matcher struct {
Type MatcherType `protobuf:"varint,1,opt,name=type,enum=indexer.MatcherType" json:"type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
}
func (m *Matcher) Reset() { *m = Matcher{} }
func (m *Matcher) String() string { return proto.CompactTextString(m) }
func (*Matcher) ProtoMessage() {}
func (*Matcher) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Matcher) GetType() MatcherType {
if m != nil |
return MatcherType_Equal
}
func (m *Matcher) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Matcher) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
type ValuesRequest struct {
Field string `protobuf:"bytes,1,opt,name=field" json:"field,omitempty"`
Partition int32 `protobuf:"varint,2,opt,name=partition" json:"partition,omitempty"`
}
func (m *ValuesRequest) Reset() { *m = ValuesRequest{} }
func (m *ValuesRequest) String() string { return proto.CompactTextString(m) }
func (*ValuesRequest) ProtoMessage() {}
func (*ValuesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *ValuesRequest) GetField() string {
if m != nil {
return m.Field
}
return ""
}
func (m *ValuesRequest) GetPartition() int32 {
if m != nil {
return m.Partition
}
return 0
}
type ValuesResponse struct {
Values []string `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
}
func (m *ValuesResponse) Reset() { *m = ValuesResponse{} }
func (m *ValuesResponse) String() string { return proto.CompactTextString(m) }
func (*ValuesResponse) ProtoMessage() {}
func (*ValuesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ValuesResponse) GetValues() []string {
if m != nil {
return m.Values
}
return nil
}
func init() {
proto.RegisterType((*ResolveRequest)(nil), "indexer.ResolveRequest")
proto.RegisterType((*ResolveResponse)(nil), "indexer.ResolveResponse")
proto.RegisterType((*Matcher)(nil), "indexer.Matcher")
proto.RegisterType((*ValuesRequest)(nil), "indexer.ValuesRequest")
proto.RegisterType((*ValuesResponse)(nil), "indexer.ValuesResponse")
proto.RegisterEnum("indexer.MatcherType", MatcherType_name, MatcherType_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Resolver service
type ResolverClient interface {
Resolve(ctx context.Context, in *ResolveRequest, opts ...grpc.CallOption) (*ResolveResponse, error)
Values(ctx context.Context, in *ValuesRequest, opts ...grpc.CallOption) (*ValuesResponse, error)
}
type resolverClient struct {
cc *grpc.ClientConn
}
func NewResolverClient(cc *grpc.ClientConn) ResolverClient {
return &resolverClient{cc}
}
func (c *resolverClient) Resolve(ctx context.Context, in *ResolveRequest, opts ...grpc.CallOption) (*ResolveResponse, error) {
out := new(ResolveResponse)
err := grpc.Invoke(ctx, "/indexer.Resolver/Resolve", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *resolverClient) Values(ctx context.Context, in *ValuesRequest, opts ...grpc.CallOption) (*ValuesResponse, error) {
out := new(ValuesResponse)
err := grpc.Invoke(ctx, "/indexer.Resolver/Values", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Resolver service
type ResolverServer interface {
Resolve(context.Context, *ResolveRequest) (*ResolveResponse, error)
Values(context.Context, *ValuesRequest) (*ValuesResponse, error)
}
func RegisterResolverServer(s *grpc.Server, srv ResolverServer) {
s.RegisterService(&_Resolver_serviceDesc, srv)
}
func _Resolver_Resolve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ResolveRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ResolverServer).Resolve(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/indexer.Resolver/Resolve",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResolverServer).Resolve(ctx, req.(*ResolveRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Resolver_Values_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ValuesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ResolverServer).Values(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/indexer.Resolver/Values",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResolverServer).Values(ctx, req.(*ValuesRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Resolver_serviceDesc = grpc.ServiceDesc{
ServiceName: "indexer.Resolver",
HandlerType: (*ResolverServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Resolve",
Handler: _Resolver_Resolve_Handler,
},
{
MethodName: "Values",
Handler: _Resolver_Values_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "indexer.proto",
}
func init() { proto.RegisterFile("indexer.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 326 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x52, 0x41, 0x4f, 0xf2, 0x40,
0x14, 0x64, 0x29, 0x05, 0xfa, 0x80, 0x7e, 0xcd, 0x0b, 0x81, 0x86, 0x7c, 0x07, 0x52, 0x2f, 0x8d,
0x31, 0x1c, 0xf0, 0x68, 0xe2, 0xc5, 0x98, 0x78, 0x91, 0xc3, 0xc6, 0x78, 0xd2, 0x43, 0x95, 0xa7,
0x36, 0x29, 0xdd, 0xd2, 0x5d, 0x08, 0xfc, 0x02, 0xff, 0xb6, 0x61, 0x77, 0x5b, 0x45, 0x0e, 0xde,
0x66, 0x66, 0x67, 0xe7, 0xed, 0xbc, 0x16, 0x06, 0x69, 0xbe, 0xa4, 0x1d, 0x95, 0xb3, 0xa2, 0x14,
0x4a, 0x60, 0xc7, 0xd2, 0xe8, 0x09, 0x7c, 0x4e, 0x52, 0x64, 0x5b, 0xe2, 0xb4, 0xde, 0x90, 0x54,
0x78, 0x01, 0xdd, 0x55, 0xa2, 0x5e, 0x3f, 0xa8, 0x94, 0x21, 0x9b, 0x3a, 0x71, 0x6f, 0x1e, 0xcc,
0xaa, 0xcb, 0xf7, 0xe6, 0x80, 0xd7, 0x0e, 0xfc, 0x0f, 0x5e, 0x91, 0x94, 0x2a, 0x55, 0xa9, 0xc8,
0xc3, 0xe6, 0x94, 0xc5, 0x2e, 0xff, 0x16, 0xa2, 0x33, 0xf8, 0x57, 0xa7, 0xcb, 0x42, 0xe4, 0x92,
0x30, 0x00, 0x27, 0x5d, 0x9a, 0x64, 0x8f, 0x1f, 0x60, 0xf4, 0x0c, 0x1d, 0x9b, 0x8b, 0x31, 0xb4,
0xd4, 0xbe, 0xa0, 0x90, 0x4d, 0x59, 0xec, 0xcf, 0x87, 0xbf, 0xe7, 0x3e, 0xec, 0x0b, 0xe2, 0xda,
0x81, 0x08, 0xad, 0x3c, 0x59, 0x91, 0x1e, 0xe9, 0x71, 0x8d, 0x71, 0x08, 0xee, 0x36, 0xc9, 0x36,
0x14, 0x3a, 0x5a, 0x34, 0x24, 0xba, 0x81, 0xc1, 0xe3, 0x01, 0xc8, 0xaa, 0xe0, 0x10, 0xdc, 0xb7,
0x94, 0xb2, 0xa5, 0x9e, 0xe2, 0x71, 0x43, 0xfe, 0x28, 0x12, 0x83, 0x5f, 0x85, 0xd8, 0x1e, 0x23,
0x68, 0xeb, 0xfc, 0xaa, 0x8a, 0x65, 0xe7, 0x77, 0xd0, 0xfb, 0xf1, 0x5a, 0xf4, 0xc0, 0xbd, 0x5d,
0x6f, 0x92, 0x2c, 0x68, 0x60, 0x1f, 0xba, 0x0b, 0xa1, 0x0c, 0x63, 0xe8, 0x03, 0x70, 0x7a, 0xa7,
0x9d, 0x36, 0x07, 0x4d, 0x0c, 0xa0, 0xaf, 0xf9, 0x42, 0x18, 0xc5, 0x99, 0x7f, 0x32, 0xe8, 0xda,
0xed, 0x95, 0x78, 0x0d, 0x1d, 0x8b, 0x71, 0x5c, 0xaf, 0xe5, 0xf8, 0xcb, 0x4d, 0xc2, 0xd3, 0x03,
0xf3, 0xd8, 0xa8, 0x81, 0x57, 0xd0, 0x36, 0x05, 0x70, 0x54, 0xbb, 0x8e, 0xd6, 0x32, 0x19, 0x9f,
0xe8, 0xd5, 0xe5, 0x97, 0xb6, 0xfe, 0x69, 0x2e, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x79, 0xff,
0x98, 0x48, 0x45, 0x02, 0x00, 0x00,
}
| {
return m.Type
} | conditional_block |
indexer.pb.go | // Code generated by protoc-gen-go.
// source: indexer.proto
// DO NOT EDIT!
/*
Package indexer is a generated protocol buffer package.
It is generated from these files:
indexer.proto
It has these top-level messages:
ResolveRequest
ResolveResponse
Matcher
ValuesRequest
ValuesResponse
*/
package indexer
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type MatcherType int32
const (
MatcherType_Equal MatcherType = 0
MatcherType_NotEqual MatcherType = 1
MatcherType_RegexMatch MatcherType = 2
MatcherType_RegexNoMatch MatcherType = 3
)
var MatcherType_name = map[int32]string{
0: "Equal",
1: "NotEqual",
2: "RegexMatch",
3: "RegexNoMatch",
}
var MatcherType_value = map[string]int32{
"Equal": 0,
"NotEqual": 1,
"RegexMatch": 2,
"RegexNoMatch": 3,
}
func (x MatcherType) String() string {
return proto.EnumName(MatcherType_name, int32(x))
}
func (MatcherType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type ResolveRequest struct {
Matchers []*Matcher `protobuf:"bytes,1,rep,name=matchers" json:"matchers,omitempty"`
Partition int32 `protobuf:"varint,2,opt,name=partition" json:"partition,omitempty"`
}
func (m *ResolveRequest) Reset() { *m = ResolveRequest{} }
func (m *ResolveRequest) String() string { return proto.CompactTextString(m) }
func (*ResolveRequest) ProtoMessage() {}
func (*ResolveRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *ResolveRequest) GetMatchers() []*Matcher {
if m != nil {
return m.Matchers
}
return nil
}
func (m *ResolveRequest) GetPartition() int32 {
if m != nil {
return m.Partition
}
return 0
}
type ResolveResponse struct {
Ids []string `protobuf:"bytes,1,rep,name=ids" json:"ids,omitempty"`
}
func (m *ResolveResponse) Reset() { *m = ResolveResponse{} }
func (m *ResolveResponse) String() string { return proto.CompactTextString(m) }
func (*ResolveResponse) ProtoMessage() {}
func (*ResolveResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *ResolveResponse) GetIds() []string {
if m != nil {
return m.Ids
}
return nil
}
type Matcher struct {
Type MatcherType `protobuf:"varint,1,opt,name=type,enum=indexer.MatcherType" json:"type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
}
func (m *Matcher) Reset() { *m = Matcher{} }
func (m *Matcher) String() string { return proto.CompactTextString(m) }
func (*Matcher) ProtoMessage() {}
func (*Matcher) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Matcher) GetType() MatcherType {
if m != nil {
return m.Type
}
return MatcherType_Equal
}
func (m *Matcher) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Matcher) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
type ValuesRequest struct {
Field string `protobuf:"bytes,1,opt,name=field" json:"field,omitempty"`
Partition int32 `protobuf:"varint,2,opt,name=partition" json:"partition,omitempty"`
}
func (m *ValuesRequest) Reset() { *m = ValuesRequest{} }
func (m *ValuesRequest) String() string { return proto.CompactTextString(m) }
func (*ValuesRequest) ProtoMessage() {}
func (*ValuesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *ValuesRequest) GetField() string {
if m != nil {
return m.Field
}
return ""
}
func (m *ValuesRequest) GetPartition() int32 {
if m != nil {
return m.Partition
}
return 0
}
type ValuesResponse struct {
Values []string `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
}
func (m *ValuesResponse) Reset() { *m = ValuesResponse{} }
func (m *ValuesResponse) String() string { return proto.CompactTextString(m) }
func (*ValuesResponse) ProtoMessage() {}
func (*ValuesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ValuesResponse) GetValues() []string {
if m != nil {
return m.Values
}
return nil
}
func init() {
proto.RegisterType((*ResolveRequest)(nil), "indexer.ResolveRequest")
proto.RegisterType((*ResolveResponse)(nil), "indexer.ResolveResponse")
proto.RegisterType((*Matcher)(nil), "indexer.Matcher")
proto.RegisterType((*ValuesRequest)(nil), "indexer.ValuesRequest")
proto.RegisterType((*ValuesResponse)(nil), "indexer.ValuesResponse")
proto.RegisterEnum("indexer.MatcherType", MatcherType_name, MatcherType_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Resolver service
type ResolverClient interface {
Resolve(ctx context.Context, in *ResolveRequest, opts ...grpc.CallOption) (*ResolveResponse, error)
Values(ctx context.Context, in *ValuesRequest, opts ...grpc.CallOption) (*ValuesResponse, error)
}
type resolverClient struct {
cc *grpc.ClientConn
}
func NewResolverClient(cc *grpc.ClientConn) ResolverClient |
func (c *resolverClient) Resolve(ctx context.Context, in *ResolveRequest, opts ...grpc.CallOption) (*ResolveResponse, error) {
out := new(ResolveResponse)
err := grpc.Invoke(ctx, "/indexer.Resolver/Resolve", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *resolverClient) Values(ctx context.Context, in *ValuesRequest, opts ...grpc.CallOption) (*ValuesResponse, error) {
out := new(ValuesResponse)
err := grpc.Invoke(ctx, "/indexer.Resolver/Values", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Resolver service
type ResolverServer interface {
Resolve(context.Context, *ResolveRequest) (*ResolveResponse, error)
Values(context.Context, *ValuesRequest) (*ValuesResponse, error)
}
func RegisterResolverServer(s *grpc.Server, srv ResolverServer) {
s.RegisterService(&_Resolver_serviceDesc, srv)
}
func _Resolver_Resolve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ResolveRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ResolverServer).Resolve(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/indexer.Resolver/Resolve",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResolverServer).Resolve(ctx, req.(*ResolveRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Resolver_Values_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ValuesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ResolverServer).Values(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/indexer.Resolver/Values",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResolverServer).Values(ctx, req.(*ValuesRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Resolver_serviceDesc = grpc.ServiceDesc{
ServiceName: "indexer.Resolver",
HandlerType: (*ResolverServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Resolve",
Handler: _Resolver_Resolve_Handler,
},
{
MethodName: "Values",
Handler: _Resolver_Values_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "indexer.proto",
}
func init() { proto.RegisterFile("indexer.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 326 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x52, 0x41, 0x4f, 0xf2, 0x40,
0x14, 0x64, 0x29, 0x05, 0xfa, 0x80, 0x7e, 0xcd, 0x0b, 0x81, 0x86, 0x7c, 0x07, 0x52, 0x2f, 0x8d,
0x31, 0x1c, 0xf0, 0x68, 0xe2, 0xc5, 0x98, 0x78, 0x91, 0xc3, 0xc6, 0x78, 0xd2, 0x43, 0x95, 0xa7,
0x36, 0x29, 0xdd, 0xd2, 0x5d, 0x08, 0xfc, 0x02, 0xff, 0xb6, 0x61, 0x77, 0x5b, 0x45, 0x0e, 0xde,
0x66, 0x66, 0x67, 0xe7, 0xed, 0xbc, 0x16, 0x06, 0x69, 0xbe, 0xa4, 0x1d, 0x95, 0xb3, 0xa2, 0x14,
0x4a, 0x60, 0xc7, 0xd2, 0xe8, 0x09, 0x7c, 0x4e, 0x52, 0x64, 0x5b, 0xe2, 0xb4, 0xde, 0x90, 0x54,
0x78, 0x01, 0xdd, 0x55, 0xa2, 0x5e, 0x3f, 0xa8, 0x94, 0x21, 0x9b, 0x3a, 0x71, 0x6f, 0x1e, 0xcc,
0xaa, 0xcb, 0xf7, 0xe6, 0x80, 0xd7, 0x0e, 0xfc, 0x0f, 0x5e, 0x91, 0x94, 0x2a, 0x55, 0xa9, 0xc8,
0xc3, 0xe6, 0x94, 0xc5, 0x2e, 0xff, 0x16, 0xa2, 0x33, 0xf8, 0x57, 0xa7, 0xcb, 0x42, 0xe4, 0x92,
0x30, 0x00, 0x27, 0x5d, 0x9a, 0x64, 0x8f, 0x1f, 0x60, 0xf4, 0x0c, 0x1d, 0x9b, 0x8b, 0x31, 0xb4,
0xd4, 0xbe, 0xa0, 0x90, 0x4d, 0x59, 0xec, 0xcf, 0x87, 0xbf, 0xe7, 0x3e, 0xec, 0x0b, 0xe2, 0xda,
0x81, 0x08, 0xad, 0x3c, 0x59, 0x91, 0x1e, 0xe9, 0x71, 0x8d, 0x71, 0x08, 0xee, 0x36, 0xc9, 0x36,
0x14, 0x3a, 0x5a, 0x34, 0x24, 0xba, 0x81, 0xc1, 0xe3, 0x01, 0xc8, 0xaa, 0xe0, 0x10, 0xdc, 0xb7,
0x94, 0xb2, 0xa5, 0x9e, 0xe2, 0x71, 0x43, 0xfe, 0x28, 0x12, 0x83, 0x5f, 0x85, 0xd8, 0x1e, 0x23,
0x68, 0xeb, 0xfc, 0xaa, 0x8a, 0x65, 0xe7, 0x77, 0xd0, 0xfb, 0xf1, 0x5a, 0xf4, 0xc0, 0xbd, 0x5d,
0x6f, 0x92, 0x2c, 0x68, 0x60, 0x1f, 0xba, 0x0b, 0xa1, 0x0c, 0x63, 0xe8, 0x03, 0x70, 0x7a, 0xa7,
0x9d, 0x36, 0x07, 0x4d, 0x0c, 0xa0, 0xaf, 0xf9, 0x42, 0x18, 0xc5, 0x99, 0x7f, 0x32, 0xe8, 0xda,
0xed, 0x95, 0x78, 0x0d, 0x1d, 0x8b, 0x71, 0x5c, 0xaf, 0xe5, 0xf8, 0xcb, 0x4d, 0xc2, 0xd3, 0x03,
0xf3, 0xd8, 0xa8, 0x81, 0x57, 0xd0, 0x36, 0x05, 0x70, 0x54, 0xbb, 0x8e, 0xd6, 0x32, 0x19, 0x9f,
0xe8, 0xd5, 0xe5, 0x97, 0xb6, 0xfe, 0x69, 0x2e, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x79, 0xff,
0x98, 0x48, 0x45, 0x02, 0x00, 0x00,
}
| {
return &resolverClient{cc}
} | identifier_body |
indexer.pb.go | // Code generated by protoc-gen-go.
// source: indexer.proto
// DO NOT EDIT!
/*
Package indexer is a generated protocol buffer package.
It is generated from these files:
indexer.proto
It has these top-level messages:
ResolveRequest
ResolveResponse
Matcher
ValuesRequest
ValuesResponse
*/
package indexer
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type MatcherType int32
const (
MatcherType_Equal MatcherType = 0
MatcherType_NotEqual MatcherType = 1
MatcherType_RegexMatch MatcherType = 2
MatcherType_RegexNoMatch MatcherType = 3
)
var MatcherType_name = map[int32]string{
0: "Equal",
1: "NotEqual",
2: "RegexMatch",
3: "RegexNoMatch",
}
var MatcherType_value = map[string]int32{
"Equal": 0,
"NotEqual": 1,
"RegexMatch": 2,
"RegexNoMatch": 3,
}
func (x MatcherType) String() string {
return proto.EnumName(MatcherType_name, int32(x))
}
func (MatcherType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type ResolveRequest struct {
Matchers []*Matcher `protobuf:"bytes,1,rep,name=matchers" json:"matchers,omitempty"`
Partition int32 `protobuf:"varint,2,opt,name=partition" json:"partition,omitempty"`
}
func (m *ResolveRequest) Reset() { *m = ResolveRequest{} }
func (m *ResolveRequest) String() string { return proto.CompactTextString(m) }
func (*ResolveRequest) ProtoMessage() {}
func (*ResolveRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *ResolveRequest) GetMatchers() []*Matcher {
if m != nil {
return m.Matchers
}
return nil
}
func (m *ResolveRequest) GetPartition() int32 {
if m != nil {
return m.Partition
}
return 0
}
type ResolveResponse struct {
Ids []string `protobuf:"bytes,1,rep,name=ids" json:"ids,omitempty"`
}
func (m *ResolveResponse) Reset() { *m = ResolveResponse{} }
func (m *ResolveResponse) String() string { return proto.CompactTextString(m) }
func (*ResolveResponse) ProtoMessage() {}
func (*ResolveResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *ResolveResponse) GetIds() []string {
if m != nil {
return m.Ids
}
return nil
}
type Matcher struct {
Type MatcherType `protobuf:"varint,1,opt,name=type,enum=indexer.MatcherType" json:"type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
}
func (m *Matcher) Reset() { *m = Matcher{} }
func (m *Matcher) String() string { return proto.CompactTextString(m) }
func (*Matcher) ProtoMessage() {}
func (*Matcher) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Matcher) GetType() MatcherType {
if m != nil {
return m.Type
}
return MatcherType_Equal
}
func (m *Matcher) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Matcher) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
type ValuesRequest struct {
Field string `protobuf:"bytes,1,opt,name=field" json:"field,omitempty"`
Partition int32 `protobuf:"varint,2,opt,name=partition" json:"partition,omitempty"`
}
func (m *ValuesRequest) Reset() { *m = ValuesRequest{} }
func (m *ValuesRequest) String() string { return proto.CompactTextString(m) }
func (*ValuesRequest) ProtoMessage() {}
func (*ValuesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *ValuesRequest) GetField() string {
if m != nil {
return m.Field
}
return ""
}
func (m *ValuesRequest) GetPartition() int32 {
if m != nil {
return m.Partition
}
return 0
}
type ValuesResponse struct {
Values []string `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
}
func (m *ValuesResponse) Reset() { *m = ValuesResponse{} }
func (m *ValuesResponse) String() string { return proto.CompactTextString(m) }
func (*ValuesResponse) ProtoMessage() {}
func (*ValuesResponse) | () ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ValuesResponse) GetValues() []string {
if m != nil {
return m.Values
}
return nil
}
func init() {
proto.RegisterType((*ResolveRequest)(nil), "indexer.ResolveRequest")
proto.RegisterType((*ResolveResponse)(nil), "indexer.ResolveResponse")
proto.RegisterType((*Matcher)(nil), "indexer.Matcher")
proto.RegisterType((*ValuesRequest)(nil), "indexer.ValuesRequest")
proto.RegisterType((*ValuesResponse)(nil), "indexer.ValuesResponse")
proto.RegisterEnum("indexer.MatcherType", MatcherType_name, MatcherType_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Resolver service
type ResolverClient interface {
Resolve(ctx context.Context, in *ResolveRequest, opts ...grpc.CallOption) (*ResolveResponse, error)
Values(ctx context.Context, in *ValuesRequest, opts ...grpc.CallOption) (*ValuesResponse, error)
}
type resolverClient struct {
cc *grpc.ClientConn
}
func NewResolverClient(cc *grpc.ClientConn) ResolverClient {
return &resolverClient{cc}
}
func (c *resolverClient) Resolve(ctx context.Context, in *ResolveRequest, opts ...grpc.CallOption) (*ResolveResponse, error) {
out := new(ResolveResponse)
err := grpc.Invoke(ctx, "/indexer.Resolver/Resolve", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *resolverClient) Values(ctx context.Context, in *ValuesRequest, opts ...grpc.CallOption) (*ValuesResponse, error) {
out := new(ValuesResponse)
err := grpc.Invoke(ctx, "/indexer.Resolver/Values", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Resolver service
type ResolverServer interface {
Resolve(context.Context, *ResolveRequest) (*ResolveResponse, error)
Values(context.Context, *ValuesRequest) (*ValuesResponse, error)
}
func RegisterResolverServer(s *grpc.Server, srv ResolverServer) {
s.RegisterService(&_Resolver_serviceDesc, srv)
}
func _Resolver_Resolve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ResolveRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ResolverServer).Resolve(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/indexer.Resolver/Resolve",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResolverServer).Resolve(ctx, req.(*ResolveRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Resolver_Values_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ValuesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ResolverServer).Values(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/indexer.Resolver/Values",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResolverServer).Values(ctx, req.(*ValuesRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Resolver_serviceDesc = grpc.ServiceDesc{
ServiceName: "indexer.Resolver",
HandlerType: (*ResolverServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Resolve",
Handler: _Resolver_Resolve_Handler,
},
{
MethodName: "Values",
Handler: _Resolver_Values_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "indexer.proto",
}
func init() { proto.RegisterFile("indexer.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 326 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x52, 0x41, 0x4f, 0xf2, 0x40,
0x14, 0x64, 0x29, 0x05, 0xfa, 0x80, 0x7e, 0xcd, 0x0b, 0x81, 0x86, 0x7c, 0x07, 0x52, 0x2f, 0x8d,
0x31, 0x1c, 0xf0, 0x68, 0xe2, 0xc5, 0x98, 0x78, 0x91, 0xc3, 0xc6, 0x78, 0xd2, 0x43, 0x95, 0xa7,
0x36, 0x29, 0xdd, 0xd2, 0x5d, 0x08, 0xfc, 0x02, 0xff, 0xb6, 0x61, 0x77, 0x5b, 0x45, 0x0e, 0xde,
0x66, 0x66, 0x67, 0xe7, 0xed, 0xbc, 0x16, 0x06, 0x69, 0xbe, 0xa4, 0x1d, 0x95, 0xb3, 0xa2, 0x14,
0x4a, 0x60, 0xc7, 0xd2, 0xe8, 0x09, 0x7c, 0x4e, 0x52, 0x64, 0x5b, 0xe2, 0xb4, 0xde, 0x90, 0x54,
0x78, 0x01, 0xdd, 0x55, 0xa2, 0x5e, 0x3f, 0xa8, 0x94, 0x21, 0x9b, 0x3a, 0x71, 0x6f, 0x1e, 0xcc,
0xaa, 0xcb, 0xf7, 0xe6, 0x80, 0xd7, 0x0e, 0xfc, 0x0f, 0x5e, 0x91, 0x94, 0x2a, 0x55, 0xa9, 0xc8,
0xc3, 0xe6, 0x94, 0xc5, 0x2e, 0xff, 0x16, 0xa2, 0x33, 0xf8, 0x57, 0xa7, 0xcb, 0x42, 0xe4, 0x92,
0x30, 0x00, 0x27, 0x5d, 0x9a, 0x64, 0x8f, 0x1f, 0x60, 0xf4, 0x0c, 0x1d, 0x9b, 0x8b, 0x31, 0xb4,
0xd4, 0xbe, 0xa0, 0x90, 0x4d, 0x59, 0xec, 0xcf, 0x87, 0xbf, 0xe7, 0x3e, 0xec, 0x0b, 0xe2, 0xda,
0x81, 0x08, 0xad, 0x3c, 0x59, 0x91, 0x1e, 0xe9, 0x71, 0x8d, 0x71, 0x08, 0xee, 0x36, 0xc9, 0x36,
0x14, 0x3a, 0x5a, 0x34, 0x24, 0xba, 0x81, 0xc1, 0xe3, 0x01, 0xc8, 0xaa, 0xe0, 0x10, 0xdc, 0xb7,
0x94, 0xb2, 0xa5, 0x9e, 0xe2, 0x71, 0x43, 0xfe, 0x28, 0x12, 0x83, 0x5f, 0x85, 0xd8, 0x1e, 0x23,
0x68, 0xeb, 0xfc, 0xaa, 0x8a, 0x65, 0xe7, 0x77, 0xd0, 0xfb, 0xf1, 0x5a, 0xf4, 0xc0, 0xbd, 0x5d,
0x6f, 0x92, 0x2c, 0x68, 0x60, 0x1f, 0xba, 0x0b, 0xa1, 0x0c, 0x63, 0xe8, 0x03, 0x70, 0x7a, 0xa7,
0x9d, 0x36, 0x07, 0x4d, 0x0c, 0xa0, 0xaf, 0xf9, 0x42, 0x18, 0xc5, 0x99, 0x7f, 0x32, 0xe8, 0xda,
0xed, 0x95, 0x78, 0x0d, 0x1d, 0x8b, 0x71, 0x5c, 0xaf, 0xe5, 0xf8, 0xcb, 0x4d, 0xc2, 0xd3, 0x03,
0xf3, 0xd8, 0xa8, 0x81, 0x57, 0xd0, 0x36, 0x05, 0x70, 0x54, 0xbb, 0x8e, 0xd6, 0x32, 0x19, 0x9f,
0xe8, 0xd5, 0xe5, 0x97, 0xb6, 0xfe, 0x69, 0x2e, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x79, 0xff,
0x98, 0x48, 0x45, 0x02, 0x00, 0x00,
}
| Descriptor | identifier_name |
tags.rs | use std::fs::{File, OpenOptions};
use std::io::{Read, Write};
use std::process::Command;
use std::collections::HashSet;
use std::path::{PathBuf, Path};
use app_result::{AppResult, app_err_msg, app_err_missing_src};
use types::{Tags, TagsKind, SourceKind};
use path_ext::PathExt;
use config::Config;
use dirs::{
rusty_tags_cache_dir,
cargo_git_src_dir,
cargo_crates_io_src_dir,
glob_path
};
/// Checks if there's already a tags file for `source`
/// and if not it's creating a new tags file and returning it.
pub fn update_tags(config: &Config, source: &SourceKind) -> AppResult<Tags> {
let src_tags = try!(cached_tags_file(&config.tags_kind, source));
let src_dir = try!(find_src_dir(source));
if src_tags.as_path().is_file() && ! config.force_recreate {
return Ok(Tags::new(&src_dir, &src_tags, true));
}
try!(create_tags(config, &[&src_dir], &src_tags));
Ok(Tags::new(&src_dir, &src_tags, false))
}
/// Does the same thing as `update_tags`, but also checks if the `lib.rs`
/// file of the library has public reexports of external crates. If
/// that's the case, then the tags of the public reexported external
/// crates are merged into the tags of the library.
pub fn | (config: &Config,
source: &SourceKind,
dependencies: &Vec<SourceKind>)
-> AppResult<Tags> {
let lib_tags = try!(update_tags(config, source));
if lib_tags.is_up_to_date(&config.tags_kind) && ! config.force_recreate {
return Ok(lib_tags);
}
let reexp_crates = try!(find_reexported_crates(&lib_tags.src_dir));
if reexp_crates.is_empty() {
return Ok(lib_tags);
}
if config.verbose {
println!("Found public reexports in '{}' of:", source.get_lib_name());
for rcrate in reexp_crates.iter() {
println!(" {}", rcrate);
}
println!("");
}
let mut crate_tags = Vec::<PathBuf>::new();
for rcrate in reexp_crates.iter() {
if let Some(crate_dep) = dependencies.iter().find(|d| d.get_lib_name() == *rcrate) {
crate_tags.push(try!(update_tags(config, crate_dep)).tags_file.clone());
}
}
if crate_tags.is_empty() {
return Ok(lib_tags);
}
crate_tags.push(lib_tags.tags_file.clone());
try!(merge_tags(config, &crate_tags, &lib_tags.tags_file));
Ok(lib_tags)
}
/// merges `tag_files` into `into_tag_file`
pub fn merge_tags(config: &Config, tag_files: &Vec<PathBuf>, into_tag_file: &Path) -> AppResult<()> {
if config.verbose {
println!("Merging ...\n tags:");
for file in tag_files.iter() {
println!(" {}", file.display());
}
println!("\n into:\n {}\n", into_tag_file.display());
}
match config.tags_kind {
TagsKind::Vi => {
let mut file_contents: Vec<String> = Vec::new();
for file in tag_files.iter() {
let mut file = try!(File::open(file));
let mut contents = String::new();
try!(file.read_to_string(&mut contents));
file_contents.push(contents);
}
let mut merged_lines: Vec<&str> = Vec::with_capacity(100_000);
for content in file_contents.iter() {
for line in content.lines() {
if let Some(chr) = line.chars().nth(0) {
if chr != '!' {
merged_lines.push(line);
}
}
}
}
merged_lines.sort();
merged_lines.dedup();
let mut tag_file = try!(
OpenOptions::new()
.create(true)
.truncate(true)
.read(true)
.write(true)
.open(into_tag_file)
);
try!(tag_file.write_fmt(format_args!("{}\n", "!_TAG_FILE_FORMAT 2 /extended format; --format=1 will not append ;\" to lines/")));
try!(tag_file.write_fmt(format_args!("{}\n", "!_TAG_FILE_SORTED 1 /0=unsorted, 1=sorted, 2=foldcase/")));
for line in merged_lines.iter() {
try!(tag_file.write_fmt(format_args!("{}\n", *line)));
}
},
TagsKind::Emacs => {
let mut tag_file = try!(
OpenOptions::new()
.create(true)
.append(true)
.read(true)
.write(true)
.open(into_tag_file)
);
for file in tag_files.iter() {
if file.as_path() != into_tag_file {
try!(tag_file.write_fmt(format_args!("{},include\n", file.display())));
}
}
}
}
Ok(())
}
/// creates tags recursive for the directory hierarchy starting at `src_dirs`
/// and writes them to `tags_file`
pub fn create_tags<P: AsRef<Path>>(config: &Config, src_dirs: &[P], tags_file: P) -> AppResult<()> {
let mut cmd = Command::new("ctags");
config.tags_kind.ctags_option().map(|opt| { cmd.arg(opt); () });
cmd.arg("--recurse")
.arg("--languages=Rust")
.arg("--langdef=Rust")
.arg("--langmap=Rust:.rs")
.arg("--regex-Rust=/^[ \\t]*(#\\[[^\\]]\\][ \\t]*)*(pub[ \\t]+)?(extern[ \\t]+)?(\"[^\"]+\"[ \\t]+)?(unsafe[ \\t]+)?fn[ \\t]+([a-zA-Z0-9_]+)/\\6/f,functions,function definitions/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?type[ \\t]+([a-zA-Z0-9_]+)/\\2/T,types,type definitions/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?enum[ \\t]+([a-zA-Z0-9_]+)/\\2/g,enum,enumeration names/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?struct[ \\t]+([a-zA-Z0-9_]+)/\\2/s,structure names/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?mod[ \\t]+([a-zA-Z0-9_]+)\\s*\\{/\\2/m,modules,module names/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?(static|const)[ \\t]+([a-zA-Z0-9_]+)/\\3/c,consts,static constants/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?trait[ \\t]+([a-zA-Z0-9_]+)/\\2/t,traits,traits/")
.arg("--regex-Rust=/^[ \\t]*macro_rules![ \\t]+([a-zA-Z0-9_]+)/\\1/d,macros,macro definitions/")
.arg("-o")
.arg(tags_file.as_ref());
for dir in src_dirs {
cmd.arg(dir.as_ref());
}
if config.verbose {
println!("Creating tags ...\n for source:");
for dir in src_dirs {
println!(" {}", dir.as_ref().display());
}
println!("\n cached at:\n {}\n", tags_file.as_ref().display());
}
try!(cmd.output());
Ok(())
}
/// find the source directory of `source`, for git sources the directories
/// in `~/.cargo/git/checkouts` are considered and for crates.io sources
/// the directories in `~/.cargo/registry/src/github.com-*` are considered
fn find_src_dir(source: &SourceKind) -> AppResult<PathBuf> {
match *source {
SourceKind::Git { ref lib_name, ref commit_hash } => {
let mut lib_src = lib_name.clone();
lib_src.push_str("-*");
let mut src_dir = try!(cargo_git_src_dir().map(Path::to_path_buf));
src_dir.push(&lib_src);
src_dir.push("master");
let src_paths = try!(glob_path(&format!("{}", src_dir.display())));
for src_path in src_paths {
if let Ok(path) = src_path {
let src_commit_hash = try!(get_commit_hash(&path));
if *commit_hash == src_commit_hash {
return Ok(path);
}
}
}
// the git repository name hasn't to match the name of the library,
// so here we're just going through all git directories and searching
// for the one with a matching commit hash
let mut src_dir = try!(cargo_git_src_dir().map(Path::to_path_buf));
src_dir.push("*");
src_dir.push("master");
let src_paths = try!(glob_path(&format!("{}", src_dir.display())));
for src_path in src_paths {
if let Ok(path) = src_path {
let src_commit_hash = try!(get_commit_hash(&path));
if *commit_hash == src_commit_hash {
return Ok(path);
}
}
}
Err(app_err_missing_src(source))
},
SourceKind::CratesIo { ref lib_name, ref version } => {
let mut lib_src = lib_name.clone();
lib_src.push('-');
lib_src.push_str(&**version);
let mut src_dir = try!(cargo_crates_io_src_dir().map(Path::to_path_buf));
src_dir.push(&lib_src);
if ! src_dir.is_dir() {
return Err(app_err_missing_src(source));
}
Ok(src_dir)
},
SourceKind::Path { ref path, .. } => {
if ! path.is_dir() {
return Err(app_err_missing_src(source));
}
Ok(path.clone())
}
}
}
/// returns the position and name of the cached tags file of `source`
fn cached_tags_file(tags_kind: &TagsKind, source: &SourceKind) -> AppResult<PathBuf> {
match *source {
SourceKind::Git { .. } | SourceKind::CratesIo { .. } => {
let mut tags_file = try!(rusty_tags_cache_dir().map(Path::to_path_buf));
tags_file.push(&source.tags_file_name(tags_kind));
Ok(tags_file)
},
SourceKind::Path { ref path, .. } => {
let mut tags_file = path.clone();
tags_file.push(&source.tags_file_name(tags_kind));
Ok(tags_file)
}
}
}
type CrateName = String;
/// searches in the file `<src_dir>/src/lib.rs` for external crates
/// that are reexpored and returns their names
fn find_reexported_crates(src_dir: &Path) -> AppResult<Vec<CrateName>> {
let mut lib_file = src_dir.to_path_buf();
lib_file.push("src");
lib_file.push("lib.rs");
if ! lib_file.is_file() {
return Ok(Vec::new());
}
let contents = {
let mut file = try!(File::open(&lib_file));
let mut contents = String::new();
try!(file.read_to_string(&mut contents));
contents
};
let lines = contents.lines();
type ModuleName = String;
let mut pub_uses = HashSet::<ModuleName>::new();
#[derive(Eq, PartialEq, Hash)]
struct ExternCrate<'a>
{
name: &'a str,
as_name: &'a str
}
let mut extern_crates = HashSet::<ExternCrate>::new();
for line in lines {
let items = line.trim_matches(';').split(' ').collect::<Vec<&str>>();
if items.len() < 3 {
continue;
}
if items[0] == "pub" && items[1] == "use" {
let mods = items[2].split("::").collect::<Vec<&str>>();
if mods.len() >= 1 {
pub_uses.insert(mods[0].to_string());
}
}
if items[0] == "extern" && items[1] == "crate" {
if items.len() == 3 {
extern_crates.insert(ExternCrate { name: items[2].trim_matches('"'), as_name: items[2] });
} else if items.len() == 5 && items[3] == "as" {
extern_crates.insert(ExternCrate { name: items[2].trim_matches('"'), as_name: items[4] });
}
}
}
let mut reexp_crates = Vec::<CrateName>::new();
for extern_crate in extern_crates.iter() {
if pub_uses.contains(extern_crate.as_name) {
reexp_crates.push(extern_crate.name.to_string());
}
}
Ok(reexp_crates)
}
/// get the commit hash of the current `HEAD` of the git repository located at `git_dir`
fn get_commit_hash(git_dir: &Path) -> AppResult<String> {
let mut cmd = Command::new("git");
cmd.current_dir(git_dir)
.arg("rev-parse")
.arg("HEAD");
let out = try!(cmd.output());
String::from_utf8(out.stdout)
.map(|s| s.trim().to_string())
.map_err(|_| app_err_msg("Couldn't convert 'git rev-parse HEAD' output to utf8!".to_string()))
}
| update_tags_and_check_for_reexports | identifier_name |
tags.rs | use std::fs::{File, OpenOptions};
use std::io::{Read, Write};
use std::process::Command;
use std::collections::HashSet;
use std::path::{PathBuf, Path};
use app_result::{AppResult, app_err_msg, app_err_missing_src};
use types::{Tags, TagsKind, SourceKind};
use path_ext::PathExt;
use config::Config;
use dirs::{
rusty_tags_cache_dir,
cargo_git_src_dir,
cargo_crates_io_src_dir,
glob_path
};
/// Checks if there's already a tags file for `source`
/// and if not it's creating a new tags file and returning it.
pub fn update_tags(config: &Config, source: &SourceKind) -> AppResult<Tags> {
let src_tags = try!(cached_tags_file(&config.tags_kind, source));
let src_dir = try!(find_src_dir(source));
if src_tags.as_path().is_file() && ! config.force_recreate {
return Ok(Tags::new(&src_dir, &src_tags, true));
}
try!(create_tags(config, &[&src_dir], &src_tags));
Ok(Tags::new(&src_dir, &src_tags, false))
}
/// Does the same thing as `update_tags`, but also checks if the `lib.rs`
/// file of the library has public reexports of external crates. If
/// that's the case, then the tags of the public reexported external
/// crates are merged into the tags of the library.
pub fn update_tags_and_check_for_reexports(config: &Config,
source: &SourceKind,
dependencies: &Vec<SourceKind>)
-> AppResult<Tags> {
let lib_tags = try!(update_tags(config, source));
if lib_tags.is_up_to_date(&config.tags_kind) && ! config.force_recreate {
return Ok(lib_tags);
}
let reexp_crates = try!(find_reexported_crates(&lib_tags.src_dir));
if reexp_crates.is_empty() {
return Ok(lib_tags);
}
if config.verbose {
println!("Found public reexports in '{}' of:", source.get_lib_name());
for rcrate in reexp_crates.iter() {
println!(" {}", rcrate);
}
println!("");
}
let mut crate_tags = Vec::<PathBuf>::new();
for rcrate in reexp_crates.iter() {
if let Some(crate_dep) = dependencies.iter().find(|d| d.get_lib_name() == *rcrate) {
crate_tags.push(try!(update_tags(config, crate_dep)).tags_file.clone());
}
}
if crate_tags.is_empty() {
return Ok(lib_tags);
}
crate_tags.push(lib_tags.tags_file.clone());
try!(merge_tags(config, &crate_tags, &lib_tags.tags_file));
Ok(lib_tags)
}
/// merges `tag_files` into `into_tag_file`
pub fn merge_tags(config: &Config, tag_files: &Vec<PathBuf>, into_tag_file: &Path) -> AppResult<()> {
if config.verbose {
println!("Merging ...\n tags:");
for file in tag_files.iter() {
println!(" {}", file.display());
}
println!("\n into:\n {}\n", into_tag_file.display());
}
match config.tags_kind {
TagsKind::Vi => {
let mut file_contents: Vec<String> = Vec::new();
for file in tag_files.iter() {
let mut file = try!(File::open(file));
let mut contents = String::new();
try!(file.read_to_string(&mut contents));
file_contents.push(contents);
}
let mut merged_lines: Vec<&str> = Vec::with_capacity(100_000);
for content in file_contents.iter() {
for line in content.lines() {
if let Some(chr) = line.chars().nth(0) {
if chr != '!' {
merged_lines.push(line);
}
}
}
}
merged_lines.sort();
merged_lines.dedup();
let mut tag_file = try!(
OpenOptions::new()
.create(true)
.truncate(true)
.read(true)
.write(true)
.open(into_tag_file)
);
try!(tag_file.write_fmt(format_args!("{}\n", "!_TAG_FILE_FORMAT 2 /extended format; --format=1 will not append ;\" to lines/")));
try!(tag_file.write_fmt(format_args!("{}\n", "!_TAG_FILE_SORTED 1 /0=unsorted, 1=sorted, 2=foldcase/")));
for line in merged_lines.iter() {
try!(tag_file.write_fmt(format_args!("{}\n", *line)));
}
},
TagsKind::Emacs => {
let mut tag_file = try!(
OpenOptions::new()
.create(true)
.append(true)
.read(true)
.write(true)
.open(into_tag_file)
);
for file in tag_files.iter() {
if file.as_path() != into_tag_file {
try!(tag_file.write_fmt(format_args!("{},include\n", file.display())));
}
}
}
}
Ok(())
}
/// creates tags recursive for the directory hierarchy starting at `src_dirs`
/// and writes them to `tags_file`
pub fn create_tags<P: AsRef<Path>>(config: &Config, src_dirs: &[P], tags_file: P) -> AppResult<()> {
let mut cmd = Command::new("ctags");
config.tags_kind.ctags_option().map(|opt| { cmd.arg(opt); () });
cmd.arg("--recurse")
.arg("--languages=Rust")
.arg("--langdef=Rust")
.arg("--langmap=Rust:.rs")
.arg("--regex-Rust=/^[ \\t]*(#\\[[^\\]]\\][ \\t]*)*(pub[ \\t]+)?(extern[ \\t]+)?(\"[^\"]+\"[ \\t]+)?(unsafe[ \\t]+)?fn[ \\t]+([a-zA-Z0-9_]+)/\\6/f,functions,function definitions/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?type[ \\t]+([a-zA-Z0-9_]+)/\\2/T,types,type definitions/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?enum[ \\t]+([a-zA-Z0-9_]+)/\\2/g,enum,enumeration names/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?struct[ \\t]+([a-zA-Z0-9_]+)/\\2/s,structure names/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?mod[ \\t]+([a-zA-Z0-9_]+)\\s*\\{/\\2/m,modules,module names/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?(static|const)[ \\t]+([a-zA-Z0-9_]+)/\\3/c,consts,static constants/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?trait[ \\t]+([a-zA-Z0-9_]+)/\\2/t,traits,traits/")
.arg("--regex-Rust=/^[ \\t]*macro_rules![ \\t]+([a-zA-Z0-9_]+)/\\1/d,macros,macro definitions/")
.arg("-o")
.arg(tags_file.as_ref());
for dir in src_dirs {
cmd.arg(dir.as_ref());
}
if config.verbose {
println!("Creating tags ...\n for source:");
for dir in src_dirs {
println!(" {}", dir.as_ref().display());
}
println!("\n cached at:\n {}\n", tags_file.as_ref().display());
}
try!(cmd.output());
Ok(())
}
/// find the source directory of `source`, for git sources the directories
/// in `~/.cargo/git/checkouts` are considered and for crates.io sources
/// the directories in `~/.cargo/registry/src/github.com-*` are considered
fn find_src_dir(source: &SourceKind) -> AppResult<PathBuf> {
match *source {
SourceKind::Git { ref lib_name, ref commit_hash } => {
let mut lib_src = lib_name.clone();
lib_src.push_str("-*");
let mut src_dir = try!(cargo_git_src_dir().map(Path::to_path_buf));
src_dir.push(&lib_src);
src_dir.push("master");
let src_paths = try!(glob_path(&format!("{}", src_dir.display())));
for src_path in src_paths {
if let Ok(path) = src_path {
let src_commit_hash = try!(get_commit_hash(&path));
if *commit_hash == src_commit_hash {
return Ok(path);
}
}
}
// the git repository name hasn't to match the name of the library,
// so here we're just going through all git directories and searching
// for the one with a matching commit hash
let mut src_dir = try!(cargo_git_src_dir().map(Path::to_path_buf));
src_dir.push("*");
src_dir.push("master");
let src_paths = try!(glob_path(&format!("{}", src_dir.display())));
for src_path in src_paths {
if let Ok(path) = src_path {
let src_commit_hash = try!(get_commit_hash(&path));
if *commit_hash == src_commit_hash {
return Ok(path);
}
}
}
Err(app_err_missing_src(source))
},
SourceKind::CratesIo { ref lib_name, ref version } => {
let mut lib_src = lib_name.clone();
lib_src.push('-');
lib_src.push_str(&**version);
let mut src_dir = try!(cargo_crates_io_src_dir().map(Path::to_path_buf));
src_dir.push(&lib_src);
if ! src_dir.is_dir() {
return Err(app_err_missing_src(source));
}
Ok(src_dir)
},
SourceKind::Path { ref path, .. } => {
if ! path.is_dir() {
return Err(app_err_missing_src(source));
}
Ok(path.clone())
}
}
}
/// returns the position and name of the cached tags file of `source`
fn cached_tags_file(tags_kind: &TagsKind, source: &SourceKind) -> AppResult<PathBuf> {
match *source {
SourceKind::Git { .. } | SourceKind::CratesIo { .. } => {
let mut tags_file = try!(rusty_tags_cache_dir().map(Path::to_path_buf));
tags_file.push(&source.tags_file_name(tags_kind));
Ok(tags_file)
},
SourceKind::Path { ref path, .. } => {
let mut tags_file = path.clone();
tags_file.push(&source.tags_file_name(tags_kind));
Ok(tags_file)
}
}
}
type CrateName = String;
/// searches in the file `<src_dir>/src/lib.rs` for external crates
/// that are reexpored and returns their names
fn find_reexported_crates(src_dir: &Path) -> AppResult<Vec<CrateName>> |
/// get the commit hash of the current `HEAD` of the git repository located at `git_dir`
fn get_commit_hash(git_dir: &Path) -> AppResult<String> {
let mut cmd = Command::new("git");
cmd.current_dir(git_dir)
.arg("rev-parse")
.arg("HEAD");
let out = try!(cmd.output());
String::from_utf8(out.stdout)
.map(|s| s.trim().to_string())
.map_err(|_| app_err_msg("Couldn't convert 'git rev-parse HEAD' output to utf8!".to_string()))
}
| {
let mut lib_file = src_dir.to_path_buf();
lib_file.push("src");
lib_file.push("lib.rs");
if ! lib_file.is_file() {
return Ok(Vec::new());
}
let contents = {
let mut file = try!(File::open(&lib_file));
let mut contents = String::new();
try!(file.read_to_string(&mut contents));
contents
};
let lines = contents.lines();
type ModuleName = String;
let mut pub_uses = HashSet::<ModuleName>::new();
#[derive(Eq, PartialEq, Hash)]
struct ExternCrate<'a>
{
name: &'a str,
as_name: &'a str
}
let mut extern_crates = HashSet::<ExternCrate>::new();
for line in lines {
let items = line.trim_matches(';').split(' ').collect::<Vec<&str>>();
if items.len() < 3 {
continue;
}
if items[0] == "pub" && items[1] == "use" {
let mods = items[2].split("::").collect::<Vec<&str>>();
if mods.len() >= 1 {
pub_uses.insert(mods[0].to_string());
}
}
if items[0] == "extern" && items[1] == "crate" {
if items.len() == 3 {
extern_crates.insert(ExternCrate { name: items[2].trim_matches('"'), as_name: items[2] });
} else if items.len() == 5 && items[3] == "as" {
extern_crates.insert(ExternCrate { name: items[2].trim_matches('"'), as_name: items[4] });
}
}
}
let mut reexp_crates = Vec::<CrateName>::new();
for extern_crate in extern_crates.iter() {
if pub_uses.contains(extern_crate.as_name) {
reexp_crates.push(extern_crate.name.to_string());
}
}
Ok(reexp_crates)
} | identifier_body |
tags.rs | use std::fs::{File, OpenOptions};
use std::io::{Read, Write};
use std::process::Command;
use std::collections::HashSet;
use std::path::{PathBuf, Path};
use app_result::{AppResult, app_err_msg, app_err_missing_src};
use types::{Tags, TagsKind, SourceKind};
use path_ext::PathExt;
use config::Config;
use dirs::{
rusty_tags_cache_dir,
cargo_git_src_dir,
cargo_crates_io_src_dir,
glob_path
};
/// Checks if there's already a tags file for `source`
/// and if not it's creating a new tags file and returning it.
pub fn update_tags(config: &Config, source: &SourceKind) -> AppResult<Tags> {
let src_tags = try!(cached_tags_file(&config.tags_kind, source));
let src_dir = try!(find_src_dir(source));
if src_tags.as_path().is_file() && ! config.force_recreate {
return Ok(Tags::new(&src_dir, &src_tags, true));
}
try!(create_tags(config, &[&src_dir], &src_tags));
Ok(Tags::new(&src_dir, &src_tags, false))
}
/// Does the same thing as `update_tags`, but also checks if the `lib.rs`
/// file of the library has public reexports of external crates. If
/// that's the case, then the tags of the public reexported external
/// crates are merged into the tags of the library.
pub fn update_tags_and_check_for_reexports(config: &Config,
source: &SourceKind,
dependencies: &Vec<SourceKind>)
-> AppResult<Tags> {
let lib_tags = try!(update_tags(config, source));
if lib_tags.is_up_to_date(&config.tags_kind) && ! config.force_recreate {
return Ok(lib_tags);
}
let reexp_crates = try!(find_reexported_crates(&lib_tags.src_dir));
if reexp_crates.is_empty() {
return Ok(lib_tags);
}
if config.verbose {
println!("Found public reexports in '{}' of:", source.get_lib_name());
for rcrate in reexp_crates.iter() {
println!(" {}", rcrate);
}
println!("");
}
let mut crate_tags = Vec::<PathBuf>::new();
for rcrate in reexp_crates.iter() {
if let Some(crate_dep) = dependencies.iter().find(|d| d.get_lib_name() == *rcrate) {
crate_tags.push(try!(update_tags(config, crate_dep)).tags_file.clone());
}
}
if crate_tags.is_empty() {
return Ok(lib_tags);
}
crate_tags.push(lib_tags.tags_file.clone());
try!(merge_tags(config, &crate_tags, &lib_tags.tags_file));
Ok(lib_tags)
}
/// merges `tag_files` into `into_tag_file`
pub fn merge_tags(config: &Config, tag_files: &Vec<PathBuf>, into_tag_file: &Path) -> AppResult<()> {
if config.verbose {
println!("Merging ...\n tags:");
for file in tag_files.iter() {
println!(" {}", file.display());
}
println!("\n into:\n {}\n", into_tag_file.display());
}
match config.tags_kind {
TagsKind::Vi => | ,
TagsKind::Emacs => {
let mut tag_file = try!(
OpenOptions::new()
.create(true)
.append(true)
.read(true)
.write(true)
.open(into_tag_file)
);
for file in tag_files.iter() {
if file.as_path() != into_tag_file {
try!(tag_file.write_fmt(format_args!("{},include\n", file.display())));
}
}
}
}
Ok(())
}
/// creates tags recursive for the directory hierarchy starting at `src_dirs`
/// and writes them to `tags_file`
pub fn create_tags<P: AsRef<Path>>(config: &Config, src_dirs: &[P], tags_file: P) -> AppResult<()> {
let mut cmd = Command::new("ctags");
config.tags_kind.ctags_option().map(|opt| { cmd.arg(opt); () });
cmd.arg("--recurse")
.arg("--languages=Rust")
.arg("--langdef=Rust")
.arg("--langmap=Rust:.rs")
.arg("--regex-Rust=/^[ \\t]*(#\\[[^\\]]\\][ \\t]*)*(pub[ \\t]+)?(extern[ \\t]+)?(\"[^\"]+\"[ \\t]+)?(unsafe[ \\t]+)?fn[ \\t]+([a-zA-Z0-9_]+)/\\6/f,functions,function definitions/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?type[ \\t]+([a-zA-Z0-9_]+)/\\2/T,types,type definitions/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?enum[ \\t]+([a-zA-Z0-9_]+)/\\2/g,enum,enumeration names/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?struct[ \\t]+([a-zA-Z0-9_]+)/\\2/s,structure names/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?mod[ \\t]+([a-zA-Z0-9_]+)\\s*\\{/\\2/m,modules,module names/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?(static|const)[ \\t]+([a-zA-Z0-9_]+)/\\3/c,consts,static constants/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?trait[ \\t]+([a-zA-Z0-9_]+)/\\2/t,traits,traits/")
.arg("--regex-Rust=/^[ \\t]*macro_rules![ \\t]+([a-zA-Z0-9_]+)/\\1/d,macros,macro definitions/")
.arg("-o")
.arg(tags_file.as_ref());
for dir in src_dirs {
cmd.arg(dir.as_ref());
}
if config.verbose {
println!("Creating tags ...\n for source:");
for dir in src_dirs {
println!(" {}", dir.as_ref().display());
}
println!("\n cached at:\n {}\n", tags_file.as_ref().display());
}
try!(cmd.output());
Ok(())
}
/// find the source directory of `source`, for git sources the directories
/// in `~/.cargo/git/checkouts` are considered and for crates.io sources
/// the directories in `~/.cargo/registry/src/github.com-*` are considered
fn find_src_dir(source: &SourceKind) -> AppResult<PathBuf> {
match *source {
SourceKind::Git { ref lib_name, ref commit_hash } => {
let mut lib_src = lib_name.clone();
lib_src.push_str("-*");
let mut src_dir = try!(cargo_git_src_dir().map(Path::to_path_buf));
src_dir.push(&lib_src);
src_dir.push("master");
let src_paths = try!(glob_path(&format!("{}", src_dir.display())));
for src_path in src_paths {
if let Ok(path) = src_path {
let src_commit_hash = try!(get_commit_hash(&path));
if *commit_hash == src_commit_hash {
return Ok(path);
}
}
}
// the git repository name hasn't to match the name of the library,
// so here we're just going through all git directories and searching
// for the one with a matching commit hash
let mut src_dir = try!(cargo_git_src_dir().map(Path::to_path_buf));
src_dir.push("*");
src_dir.push("master");
let src_paths = try!(glob_path(&format!("{}", src_dir.display())));
for src_path in src_paths {
if let Ok(path) = src_path {
let src_commit_hash = try!(get_commit_hash(&path));
if *commit_hash == src_commit_hash {
return Ok(path);
}
}
}
Err(app_err_missing_src(source))
},
SourceKind::CratesIo { ref lib_name, ref version } => {
let mut lib_src = lib_name.clone();
lib_src.push('-');
lib_src.push_str(&**version);
let mut src_dir = try!(cargo_crates_io_src_dir().map(Path::to_path_buf));
src_dir.push(&lib_src);
if ! src_dir.is_dir() {
return Err(app_err_missing_src(source));
}
Ok(src_dir)
},
SourceKind::Path { ref path, .. } => {
if ! path.is_dir() {
return Err(app_err_missing_src(source));
}
Ok(path.clone())
}
}
}
/// returns the position and name of the cached tags file of `source`
fn cached_tags_file(tags_kind: &TagsKind, source: &SourceKind) -> AppResult<PathBuf> {
match *source {
SourceKind::Git { .. } | SourceKind::CratesIo { .. } => {
let mut tags_file = try!(rusty_tags_cache_dir().map(Path::to_path_buf));
tags_file.push(&source.tags_file_name(tags_kind));
Ok(tags_file)
},
SourceKind::Path { ref path, .. } => {
let mut tags_file = path.clone();
tags_file.push(&source.tags_file_name(tags_kind));
Ok(tags_file)
}
}
}
type CrateName = String;
/// searches in the file `<src_dir>/src/lib.rs` for external crates
/// that are reexpored and returns their names
fn find_reexported_crates(src_dir: &Path) -> AppResult<Vec<CrateName>> {
let mut lib_file = src_dir.to_path_buf();
lib_file.push("src");
lib_file.push("lib.rs");
if ! lib_file.is_file() {
return Ok(Vec::new());
}
let contents = {
let mut file = try!(File::open(&lib_file));
let mut contents = String::new();
try!(file.read_to_string(&mut contents));
contents
};
let lines = contents.lines();
type ModuleName = String;
let mut pub_uses = HashSet::<ModuleName>::new();
#[derive(Eq, PartialEq, Hash)]
struct ExternCrate<'a>
{
name: &'a str,
as_name: &'a str
}
let mut extern_crates = HashSet::<ExternCrate>::new();
for line in lines {
let items = line.trim_matches(';').split(' ').collect::<Vec<&str>>();
if items.len() < 3 {
continue;
}
if items[0] == "pub" && items[1] == "use" {
let mods = items[2].split("::").collect::<Vec<&str>>();
if mods.len() >= 1 {
pub_uses.insert(mods[0].to_string());
}
}
if items[0] == "extern" && items[1] == "crate" {
if items.len() == 3 {
extern_crates.insert(ExternCrate { name: items[2].trim_matches('"'), as_name: items[2] });
} else if items.len() == 5 && items[3] == "as" {
extern_crates.insert(ExternCrate { name: items[2].trim_matches('"'), as_name: items[4] });
}
}
}
let mut reexp_crates = Vec::<CrateName>::new();
for extern_crate in extern_crates.iter() {
if pub_uses.contains(extern_crate.as_name) {
reexp_crates.push(extern_crate.name.to_string());
}
}
Ok(reexp_crates)
}
/// get the commit hash of the current `HEAD` of the git repository located at `git_dir`
fn get_commit_hash(git_dir: &Path) -> AppResult<String> {
let mut cmd = Command::new("git");
cmd.current_dir(git_dir)
.arg("rev-parse")
.arg("HEAD");
let out = try!(cmd.output());
String::from_utf8(out.stdout)
.map(|s| s.trim().to_string())
.map_err(|_| app_err_msg("Couldn't convert 'git rev-parse HEAD' output to utf8!".to_string()))
}
| {
let mut file_contents: Vec<String> = Vec::new();
for file in tag_files.iter() {
let mut file = try!(File::open(file));
let mut contents = String::new();
try!(file.read_to_string(&mut contents));
file_contents.push(contents);
}
let mut merged_lines: Vec<&str> = Vec::with_capacity(100_000);
for content in file_contents.iter() {
for line in content.lines() {
if let Some(chr) = line.chars().nth(0) {
if chr != '!' {
merged_lines.push(line);
}
}
}
}
merged_lines.sort();
merged_lines.dedup();
let mut tag_file = try!(
OpenOptions::new()
.create(true)
.truncate(true)
.read(true)
.write(true)
.open(into_tag_file)
);
try!(tag_file.write_fmt(format_args!("{}\n", "!_TAG_FILE_FORMAT 2 /extended format; --format=1 will not append ;\" to lines/")));
try!(tag_file.write_fmt(format_args!("{}\n", "!_TAG_FILE_SORTED 1 /0=unsorted, 1=sorted, 2=foldcase/")));
for line in merged_lines.iter() {
try!(tag_file.write_fmt(format_args!("{}\n", *line)));
}
} | conditional_block |
tags.rs | use std::fs::{File, OpenOptions};
use std::io::{Read, Write};
use std::process::Command;
use std::collections::HashSet;
use std::path::{PathBuf, Path};
use app_result::{AppResult, app_err_msg, app_err_missing_src};
use types::{Tags, TagsKind, SourceKind};
use path_ext::PathExt;
use config::Config;
use dirs::{
rusty_tags_cache_dir,
cargo_git_src_dir,
cargo_crates_io_src_dir,
glob_path
};
/// Checks if there's already a tags file for `source`
/// and if not it's creating a new tags file and returning it.
pub fn update_tags(config: &Config, source: &SourceKind) -> AppResult<Tags> {
let src_tags = try!(cached_tags_file(&config.tags_kind, source));
let src_dir = try!(find_src_dir(source));
if src_tags.as_path().is_file() && ! config.force_recreate {
return Ok(Tags::new(&src_dir, &src_tags, true));
}
try!(create_tags(config, &[&src_dir], &src_tags));
Ok(Tags::new(&src_dir, &src_tags, false))
}
/// Does the same thing as `update_tags`, but also checks if the `lib.rs`
/// file of the library has public reexports of external crates. If
/// that's the case, then the tags of the public reexported external
/// crates are merged into the tags of the library.
pub fn update_tags_and_check_for_reexports(config: &Config,
source: &SourceKind,
dependencies: &Vec<SourceKind>)
-> AppResult<Tags> {
let lib_tags = try!(update_tags(config, source));
if lib_tags.is_up_to_date(&config.tags_kind) && ! config.force_recreate {
return Ok(lib_tags);
}
let reexp_crates = try!(find_reexported_crates(&lib_tags.src_dir));
if reexp_crates.is_empty() {
return Ok(lib_tags);
}
if config.verbose {
println!("Found public reexports in '{}' of:", source.get_lib_name());
for rcrate in reexp_crates.iter() {
println!(" {}", rcrate);
}
println!("");
}
let mut crate_tags = Vec::<PathBuf>::new();
for rcrate in reexp_crates.iter() {
if let Some(crate_dep) = dependencies.iter().find(|d| d.get_lib_name() == *rcrate) {
crate_tags.push(try!(update_tags(config, crate_dep)).tags_file.clone());
}
}
if crate_tags.is_empty() {
return Ok(lib_tags);
}
crate_tags.push(lib_tags.tags_file.clone());
try!(merge_tags(config, &crate_tags, &lib_tags.tags_file));
Ok(lib_tags)
}
/// merges `tag_files` into `into_tag_file`
pub fn merge_tags(config: &Config, tag_files: &Vec<PathBuf>, into_tag_file: &Path) -> AppResult<()> {
if config.verbose {
println!("Merging ...\n tags:");
for file in tag_files.iter() {
println!(" {}", file.display());
}
println!("\n into:\n {}\n", into_tag_file.display());
}
match config.tags_kind {
TagsKind::Vi => {
let mut file_contents: Vec<String> = Vec::new();
for file in tag_files.iter() {
let mut file = try!(File::open(file));
let mut contents = String::new();
try!(file.read_to_string(&mut contents));
file_contents.push(contents);
}
let mut merged_lines: Vec<&str> = Vec::with_capacity(100_000);
for content in file_contents.iter() {
for line in content.lines() {
if let Some(chr) = line.chars().nth(0) {
if chr != '!' {
merged_lines.push(line);
}
}
}
}
merged_lines.sort();
merged_lines.dedup();
let mut tag_file = try!(
OpenOptions::new()
.create(true)
.truncate(true)
.read(true)
.write(true)
.open(into_tag_file)
);
try!(tag_file.write_fmt(format_args!("{}\n", "!_TAG_FILE_FORMAT 2 /extended format; --format=1 will not append ;\" to lines/")));
try!(tag_file.write_fmt(format_args!("{}\n", "!_TAG_FILE_SORTED 1 /0=unsorted, 1=sorted, 2=foldcase/")));
for line in merged_lines.iter() {
try!(tag_file.write_fmt(format_args!("{}\n", *line)));
}
},
TagsKind::Emacs => {
let mut tag_file = try!(
OpenOptions::new()
.create(true)
.append(true)
.read(true)
.write(true)
.open(into_tag_file)
);
for file in tag_files.iter() {
if file.as_path() != into_tag_file {
try!(tag_file.write_fmt(format_args!("{},include\n", file.display())));
}
}
}
}
Ok(())
}
/// creates tags recursive for the directory hierarchy starting at `src_dirs`
/// and writes them to `tags_file`
pub fn create_tags<P: AsRef<Path>>(config: &Config, src_dirs: &[P], tags_file: P) -> AppResult<()> {
let mut cmd = Command::new("ctags");
config.tags_kind.ctags_option().map(|opt| { cmd.arg(opt); () });
cmd.arg("--recurse")
.arg("--languages=Rust")
.arg("--langdef=Rust")
.arg("--langmap=Rust:.rs")
.arg("--regex-Rust=/^[ \\t]*(#\\[[^\\]]\\][ \\t]*)*(pub[ \\t]+)?(extern[ \\t]+)?(\"[^\"]+\"[ \\t]+)?(unsafe[ \\t]+)?fn[ \\t]+([a-zA-Z0-9_]+)/\\6/f,functions,function definitions/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?type[ \\t]+([a-zA-Z0-9_]+)/\\2/T,types,type definitions/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?enum[ \\t]+([a-zA-Z0-9_]+)/\\2/g,enum,enumeration names/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?struct[ \\t]+([a-zA-Z0-9_]+)/\\2/s,structure names/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?mod[ \\t]+([a-zA-Z0-9_]+)\\s*\\{/\\2/m,modules,module names/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?(static|const)[ \\t]+([a-zA-Z0-9_]+)/\\3/c,consts,static constants/")
.arg("--regex-Rust=/^[ \\t]*(pub[ \\t]+)?trait[ \\t]+([a-zA-Z0-9_]+)/\\2/t,traits,traits/")
.arg("--regex-Rust=/^[ \\t]*macro_rules![ \\t]+([a-zA-Z0-9_]+)/\\1/d,macros,macro definitions/")
.arg("-o")
.arg(tags_file.as_ref());
for dir in src_dirs {
cmd.arg(dir.as_ref());
}
if config.verbose {
println!("Creating tags ...\n for source:");
for dir in src_dirs {
println!(" {}", dir.as_ref().display());
}
println!("\n cached at:\n {}\n", tags_file.as_ref().display());
}
try!(cmd.output());
Ok(())
}
/// find the source directory of `source`, for git sources the directories
/// in `~/.cargo/git/checkouts` are considered and for crates.io sources
/// the directories in `~/.cargo/registry/src/github.com-*` are considered
fn find_src_dir(source: &SourceKind) -> AppResult<PathBuf> {
match *source {
SourceKind::Git { ref lib_name, ref commit_hash } => {
let mut lib_src = lib_name.clone();
lib_src.push_str("-*");
let mut src_dir = try!(cargo_git_src_dir().map(Path::to_path_buf));
src_dir.push(&lib_src);
src_dir.push("master");
let src_paths = try!(glob_path(&format!("{}", src_dir.display())));
for src_path in src_paths {
if let Ok(path) = src_path {
let src_commit_hash = try!(get_commit_hash(&path));
if *commit_hash == src_commit_hash {
return Ok(path);
}
}
}
// the git repository name hasn't to match the name of the library,
// so here we're just going through all git directories and searching
// for the one with a matching commit hash
let mut src_dir = try!(cargo_git_src_dir().map(Path::to_path_buf));
src_dir.push("*");
src_dir.push("master");
let src_paths = try!(glob_path(&format!("{}", src_dir.display())));
for src_path in src_paths {
if let Ok(path) = src_path {
let src_commit_hash = try!(get_commit_hash(&path));
if *commit_hash == src_commit_hash {
return Ok(path);
}
}
}
Err(app_err_missing_src(source))
},
SourceKind::CratesIo { ref lib_name, ref version } => {
let mut lib_src = lib_name.clone();
lib_src.push('-');
lib_src.push_str(&**version);
let mut src_dir = try!(cargo_crates_io_src_dir().map(Path::to_path_buf));
src_dir.push(&lib_src);
if ! src_dir.is_dir() {
return Err(app_err_missing_src(source));
}
Ok(src_dir)
},
SourceKind::Path { ref path, .. } => {
if ! path.is_dir() {
return Err(app_err_missing_src(source));
}
Ok(path.clone())
}
}
}
/// returns the position and name of the cached tags file of `source`
fn cached_tags_file(tags_kind: &TagsKind, source: &SourceKind) -> AppResult<PathBuf> {
match *source {
SourceKind::Git { .. } | SourceKind::CratesIo { .. } => {
let mut tags_file = try!(rusty_tags_cache_dir().map(Path::to_path_buf));
tags_file.push(&source.tags_file_name(tags_kind));
Ok(tags_file)
},
SourceKind::Path { ref path, .. } => {
let mut tags_file = path.clone();
tags_file.push(&source.tags_file_name(tags_kind));
Ok(tags_file)
}
}
}
type CrateName = String;
/// searches in the file `<src_dir>/src/lib.rs` for external crates
/// that are reexpored and returns their names
fn find_reexported_crates(src_dir: &Path) -> AppResult<Vec<CrateName>> {
let mut lib_file = src_dir.to_path_buf();
lib_file.push("src");
lib_file.push("lib.rs");
if ! lib_file.is_file() {
return Ok(Vec::new());
}
let contents = {
let mut file = try!(File::open(&lib_file));
let mut contents = String::new();
try!(file.read_to_string(&mut contents));
contents
};
let lines = contents.lines();
type ModuleName = String;
let mut pub_uses = HashSet::<ModuleName>::new();
#[derive(Eq, PartialEq, Hash)]
struct ExternCrate<'a>
{
name: &'a str,
as_name: &'a str
}
let mut extern_crates = HashSet::<ExternCrate>::new();
for line in lines {
let items = line.trim_matches(';').split(' ').collect::<Vec<&str>>();
if items.len() < 3 {
continue;
}
if items[0] == "pub" && items[1] == "use" {
let mods = items[2].split("::").collect::<Vec<&str>>();
if mods.len() >= 1 {
pub_uses.insert(mods[0].to_string());
}
}
if items[0] == "extern" && items[1] == "crate" {
if items.len() == 3 {
extern_crates.insert(ExternCrate { name: items[2].trim_matches('"'), as_name: items[2] });
} else if items.len() == 5 && items[3] == "as" {
extern_crates.insert(ExternCrate { name: items[2].trim_matches('"'), as_name: items[4] });
}
}
}
let mut reexp_crates = Vec::<CrateName>::new();
for extern_crate in extern_crates.iter() {
if pub_uses.contains(extern_crate.as_name) {
reexp_crates.push(extern_crate.name.to_string());
}
}
Ok(reexp_crates)
} | let mut cmd = Command::new("git");
cmd.current_dir(git_dir)
.arg("rev-parse")
.arg("HEAD");
let out = try!(cmd.output());
String::from_utf8(out.stdout)
.map(|s| s.trim().to_string())
.map_err(|_| app_err_msg("Couldn't convert 'git rev-parse HEAD' output to utf8!".to_string()))
} |
/// get the commit hash of the current `HEAD` of the git repository located at `git_dir`
fn get_commit_hash(git_dir: &Path) -> AppResult<String> { | random_line_split |
auction.py | from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from .metrics import compute_auc, compute_model_auc
def run_selection(seed, n_samples, auction_size, n_auctions):
seed +=1
np.random.seed(seed)
ind = np.random.randint(0, n_samples, size=auction_size*n_auctions)
return ind, seed+1
def sample_true(x):
if any(x):
return np.random.choice(np.where(x)[0])
else:
return np.random.choice(np.where(x==False)[0])
def run_auction(dataset, seed, model, epsilon, auction_size, n_auctions, max_slate, position_effect=0):
seed += 1
np.random.seed(seed)
name = dataset['name']
x_te = dataset['X']
y_te = dataset['y']
ind = dataset['ind']
auction_type = dataset['auction_type']
reserve = dataset['reserve']
# Copy our sampled data
x, y = x_te[ind, :].copy(), y_te[ind].copy()
# True PClicks, with some noise
pclick = model.predict_proba(x)[:, 1]
logit = np.log(pclick / (1-pclick))
noisy_pclick = 1 / (1 + np.exp(- (logit + np.random.normal(scale=epsilon, size=len(ind)))))
print(name, 'True AUC:', compute_auc(pclick, y), 'Noisy AUC:', compute_auc(noisy_pclick, y))
# Build table with auctionid, dataid, and pclick
df = pandas.DataFrame(zip(ind, pclick, noisy_pclick), columns=['SampleId', 'TruePClick', 'PClick'])
df['AuctionId'] = [int(i / auction_size) for i in range(auction_size*n_auctions)]
# We have either a randomized dataset, or a greedy dataset.
# Use this to create a temporary ranking pclick that's strictly for
# ordering. It won't be used in the training data.
if auction_type == 'random':
df['RankingPClick'] = np.random.permutation(df['PClick'])
else:
df['RankingPClick'] = df['PClick']
df = df[df['RankingPClick'] >= reserve].reset_index(drop=True)
# Rank by pclick, then get position and layout. Apply max_slate here.
df = df.sort_values(['AuctionId', 'RankingPClick'], ascending=False)
df['Position'] = df.groupby('AuctionId').cumcount()
df = df[df['Position'] < max_slate]
# Random auctions pick a random layout
if auction_type == 'random':
m = np.max(df['AuctionId'])
ids = np.arange(m)
layouts = np.random.randint(1, max_slate, len(ids))
layout_df = pandas.DataFrame(zip(ids, layouts), columns=['AuctionId', 'Layout'])
df = df.join(layout_df, on='AuctionId', how='inner', lsuffix='', rsuffix='_dup')
df.drop(list(df.filter(regex='_dup$')), axis=1, inplace=True)
df = df[df['Position'] < df['Layout']]
else:
df['Layout'] = df.groupby('AuctionId')['AuctionId'].transform('count')
# Rank by PClick, then cascade to generate clicks
# Effect for Position is Zero
df['Uniform'] = np.random.uniform(size=len(df))
df['WouldClick'] = np.where(df['Uniform'] <= df['TruePClick'], 1, 0)
df['Click'] = 0
if position_effect==1:
df.loc[df["WouldClick"].ne(0).groupby(df['AuctionId']).idxmax(),'Click']=1
else:
sample_id = df['WouldClick'].ne(0).groupby(df['AuctionId']).apply(sample_true)
group_id = df['WouldClick'].ne(0).groupby(df['AuctionId']).groups
idx=[]
for k,v in group_id.items():
idx.append(group_id[k][sample_id[k]])
df.loc[idx,'Click']=1
df['Click'] = df['Click'] * df['WouldClick']
df.drop(columns=['Uniform', 'WouldClick', 'RankingPClick', 'TruePClick'], inplace=True)
return df, seed+1
def construct_auction_dataset(dataset):
X = np.hstack((dataset['auctions'][['PClick', 'Position', 'Layout']], dataset['X'][dataset['auctions']['SampleId']]))
#y = dataset['y'][dataset['auctions']['SampleId']]
y = dataset['auctions']['Click']
return X, y
# if __name__ == '__main__':
# parser = argparse.ArgumentParser()
# parser.add_argument('-s', '--seed', default=16, type=int, help='Random seed for repro')
# # Settings for true pclick distribution
# parser.add_argument('--n_tr_samples', default=30000, type=int, help='Training Samples')
# parser.add_argument('--n_te_cnt_samples', default=100000, type=int, help='Control Testing Samples')
# parser.add_argument('--n_te_trt_samples', default=100000, type=int, help='Treatment Testing Samples')
# parser.add_argument('--n_te_rnd_samples', default=100000, type=int, help='Treatment Randomized Samples')
# parser.add_argument('--n_features', default=200, type=int, help='Features for distribution')
# parser.add_argument('--n_informative', default=150, type=int, help='Relevant Features')
# parser.add_argument('--n_clusters_per_class', default=10, type=int, help='Clusters per class')
# parser.add_argument('--class_sep', default=3, type=float, help='Class separation')
# parser.add_argument('--oracle_n_estimators', default=100, type=int, help='Number of estimators for Oracle')
# parser.add_argument('--oracle_min_samples_leaf', default=100, type=int, help='Minimum number of samples for Oracle to use for labeling')
# parser.add_argument('--auction_n_estimators', default=100, type=int, help='Number of estimators for Auction pclick models')
# parser.add_argument('--auction_min_samples_leaf', default=10000, type=int, help='Minimum number of samples for Auction pclick models to use for labeling')
# parser.add_argument('--auction_size', default=20, type=int, help='Size of Auction')
# parser.add_argument('--n_rnd_auction', default=10000, type=int, help='Number of randomized auctions')
# parser.add_argument('--n_auctions', default=100000, type=int, help='Number of auctions for control and treatment')
# parser.add_argument('--epsilon', default = .5, type=float, help='Noise to add to pclicks prior to sorting')
# parser.add_argument('--control_reserve', default=.6, type=float, help='Reserve on Control Flight')
# parser.add_argument('--treatment_reserve', default=.7, type=float, help='Reserve on Treatment Flight')
# parser.add_argument('--max_slate', default=5, type=float, help='Maximum slate size')
# args = parser.parse_args()
# print('Arguments', args)
# X, y = make_classification( n_samples=args.n_tr_samples + args.n_te_cnt_samples + args.n_te_trt_samples + args.n_te_rnd_samples
# , n_features=args.n_features
# , n_informative=args.n_informative
# , n_clusters_per_class=args.n_clusters_per_class
# , class_sep=args.class_sep
# , random_state=args.seed)
# args.seed += 1
# print('Splitting Datasets')
# samples = np.array([0, args.n_tr_samples, args.n_te_cnt_samples, args.n_te_trt_samples, args.n_te_rnd_samples])
# datasets = {name : {'name':name, 'start':start, 'end':end, 'samples':samples, 'auction_type':auction_type, 'reserve':reserve} for name, start, end, samples, auction_type, reserve in zip(['oracle', 'cnt', 'trt', 'rnd'], np.cumsum(samples), np.cumsum(samples)[1:], samples[1:], [None, 'greedy', 'greedy', 'random'], [None, args.control_reserve, args.treatment_reserve, 0])}
# for dataset, info in datasets.items():
# print(dataset, info['start'], info['end'], info['samples'])
# info['X'], info['y'] = X[info['start']:info['end'], :].copy() , y[info['start']:info['end']].copy()
# # Creates an oracle pclick that ignores position and simply observes c/nc
# oracle, args.seed = train_rf(datasets['oracle']['X'], datasets['oracle']['y'], seed=args.seed, n_estimators=args.oracle_n_estimators, min_samples_leaf=args.oracle_min_samples_leaf)
# print('Oracle Created -- Test AUC Control:', compute_model_auc(oracle, datasets['cnt']['X'], datasets['cnt']['y']))
# print('Oracle Created -- Test AUC Treatment:', compute_model_auc(oracle, datasets['trt']['X'], datasets['trt']['y']))
# # Run selection
# datasets['rnd']['ind'], args.seed = run_selection(args.seed, datasets['rnd']['samples'], args.auction_size, args.n_rnd_auction)
# datasets['cnt']['ind'], args.seed = run_selection(args.seed, datasets['cnt']['samples'], args.auction_size, args.n_auctions)
# datasets['trt']['ind'], args.seed = run_selection(args.seed, datasets['trt']['samples'], args.auction_size, args.n_auctions)
# # Run the auction
# datasets['rnd']['auctions'], args.seed = run_auction(datasets['rnd'], args.seed, oracle, args.epsilon, args.auction_size, args.n_rnd_auction, args.max_slate)
# datasets['cnt']['auctions'], args.seed = run_auction(datasets['cnt'], args.seed, oracle, args.epsilon, args.auction_size, args.n_auctions, args.max_slate)
# datasets['trt']['auctions'], args.seed = run_auction(datasets['trt'], args.seed, oracle, args.epsilon, args.auction_size, args.n_auctions, args.max_slate)
# print('Train RF Models')
# rnd_model, args.seed = train_rf(*construct_auction_dataset(datasets['rnd']), seed=args.seed, n_estimators=args.auction_n_estimators, min_samples_leaf=args.auction_min_samples_leaf)
# cnt_model, args.seed = train_rf(*construct_auction_dataset(datasets['cnt']), seed=args.seed, n_estimators=args.auction_n_estimators, min_samples_leaf=args.auction_min_samples_leaf)
# trt_model, args.seed = train_rf(*construct_auction_dataset(datasets['trt']), seed=args.seed, n_estimators=args.auction_n_estimators, min_samples_leaf=args.auction_min_samples_leaf)
# print('Eval rnd on cnt', compute_model_auc(rnd_model, *construct_auction_dataset(datasets['cnt'])))
# print('Eval rnd on trt', compute_model_auc(rnd_model, *construct_auction_dataset(datasets['trt'])))
# print('Eval cnt on cnt', compute_model_auc(cnt_model, *construct_auction_dataset(datasets['cnt'])))
# print('Eval cnt on trt', compute_model_auc(cnt_model, *construct_auction_dataset(datasets['trt'])))
# print('Eval trt on cnt', compute_model_auc(trt_model, *construct_auction_dataset(datasets['cnt'])))
# print('Eval trt on trt', compute_model_auc(trt_model, *construct_auction_dataset(datasets['trt']))) | import argparse
import numpy as np
import operator
import pandas
| random_line_split | |
auction.py | import argparse
import numpy as np
import operator
import pandas
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from .metrics import compute_auc, compute_model_auc
def run_selection(seed, n_samples, auction_size, n_auctions):
seed +=1
np.random.seed(seed)
ind = np.random.randint(0, n_samples, size=auction_size*n_auctions)
return ind, seed+1
def sample_true(x):
if any(x):
return np.random.choice(np.where(x)[0])
else:
return np.random.choice(np.where(x==False)[0])
def run_auction(dataset, seed, model, epsilon, auction_size, n_auctions, max_slate, position_effect=0):
seed += 1
np.random.seed(seed)
name = dataset['name']
x_te = dataset['X']
y_te = dataset['y']
ind = dataset['ind']
auction_type = dataset['auction_type']
reserve = dataset['reserve']
# Copy our sampled data
x, y = x_te[ind, :].copy(), y_te[ind].copy()
# True PClicks, with some noise
pclick = model.predict_proba(x)[:, 1]
logit = np.log(pclick / (1-pclick))
noisy_pclick = 1 / (1 + np.exp(- (logit + np.random.normal(scale=epsilon, size=len(ind)))))
print(name, 'True AUC:', compute_auc(pclick, y), 'Noisy AUC:', compute_auc(noisy_pclick, y))
# Build table with auctionid, dataid, and pclick
df = pandas.DataFrame(zip(ind, pclick, noisy_pclick), columns=['SampleId', 'TruePClick', 'PClick'])
df['AuctionId'] = [int(i / auction_size) for i in range(auction_size*n_auctions)]
# We have either a randomized dataset, or a greedy dataset.
# Use this to create a temporary ranking pclick that's strictly for
# ordering. It won't be used in the training data.
if auction_type == 'random':
|
else:
df['RankingPClick'] = df['PClick']
df = df[df['RankingPClick'] >= reserve].reset_index(drop=True)
# Rank by pclick, then get position and layout. Apply max_slate here.
df = df.sort_values(['AuctionId', 'RankingPClick'], ascending=False)
df['Position'] = df.groupby('AuctionId').cumcount()
df = df[df['Position'] < max_slate]
# Random auctions pick a random layout
if auction_type == 'random':
m = np.max(df['AuctionId'])
ids = np.arange(m)
layouts = np.random.randint(1, max_slate, len(ids))
layout_df = pandas.DataFrame(zip(ids, layouts), columns=['AuctionId', 'Layout'])
df = df.join(layout_df, on='AuctionId', how='inner', lsuffix='', rsuffix='_dup')
df.drop(list(df.filter(regex='_dup$')), axis=1, inplace=True)
df = df[df['Position'] < df['Layout']]
else:
df['Layout'] = df.groupby('AuctionId')['AuctionId'].transform('count')
# Rank by PClick, then cascade to generate clicks
# Effect for Position is Zero
df['Uniform'] = np.random.uniform(size=len(df))
df['WouldClick'] = np.where(df['Uniform'] <= df['TruePClick'], 1, 0)
df['Click'] = 0
if position_effect==1:
df.loc[df["WouldClick"].ne(0).groupby(df['AuctionId']).idxmax(),'Click']=1
else:
sample_id = df['WouldClick'].ne(0).groupby(df['AuctionId']).apply(sample_true)
group_id = df['WouldClick'].ne(0).groupby(df['AuctionId']).groups
idx=[]
for k,v in group_id.items():
idx.append(group_id[k][sample_id[k]])
df.loc[idx,'Click']=1
df['Click'] = df['Click'] * df['WouldClick']
df.drop(columns=['Uniform', 'WouldClick', 'RankingPClick', 'TruePClick'], inplace=True)
return df, seed+1
def construct_auction_dataset(dataset):
X = np.hstack((dataset['auctions'][['PClick', 'Position', 'Layout']], dataset['X'][dataset['auctions']['SampleId']]))
#y = dataset['y'][dataset['auctions']['SampleId']]
y = dataset['auctions']['Click']
return X, y
# if __name__ == '__main__':
# parser = argparse.ArgumentParser()
# parser.add_argument('-s', '--seed', default=16, type=int, help='Random seed for repro')
# # Settings for true pclick distribution
# parser.add_argument('--n_tr_samples', default=30000, type=int, help='Training Samples')
# parser.add_argument('--n_te_cnt_samples', default=100000, type=int, help='Control Testing Samples')
# parser.add_argument('--n_te_trt_samples', default=100000, type=int, help='Treatment Testing Samples')
# parser.add_argument('--n_te_rnd_samples', default=100000, type=int, help='Treatment Randomized Samples')
# parser.add_argument('--n_features', default=200, type=int, help='Features for distribution')
# parser.add_argument('--n_informative', default=150, type=int, help='Relevant Features')
# parser.add_argument('--n_clusters_per_class', default=10, type=int, help='Clusters per class')
# parser.add_argument('--class_sep', default=3, type=float, help='Class separation')
# parser.add_argument('--oracle_n_estimators', default=100, type=int, help='Number of estimators for Oracle')
# parser.add_argument('--oracle_min_samples_leaf', default=100, type=int, help='Minimum number of samples for Oracle to use for labeling')
# parser.add_argument('--auction_n_estimators', default=100, type=int, help='Number of estimators for Auction pclick models')
# parser.add_argument('--auction_min_samples_leaf', default=10000, type=int, help='Minimum number of samples for Auction pclick models to use for labeling')
# parser.add_argument('--auction_size', default=20, type=int, help='Size of Auction')
# parser.add_argument('--n_rnd_auction', default=10000, type=int, help='Number of randomized auctions')
# parser.add_argument('--n_auctions', default=100000, type=int, help='Number of auctions for control and treatment')
# parser.add_argument('--epsilon', default = .5, type=float, help='Noise to add to pclicks prior to sorting')
# parser.add_argument('--control_reserve', default=.6, type=float, help='Reserve on Control Flight')
# parser.add_argument('--treatment_reserve', default=.7, type=float, help='Reserve on Treatment Flight')
# parser.add_argument('--max_slate', default=5, type=float, help='Maximum slate size')
# args = parser.parse_args()
# print('Arguments', args)
# X, y = make_classification( n_samples=args.n_tr_samples + args.n_te_cnt_samples + args.n_te_trt_samples + args.n_te_rnd_samples
# , n_features=args.n_features
# , n_informative=args.n_informative
# , n_clusters_per_class=args.n_clusters_per_class
# , class_sep=args.class_sep
# , random_state=args.seed)
# args.seed += 1
# print('Splitting Datasets')
# samples = np.array([0, args.n_tr_samples, args.n_te_cnt_samples, args.n_te_trt_samples, args.n_te_rnd_samples])
# datasets = {name : {'name':name, 'start':start, 'end':end, 'samples':samples, 'auction_type':auction_type, 'reserve':reserve} for name, start, end, samples, auction_type, reserve in zip(['oracle', 'cnt', 'trt', 'rnd'], np.cumsum(samples), np.cumsum(samples)[1:], samples[1:], [None, 'greedy', 'greedy', 'random'], [None, args.control_reserve, args.treatment_reserve, 0])}
# for dataset, info in datasets.items():
# print(dataset, info['start'], info['end'], info['samples'])
# info['X'], info['y'] = X[info['start']:info['end'], :].copy() , y[info['start']:info['end']].copy()
# # Creates an oracle pclick that ignores position and simply observes c/nc
# oracle, args.seed = train_rf(datasets['oracle']['X'], datasets['oracle']['y'], seed=args.seed, n_estimators=args.oracle_n_estimators, min_samples_leaf=args.oracle_min_samples_leaf)
# print('Oracle Created -- Test AUC Control:', compute_model_auc(oracle, datasets['cnt']['X'], datasets['cnt']['y']))
# print('Oracle Created -- Test AUC Treatment:', compute_model_auc(oracle, datasets['trt']['X'], datasets['trt']['y']))
# # Run selection
# datasets['rnd']['ind'], args.seed = run_selection(args.seed, datasets['rnd']['samples'], args.auction_size, args.n_rnd_auction)
# datasets['cnt']['ind'], args.seed = run_selection(args.seed, datasets['cnt']['samples'], args.auction_size, args.n_auctions)
# datasets['trt']['ind'], args.seed = run_selection(args.seed, datasets['trt']['samples'], args.auction_size, args.n_auctions)
# # Run the auction
# datasets['rnd']['auctions'], args.seed = run_auction(datasets['rnd'], args.seed, oracle, args.epsilon, args.auction_size, args.n_rnd_auction, args.max_slate)
# datasets['cnt']['auctions'], args.seed = run_auction(datasets['cnt'], args.seed, oracle, args.epsilon, args.auction_size, args.n_auctions, args.max_slate)
# datasets['trt']['auctions'], args.seed = run_auction(datasets['trt'], args.seed, oracle, args.epsilon, args.auction_size, args.n_auctions, args.max_slate)
# print('Train RF Models')
# rnd_model, args.seed = train_rf(*construct_auction_dataset(datasets['rnd']), seed=args.seed, n_estimators=args.auction_n_estimators, min_samples_leaf=args.auction_min_samples_leaf)
# cnt_model, args.seed = train_rf(*construct_auction_dataset(datasets['cnt']), seed=args.seed, n_estimators=args.auction_n_estimators, min_samples_leaf=args.auction_min_samples_leaf)
# trt_model, args.seed = train_rf(*construct_auction_dataset(datasets['trt']), seed=args.seed, n_estimators=args.auction_n_estimators, min_samples_leaf=args.auction_min_samples_leaf)
# print('Eval rnd on cnt', compute_model_auc(rnd_model, *construct_auction_dataset(datasets['cnt'])))
# print('Eval rnd on trt', compute_model_auc(rnd_model, *construct_auction_dataset(datasets['trt'])))
# print('Eval cnt on cnt', compute_model_auc(cnt_model, *construct_auction_dataset(datasets['cnt'])))
# print('Eval cnt on trt', compute_model_auc(cnt_model, *construct_auction_dataset(datasets['trt'])))
# print('Eval trt on cnt', compute_model_auc(trt_model, *construct_auction_dataset(datasets['cnt'])))
# print('Eval trt on trt', compute_model_auc(trt_model, *construct_auction_dataset(datasets['trt'])))
| df['RankingPClick'] = np.random.permutation(df['PClick']) | conditional_block |
auction.py | import argparse
import numpy as np
import operator
import pandas
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from .metrics import compute_auc, compute_model_auc
def run_selection(seed, n_samples, auction_size, n_auctions):
seed +=1
np.random.seed(seed)
ind = np.random.randint(0, n_samples, size=auction_size*n_auctions)
return ind, seed+1
def sample_true(x):
|
def run_auction(dataset, seed, model, epsilon, auction_size, n_auctions, max_slate, position_effect=0):
seed += 1
np.random.seed(seed)
name = dataset['name']
x_te = dataset['X']
y_te = dataset['y']
ind = dataset['ind']
auction_type = dataset['auction_type']
reserve = dataset['reserve']
# Copy our sampled data
x, y = x_te[ind, :].copy(), y_te[ind].copy()
# True PClicks, with some noise
pclick = model.predict_proba(x)[:, 1]
logit = np.log(pclick / (1-pclick))
noisy_pclick = 1 / (1 + np.exp(- (logit + np.random.normal(scale=epsilon, size=len(ind)))))
print(name, 'True AUC:', compute_auc(pclick, y), 'Noisy AUC:', compute_auc(noisy_pclick, y))
# Build table with auctionid, dataid, and pclick
df = pandas.DataFrame(zip(ind, pclick, noisy_pclick), columns=['SampleId', 'TruePClick', 'PClick'])
df['AuctionId'] = [int(i / auction_size) for i in range(auction_size*n_auctions)]
# We have either a randomized dataset, or a greedy dataset.
# Use this to create a temporary ranking pclick that's strictly for
# ordering. It won't be used in the training data.
if auction_type == 'random':
df['RankingPClick'] = np.random.permutation(df['PClick'])
else:
df['RankingPClick'] = df['PClick']
df = df[df['RankingPClick'] >= reserve].reset_index(drop=True)
# Rank by pclick, then get position and layout. Apply max_slate here.
df = df.sort_values(['AuctionId', 'RankingPClick'], ascending=False)
df['Position'] = df.groupby('AuctionId').cumcount()
df = df[df['Position'] < max_slate]
# Random auctions pick a random layout
if auction_type == 'random':
m = np.max(df['AuctionId'])
ids = np.arange(m)
layouts = np.random.randint(1, max_slate, len(ids))
layout_df = pandas.DataFrame(zip(ids, layouts), columns=['AuctionId', 'Layout'])
df = df.join(layout_df, on='AuctionId', how='inner', lsuffix='', rsuffix='_dup')
df.drop(list(df.filter(regex='_dup$')), axis=1, inplace=True)
df = df[df['Position'] < df['Layout']]
else:
df['Layout'] = df.groupby('AuctionId')['AuctionId'].transform('count')
# Rank by PClick, then cascade to generate clicks
# Effect for Position is Zero
df['Uniform'] = np.random.uniform(size=len(df))
df['WouldClick'] = np.where(df['Uniform'] <= df['TruePClick'], 1, 0)
df['Click'] = 0
if position_effect==1:
df.loc[df["WouldClick"].ne(0).groupby(df['AuctionId']).idxmax(),'Click']=1
else:
sample_id = df['WouldClick'].ne(0).groupby(df['AuctionId']).apply(sample_true)
group_id = df['WouldClick'].ne(0).groupby(df['AuctionId']).groups
idx=[]
for k,v in group_id.items():
idx.append(group_id[k][sample_id[k]])
df.loc[idx,'Click']=1
df['Click'] = df['Click'] * df['WouldClick']
df.drop(columns=['Uniform', 'WouldClick', 'RankingPClick', 'TruePClick'], inplace=True)
return df, seed+1
def construct_auction_dataset(dataset):
X = np.hstack((dataset['auctions'][['PClick', 'Position', 'Layout']], dataset['X'][dataset['auctions']['SampleId']]))
#y = dataset['y'][dataset['auctions']['SampleId']]
y = dataset['auctions']['Click']
return X, y
# if __name__ == '__main__':
# parser = argparse.ArgumentParser()
# parser.add_argument('-s', '--seed', default=16, type=int, help='Random seed for repro')
# # Settings for true pclick distribution
# parser.add_argument('--n_tr_samples', default=30000, type=int, help='Training Samples')
# parser.add_argument('--n_te_cnt_samples', default=100000, type=int, help='Control Testing Samples')
# parser.add_argument('--n_te_trt_samples', default=100000, type=int, help='Treatment Testing Samples')
# parser.add_argument('--n_te_rnd_samples', default=100000, type=int, help='Treatment Randomized Samples')
# parser.add_argument('--n_features', default=200, type=int, help='Features for distribution')
# parser.add_argument('--n_informative', default=150, type=int, help='Relevant Features')
# parser.add_argument('--n_clusters_per_class', default=10, type=int, help='Clusters per class')
# parser.add_argument('--class_sep', default=3, type=float, help='Class separation')
# parser.add_argument('--oracle_n_estimators', default=100, type=int, help='Number of estimators for Oracle')
# parser.add_argument('--oracle_min_samples_leaf', default=100, type=int, help='Minimum number of samples for Oracle to use for labeling')
# parser.add_argument('--auction_n_estimators', default=100, type=int, help='Number of estimators for Auction pclick models')
# parser.add_argument('--auction_min_samples_leaf', default=10000, type=int, help='Minimum number of samples for Auction pclick models to use for labeling')
# parser.add_argument('--auction_size', default=20, type=int, help='Size of Auction')
# parser.add_argument('--n_rnd_auction', default=10000, type=int, help='Number of randomized auctions')
# parser.add_argument('--n_auctions', default=100000, type=int, help='Number of auctions for control and treatment')
# parser.add_argument('--epsilon', default = .5, type=float, help='Noise to add to pclicks prior to sorting')
# parser.add_argument('--control_reserve', default=.6, type=float, help='Reserve on Control Flight')
# parser.add_argument('--treatment_reserve', default=.7, type=float, help='Reserve on Treatment Flight')
# parser.add_argument('--max_slate', default=5, type=float, help='Maximum slate size')
# args = parser.parse_args()
# print('Arguments', args)
# X, y = make_classification( n_samples=args.n_tr_samples + args.n_te_cnt_samples + args.n_te_trt_samples + args.n_te_rnd_samples
# , n_features=args.n_features
# , n_informative=args.n_informative
# , n_clusters_per_class=args.n_clusters_per_class
# , class_sep=args.class_sep
# , random_state=args.seed)
# args.seed += 1
# print('Splitting Datasets')
# samples = np.array([0, args.n_tr_samples, args.n_te_cnt_samples, args.n_te_trt_samples, args.n_te_rnd_samples])
# datasets = {name : {'name':name, 'start':start, 'end':end, 'samples':samples, 'auction_type':auction_type, 'reserve':reserve} for name, start, end, samples, auction_type, reserve in zip(['oracle', 'cnt', 'trt', 'rnd'], np.cumsum(samples), np.cumsum(samples)[1:], samples[1:], [None, 'greedy', 'greedy', 'random'], [None, args.control_reserve, args.treatment_reserve, 0])}
# for dataset, info in datasets.items():
# print(dataset, info['start'], info['end'], info['samples'])
# info['X'], info['y'] = X[info['start']:info['end'], :].copy() , y[info['start']:info['end']].copy()
# # Creates an oracle pclick that ignores position and simply observes c/nc
# oracle, args.seed = train_rf(datasets['oracle']['X'], datasets['oracle']['y'], seed=args.seed, n_estimators=args.oracle_n_estimators, min_samples_leaf=args.oracle_min_samples_leaf)
# print('Oracle Created -- Test AUC Control:', compute_model_auc(oracle, datasets['cnt']['X'], datasets['cnt']['y']))
# print('Oracle Created -- Test AUC Treatment:', compute_model_auc(oracle, datasets['trt']['X'], datasets['trt']['y']))
# # Run selection
# datasets['rnd']['ind'], args.seed = run_selection(args.seed, datasets['rnd']['samples'], args.auction_size, args.n_rnd_auction)
# datasets['cnt']['ind'], args.seed = run_selection(args.seed, datasets['cnt']['samples'], args.auction_size, args.n_auctions)
# datasets['trt']['ind'], args.seed = run_selection(args.seed, datasets['trt']['samples'], args.auction_size, args.n_auctions)
# # Run the auction
# datasets['rnd']['auctions'], args.seed = run_auction(datasets['rnd'], args.seed, oracle, args.epsilon, args.auction_size, args.n_rnd_auction, args.max_slate)
# datasets['cnt']['auctions'], args.seed = run_auction(datasets['cnt'], args.seed, oracle, args.epsilon, args.auction_size, args.n_auctions, args.max_slate)
# datasets['trt']['auctions'], args.seed = run_auction(datasets['trt'], args.seed, oracle, args.epsilon, args.auction_size, args.n_auctions, args.max_slate)
# print('Train RF Models')
# rnd_model, args.seed = train_rf(*construct_auction_dataset(datasets['rnd']), seed=args.seed, n_estimators=args.auction_n_estimators, min_samples_leaf=args.auction_min_samples_leaf)
# cnt_model, args.seed = train_rf(*construct_auction_dataset(datasets['cnt']), seed=args.seed, n_estimators=args.auction_n_estimators, min_samples_leaf=args.auction_min_samples_leaf)
# trt_model, args.seed = train_rf(*construct_auction_dataset(datasets['trt']), seed=args.seed, n_estimators=args.auction_n_estimators, min_samples_leaf=args.auction_min_samples_leaf)
# print('Eval rnd on cnt', compute_model_auc(rnd_model, *construct_auction_dataset(datasets['cnt'])))
# print('Eval rnd on trt', compute_model_auc(rnd_model, *construct_auction_dataset(datasets['trt'])))
# print('Eval cnt on cnt', compute_model_auc(cnt_model, *construct_auction_dataset(datasets['cnt'])))
# print('Eval cnt on trt', compute_model_auc(cnt_model, *construct_auction_dataset(datasets['trt'])))
# print('Eval trt on cnt', compute_model_auc(trt_model, *construct_auction_dataset(datasets['cnt'])))
# print('Eval trt on trt', compute_model_auc(trt_model, *construct_auction_dataset(datasets['trt'])))
| if any(x):
return np.random.choice(np.where(x)[0])
else:
return np.random.choice(np.where(x==False)[0]) | identifier_body |
auction.py | import argparse
import numpy as np
import operator
import pandas
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from .metrics import compute_auc, compute_model_auc
def | (seed, n_samples, auction_size, n_auctions):
seed +=1
np.random.seed(seed)
ind = np.random.randint(0, n_samples, size=auction_size*n_auctions)
return ind, seed+1
def sample_true(x):
if any(x):
return np.random.choice(np.where(x)[0])
else:
return np.random.choice(np.where(x==False)[0])
def run_auction(dataset, seed, model, epsilon, auction_size, n_auctions, max_slate, position_effect=0):
seed += 1
np.random.seed(seed)
name = dataset['name']
x_te = dataset['X']
y_te = dataset['y']
ind = dataset['ind']
auction_type = dataset['auction_type']
reserve = dataset['reserve']
# Copy our sampled data
x, y = x_te[ind, :].copy(), y_te[ind].copy()
# True PClicks, with some noise
pclick = model.predict_proba(x)[:, 1]
logit = np.log(pclick / (1-pclick))
noisy_pclick = 1 / (1 + np.exp(- (logit + np.random.normal(scale=epsilon, size=len(ind)))))
print(name, 'True AUC:', compute_auc(pclick, y), 'Noisy AUC:', compute_auc(noisy_pclick, y))
# Build table with auctionid, dataid, and pclick
df = pandas.DataFrame(zip(ind, pclick, noisy_pclick), columns=['SampleId', 'TruePClick', 'PClick'])
df['AuctionId'] = [int(i / auction_size) for i in range(auction_size*n_auctions)]
# We have either a randomized dataset, or a greedy dataset.
# Use this to create a temporary ranking pclick that's strictly for
# ordering. It won't be used in the training data.
if auction_type == 'random':
df['RankingPClick'] = np.random.permutation(df['PClick'])
else:
df['RankingPClick'] = df['PClick']
df = df[df['RankingPClick'] >= reserve].reset_index(drop=True)
# Rank by pclick, then get position and layout. Apply max_slate here.
df = df.sort_values(['AuctionId', 'RankingPClick'], ascending=False)
df['Position'] = df.groupby('AuctionId').cumcount()
df = df[df['Position'] < max_slate]
# Random auctions pick a random layout
if auction_type == 'random':
m = np.max(df['AuctionId'])
ids = np.arange(m)
layouts = np.random.randint(1, max_slate, len(ids))
layout_df = pandas.DataFrame(zip(ids, layouts), columns=['AuctionId', 'Layout'])
df = df.join(layout_df, on='AuctionId', how='inner', lsuffix='', rsuffix='_dup')
df.drop(list(df.filter(regex='_dup$')), axis=1, inplace=True)
df = df[df['Position'] < df['Layout']]
else:
df['Layout'] = df.groupby('AuctionId')['AuctionId'].transform('count')
# Rank by PClick, then cascade to generate clicks
# Effect for Position is Zero
df['Uniform'] = np.random.uniform(size=len(df))
df['WouldClick'] = np.where(df['Uniform'] <= df['TruePClick'], 1, 0)
df['Click'] = 0
if position_effect==1:
df.loc[df["WouldClick"].ne(0).groupby(df['AuctionId']).idxmax(),'Click']=1
else:
sample_id = df['WouldClick'].ne(0).groupby(df['AuctionId']).apply(sample_true)
group_id = df['WouldClick'].ne(0).groupby(df['AuctionId']).groups
idx=[]
for k,v in group_id.items():
idx.append(group_id[k][sample_id[k]])
df.loc[idx,'Click']=1
df['Click'] = df['Click'] * df['WouldClick']
df.drop(columns=['Uniform', 'WouldClick', 'RankingPClick', 'TruePClick'], inplace=True)
return df, seed+1
def construct_auction_dataset(dataset):
X = np.hstack((dataset['auctions'][['PClick', 'Position', 'Layout']], dataset['X'][dataset['auctions']['SampleId']]))
#y = dataset['y'][dataset['auctions']['SampleId']]
y = dataset['auctions']['Click']
return X, y
# if __name__ == '__main__':
# parser = argparse.ArgumentParser()
# parser.add_argument('-s', '--seed', default=16, type=int, help='Random seed for repro')
# # Settings for true pclick distribution
# parser.add_argument('--n_tr_samples', default=30000, type=int, help='Training Samples')
# parser.add_argument('--n_te_cnt_samples', default=100000, type=int, help='Control Testing Samples')
# parser.add_argument('--n_te_trt_samples', default=100000, type=int, help='Treatment Testing Samples')
# parser.add_argument('--n_te_rnd_samples', default=100000, type=int, help='Treatment Randomized Samples')
# parser.add_argument('--n_features', default=200, type=int, help='Features for distribution')
# parser.add_argument('--n_informative', default=150, type=int, help='Relevant Features')
# parser.add_argument('--n_clusters_per_class', default=10, type=int, help='Clusters per class')
# parser.add_argument('--class_sep', default=3, type=float, help='Class separation')
# parser.add_argument('--oracle_n_estimators', default=100, type=int, help='Number of estimators for Oracle')
# parser.add_argument('--oracle_min_samples_leaf', default=100, type=int, help='Minimum number of samples for Oracle to use for labeling')
# parser.add_argument('--auction_n_estimators', default=100, type=int, help='Number of estimators for Auction pclick models')
# parser.add_argument('--auction_min_samples_leaf', default=10000, type=int, help='Minimum number of samples for Auction pclick models to use for labeling')
# parser.add_argument('--auction_size', default=20, type=int, help='Size of Auction')
# parser.add_argument('--n_rnd_auction', default=10000, type=int, help='Number of randomized auctions')
# parser.add_argument('--n_auctions', default=100000, type=int, help='Number of auctions for control and treatment')
# parser.add_argument('--epsilon', default = .5, type=float, help='Noise to add to pclicks prior to sorting')
# parser.add_argument('--control_reserve', default=.6, type=float, help='Reserve on Control Flight')
# parser.add_argument('--treatment_reserve', default=.7, type=float, help='Reserve on Treatment Flight')
# parser.add_argument('--max_slate', default=5, type=float, help='Maximum slate size')
# args = parser.parse_args()
# print('Arguments', args)
# X, y = make_classification( n_samples=args.n_tr_samples + args.n_te_cnt_samples + args.n_te_trt_samples + args.n_te_rnd_samples
# , n_features=args.n_features
# , n_informative=args.n_informative
# , n_clusters_per_class=args.n_clusters_per_class
# , class_sep=args.class_sep
# , random_state=args.seed)
# args.seed += 1
# print('Splitting Datasets')
# samples = np.array([0, args.n_tr_samples, args.n_te_cnt_samples, args.n_te_trt_samples, args.n_te_rnd_samples])
# datasets = {name : {'name':name, 'start':start, 'end':end, 'samples':samples, 'auction_type':auction_type, 'reserve':reserve} for name, start, end, samples, auction_type, reserve in zip(['oracle', 'cnt', 'trt', 'rnd'], np.cumsum(samples), np.cumsum(samples)[1:], samples[1:], [None, 'greedy', 'greedy', 'random'], [None, args.control_reserve, args.treatment_reserve, 0])}
# for dataset, info in datasets.items():
# print(dataset, info['start'], info['end'], info['samples'])
# info['X'], info['y'] = X[info['start']:info['end'], :].copy() , y[info['start']:info['end']].copy()
# # Creates an oracle pclick that ignores position and simply observes c/nc
# oracle, args.seed = train_rf(datasets['oracle']['X'], datasets['oracle']['y'], seed=args.seed, n_estimators=args.oracle_n_estimators, min_samples_leaf=args.oracle_min_samples_leaf)
# print('Oracle Created -- Test AUC Control:', compute_model_auc(oracle, datasets['cnt']['X'], datasets['cnt']['y']))
# print('Oracle Created -- Test AUC Treatment:', compute_model_auc(oracle, datasets['trt']['X'], datasets['trt']['y']))
# # Run selection
# datasets['rnd']['ind'], args.seed = run_selection(args.seed, datasets['rnd']['samples'], args.auction_size, args.n_rnd_auction)
# datasets['cnt']['ind'], args.seed = run_selection(args.seed, datasets['cnt']['samples'], args.auction_size, args.n_auctions)
# datasets['trt']['ind'], args.seed = run_selection(args.seed, datasets['trt']['samples'], args.auction_size, args.n_auctions)
# # Run the auction
# datasets['rnd']['auctions'], args.seed = run_auction(datasets['rnd'], args.seed, oracle, args.epsilon, args.auction_size, args.n_rnd_auction, args.max_slate)
# datasets['cnt']['auctions'], args.seed = run_auction(datasets['cnt'], args.seed, oracle, args.epsilon, args.auction_size, args.n_auctions, args.max_slate)
# datasets['trt']['auctions'], args.seed = run_auction(datasets['trt'], args.seed, oracle, args.epsilon, args.auction_size, args.n_auctions, args.max_slate)
# print('Train RF Models')
# rnd_model, args.seed = train_rf(*construct_auction_dataset(datasets['rnd']), seed=args.seed, n_estimators=args.auction_n_estimators, min_samples_leaf=args.auction_min_samples_leaf)
# cnt_model, args.seed = train_rf(*construct_auction_dataset(datasets['cnt']), seed=args.seed, n_estimators=args.auction_n_estimators, min_samples_leaf=args.auction_min_samples_leaf)
# trt_model, args.seed = train_rf(*construct_auction_dataset(datasets['trt']), seed=args.seed, n_estimators=args.auction_n_estimators, min_samples_leaf=args.auction_min_samples_leaf)
# print('Eval rnd on cnt', compute_model_auc(rnd_model, *construct_auction_dataset(datasets['cnt'])))
# print('Eval rnd on trt', compute_model_auc(rnd_model, *construct_auction_dataset(datasets['trt'])))
# print('Eval cnt on cnt', compute_model_auc(cnt_model, *construct_auction_dataset(datasets['cnt'])))
# print('Eval cnt on trt', compute_model_auc(cnt_model, *construct_auction_dataset(datasets['trt'])))
# print('Eval trt on cnt', compute_model_auc(trt_model, *construct_auction_dataset(datasets['cnt'])))
# print('Eval trt on trt', compute_model_auc(trt_model, *construct_auction_dataset(datasets['trt'])))
| run_selection | identifier_name |
base.py | # Copyright (C) 2013-2015 Yu-Jie Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function, unicode_literals
import codecs
import logging
import re
import warnings
from abc import ABCMeta, abstractmethod
from base64 import b64encode
from hashlib import md5
from os.path import basename, exists, splitext
HAS_SMARTYPANTS = False
try:
import smartypants
HAS_SMARTYPANTS = True
except ImportError:
pass
class BaseHandler():
"""The base clase of markup handler"""
__metaclass__ = ABCMeta
# default handler options
OPTIONS = {
'markup_prefix': '',
'markup_suffix': '',
'smartypants': False,
'id_affix': None,
}
MERGE_HEADERS = ('service', 'kind', 'blog', 'id', 'url', 'draft')
HEADER_FMT = '%s: %s'
PREFIX_HEAD = ''
PREFIX_END = ''
RE_SPLIT = re.compile(r'^(?:([^\n]*?!b.*?)\n\n)?(.*)',
re.DOTALL | re.MULTILINE)
RE_HEADER = re.compile(r'.*?([a-zA-Z0-9_-]+)\s*[=:]\s*(.*)\s*')
SUPPORT_EMBED_IMAGES = True
RE_IMG = re.compile(
r'''
(?P<prefix><img.*?)
src="(?!data:image/|https?://)(?P<src>[^"]*)"
(?P<suffix>.*?>)
''',
re.VERBOSE
)
def __init__(self, filename, options=None):
self.filename = filename
self.title = ''
self.options = self.OPTIONS.copy()
self.options.update(options or {})
if filename:
with codecs.open(filename, 'r', 'utf8') as f:
self.source = f.read()
header, markup = self.split_header_markup()
self.title = splitext(basename(filename))[0]
else:
header = {}
markup = ''
self.header = header
self.markup = markup
self.modified = False
def set_header(self, k, v):
"""Set header
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> print(handler.header)
{}
>>> handler.modified
False
>>> handler.set_header('foo', 'bar')
>>> print(handler.header['foo'])
bar
>>> handler.modified
True
"""
if k in self.header and self.header[k] == v:
return
self.header[k] = v
self.modified = True
def merge_header(self, header):
"""Merge header
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> handler.merge_header({'id': 12345, 'bogus': 'blah'})
>>> print(handler.header['id'])
12345
>>> handler.modified
True
"""
for k, v in header.items():
if k not in self.MERGE_HEADERS:
continue
if k == 'blog':
v = v['id']
elif k == 'kind':
v = v.replace('blogger#', '')
self.set_header(k, v)
@property
def markup(self):
"""Return markup with markup_prefix and markup_suffix
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> options = {
... 'markup_prefix': 'the prefix\\n',
... 'markup_suffix': '\\nthe suffix',
... }
>>> handler = Handler(None, options)
>>> handler.markup = 'content'
>>> print(handler.markup)
the prefix
content
the suffix
"""
return '%s%s%s' % (
self.options['markup_prefix'],
self._markup,
self.options['markup_suffix'],
)
@markup.setter
def markup(self, markup):
"""Set the markup"""
self._markup = markup
@property
def id_affix(self):
"""Return id_affix
The initial value is from self.options, and can be overriden by
self.header.
Returns
* None if it's None.
* value if value is not ''
* first 4 digits of md5 of value if value is '', and assign back to
self.options. _generate method of Handler should write back to
self.header.
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> options = {
... 'id_affix': None,
... }
>>> handler = Handler(None, options)
>>> print(repr(handler.id_affix))
None
>>> handler.options['id_affix'] = 'foobar'
>>> print(handler.id_affix)
foobar
>>> # auto generate an id affix from title
>>> handler.options['id_affix'] = ''
>>> handler.title = 'abc'
>>> print(handler.id_affix)
9001
>>> handler.header['id_affix'] = 'override-affix'
>>> print(handler.id_affix)
override-affix
"""
id_affix = self.options['id_affix']
# override?
if 'id_affix' in self.header:
id_affix = self.header['id_affix']
if self.header['id_affix'] and id_affix != 'None':
return self.header['id_affix']
# second case is from header of post, has to use string 'None'
if id_affix is None or id_affix == 'None':
return None
if id_affix:
return id_affix
m = md5()
# if self.title is Unicode-type string, then encode it,
# otherwise it's byte-type, then just update with it.
# The __future__.unicode_literals ensures '' is unicode-type.
if isinstance(self.title, type('')):
m.update(self.title.encode('utf8'))
else:
m.update(self.title)
return m.hexdigest()[:4]
@abstractmethod
def _generate(self, markup=None):
"""Generate HTML of markup source"""
raise NotImplementedError
def generate(self, markup=None):
"""Generate HTML
>>> class Handler(BaseHandler):
... def _generate(self, markup=None): return markup
>>> handler = Handler(None)
>>> print(handler.generate('foo "bar"'))
foo "bar"
>>> handler.options['smartypants'] = True
>>> print(handler.generate('foo "bar"'))
foo “bar”
"""
if markup is None:
markup = self.markup
html = self._generate(markup)
if self.options.get('smartypants', False):
if not HAS_SMARTYPANTS:
warnings.warn("smartypants option is set, "
"but the library isn't installed.", RuntimeWarning)
return html
Attr = smartypants.Attr
html = smartypants.smartypants(html, Attr.set1 | Attr.w)
if self.SUPPORT_EMBED_IMAGES and self.options.get('embed_images', False):
html = self.embed_images(html)
return html
def generate_header(self, header=None):
"""Generate header in text for writing back to the file
>>> class Handler(BaseHandler):
... PREFIX_HEAD = 'foo '
... PREFIX_END = 'bar'
... HEADER_FMT = '--- %s: %s'
... def _generate(self, source=None): pass
>>> handler = Handler(None)
>>> print(handler.generate_header({'title': 'foobar'}))
foo !b
--- title: foobar
bar
<BLANKLINE>
>>> print(handler.generate_header({'labels': ['foo', 'bar']}))
foo !b
--- labels: foo, bar
bar
<BLANKLINE>
"""
if header is None:
header = self.header
lines = [self.PREFIX_HEAD + '!b']
for k, v in header.items():
if k in ('labels', 'categories'):
v = ', '.join(v)
elif k == 'draft':
v = repr(v)
lines.append(self.HEADER_FMT % (k, v))
lines.append(self.PREFIX_END)
return '\n'.join([_f for _f in lines if _f]) + '\n'
def generate_title(self, title=None):
"""Generate title for posting
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> print(handler.generate_title('foo "bar"'))
foo "bar"
>>> print(handler.generate_title('foo\\nbar\\n\\n'))
foo bar
>>> handler.options['smartypants'] = True
>>> print(handler.generate_title('foo "bar"'))
foo “bar”
"""
if title is None:
title = self.header.get('title', self.title)
title = self.generate(title)
title = title.replace('<p>', '').replace('</p>', '')
# no trailing newlines
title = re.sub(r'\n+', ' ', title).rstrip()
return title
def generate_post(self):
"""Generate dict for merging to post object of API"""
post = {'title': self.generate_title(), 'draft': False}
for k in ('blog', 'id', 'labels', 'categories', 'draft'):
if k not in self.header:
continue
if k == 'blog':
post[k] = {'id': self.header[k]}
else:
post[k] = self.header[k]
return post
def split_header_markup(self, source=None):
"""Split source into header and markup parts
It also parses header into a dict."""
if source is None:
source = self.source
header, markup = self.RE_SPLIT.match(source).groups()
if not header:
|
if not markup:
logging.warning('markup is empty')
logging.debug('markup length = %d' % len(markup))
_header = {}
if header:
for item in header.split('\n'):
m = self.RE_HEADER.match(item)
if not m:
continue
k, v = list(map(type('').strip, m.groups()))
if k in ('labels', 'categories'):
v = [_f for _f in [label.strip() for label in v.split(',')] if _f]
elif k == 'draft':
v = v.lower() in ('true', 'yes', '1')
_header[k] = v
header = _header
logging.debug('header = %r' % header)
return header, markup
def update_source(self, header=None, markup=None, only_returned=False):
if header is None:
header = self.header
if markup is None:
markup = self._markup
source = self.generate_header(header) + '\n' + markup
if not only_returned:
self.source = source
return source
def write(self, forced=False):
"""Write source back to file"""
if not self.modified:
if not forced:
return
else:
self.update_source()
with codecs.open(self.filename, 'w', 'utf8') as f:
f.write(self.source)
self.modified = False
def embed_images(self, html):
"""Embed images on local filesystem as data URI
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> html = '<img src="http://example.com/example.png"/>'
>>> print(handler.embed_images(html))
<img src="http://example.com/example.png"/>
>>> html = '<img src="tests/test.png"/>'
>>> print(handler.embed_images(html)) #doctest: +ELLIPSIS
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAAB...QmCC"/>
"""
if not self.SUPPORT_EMBED_IMAGES:
raise RuntimeError('%r does not support embed_images' % type(self))
return self.RE_IMG.sub(self._embed_image, html)
@staticmethod
def _embed_image(match):
src = match.group('src')
if not exists(src):
print('%s is not found.' % src)
return match.group(0)
with open(src, 'rb') as f:
data = b64encode(f.read()).decode('ascii')
return '%ssrc="%s"%s' % (
match.group('prefix'),
'data:image/%s;base64,%s' % (splitext(src)[1].lstrip('.'), data),
match.group('suffix'),
)
| logging.warning('found no header') | conditional_block |
base.py | # Copyright (C) 2013-2015 Yu-Jie Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function, unicode_literals
import codecs
import logging
import re
import warnings
from abc import ABCMeta, abstractmethod
from base64 import b64encode
from hashlib import md5
from os.path import basename, exists, splitext
HAS_SMARTYPANTS = False
try:
import smartypants
HAS_SMARTYPANTS = True
except ImportError:
pass
class BaseHandler():
"""The base clase of markup handler"""
__metaclass__ = ABCMeta
# default handler options
OPTIONS = {
'markup_prefix': '',
'markup_suffix': '',
'smartypants': False,
'id_affix': None,
}
MERGE_HEADERS = ('service', 'kind', 'blog', 'id', 'url', 'draft')
HEADER_FMT = '%s: %s'
PREFIX_HEAD = ''
PREFIX_END = ''
RE_SPLIT = re.compile(r'^(?:([^\n]*?!b.*?)\n\n)?(.*)',
re.DOTALL | re.MULTILINE)
RE_HEADER = re.compile(r'.*?([a-zA-Z0-9_-]+)\s*[=:]\s*(.*)\s*')
SUPPORT_EMBED_IMAGES = True
RE_IMG = re.compile(
r'''
(?P<prefix><img.*?)
src="(?!data:image/|https?://)(?P<src>[^"]*)"
(?P<suffix>.*?>)
''',
re.VERBOSE
)
def __init__(self, filename, options=None):
self.filename = filename
self.title = ''
self.options = self.OPTIONS.copy()
self.options.update(options or {})
if filename:
with codecs.open(filename, 'r', 'utf8') as f:
self.source = f.read()
header, markup = self.split_header_markup()
self.title = splitext(basename(filename))[0]
else:
header = {}
markup = ''
self.header = header
self.markup = markup
self.modified = False
def set_header(self, k, v):
"""Set header
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> print(handler.header)
{}
>>> handler.modified
False
>>> handler.set_header('foo', 'bar')
>>> print(handler.header['foo'])
bar
>>> handler.modified
True
"""
if k in self.header and self.header[k] == v:
return
self.header[k] = v
self.modified = True
def merge_header(self, header):
"""Merge header
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> handler.merge_header({'id': 12345, 'bogus': 'blah'})
>>> print(handler.header['id'])
12345
>>> handler.modified
True
"""
for k, v in header.items():
if k not in self.MERGE_HEADERS:
continue
if k == 'blog':
v = v['id']
elif k == 'kind':
v = v.replace('blogger#', '')
self.set_header(k, v)
@property
def markup(self):
"""Return markup with markup_prefix and markup_suffix
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> options = {
... 'markup_prefix': 'the prefix\\n',
... 'markup_suffix': '\\nthe suffix',
... }
>>> handler = Handler(None, options)
>>> handler.markup = 'content'
>>> print(handler.markup)
the prefix
content
the suffix
"""
return '%s%s%s' % (
self.options['markup_prefix'],
self._markup,
self.options['markup_suffix'],
)
@markup.setter
def markup(self, markup):
"""Set the markup"""
self._markup = markup
@property
def id_affix(self):
"""Return id_affix
The initial value is from self.options, and can be overriden by
self.header.
Returns
* None if it's None.
* value if value is not ''
* first 4 digits of md5 of value if value is '', and assign back to
self.options. _generate method of Handler should write back to
self.header.
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> options = {
... 'id_affix': None,
... }
>>> handler = Handler(None, options)
>>> print(repr(handler.id_affix))
None
>>> handler.options['id_affix'] = 'foobar'
>>> print(handler.id_affix)
foobar
>>> # auto generate an id affix from title
>>> handler.options['id_affix'] = ''
>>> handler.title = 'abc'
>>> print(handler.id_affix)
9001
>>> handler.header['id_affix'] = 'override-affix'
>>> print(handler.id_affix)
override-affix
"""
id_affix = self.options['id_affix']
# override?
if 'id_affix' in self.header:
id_affix = self.header['id_affix']
if self.header['id_affix'] and id_affix != 'None':
return self.header['id_affix']
# second case is from header of post, has to use string 'None'
if id_affix is None or id_affix == 'None':
return None
if id_affix:
return id_affix
m = md5()
# if self.title is Unicode-type string, then encode it,
# otherwise it's byte-type, then just update with it.
# The __future__.unicode_literals ensures '' is unicode-type.
if isinstance(self.title, type('')):
m.update(self.title.encode('utf8'))
else:
m.update(self.title)
return m.hexdigest()[:4]
@abstractmethod
def _generate(self, markup=None):
"""Generate HTML of markup source"""
raise NotImplementedError
def generate(self, markup=None):
"""Generate HTML
>>> class Handler(BaseHandler):
... def _generate(self, markup=None): return markup
>>> handler = Handler(None)
>>> print(handler.generate('foo "bar"'))
foo "bar"
>>> handler.options['smartypants'] = True
>>> print(handler.generate('foo "bar"'))
foo “bar”
"""
if markup is None:
markup = self.markup
html = self._generate(markup)
if self.options.get('smartypants', False):
if not HAS_SMARTYPANTS:
warnings.warn("smartypants option is set, "
"but the library isn't installed.", RuntimeWarning)
return html
Attr = smartypants.Attr
html = smartypants.smartypants(html, Attr.set1 | Attr.w)
if self.SUPPORT_EMBED_IMAGES and self.options.get('embed_images', False):
html = self.embed_images(html)
return html
def generate_header(self, header=None):
"""Generate header in text for writing back to the file
>>> class Handler(BaseHandler):
... PREFIX_HEAD = 'foo '
... PREFIX_END = 'bar'
... HEADER_FMT = '--- %s: %s'
... def _generate(self, source=None): pass
>>> handler = Handler(None)
>>> print(handler.generate_header({'title': 'foobar'}))
foo !b
--- title: foobar
bar
<BLANKLINE>
>>> print(handler.generate_header({'labels': ['foo', 'bar']}))
foo !b
--- labels: foo, bar
bar
<BLANKLINE>
"""
if header is None:
header = self.header
lines = [self.PREFIX_HEAD + '!b']
for k, v in header.items():
if k in ('labels', 'categories'):
v = ', '.join(v)
elif k == 'draft':
v = repr(v)
lines.append(self.HEADER_FMT % (k, v))
lines.append(self.PREFIX_END)
return '\n'.join([_f for _f in lines if _f]) + '\n'
def generate_title(self, title=None):
"""Generate title for posting
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> print(handler.generate_title('foo "bar"'))
foo "bar"
>>> print(handler.generate_title('foo\\nbar\\n\\n'))
foo bar
>>> handler.options['smartypants'] = True
>>> print(handler.generate_title('foo "bar"'))
foo “bar”
"""
if title is None:
title = self.header.get('title', self.title)
title = self.generate(title)
title = title.replace('<p>', '').replace('</p>', '')
# no trailing newlines
title = re.sub(r'\n+', ' ', title).rstrip()
return title
def generate_post(self):
"""Generate dict for merging to post object of API"""
post = {'title': self.generate_title(), 'draft': False}
for k in ('blog', 'id', 'labels', 'categories', 'draft'):
if k not in self.header:
continue
if k == 'blog':
post[k] = {'id': self.header[k]}
else:
post[k] = self.header[k]
return post
def split_header_markup(self, source=None):
"""Split source into header and markup parts
It also parses header into a dict."""
if source is None:
source = self.source
header, markup = self.RE_SPLIT.match(source).groups()
if not header:
logging.warning('found no header')
if not markup:
logging.warning('markup is empty')
logging.debug('markup length = %d' % len(markup))
_header = {}
if header:
for item in header.split('\n'):
m = self.RE_HEADER.match(item)
if not m:
continue
k, v = list(map(type('').strip, m.groups()))
if k in ('labels', 'categories'):
v = [_f for _f in [label.strip() for label in v.split(',')] if _f]
elif k == 'draft':
v = v.lower() in ('true', 'yes', '1')
_header[k] = v
header = _header
logging.debug('header = %r' % header)
return header, markup
def update_source(self, header=None, markup=None, only_returned=False):
if header is None:
header = self.header
if markup is None:
markup = self._markup
source = self.generate_header(header) + '\n' + markup
if not only_returned:
self.source = source
return source
def write(self, forced=False):
"""Write source back to file"""
if not self.modified:
if not forced:
return
else:
self.update_source()
with codecs.open(self.filename, 'w', 'utf8') as f:
f.write(self.source)
self.modified = False
def embed_images(self, html):
|
@staticmethod
def _embed_image(match):
src = match.group('src')
if not exists(src):
print('%s is not found.' % src)
return match.group(0)
with open(src, 'rb') as f:
data = b64encode(f.read()).decode('ascii')
return '%ssrc="%s"%s' % (
match.group('prefix'),
'data:image/%s;base64,%s' % (splitext(src)[1].lstrip('.'), data),
match.group('suffix'),
)
| """Embed images on local filesystem as data URI
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> html = '<img src="http://example.com/example.png"/>'
>>> print(handler.embed_images(html))
<img src="http://example.com/example.png"/>
>>> html = '<img src="tests/test.png"/>'
>>> print(handler.embed_images(html)) #doctest: +ELLIPSIS
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAAB...QmCC"/>
"""
if not self.SUPPORT_EMBED_IMAGES:
raise RuntimeError('%r does not support embed_images' % type(self))
return self.RE_IMG.sub(self._embed_image, html) | identifier_body |
base.py | # Copyright (C) 2013-2015 Yu-Jie Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
from __future__ import print_function, unicode_literals
import codecs
import logging
import re
import warnings
from abc import ABCMeta, abstractmethod
from base64 import b64encode
from hashlib import md5
from os.path import basename, exists, splitext
HAS_SMARTYPANTS = False
try:
import smartypants
HAS_SMARTYPANTS = True
except ImportError:
pass
class BaseHandler():
"""The base clase of markup handler"""
__metaclass__ = ABCMeta
# default handler options
OPTIONS = {
'markup_prefix': '',
'markup_suffix': '',
'smartypants': False,
'id_affix': None,
}
MERGE_HEADERS = ('service', 'kind', 'blog', 'id', 'url', 'draft')
HEADER_FMT = '%s: %s'
PREFIX_HEAD = ''
PREFIX_END = ''
RE_SPLIT = re.compile(r'^(?:([^\n]*?!b.*?)\n\n)?(.*)',
re.DOTALL | re.MULTILINE)
RE_HEADER = re.compile(r'.*?([a-zA-Z0-9_-]+)\s*[=:]\s*(.*)\s*')
SUPPORT_EMBED_IMAGES = True
RE_IMG = re.compile(
r'''
(?P<prefix><img.*?)
src="(?!data:image/|https?://)(?P<src>[^"]*)"
(?P<suffix>.*?>)
''',
re.VERBOSE
)
def __init__(self, filename, options=None):
self.filename = filename
self.title = ''
self.options = self.OPTIONS.copy()
self.options.update(options or {})
if filename:
with codecs.open(filename, 'r', 'utf8') as f:
self.source = f.read()
header, markup = self.split_header_markup()
self.title = splitext(basename(filename))[0]
else:
header = {}
markup = ''
self.header = header
self.markup = markup
self.modified = False
def set_header(self, k, v):
"""Set header
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> print(handler.header)
{}
>>> handler.modified
False
>>> handler.set_header('foo', 'bar')
>>> print(handler.header['foo'])
bar
>>> handler.modified
True
"""
if k in self.header and self.header[k] == v:
return
self.header[k] = v
self.modified = True
def merge_header(self, header):
"""Merge header
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> handler.merge_header({'id': 12345, 'bogus': 'blah'})
>>> print(handler.header['id'])
12345
>>> handler.modified
True
"""
for k, v in header.items():
if k not in self.MERGE_HEADERS:
continue
if k == 'blog':
v = v['id']
elif k == 'kind':
v = v.replace('blogger#', '')
self.set_header(k, v)
@property
def markup(self):
"""Return markup with markup_prefix and markup_suffix
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> options = {
... 'markup_prefix': 'the prefix\\n',
... 'markup_suffix': '\\nthe suffix',
... }
>>> handler = Handler(None, options)
>>> handler.markup = 'content'
>>> print(handler.markup)
the prefix
content
the suffix
"""
return '%s%s%s' % (
self.options['markup_prefix'],
self._markup,
self.options['markup_suffix'],
)
@markup.setter
def markup(self, markup):
"""Set the markup"""
self._markup = markup
@property
def id_affix(self):
"""Return id_affix
The initial value is from self.options, and can be overriden by
self.header.
Returns
* None if it's None.
* value if value is not ''
* first 4 digits of md5 of value if value is '', and assign back to
self.options. _generate method of Handler should write back to
self.header.
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> options = {
... 'id_affix': None,
... }
>>> handler = Handler(None, options)
>>> print(repr(handler.id_affix))
None
>>> handler.options['id_affix'] = 'foobar'
>>> print(handler.id_affix)
foobar
>>> # auto generate an id affix from title
>>> handler.options['id_affix'] = ''
>>> handler.title = 'abc'
>>> print(handler.id_affix)
9001
>>> handler.header['id_affix'] = 'override-affix'
>>> print(handler.id_affix)
override-affix
"""
id_affix = self.options['id_affix']
# override?
if 'id_affix' in self.header:
id_affix = self.header['id_affix']
if self.header['id_affix'] and id_affix != 'None':
return self.header['id_affix']
# second case is from header of post, has to use string 'None'
if id_affix is None or id_affix == 'None':
return None
if id_affix:
return id_affix
m = md5()
# if self.title is Unicode-type string, then encode it,
# otherwise it's byte-type, then just update with it.
# The __future__.unicode_literals ensures '' is unicode-type.
if isinstance(self.title, type('')):
m.update(self.title.encode('utf8'))
else:
m.update(self.title)
return m.hexdigest()[:4]
@abstractmethod
def _generate(self, markup=None):
"""Generate HTML of markup source"""
raise NotImplementedError
def generate(self, markup=None):
"""Generate HTML
>>> class Handler(BaseHandler):
... def _generate(self, markup=None): return markup
>>> handler = Handler(None)
>>> print(handler.generate('foo "bar"'))
foo "bar"
>>> handler.options['smartypants'] = True
>>> print(handler.generate('foo "bar"'))
foo “bar”
"""
if markup is None:
markup = self.markup
html = self._generate(markup)
if self.options.get('smartypants', False):
if not HAS_SMARTYPANTS:
warnings.warn("smartypants option is set, "
"but the library isn't installed.", RuntimeWarning)
return html
Attr = smartypants.Attr
html = smartypants.smartypants(html, Attr.set1 | Attr.w)
if self.SUPPORT_EMBED_IMAGES and self.options.get('embed_images', False):
html = self.embed_images(html)
return html
def generate_header(self, header=None):
"""Generate header in text for writing back to the file
>>> class Handler(BaseHandler):
... PREFIX_HEAD = 'foo '
... PREFIX_END = 'bar'
... HEADER_FMT = '--- %s: %s'
... def _generate(self, source=None): pass
>>> handler = Handler(None)
>>> print(handler.generate_header({'title': 'foobar'}))
foo !b
--- title: foobar
bar
<BLANKLINE>
>>> print(handler.generate_header({'labels': ['foo', 'bar']}))
foo !b
--- labels: foo, bar
bar
<BLANKLINE>
"""
if header is None:
header = self.header
lines = [self.PREFIX_HEAD + '!b']
for k, v in header.items():
if k in ('labels', 'categories'):
v = ', '.join(v)
elif k == 'draft':
v = repr(v)
lines.append(self.HEADER_FMT % (k, v))
lines.append(self.PREFIX_END)
return '\n'.join([_f for _f in lines if _f]) + '\n'
def generate_title(self, title=None):
"""Generate title for posting
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> print(handler.generate_title('foo "bar"'))
foo "bar"
>>> print(handler.generate_title('foo\\nbar\\n\\n'))
foo bar
>>> handler.options['smartypants'] = True
>>> print(handler.generate_title('foo "bar"'))
foo “bar”
"""
if title is None:
title = self.header.get('title', self.title)
title = self.generate(title)
title = title.replace('<p>', '').replace('</p>', '')
# no trailing newlines
title = re.sub(r'\n+', ' ', title).rstrip()
return title
def generate_post(self):
"""Generate dict for merging to post object of API"""
post = {'title': self.generate_title(), 'draft': False}
for k in ('blog', 'id', 'labels', 'categories', 'draft'):
if k not in self.header:
continue
if k == 'blog':
post[k] = {'id': self.header[k]}
else:
post[k] = self.header[k]
return post
def split_header_markup(self, source=None):
"""Split source into header and markup parts
It also parses header into a dict."""
if source is None:
source = self.source
header, markup = self.RE_SPLIT.match(source).groups()
if not header:
logging.warning('found no header')
if not markup:
logging.warning('markup is empty')
logging.debug('markup length = %d' % len(markup))
_header = {}
if header:
for item in header.split('\n'):
m = self.RE_HEADER.match(item)
if not m:
continue
k, v = list(map(type('').strip, m.groups()))
if k in ('labels', 'categories'):
v = [_f for _f in [label.strip() for label in v.split(',')] if _f]
elif k == 'draft':
v = v.lower() in ('true', 'yes', '1')
_header[k] = v
header = _header
logging.debug('header = %r' % header)
return header, markup
def update_source(self, header=None, markup=None, only_returned=False):
if header is None:
header = self.header
if markup is None:
markup = self._markup
source = self.generate_header(header) + '\n' + markup
if not only_returned:
self.source = source
return source
def write(self, forced=False):
"""Write source back to file"""
if not self.modified:
if not forced:
return
else:
self.update_source()
with codecs.open(self.filename, 'w', 'utf8') as f:
f.write(self.source)
self.modified = False
def embed_images(self, html):
"""Embed images on local filesystem as data URI
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> html = '<img src="http://example.com/example.png"/>'
>>> print(handler.embed_images(html))
<img src="http://example.com/example.png"/>
>>> html = '<img src="tests/test.png"/>'
>>> print(handler.embed_images(html)) #doctest: +ELLIPSIS
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAAB...QmCC"/>
"""
if not self.SUPPORT_EMBED_IMAGES:
raise RuntimeError('%r does not support embed_images' % type(self))
return self.RE_IMG.sub(self._embed_image, html)
@staticmethod
def _embed_image(match):
src = match.group('src')
if not exists(src):
print('%s is not found.' % src)
return match.group(0)
with open(src, 'rb') as f:
data = b64encode(f.read()).decode('ascii')
return '%ssrc="%s"%s' % (
match.group('prefix'),
'data:image/%s;base64,%s' % (splitext(src)[1].lstrip('.'), data),
match.group('suffix'),
) | # THE SOFTWARE. | random_line_split |
base.py | # Copyright (C) 2013-2015 Yu-Jie Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function, unicode_literals
import codecs
import logging
import re
import warnings
from abc import ABCMeta, abstractmethod
from base64 import b64encode
from hashlib import md5
from os.path import basename, exists, splitext
HAS_SMARTYPANTS = False
try:
import smartypants
HAS_SMARTYPANTS = True
except ImportError:
pass
class BaseHandler():
"""The base clase of markup handler"""
__metaclass__ = ABCMeta
# default handler options
OPTIONS = {
'markup_prefix': '',
'markup_suffix': '',
'smartypants': False,
'id_affix': None,
}
MERGE_HEADERS = ('service', 'kind', 'blog', 'id', 'url', 'draft')
HEADER_FMT = '%s: %s'
PREFIX_HEAD = ''
PREFIX_END = ''
RE_SPLIT = re.compile(r'^(?:([^\n]*?!b.*?)\n\n)?(.*)',
re.DOTALL | re.MULTILINE)
RE_HEADER = re.compile(r'.*?([a-zA-Z0-9_-]+)\s*[=:]\s*(.*)\s*')
SUPPORT_EMBED_IMAGES = True
RE_IMG = re.compile(
r'''
(?P<prefix><img.*?)
src="(?!data:image/|https?://)(?P<src>[^"]*)"
(?P<suffix>.*?>)
''',
re.VERBOSE
)
def __init__(self, filename, options=None):
self.filename = filename
self.title = ''
self.options = self.OPTIONS.copy()
self.options.update(options or {})
if filename:
with codecs.open(filename, 'r', 'utf8') as f:
self.source = f.read()
header, markup = self.split_header_markup()
self.title = splitext(basename(filename))[0]
else:
header = {}
markup = ''
self.header = header
self.markup = markup
self.modified = False
def set_header(self, k, v):
"""Set header
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> print(handler.header)
{}
>>> handler.modified
False
>>> handler.set_header('foo', 'bar')
>>> print(handler.header['foo'])
bar
>>> handler.modified
True
"""
if k in self.header and self.header[k] == v:
return
self.header[k] = v
self.modified = True
def merge_header(self, header):
"""Merge header
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> handler.merge_header({'id': 12345, 'bogus': 'blah'})
>>> print(handler.header['id'])
12345
>>> handler.modified
True
"""
for k, v in header.items():
if k not in self.MERGE_HEADERS:
continue
if k == 'blog':
v = v['id']
elif k == 'kind':
v = v.replace('blogger#', '')
self.set_header(k, v)
@property
def markup(self):
"""Return markup with markup_prefix and markup_suffix
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> options = {
... 'markup_prefix': 'the prefix\\n',
... 'markup_suffix': '\\nthe suffix',
... }
>>> handler = Handler(None, options)
>>> handler.markup = 'content'
>>> print(handler.markup)
the prefix
content
the suffix
"""
return '%s%s%s' % (
self.options['markup_prefix'],
self._markup,
self.options['markup_suffix'],
)
@markup.setter
def markup(self, markup):
"""Set the markup"""
self._markup = markup
@property
def id_affix(self):
"""Return id_affix
The initial value is from self.options, and can be overriden by
self.header.
Returns
* None if it's None.
* value if value is not ''
* first 4 digits of md5 of value if value is '', and assign back to
self.options. _generate method of Handler should write back to
self.header.
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> options = {
... 'id_affix': None,
... }
>>> handler = Handler(None, options)
>>> print(repr(handler.id_affix))
None
>>> handler.options['id_affix'] = 'foobar'
>>> print(handler.id_affix)
foobar
>>> # auto generate an id affix from title
>>> handler.options['id_affix'] = ''
>>> handler.title = 'abc'
>>> print(handler.id_affix)
9001
>>> handler.header['id_affix'] = 'override-affix'
>>> print(handler.id_affix)
override-affix
"""
id_affix = self.options['id_affix']
# override?
if 'id_affix' in self.header:
id_affix = self.header['id_affix']
if self.header['id_affix'] and id_affix != 'None':
return self.header['id_affix']
# second case is from header of post, has to use string 'None'
if id_affix is None or id_affix == 'None':
return None
if id_affix:
return id_affix
m = md5()
# if self.title is Unicode-type string, then encode it,
# otherwise it's byte-type, then just update with it.
# The __future__.unicode_literals ensures '' is unicode-type.
if isinstance(self.title, type('')):
m.update(self.title.encode('utf8'))
else:
m.update(self.title)
return m.hexdigest()[:4]
@abstractmethod
def _generate(self, markup=None):
"""Generate HTML of markup source"""
raise NotImplementedError
def generate(self, markup=None):
"""Generate HTML
>>> class Handler(BaseHandler):
... def _generate(self, markup=None): return markup
>>> handler = Handler(None)
>>> print(handler.generate('foo "bar"'))
foo "bar"
>>> handler.options['smartypants'] = True
>>> print(handler.generate('foo "bar"'))
foo “bar”
"""
if markup is None:
markup = self.markup
html = self._generate(markup)
if self.options.get('smartypants', False):
if not HAS_SMARTYPANTS:
warnings.warn("smartypants option is set, "
"but the library isn't installed.", RuntimeWarning)
return html
Attr = smartypants.Attr
html = smartypants.smartypants(html, Attr.set1 | Attr.w)
if self.SUPPORT_EMBED_IMAGES and self.options.get('embed_images', False):
html = self.embed_images(html)
return html
def | (self, header=None):
"""Generate header in text for writing back to the file
>>> class Handler(BaseHandler):
... PREFIX_HEAD = 'foo '
... PREFIX_END = 'bar'
... HEADER_FMT = '--- %s: %s'
... def _generate(self, source=None): pass
>>> handler = Handler(None)
>>> print(handler.generate_header({'title': 'foobar'}))
foo !b
--- title: foobar
bar
<BLANKLINE>
>>> print(handler.generate_header({'labels': ['foo', 'bar']}))
foo !b
--- labels: foo, bar
bar
<BLANKLINE>
"""
if header is None:
header = self.header
lines = [self.PREFIX_HEAD + '!b']
for k, v in header.items():
if k in ('labels', 'categories'):
v = ', '.join(v)
elif k == 'draft':
v = repr(v)
lines.append(self.HEADER_FMT % (k, v))
lines.append(self.PREFIX_END)
return '\n'.join([_f for _f in lines if _f]) + '\n'
def generate_title(self, title=None):
"""Generate title for posting
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> print(handler.generate_title('foo "bar"'))
foo "bar"
>>> print(handler.generate_title('foo\\nbar\\n\\n'))
foo bar
>>> handler.options['smartypants'] = True
>>> print(handler.generate_title('foo "bar"'))
foo “bar”
"""
if title is None:
title = self.header.get('title', self.title)
title = self.generate(title)
title = title.replace('<p>', '').replace('</p>', '')
# no trailing newlines
title = re.sub(r'\n+', ' ', title).rstrip()
return title
def generate_post(self):
"""Generate dict for merging to post object of API"""
post = {'title': self.generate_title(), 'draft': False}
for k in ('blog', 'id', 'labels', 'categories', 'draft'):
if k not in self.header:
continue
if k == 'blog':
post[k] = {'id': self.header[k]}
else:
post[k] = self.header[k]
return post
def split_header_markup(self, source=None):
"""Split source into header and markup parts
It also parses header into a dict."""
if source is None:
source = self.source
header, markup = self.RE_SPLIT.match(source).groups()
if not header:
logging.warning('found no header')
if not markup:
logging.warning('markup is empty')
logging.debug('markup length = %d' % len(markup))
_header = {}
if header:
for item in header.split('\n'):
m = self.RE_HEADER.match(item)
if not m:
continue
k, v = list(map(type('').strip, m.groups()))
if k in ('labels', 'categories'):
v = [_f for _f in [label.strip() for label in v.split(',')] if _f]
elif k == 'draft':
v = v.lower() in ('true', 'yes', '1')
_header[k] = v
header = _header
logging.debug('header = %r' % header)
return header, markup
def update_source(self, header=None, markup=None, only_returned=False):
if header is None:
header = self.header
if markup is None:
markup = self._markup
source = self.generate_header(header) + '\n' + markup
if not only_returned:
self.source = source
return source
def write(self, forced=False):
"""Write source back to file"""
if not self.modified:
if not forced:
return
else:
self.update_source()
with codecs.open(self.filename, 'w', 'utf8') as f:
f.write(self.source)
self.modified = False
def embed_images(self, html):
"""Embed images on local filesystem as data URI
>>> class Handler(BaseHandler):
... def _generate(self, source=None): return source
>>> handler = Handler(None)
>>> html = '<img src="http://example.com/example.png"/>'
>>> print(handler.embed_images(html))
<img src="http://example.com/example.png"/>
>>> html = '<img src="tests/test.png"/>'
>>> print(handler.embed_images(html)) #doctest: +ELLIPSIS
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAAB...QmCC"/>
"""
if not self.SUPPORT_EMBED_IMAGES:
raise RuntimeError('%r does not support embed_images' % type(self))
return self.RE_IMG.sub(self._embed_image, html)
@staticmethod
def _embed_image(match):
src = match.group('src')
if not exists(src):
print('%s is not found.' % src)
return match.group(0)
with open(src, 'rb') as f:
data = b64encode(f.read()).decode('ascii')
return '%ssrc="%s"%s' % (
match.group('prefix'),
'data:image/%s;base64,%s' % (splitext(src)[1].lstrip('.'), data),
match.group('suffix'),
)
| generate_header | identifier_name |
process_all_day_long.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Team 16: COMP90024-Assignment2
# Team Members:
# Qingmeng Xu, 969413
# Tingqian Wang, 1043988
# Zhong Liao, 1056020
# Cheng Qian, 962539
# Zongcheng Du, 1096319
"""
Created on Sun May 24 21:41:09 2020
@author: ciciwang
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 23 01:20:56 2020
Initial Analysis on raw data
detect the related words
label the sentiment
@author: ciciwang
"""
import time
import couchdb
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import json
import re
"""
nltk function declaration
"""
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('vader_lexicon')
tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
lemmatizer = nltk.stem.WordNetLemmatizer()
#lemmatisation of the words from text
def lemmatisation(word):
lemma = lemmatizer.lemmatize(word,'v')
if lemma == word:
lemma = lemmatizer.lemmatize(word,'n')
return lemma
# applying lemmatisation to process text
def init_process(text) -> str:
# lower cased
text = text.lower()
# tokenized
words = tokenizer.tokenize(text)
# check if word is alphabetic
words = [w for w in words if w.isalpha()]
# lemmatized
words = [lemmatisation(w) for w in words]
init_processed_text = " ".join(words)
return init_processed_text
# check whether the keyword in the text or not
def | (processed_text, keyword_list) ->bool:
for word in keyword_list:
if word in processed_text:
return True
return False
#SentimentIntensityAnalyzer
def IdentifySentiment( sentence ):
sia = SentimentIntensityAnalyzer()
ps = sia.polarity_scores( sentence )
sentiment = max(ps, key = ps.get)
return sentiment
def ScoringSentiment( sentence ):
si = SentimentIntensityAnalyzer()
score = si.polarity_scores( sentence )
return score
"""
Load related words list
"""
beers_wines_list = []
with open(
"/home/ubuntu/analysis/beers.txt"
, 'r', encoding="utf-8") as bw:
bw_words = bw.readlines()
for word in bw_words:
word = lemmatisation(word)
beers_wines_list.append(word.replace('\n',''))
sports_list = []
with open(
"/home/ubuntu/analysis/sports.txt"
, 'r', encoding="utf-8") as sp:
sp_words = sp.readlines()
for word in sp_words:
word = lemmatisation(word)
sports_list.append(word.replace('\n',''))
coffee_list = []
with open(
"/home/ubuntu/analysis/coffee.txt"
, 'r', encoding="utf-8") as cf:
cf_words = cf.readlines()
for word in cf_words:
word = lemmatisation(word)
coffee_list.append(word.replace('\n',''))
"""
Load bundaries info for suburb
"""
boundary = json.load(open('/Users/ciciwang/Desktop/COMP90024_ASS2/analysis/Melbourne.geojson'))
sub_list=[ sub['properties']["sa2_name16"] for sub in boundary["features"] ]
coordinates={}
for sub in boundary["features"]:
name = sub['properties']["sa2_name16"]
bounda = sub["geometry"]["coordinates"]
coordinates.update({ name : bounda })
"""
function defined to normalized a suburb name from a tweet data,
so as to match the suburb name in standard (boundary info) list,
and then to find the its coordianates from boundary data.
"""
def sub_name_normalized(tweet_suburb):
for standard_sub in sub_list:
if tweet_suburb.lower() == standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if tweet_suburb.lower() in standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if tweet_suburb.replace(" ", " - ").lower() == standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if tweet_suburb.replace(" ", " - ").lower() in standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if ('South' not in standard_sub
) and ('North' not in standard_sub
) and ('West' not in standard_sub
) and ('East' not in standard_sub):
new = re.sub(r' South| North| West| East', '', tweet_suburb)
if new.lower() == standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if ('South' not in standard_sub
) and ('North' not in standard_sub
) and ('West' not in standard_sub
) and ('East' not in standard_sub):
new = re.sub(r' South| North| West| East', '', tweet_suburb)
if new.lower() in standard_sub.lower():
return standard_sub
return "Undifined"
"""
Load data and process data from AURIN
grate satisfaction:
satification score over 80 (100)
proverty rate:
proportion of people with equivalised disposable household income
after housing costs is below half median equivalised disposable household income
after housing costs).
"""
# data path
satsf = json.load(open('/Users/ciciwang/Desktop/COMP90024_ASS2/analysis/aurin_data/data_used/satisfaction.json'))
pov = json.load(open('/Users/ciciwang/Desktop/COMP90024_ASS2/analysis/aurin_data/data_used/proverty.json'))
# satisfaction
satisfaction_datasets = satsf['features']
life_grate_satisfaction = {}
population_sat_survey = {}
for ele in satisfaction_datasets:
satif_data = ele['properties']
if satif_data["_life_satisfaction_80_synth"] is None:
satif_data["_life_satisfaction_80_synth"] = 0
if satif_data["_life_satisfaction_90_synth"] is None:
satif_data["_life_satisfaction_90_synth"] = 0
if satif_data["_life_satisfaction_100_synth"] is None:
satif_data["_life_satisfaction_100_synth"] = 0
grate_num = satif_data["_life_satisfaction_80_synth"] + satif_data[
"_life_satisfaction_90_synth"] + satif_data[
"_life_satisfaction_100_synth"]
total_pop = satif_data["total_pop_synth"]
iloc = satif_data["sa2_name16"]
loc = sub_name_normalized(iloc)
life_grate_satisfaction.update({ loc : grate_num })
population_sat_survey.update({ loc : total_pop })
proverty_datasets = pov['features']
poverty_rate = {}
houshold_income = {}
for elem in proverty_datasets:
pov_data = elem["properties"]
if pov_data["pov_rt_exc_hc_syn"] is None:
pov_data["pov_rt_exc_hc_syn"] = 0
if pov_data["inc_median_syn"] is None:
pov_data["inc_median_syn"] = 0
pov_percent = pov_data["pov_rt_exc_hc_syn"]
inc_median = pov_data["inc_median_syn"]
iloc = pov_data["sa2_name16"]
loc = sub_name_normalized(iloc)
poverty_rate.update({ loc : pov_percent })
houshold_income.update({ loc : inc_median})
"""
Load data from CouchDB...
Create new database
"""
id_list = []
while True:
"""
Load data from CouchDB...
Create new database or save updated data
"""
couch = couchdb.Server("http://user:pass@172.26.133.141:5984")
db = couch['melb']
#couch.delete('tweets_analyzed')
if "tweets_analyzed" in couch:
db2 = couch["tweets_analyzed"]
else:
db2 = couch.create("tweets_analyzed")
updated_tweets_list = []
for doc_id in db:
try:
if doc_id not in id_list:
#num_tweets += 1
#print(num_tweets)
tweet = db[doc_id]
#tweets_id = doc_id
suburb = tweet['suburb']
text = tweet['text']
id_list.append(doc_id)
updated_tweets_list.append(
{'id':doc_id,
'suburb':suburb,
'text':text})
else:
continue
except:
break
"""
word filter
label sentiment
update boundary
label city life attributes of: life satisfaction and poverty rate
"""
for tweets in updated_tweets_list:
lemma_text = init_process(tweets['text'])
if keyword_exist(lemma_text, beers_wines_list):
tweets.update(
{'bw_exist':1})
else:
tweets.update(
{'bw_exist':0})
if keyword_exist(lemma_text, sports_list):
tweets.update(
{'sp_exist':1})
else:
tweets.update(
{'sp_exist':0})
if keyword_exist(lemma_text, coffee_list):
tweets.update(
{'cf_exist':1})
else:
tweets.update(
{'cf_exist':0})
senti = IdentifySentiment(tweets['text'])
scr = ScoringSentiment(tweets['text'])
tweets.update({'sentiment': senti})
tweets.update({'senti_score': scr})
matched_suburb = sub_name_normalized(tweets['suburb'])
# coordiantes
if matched_suburb in coordinates:
tweets.update({ 'bound' : coordinates[matched_suburb] })
else:
tweets.update({ 'bound': 'undifined'})
#satisfaction
if matched_suburb in life_grate_satisfaction:
tweets.update({ 'num_grate_satis' : life_grate_satisfaction[matched_suburb]})
tweets.update({ 'pop_survey' : population_sat_survey[matched_suburb] })
else:
tweets.update({ 'num_grate_satis' : 'Unkown' })
tweets.update({ 'pop_survey' : 'Unkown' })
#poverty
if matched_suburb in poverty_rate:
tweets.update({ 'poverty_rate' : poverty_rate[matched_suburb] })
tweets.update({ 'houshold_income' : houshold_income[matched_suburb]})
else:
tweets.update({ 'poverty_rate' : 'Unkown' })
tweets.update({ 'houshold_income' : 'Unkown'})
try:
db2.save({"_id": tweets['id'],
"suburb": tweets['suburb'],
"bw_exist": tweets['bw_exist'],
"sp_exist": tweets['sp_exist'],
"cf_exist": tweets['cf_exist'],
"sentiment":tweets['sentiment'],
"senti_score": tweets['senti_score'],
"boundaries":tweets['bound'],
"num_grate_satisfcation" : tweets['num_grate_satis'],
"population_survey" : tweets['pop_survey'],
"poverty_rate" : tweets['poverty_rate'],
"houshold_median_income" : tweets['houshold_income']})
except couchdb.http.ResourceConflict:
print("Duplicate tweets found and ignored.")
time.sleep(86400)
| keyword_exist | identifier_name |
process_all_day_long.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Team 16: COMP90024-Assignment2
# Team Members:
# Qingmeng Xu, 969413
# Tingqian Wang, 1043988
# Zhong Liao, 1056020
# Cheng Qian, 962539
# Zongcheng Du, 1096319
"""
Created on Sun May 24 21:41:09 2020
@author: ciciwang
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 23 01:20:56 2020
Initial Analysis on raw data
detect the related words
label the sentiment
@author: ciciwang
"""
import time
import couchdb
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import json
import re
"""
nltk function declaration
"""
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('vader_lexicon')
tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
lemmatizer = nltk.stem.WordNetLemmatizer()
#lemmatisation of the words from text
def lemmatisation(word):
lemma = lemmatizer.lemmatize(word,'v')
if lemma == word:
lemma = lemmatizer.lemmatize(word,'n')
return lemma
# applying lemmatisation to process text
def init_process(text) -> str:
# lower cased
text = text.lower()
# tokenized
words = tokenizer.tokenize(text)
# check if word is alphabetic
words = [w for w in words if w.isalpha()]
# lemmatized
words = [lemmatisation(w) for w in words]
init_processed_text = " ".join(words)
return init_processed_text
# check whether the keyword in the text or not
def keyword_exist(processed_text, keyword_list) ->bool:
for word in keyword_list:
if word in processed_text:
return True
return False
#SentimentIntensityAnalyzer
def IdentifySentiment( sentence ):
sia = SentimentIntensityAnalyzer()
ps = sia.polarity_scores( sentence )
sentiment = max(ps, key = ps.get)
return sentiment
def ScoringSentiment( sentence ):
si = SentimentIntensityAnalyzer()
score = si.polarity_scores( sentence )
return score
"""
Load related words list
"""
beers_wines_list = []
with open(
"/home/ubuntu/analysis/beers.txt"
, 'r', encoding="utf-8") as bw:
bw_words = bw.readlines()
for word in bw_words:
word = lemmatisation(word)
beers_wines_list.append(word.replace('\n',''))
sports_list = []
with open(
"/home/ubuntu/analysis/sports.txt"
, 'r', encoding="utf-8") as sp:
sp_words = sp.readlines()
for word in sp_words:
word = lemmatisation(word)
sports_list.append(word.replace('\n',''))
coffee_list = []
with open(
"/home/ubuntu/analysis/coffee.txt"
, 'r', encoding="utf-8") as cf:
cf_words = cf.readlines()
for word in cf_words:
word = lemmatisation(word)
coffee_list.append(word.replace('\n',''))
"""
Load bundaries info for suburb
"""
boundary = json.load(open('/Users/ciciwang/Desktop/COMP90024_ASS2/analysis/Melbourne.geojson'))
sub_list=[ sub['properties']["sa2_name16"] for sub in boundary["features"] ]
coordinates={}
for sub in boundary["features"]:
name = sub['properties']["sa2_name16"]
bounda = sub["geometry"]["coordinates"]
coordinates.update({ name : bounda })
"""
function defined to normalized a suburb name from a tweet data,
so as to match the suburb name in standard (boundary info) list,
and then to find the its coordianates from boundary data.
"""
def sub_name_normalized(tweet_suburb):
for standard_sub in sub_list:
if tweet_suburb.lower() == standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if tweet_suburb.lower() in standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if tweet_suburb.replace(" ", " - ").lower() == standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if tweet_suburb.replace(" ", " - ").lower() in standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if ('South' not in standard_sub
) and ('North' not in standard_sub
) and ('West' not in standard_sub
) and ('East' not in standard_sub):
new = re.sub(r' South| North| West| East', '', tweet_suburb)
if new.lower() == standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if ('South' not in standard_sub
) and ('North' not in standard_sub
) and ('West' not in standard_sub
) and ('East' not in standard_sub):
new = re.sub(r' South| North| West| East', '', tweet_suburb)
if new.lower() in standard_sub.lower():
return standard_sub
return "Undifined"
"""
Load data and process data from AURIN
grate satisfaction:
satification score over 80 (100)
proverty rate:
proportion of people with equivalised disposable household income
after housing costs is below half median equivalised disposable household income
after housing costs).
"""
# data path
satsf = json.load(open('/Users/ciciwang/Desktop/COMP90024_ASS2/analysis/aurin_data/data_used/satisfaction.json'))
pov = json.load(open('/Users/ciciwang/Desktop/COMP90024_ASS2/analysis/aurin_data/data_used/proverty.json'))
# satisfaction
satisfaction_datasets = satsf['features']
life_grate_satisfaction = {}
population_sat_survey = {}
for ele in satisfaction_datasets:
satif_data = ele['properties']
if satif_data["_life_satisfaction_80_synth"] is None:
satif_data["_life_satisfaction_80_synth"] = 0
if satif_data["_life_satisfaction_90_synth"] is None:
satif_data["_life_satisfaction_90_synth"] = 0
if satif_data["_life_satisfaction_100_synth"] is None:
satif_data["_life_satisfaction_100_synth"] = 0
grate_num = satif_data["_life_satisfaction_80_synth"] + satif_data[
"_life_satisfaction_90_synth"] + satif_data[
"_life_satisfaction_100_synth"]
total_pop = satif_data["total_pop_synth"]
iloc = satif_data["sa2_name16"]
loc = sub_name_normalized(iloc)
life_grate_satisfaction.update({ loc : grate_num })
population_sat_survey.update({ loc : total_pop })
proverty_datasets = pov['features']
poverty_rate = {}
houshold_income = {}
for elem in proverty_datasets:
pov_data = elem["properties"]
if pov_data["pov_rt_exc_hc_syn"] is None:
pov_data["pov_rt_exc_hc_syn"] = 0
if pov_data["inc_median_syn"] is None:
|
pov_percent = pov_data["pov_rt_exc_hc_syn"]
inc_median = pov_data["inc_median_syn"]
iloc = pov_data["sa2_name16"]
loc = sub_name_normalized(iloc)
poverty_rate.update({ loc : pov_percent })
houshold_income.update({ loc : inc_median})
"""
Load data from CouchDB...
Create new database
"""
id_list = []
while True:
"""
Load data from CouchDB...
Create new database or save updated data
"""
couch = couchdb.Server("http://user:pass@172.26.133.141:5984")
db = couch['melb']
#couch.delete('tweets_analyzed')
if "tweets_analyzed" in couch:
db2 = couch["tweets_analyzed"]
else:
db2 = couch.create("tweets_analyzed")
updated_tweets_list = []
for doc_id in db:
try:
if doc_id not in id_list:
#num_tweets += 1
#print(num_tweets)
tweet = db[doc_id]
#tweets_id = doc_id
suburb = tweet['suburb']
text = tweet['text']
id_list.append(doc_id)
updated_tweets_list.append(
{'id':doc_id,
'suburb':suburb,
'text':text})
else:
continue
except:
break
"""
word filter
label sentiment
update boundary
label city life attributes of: life satisfaction and poverty rate
"""
for tweets in updated_tweets_list:
lemma_text = init_process(tweets['text'])
if keyword_exist(lemma_text, beers_wines_list):
tweets.update(
{'bw_exist':1})
else:
tweets.update(
{'bw_exist':0})
if keyword_exist(lemma_text, sports_list):
tweets.update(
{'sp_exist':1})
else:
tweets.update(
{'sp_exist':0})
if keyword_exist(lemma_text, coffee_list):
tweets.update(
{'cf_exist':1})
else:
tweets.update(
{'cf_exist':0})
senti = IdentifySentiment(tweets['text'])
scr = ScoringSentiment(tweets['text'])
tweets.update({'sentiment': senti})
tweets.update({'senti_score': scr})
matched_suburb = sub_name_normalized(tweets['suburb'])
# coordiantes
if matched_suburb in coordinates:
tweets.update({ 'bound' : coordinates[matched_suburb] })
else:
tweets.update({ 'bound': 'undifined'})
#satisfaction
if matched_suburb in life_grate_satisfaction:
tweets.update({ 'num_grate_satis' : life_grate_satisfaction[matched_suburb]})
tweets.update({ 'pop_survey' : population_sat_survey[matched_suburb] })
else:
tweets.update({ 'num_grate_satis' : 'Unkown' })
tweets.update({ 'pop_survey' : 'Unkown' })
#poverty
if matched_suburb in poverty_rate:
tweets.update({ 'poverty_rate' : poverty_rate[matched_suburb] })
tweets.update({ 'houshold_income' : houshold_income[matched_suburb]})
else:
tweets.update({ 'poverty_rate' : 'Unkown' })
tweets.update({ 'houshold_income' : 'Unkown'})
try:
db2.save({"_id": tweets['id'],
"suburb": tweets['suburb'],
"bw_exist": tweets['bw_exist'],
"sp_exist": tweets['sp_exist'],
"cf_exist": tweets['cf_exist'],
"sentiment":tweets['sentiment'],
"senti_score": tweets['senti_score'],
"boundaries":tweets['bound'],
"num_grate_satisfcation" : tweets['num_grate_satis'],
"population_survey" : tweets['pop_survey'],
"poverty_rate" : tweets['poverty_rate'],
"houshold_median_income" : tweets['houshold_income']})
except couchdb.http.ResourceConflict:
print("Duplicate tweets found and ignored.")
time.sleep(86400)
| pov_data["inc_median_syn"] = 0 | conditional_block |
process_all_day_long.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Team 16: COMP90024-Assignment2
# Team Members:
# Qingmeng Xu, 969413
# Tingqian Wang, 1043988
# Zhong Liao, 1056020
# Cheng Qian, 962539
# Zongcheng Du, 1096319
"""
Created on Sun May 24 21:41:09 2020
@author: ciciwang
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 23 01:20:56 2020
Initial Analysis on raw data
detect the related words
label the sentiment
@author: ciciwang
"""
import time
import couchdb
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import json
import re
"""
nltk function declaration
"""
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('vader_lexicon')
tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
lemmatizer = nltk.stem.WordNetLemmatizer()
#lemmatisation of the words from text
def lemmatisation(word):
lemma = lemmatizer.lemmatize(word,'v')
if lemma == word:
lemma = lemmatizer.lemmatize(word,'n')
return lemma
# applying lemmatisation to process text
def init_process(text) -> str:
# lower cased
text = text.lower()
# tokenized
words = tokenizer.tokenize(text)
# check if word is alphabetic
words = [w for w in words if w.isalpha()]
# lemmatized
words = [lemmatisation(w) for w in words]
init_processed_text = " ".join(words)
return init_processed_text
# check whether the keyword in the text or not
def keyword_exist(processed_text, keyword_list) ->bool:
for word in keyword_list:
if word in processed_text:
return True
return False
#SentimentIntensityAnalyzer
def IdentifySentiment( sentence ):
sia = SentimentIntensityAnalyzer()
ps = sia.polarity_scores( sentence )
sentiment = max(ps, key = ps.get)
return sentiment
def ScoringSentiment( sentence ):
si = SentimentIntensityAnalyzer()
score = si.polarity_scores( sentence )
return score
"""
Load related words list
"""
beers_wines_list = []
with open(
"/home/ubuntu/analysis/beers.txt"
, 'r', encoding="utf-8") as bw:
bw_words = bw.readlines()
for word in bw_words:
word = lemmatisation(word)
beers_wines_list.append(word.replace('\n',''))
sports_list = []
with open(
"/home/ubuntu/analysis/sports.txt"
, 'r', encoding="utf-8") as sp:
sp_words = sp.readlines()
for word in sp_words:
word = lemmatisation(word)
sports_list.append(word.replace('\n',''))
coffee_list = []
with open(
"/home/ubuntu/analysis/coffee.txt"
, 'r', encoding="utf-8") as cf:
cf_words = cf.readlines()
for word in cf_words:
word = lemmatisation(word)
coffee_list.append(word.replace('\n',''))
"""
Load bundaries info for suburb
"""
boundary = json.load(open('/Users/ciciwang/Desktop/COMP90024_ASS2/analysis/Melbourne.geojson'))
sub_list=[ sub['properties']["sa2_name16"] for sub in boundary["features"] ]
coordinates={}
for sub in boundary["features"]:
name = sub['properties']["sa2_name16"]
bounda = sub["geometry"]["coordinates"]
coordinates.update({ name : bounda })
"""
function defined to normalized a suburb name from a tweet data,
so as to match the suburb name in standard (boundary info) list,
and then to find the its coordianates from boundary data.
"""
def sub_name_normalized(tweet_suburb):
for standard_sub in sub_list:
if tweet_suburb.lower() == standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if tweet_suburb.lower() in standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if tweet_suburb.replace(" ", " - ").lower() == standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if tweet_suburb.replace(" ", " - ").lower() in standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if ('South' not in standard_sub
) and ('North' not in standard_sub
) and ('West' not in standard_sub
) and ('East' not in standard_sub):
new = re.sub(r' South| North| West| East', '', tweet_suburb)
if new.lower() == standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if ('South' not in standard_sub
) and ('North' not in standard_sub
) and ('West' not in standard_sub
) and ('East' not in standard_sub):
new = re.sub(r' South| North| West| East', '', tweet_suburb)
if new.lower() in standard_sub.lower():
return standard_sub
return "Undifined"
"""
Load data and process data from AURIN
grate satisfaction:
satification score over 80 (100)
proverty rate:
proportion of people with equivalised disposable household income
after housing costs is below half median equivalised disposable household income
after housing costs).
"""
# data path
satsf = json.load(open('/Users/ciciwang/Desktop/COMP90024_ASS2/analysis/aurin_data/data_used/satisfaction.json'))
pov = json.load(open('/Users/ciciwang/Desktop/COMP90024_ASS2/analysis/aurin_data/data_used/proverty.json'))
# satisfaction
satisfaction_datasets = satsf['features']
life_grate_satisfaction = {}
population_sat_survey = {}
for ele in satisfaction_datasets:
satif_data = ele['properties']
if satif_data["_life_satisfaction_80_synth"] is None:
satif_data["_life_satisfaction_80_synth"] = 0
if satif_data["_life_satisfaction_90_synth"] is None:
satif_data["_life_satisfaction_90_synth"] = 0
if satif_data["_life_satisfaction_100_synth"] is None:
satif_data["_life_satisfaction_100_synth"] = 0
grate_num = satif_data["_life_satisfaction_80_synth"] + satif_data[
"_life_satisfaction_90_synth"] + satif_data[
"_life_satisfaction_100_synth"]
total_pop = satif_data["total_pop_synth"]
iloc = satif_data["sa2_name16"]
loc = sub_name_normalized(iloc)
life_grate_satisfaction.update({ loc : grate_num })
population_sat_survey.update({ loc : total_pop })
proverty_datasets = pov['features']
poverty_rate = {}
houshold_income = {}
for elem in proverty_datasets:
pov_data = elem["properties"]
if pov_data["pov_rt_exc_hc_syn"] is None:
pov_data["pov_rt_exc_hc_syn"] = 0
if pov_data["inc_median_syn"] is None:
pov_data["inc_median_syn"] = 0
pov_percent = pov_data["pov_rt_exc_hc_syn"]
inc_median = pov_data["inc_median_syn"]
iloc = pov_data["sa2_name16"]
loc = sub_name_normalized(iloc)
poverty_rate.update({ loc : pov_percent })
houshold_income.update({ loc : inc_median})
|
while True:
"""
Load data from CouchDB...
Create new database or save updated data
"""
couch = couchdb.Server("http://user:pass@172.26.133.141:5984")
db = couch['melb']
#couch.delete('tweets_analyzed')
if "tweets_analyzed" in couch:
db2 = couch["tweets_analyzed"]
else:
db2 = couch.create("tweets_analyzed")
updated_tweets_list = []
for doc_id in db:
try:
if doc_id not in id_list:
#num_tweets += 1
#print(num_tweets)
tweet = db[doc_id]
#tweets_id = doc_id
suburb = tweet['suburb']
text = tweet['text']
id_list.append(doc_id)
updated_tweets_list.append(
{'id':doc_id,
'suburb':suburb,
'text':text})
else:
continue
except:
break
"""
word filter
label sentiment
update boundary
label city life attributes of: life satisfaction and poverty rate
"""
for tweets in updated_tweets_list:
lemma_text = init_process(tweets['text'])
if keyword_exist(lemma_text, beers_wines_list):
tweets.update(
{'bw_exist':1})
else:
tweets.update(
{'bw_exist':0})
if keyword_exist(lemma_text, sports_list):
tweets.update(
{'sp_exist':1})
else:
tweets.update(
{'sp_exist':0})
if keyword_exist(lemma_text, coffee_list):
tweets.update(
{'cf_exist':1})
else:
tweets.update(
{'cf_exist':0})
senti = IdentifySentiment(tweets['text'])
scr = ScoringSentiment(tweets['text'])
tweets.update({'sentiment': senti})
tweets.update({'senti_score': scr})
matched_suburb = sub_name_normalized(tweets['suburb'])
# coordiantes
if matched_suburb in coordinates:
tweets.update({ 'bound' : coordinates[matched_suburb] })
else:
tweets.update({ 'bound': 'undifined'})
#satisfaction
if matched_suburb in life_grate_satisfaction:
tweets.update({ 'num_grate_satis' : life_grate_satisfaction[matched_suburb]})
tweets.update({ 'pop_survey' : population_sat_survey[matched_suburb] })
else:
tweets.update({ 'num_grate_satis' : 'Unkown' })
tweets.update({ 'pop_survey' : 'Unkown' })
#poverty
if matched_suburb in poverty_rate:
tweets.update({ 'poverty_rate' : poverty_rate[matched_suburb] })
tweets.update({ 'houshold_income' : houshold_income[matched_suburb]})
else:
tweets.update({ 'poverty_rate' : 'Unkown' })
tweets.update({ 'houshold_income' : 'Unkown'})
try:
db2.save({"_id": tweets['id'],
"suburb": tweets['suburb'],
"bw_exist": tweets['bw_exist'],
"sp_exist": tweets['sp_exist'],
"cf_exist": tweets['cf_exist'],
"sentiment":tweets['sentiment'],
"senti_score": tweets['senti_score'],
"boundaries":tweets['bound'],
"num_grate_satisfcation" : tweets['num_grate_satis'],
"population_survey" : tweets['pop_survey'],
"poverty_rate" : tweets['poverty_rate'],
"houshold_median_income" : tweets['houshold_income']})
except couchdb.http.ResourceConflict:
print("Duplicate tweets found and ignored.")
time.sleep(86400) | """
Load data from CouchDB...
Create new database
"""
id_list = [] | random_line_split |
process_all_day_long.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Team 16: COMP90024-Assignment2
# Team Members:
# Qingmeng Xu, 969413
# Tingqian Wang, 1043988
# Zhong Liao, 1056020
# Cheng Qian, 962539
# Zongcheng Du, 1096319
"""
Created on Sun May 24 21:41:09 2020
@author: ciciwang
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 23 01:20:56 2020
Initial Analysis on raw data
detect the related words
label the sentiment
@author: ciciwang
"""
import time
import couchdb
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import json
import re
"""
nltk function declaration
"""
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('vader_lexicon')
tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
lemmatizer = nltk.stem.WordNetLemmatizer()
#lemmatisation of the words from text
def lemmatisation(word):
lemma = lemmatizer.lemmatize(word,'v')
if lemma == word:
lemma = lemmatizer.lemmatize(word,'n')
return lemma
# applying lemmatisation to process text
def init_process(text) -> str:
# lower cased
|
# check whether the keyword in the text or not
def keyword_exist(processed_text, keyword_list) ->bool:
for word in keyword_list:
if word in processed_text:
return True
return False
#SentimentIntensityAnalyzer
def IdentifySentiment( sentence ):
sia = SentimentIntensityAnalyzer()
ps = sia.polarity_scores( sentence )
sentiment = max(ps, key = ps.get)
return sentiment
def ScoringSentiment( sentence ):
si = SentimentIntensityAnalyzer()
score = si.polarity_scores( sentence )
return score
"""
Load related words list
"""
beers_wines_list = []
with open(
"/home/ubuntu/analysis/beers.txt"
, 'r', encoding="utf-8") as bw:
bw_words = bw.readlines()
for word in bw_words:
word = lemmatisation(word)
beers_wines_list.append(word.replace('\n',''))
sports_list = []
with open(
"/home/ubuntu/analysis/sports.txt"
, 'r', encoding="utf-8") as sp:
sp_words = sp.readlines()
for word in sp_words:
word = lemmatisation(word)
sports_list.append(word.replace('\n',''))
coffee_list = []
with open(
"/home/ubuntu/analysis/coffee.txt"
, 'r', encoding="utf-8") as cf:
cf_words = cf.readlines()
for word in cf_words:
word = lemmatisation(word)
coffee_list.append(word.replace('\n',''))
"""
Load bundaries info for suburb
"""
boundary = json.load(open('/Users/ciciwang/Desktop/COMP90024_ASS2/analysis/Melbourne.geojson'))
sub_list=[ sub['properties']["sa2_name16"] for sub in boundary["features"] ]
coordinates={}
for sub in boundary["features"]:
name = sub['properties']["sa2_name16"]
bounda = sub["geometry"]["coordinates"]
coordinates.update({ name : bounda })
"""
function defined to normalized a suburb name from a tweet data,
so as to match the suburb name in standard (boundary info) list,
and then to find the its coordianates from boundary data.
"""
def sub_name_normalized(tweet_suburb):
for standard_sub in sub_list:
if tweet_suburb.lower() == standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if tweet_suburb.lower() in standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if tweet_suburb.replace(" ", " - ").lower() == standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if tweet_suburb.replace(" ", " - ").lower() in standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if ('South' not in standard_sub
) and ('North' not in standard_sub
) and ('West' not in standard_sub
) and ('East' not in standard_sub):
new = re.sub(r' South| North| West| East', '', tweet_suburb)
if new.lower() == standard_sub.lower():
return standard_sub
for standard_sub in sub_list:
if ('South' not in standard_sub
) and ('North' not in standard_sub
) and ('West' not in standard_sub
) and ('East' not in standard_sub):
new = re.sub(r' South| North| West| East', '', tweet_suburb)
if new.lower() in standard_sub.lower():
return standard_sub
return "Undifined"
"""
Load data and process data from AURIN
grate satisfaction:
satification score over 80 (100)
proverty rate:
proportion of people with equivalised disposable household income
after housing costs is below half median equivalised disposable household income
after housing costs).
"""
# data path
satsf = json.load(open('/Users/ciciwang/Desktop/COMP90024_ASS2/analysis/aurin_data/data_used/satisfaction.json'))
pov = json.load(open('/Users/ciciwang/Desktop/COMP90024_ASS2/analysis/aurin_data/data_used/proverty.json'))
# satisfaction
satisfaction_datasets = satsf['features']
life_grate_satisfaction = {}
population_sat_survey = {}
for ele in satisfaction_datasets:
satif_data = ele['properties']
if satif_data["_life_satisfaction_80_synth"] is None:
satif_data["_life_satisfaction_80_synth"] = 0
if satif_data["_life_satisfaction_90_synth"] is None:
satif_data["_life_satisfaction_90_synth"] = 0
if satif_data["_life_satisfaction_100_synth"] is None:
satif_data["_life_satisfaction_100_synth"] = 0
grate_num = satif_data["_life_satisfaction_80_synth"] + satif_data[
"_life_satisfaction_90_synth"] + satif_data[
"_life_satisfaction_100_synth"]
total_pop = satif_data["total_pop_synth"]
iloc = satif_data["sa2_name16"]
loc = sub_name_normalized(iloc)
life_grate_satisfaction.update({ loc : grate_num })
population_sat_survey.update({ loc : total_pop })
proverty_datasets = pov['features']
poverty_rate = {}
houshold_income = {}
for elem in proverty_datasets:
pov_data = elem["properties"]
if pov_data["pov_rt_exc_hc_syn"] is None:
pov_data["pov_rt_exc_hc_syn"] = 0
if pov_data["inc_median_syn"] is None:
pov_data["inc_median_syn"] = 0
pov_percent = pov_data["pov_rt_exc_hc_syn"]
inc_median = pov_data["inc_median_syn"]
iloc = pov_data["sa2_name16"]
loc = sub_name_normalized(iloc)
poverty_rate.update({ loc : pov_percent })
houshold_income.update({ loc : inc_median})
"""
Load data from CouchDB...
Create new database
"""
id_list = []
while True:
"""
Load data from CouchDB...
Create new database or save updated data
"""
couch = couchdb.Server("http://user:pass@172.26.133.141:5984")
db = couch['melb']
#couch.delete('tweets_analyzed')
if "tweets_analyzed" in couch:
db2 = couch["tweets_analyzed"]
else:
db2 = couch.create("tweets_analyzed")
updated_tweets_list = []
for doc_id in db:
try:
if doc_id not in id_list:
#num_tweets += 1
#print(num_tweets)
tweet = db[doc_id]
#tweets_id = doc_id
suburb = tweet['suburb']
text = tweet['text']
id_list.append(doc_id)
updated_tweets_list.append(
{'id':doc_id,
'suburb':suburb,
'text':text})
else:
continue
except:
break
"""
word filter
label sentiment
update boundary
label city life attributes of: life satisfaction and poverty rate
"""
for tweets in updated_tweets_list:
lemma_text = init_process(tweets['text'])
if keyword_exist(lemma_text, beers_wines_list):
tweets.update(
{'bw_exist':1})
else:
tweets.update(
{'bw_exist':0})
if keyword_exist(lemma_text, sports_list):
tweets.update(
{'sp_exist':1})
else:
tweets.update(
{'sp_exist':0})
if keyword_exist(lemma_text, coffee_list):
tweets.update(
{'cf_exist':1})
else:
tweets.update(
{'cf_exist':0})
senti = IdentifySentiment(tweets['text'])
scr = ScoringSentiment(tweets['text'])
tweets.update({'sentiment': senti})
tweets.update({'senti_score': scr})
matched_suburb = sub_name_normalized(tweets['suburb'])
# coordiantes
if matched_suburb in coordinates:
tweets.update({ 'bound' : coordinates[matched_suburb] })
else:
tweets.update({ 'bound': 'undifined'})
#satisfaction
if matched_suburb in life_grate_satisfaction:
tweets.update({ 'num_grate_satis' : life_grate_satisfaction[matched_suburb]})
tweets.update({ 'pop_survey' : population_sat_survey[matched_suburb] })
else:
tweets.update({ 'num_grate_satis' : 'Unkown' })
tweets.update({ 'pop_survey' : 'Unkown' })
#poverty
if matched_suburb in poverty_rate:
tweets.update({ 'poverty_rate' : poverty_rate[matched_suburb] })
tweets.update({ 'houshold_income' : houshold_income[matched_suburb]})
else:
tweets.update({ 'poverty_rate' : 'Unkown' })
tweets.update({ 'houshold_income' : 'Unkown'})
try:
db2.save({"_id": tweets['id'],
"suburb": tweets['suburb'],
"bw_exist": tweets['bw_exist'],
"sp_exist": tweets['sp_exist'],
"cf_exist": tweets['cf_exist'],
"sentiment":tweets['sentiment'],
"senti_score": tweets['senti_score'],
"boundaries":tweets['bound'],
"num_grate_satisfcation" : tweets['num_grate_satis'],
"population_survey" : tweets['pop_survey'],
"poverty_rate" : tweets['poverty_rate'],
"houshold_median_income" : tweets['houshold_income']})
except couchdb.http.ResourceConflict:
print("Duplicate tweets found and ignored.")
time.sleep(86400)
| text = text.lower()
# tokenized
words = tokenizer.tokenize(text)
# check if word is alphabetic
words = [w for w in words if w.isalpha()]
# lemmatized
words = [lemmatisation(w) for w in words]
init_processed_text = " ".join(words)
return init_processed_text | identifier_body |
app.js | Math.range = (min, max) => min + Math.random() * (max - min);
Math.degtorad = (d) => d * Math.PI / 180;
Math.lendirx = (l, d) => l * Math.cos(Math.degtorad(d));
Math.lendiry = (l, d) => l * Math.sin(Math.degtorad(d));
const CANVAS = document.createElement('canvas');
const CTX = CANVAS.getContext('2d');
const CANVAS_SCALER = 2;
const SCALER = {
get w() {
return CANVAS.width / 960;
},
get h() {
return CANVAS.height / 540;
}
};
CANVAS.style.backgroundImage = 'radial-gradient(darkorchid 33%, darkslateblue)';
const Time = {
time: 0,
lastTime: 0,
deltaTime: 0,
fixedDeltaTime: 1000 / 60,
update(t) {
this.lastTime = this.time || 0;
this.time = t || 0;
this.deltaTime = this.time - this.lastTime || this.fixedDeltaTime;
}
};
const KeyCode = {
Space: 32,
Left: 37,
Up: 38,
Right: 39,
Down: 40
};
class BranthKey {
constructor(keyCode) {
this.keyCode = keyCode;
this.hold = false;
this.pressed = false;
this.released = false;
}
up() {
this.hold = false;
this.released = true;
}
down() {
this.hold = true;
this.pressed = true;
}
reset() {
this.pressed = false;
this.released = false;
}
}
const Input = {
list: [[]],
reset() {
for (const i of this.list) {
for (const j of i) {
j.reset();
}
}
},
getKey(keyCode) {
for (const k of this.list[0]) {
if (k.keyCode === keyCode) {
return k;
}
}
},
keyUp(keyCode) {
return this.getKey(keyCode).released;
},
keyDown(keyCode) {
return this.getKey(keyCode).pressed;
},
keyHold(keyCode) {
return this.getKey(keyCode).hold;
},
eventkeyup(e) {
for (const k of this.list[0]) {
if (k.keyCode == e.which || k.keyCode == e.keyCode) {
k.up();
}
}
},
eventkeydown(e) {
for (const k of this.list[0]) {
if (k.keyCode == e.which || k.keyCode == e.keyCode) {
if (!k.hold) k.down();
}
}
}
};
for (const keyCode of Object.values(KeyCode)) {
Input.list[0].push(new BranthKey(keyCode));
}
const C = {
black: 'black',
darkGreen: 'darkgreen',
fireBrick: 'firebrick',
green: 'green',
indianRed: 'indianred',
limeGreen: 'limegreen',
mediumSeaGreen: 'mediumseagreen',
red: 'red',
white: 'white'
};
const Font = {
get s() {
return `${10 * SCALER.w}px`;
},
get m() {
return `${16 * SCALER.w}px`;
},
get l() {
return `${24 * SCALER.w}px`;
},
get xl() {
return `${36 * SCALER.w}px`;
},
get xxl() {
return `${48 * SCALER.w}px`;
},
get size() {
return +CTX.font.split(' ').filter(v => v.includes('px')).shift().replace('px', '');
},
get sb() {
return `bold ${this.s}`;
},
get mb() {
return `bold ${this.m}`;
},
get lb() {
return `bold ${this.l}`;
},
get xlb() {
return `bold ${this.xl}`;
},
get xxlb() {
return `bold ${this.xxl}`;
}
};
const Align = {
l: 'left',
r: 'right',
c: 'center',
t: 'top',
m: 'middle',
b: 'bottom'
};
const Draw = {
setFont(f) {
CTX.font = `${f} sans-serif`;
},
setAlpha(a) {
CTX.globalAlpha = a;
},
setColor(c) {
CTX.fillStyle = c;
CTX.strokeStyle = c;
},
setHAlign(a) {
CTX.textAlign = a;
},
setVAlign(a) {
CTX.textBaseline = a;
},
setHVAlign(h, v) {
this.setHAlign(h);
this.setVAlign(v);
},
text(x, y, text) {
CTX.fillText(text, x, y);
},
draw(outline) {
if (outline === true) {
CTX.stroke();
}
else {
CTX.fill();
}
},
rect(x, y, w, h, outline) {
CTX.beginPath();
CTX.rect(x, y, w, h);
this.draw(outline);
},
circle(x, y, r, outline) {
CTX.beginPath();
CTX.arc(x, y, r, 0, 2 * Math.PI);
this.draw(outline);
}
};
const OBJ = {
ID: 0,
list: [],
classes: [],
add(cls) {
this.list.push([]);
this.classes.push(cls);
},
get(id) {
for (const o of this.list) {
for (const i of o) {
if (i) {
if (i.id === id) {
return i;
}
}
}
}
},
take(cls) {
return this.list[this.classes.indexOf(cls)];
},
push(cls, i) {
if (this.classes.includes(cls)) {
this.list[this.classes.indexOf(cls)].push(i);
i.start();
}
},
create(cls, x, y) {
const n = new cls(x, y);
this.list[this.classes.indexOf(cls)].push(n);
n.start();
return n;
},
update() {
for (const o of this.list) {
for (const i of o) {
if (i) {
if (i.active) {
i.earlyUpdate();
i.update();
}
}
}
}
},
render() {
for (const o of this.list) {
for (const i of o) {
if (i) {
if (i.visible) {
i.render();
}
}
}
}
},
destroy(id) {
for (const o of this.list) {
for (const i in o) {
if (o[i].id === id) {
delete o[i];
}
}
}
},
clear(cls) {
this.list[this.classes.indexOf(cls)] = [];
},
clearAll() {
for (const i in this.list) {
this.list[i] = [];
}
}
};
class BranthObject {
constructor(x, y) {
this.id = OBJ.ID++;
this.active = true;
this.visible = true;
this.x = x;
this.y = y;
}
start() {}
earlyUpdate() {}
update() {}
render() {}
renderUI() {}
}
class BranthParticle extends BranthObject {
constructor(x, y, spd, spdinc, size, sizeinc, d, dinc, r, rinc, a, c, life, grav) {
super(x, y);
this.spd = spd;
this.spdinc = spdinc;
this.size = size;
this.sizeinc = sizeinc;
this.d = d;
this.dinc = dinc;
this.r = r;
this.rinc = rinc;
this.a = a;
this.c = c;
this.life = life;
this.grav = grav;
this.g = grav;
}
update() {
this.a = Math.max(0, this.a - Time.deltaTime / this.life);
if (this.a <= 0) {
OBJ.destroy(this.id);
}
this.x += Math.lendirx(this.spd, this.d);
this.y += Math.lendiry(this.spd, this.d) + Math.lendiry(this.g, 90);
this.size = Math.max(this.size + this.sizeinc, 0);
this.spd += this.spdinc;
this.g += this.grav;
this.d += this.dinc;
this.r += this.rinc;
}
render() {
Draw.setAlpha(this.a);
Draw.setColor(this.c);
Draw.circle(this.x, this.y, this.size);
Draw.setAlpha(1);
}
}
const Emitter = {
depth: 0,
x: {
min: 0,
max: 100
},
y: {
min: 0,
max: 100
},
spd: {
min: 1,
max: 2
},
spdinc: {
min: 0,
max: 0
},
size: {
min: 2,
max: 8
},
sizeinc: {
min: 0,
max: 0
},
d: {
min: 0,
max: 360
},
dinc: {
min: 5,
max: 10
},
r: {
min: 0,
max: 360
},
rinc: {
min: 5,
max: 10
},
a: {
min: 1,
max: 1
},
c: C.fireBrick,
life: {
min: 3000,
max: 4000
},
grav: {
min: 0.01,
max: 0.01
},
setDepth(depth) {
this.depth = depth;
},
setArea(xmin, xmax, ymin, ymax) {
this.x.min = xmin;
this.x.max = xmax;
this.y.min = ymin;
this.y.max = ymax;
},
setSpeed(min, max) {
this.spd.min = min * SCALER.w * 0.5;
this.spd.max = max * SCALER.w * 0.5;
},
setSpeedInc(min, max) {
this.spdinc.min = min * SCALER.w * 0.5;
this.spdinc.max = max * SCALER.w * 0.5;
},
setSize(min, max) {
this.size.min = min * SCALER.w * 0.5;
this.size.max = max * SCALER.w * 0.5;
},
setSizeInc(min, max) {
this.sizeinc.min = min * SCALER.w * 0.5;
this.sizeinc.max = max * SCALER.w * 0.5;
},
setDirection(min, max) {
this.d.min = min;
this.d.max = max;
},
setDirectionInc(min, max) {
this.dinc.min = min;
this.dinc.max = max;
},
setRotation(min, max) {
this.r.min = min;
this.r.max = max;
},
setRotationInc(min, max) {
this.rinc.min = min;
this.rinc.max = max;
},
setAlpha(min, max) {
this.a.min = min;
this.a.max = max;
},
setColor(c) {
this.c = c;
},
setLife(min, max) {
this.life.min = min;
this.life.max = max;
},
setGravity(min, max) {
this.grav.min = min;
this.grav.max = max;
},
preset(s) {
switch (s) {
case 'bigstar':
this.setSpeed(4, 7);
this.setSpeedInc(-0.05, -0.05);
this.setSize(15, 22);
this.setSizeInc(-0.1, -0.1);
this.setDirection(180, 360);
this.setDirectionInc(0, 0);
this.setRotation(0, 0);
this.setRotationInc(0, 0);
this.setAlpha(0.2, 0.2);
this.setColor(C.fireBrick);
this.setLife(3000, 4000);
this.setGravity(0, 0);
break;
case 'sparkle':
this.setSpeed(2, 5);
this.setSpeedInc(-0.1, -0.1);
this.setSize(5, 10);
this.setSizeInc(-0.1, -0.1);
this.setDirection(0, 360);
this.setDirectionInc(0, 0);
this.setRotation(0, 0);
this.setRotationInc(0, 0);
this.setAlpha(1, 1);
this.setColor(C.fireBrick);
this.setLife(1000, 2000);
this.setGravity(0, 0);
break;
case 'puff':
this.setSize(3, 5);
this.setColor(C.indianRed);
break;
}
},
emit(n) {
for (let i = 0; i < n; i++) {
const n = new BranthParticle(
Math.range(this.x.min, this.x.max),
Math.range(this.y.min, this.y.max),
Math.range(this.spd.min, this.spd.max),
Math.range(this.spdinc.min, this.spdinc.max),
Math.range(this.size.min, this.size.max),
Math.range(this.sizeinc.min, this.sizeinc.max),
Math.range(this.d.min, this.d.max),
Math.range(this.dinc.min, this.dinc.max),
Math.range(this.r.min, this.r.max),
Math.range(this.rinc.min, this.rinc.max),
Math.range(this.a.min, this.a.max),
this.c,
Math.range(this.life.min, this.life.max),
Math.range(this.grav.min, this.grav.max)
);
n.depth = this.depth;
OBJ.push(BranthParticle, n);
}
}
};
const Room = {
get w() {
return CANVAS.width / CANVAS_SCALER;
},
get h() {
return CANVAS.height / CANVAS_SCALER;
},
get mid() {
return {
w: this.w * 0.5,
h: this.h * 0.5
};
}
};
const View = {
x: 0,
y: 0,
xto: 0,
yto: 0,
alarm: -1,
interval: 0,
magnitude: 0,
shake(mag, int) {
this.magnitude = mag;
this.interval = int;
this.alarm = this.interval;
},
update() {
if (this.alarm > 0) {
const mag = this.magnitude * this.alarm / this.interval;
this.xto = Math.range(mag * 0.5, mag * 0.6) * (Math.random() > 0.5? -1 : 1);
this.yto = Math.range(mag * 0.8, mag) * (Math.random() > 0.5? -1 : 1);
this.alarm -= Time.deltaTime;
if (this.alarm <= 0) {
this.xto = 0;
this.yto = 0;
}
}
const t = 0.2;
this.x += t * (this.xto - this.x);
this.y += t * (this.yto - this.y);
}
};
const UI = {
render() {
for (const o of OBJ.list) {
for (const i of o) {
if (i) {
if (i.visible) {
i.renderUI();
}
}
}
}
}
};
const RAF = window.requestAnimationFrame
|| window.msRequestAnimationFrame
|| window.mozRequestAnimationFrame
|| window.webkitRequestAnimationFrame
|| function(f) { return setTimeout(f, Time.fixedDeltaTime) }
const BRANTH = {
start() {
document.body.appendChild(CANVAS);
window.onkeyup = (e) => Input.eventkeyup(e);
window.onkeydown = (e) => {
const keyCodes = [32, 37, 38, 39, 40];
if (keyCodes.includes(e.keyCode)) {
e.preventDefault();
}
Input.eventkeydown(e);
}
window.onresize = () => BRANTH.resize();
BRANTH.resize();
BRANTH.update();
},
update(t) {
Time.update(t);
View.update();
OBJ.update();
CTX.clearRect(0, 0, Room.w, Room.h);
OBJ.render();
UI.render();
Input.reset();
RAF(BRANTH.update);
},
resize() {
CANVAS.width = CANVAS.getBoundingClientRect().width * CANVAS_SCALER;
CANVAS.height = CANVAS.getBoundingClientRect().height * CANVAS_SCALER;
CTX.resetTransform();
CTX.scale(CANVAS_SCALER, CANVAS_SCALER);
}
};
const Tile = {
w: 20,
get h() {
return this.w * 0.5;
},
get | () {
return {
w: this.w * 0.5,
h: this.h * 0.5
};
}
};
const World = {
get x() {
return Room.mid.w;
},
get y() {
return Room.mid.h - this.mid.h;
},
get w() {
return Grid.c * Tile.w;
},
get h() {
return Grid.r * Tile.h;
},
get mid() {
return {
w: this.w * 0.5,
h: this.h * 0.5
};
}
};
class Point {
constructor(x, y) {
this.x = x || 0;
this.y = y || 0;
}
}
class Line {
constructor(p1, p2) {
this.p = [p1, p2];
}
intersect(line) {
const p1 = this.p[0], p2 = this.p[1], p3 = line.p[0], p4 = line.p[1];
const s1 = new Point(p2.x - p1.x, p2.y - p1.y);
const s2 = new Point(p4.x - p3.x, p4.y - p3.y);
const s = (-s1.y * (p1.x - p3.x) + s1.x * (p1.y - p3.y)) / (-s2.x * s1.y + s1.x * s2.y);
const t = (s2.x * (p1.y - p3.y) - s2.y * (p1.x - p3.x)) / (-s2.x * s1.y + s1.x * s2.y);
if (s >= 0 && s <= 1 && t >= 0 && t <= 1) {
return new Point(p1.x + (t * s1.x), p1.y + (t * s1.y));
}
return null;
}
}
const Grid = {
g: [],
c: 30,
r: 30,
get mid() {
return {
c: this.c * 0.5,
r: this.r * 0.5
};
},
get(c, r) {
const g = new Point(
c * Tile.mid.w - r * Tile.mid.w,
r * Tile.mid.h + c * Tile.mid.h
);
return new Point(View.x + World.x + g.x, View.y + World.y + g.y);
},
tilePath(x, y) {
if (x instanceof Point) {
y = x.y;
x = x.x;
}
CTX.beginPath();
CTX.moveTo(x, y - Tile.mid.h);
CTX.lineTo(x + Tile.mid.w, y);
CTX.lineTo(x, y + Tile.mid.h);
CTX.lineTo(x - Tile.mid.w, y);
CTX.closePath();
}
};
for (let c = 0; c < Grid.c; c++) {
Grid.g.push([]);
for (let r = 0; r < Grid.r; r++) {
Grid.g[c].push(0);
}
}
class BranthGrid extends BranthObject {
constructor(c, r) {
super(0, 0);
this.c = c;
this.r = r;
}
meet(c, r) {
if (r === undefined) {
r = c.r;
c = c.c;
}
return c === this.c && r === this.r;
}
earlyUpdate() {
const b = Grid.get(this.c, this.r);
this.x = b.x;
this.y = b.y;
}
}
class Food extends BranthGrid {
start() {
this.respawn();
}
move() {
this.c = Math.floor(Math.random() * Grid.c);
this.r = Math.floor(Math.random() * Grid.r);
}
respawn() {
const s = OBJ.take(Snake)[0];
if (s) {
let i = 50;
let isMeet = true;
while (isMeet) {
this.move();
isMeet = false;
for (let i = 0; i < s.tails.length; i++) {
const t = s.tails[i];
if (this.meet(t)) {
isMeet = true;
}
for (let i = 0; i < OBJ.take(Food).length; i++) {
const a = OBJ.take(Food)[i];
this.meet(a.c, a.r);
}
}
i--;
if (i < 0) {
break;
}
}
if (isMeet) {
this.c = -3;
this.r = -3;
this.visible = false;
}
}
else {
this.move();
}
}
}
class Snake extends BranthGrid {
start() {
this.dc = 0;
this.dr = 0;
this.idle = true;
this.tails = [{
c: this.c,
r: this.r
}];
this.tailCount = 3;
this.isPressed = false;
this.moveInterval = 100;
this.alarm = this.moveInterval;
}
update() {
const keyUp = Input.keyDown(KeyCode.Up);
const keyLeft = Input.keyDown(KeyCode.Left);
const keyDown = Input.keyDown(KeyCode.Down);
const keyRight = Input.keyDown(KeyCode.Right);
if (this.idle || !this.isPressed) {
if (keyUp && this.dr === 0) {
this.dc = 0;
this.dr = -1;
this.isPressed = true;
}
else if (keyLeft && this.dc === 0) {
this.dc = -1;
this.dr = 0;
this.isPressed = true;
}
else if (keyDown && this.dr === 0) {
this.dc = 0;
this.dr = 1;
this.isPressed = true;
}
else if (keyRight && this.dc === 0) {
this.dc = 1;
this.dr = 0;
this.isPressed = true;
}
}
if (this.alarm <= 0 && this.alarm !== -1) {
if (!this.idle) {
this.c += this.dc;
this.r += this.dr;
if (this.c < 0) this.c = Grid.c - 1;
if (this.r < 0) this.r = Grid.r - 1;
if (this.c > Grid.c - 1) this.c = 0;
if (this.r > Grid.r - 1) this.r = 0;
const b = Grid.get(this.c, this.r);
for (let i = 0; i < this.tails.length; i++) {
const t = this.tails[i];
if (this.meet(t)) {
this.dc = 0;
this.dr = 0;
this.tailCount = 3;
this.idle = true;
Emitter.setArea(b.x, b.x, b.y, b.y);
Emitter.preset('sparkle');
Emitter.setColor(C.mediumSeaGreen);
Emitter.emit(10);
Emitter.preset('puff');
Emitter.setColor(C.limeGreen);
Emitter.emit(10);
View.shake(20, 1000);
}
}
if (this.tails.length < this.tailCount + 5) {
this.tails.push({
c: this.c,
r: this.r
});
}
if (!(this.dc === 0 && this.dr === 0)) {
while (this.tails.length > this.tailCount) {
this.tails.shift();
}
}
for (let i = 0; i < OBJ.take(Food).length; i++) {
const a = OBJ.take(Food)[i];
if (a.meet(this.c, this.r)) {
this.tailCount++;
a.respawn();
Emitter.setArea(b.x, b.x, b.y, b.y);
Emitter.preset('sparkle');
Emitter.emit(10);
Emitter.preset('puff');
Emitter.emit(10);
View.shake(8, 300);
}
}
}
else {
if (this.isPressed) {
this.tails = [{
c: this.c,
r: this.r
}];
this.idle = false;
const b = Grid.get(this.c, this.r);
Emitter.setArea(b.x, b.x, b.y, b.y);
Emitter.preset('sparkle');
Emitter.setColor(C.mediumSeaGreen);
Emitter.emit(10);
Emitter.preset('puff');
Emitter.setColor(C.limeGreen);
Emitter.emit(10);
View.shake(8, 300);
}
}
this.isPressed = false;
this.alarm = this.moveInterval;
}
else {
this.alarm -= Time.deltaTime;
}
}
render() {
const tailsSorted = this.tails.slice();
for (let i = 0; i < OBJ.take(Food).length; i++) {
tailsSorted.push(OBJ.take(Food)[i]);
}
tailsSorted.sort((a, b) => a.r < b.r || (a.r === b.r && a.c < b.c)? -1 : 1);
for (let i = 0; i < tailsSorted.length; i++) {
const t = tailsSorted[i];
const b = Grid.get(t.c, t.r);
for (let j = 0; j < Tile.mid.h; j++) {
Grid.tilePath(b.x, b.y - j);
if (t instanceof Food) {
if (t.visible) {
Draw.setColor(j === Tile.mid.h - 1? C.indianRed : C.fireBrick);
Draw.draw();
}
}
else {
Draw.setColor(j === Tile.mid.h - 1? (this.meet(t)? 'springgreen' : C.limeGreen) : C.mediumSeaGreen);
Draw.draw();
}
}
}
}
}
class Manager extends BranthObject {
start() {
const n = new Snake(Grid.mid.c, Grid.mid.r);
OBJ.push(Snake, n);
for (let i = 0; i < 3; i++) {
OBJ.create(Food);
}
this.triggerTime = 0;
}
update() {
let keySpace = Input.keyDown(KeyCode.Space);
if (Input.keyUp(KeyCode.Space)) {
this.triggerTime = 0;
}
if (Input.keyHold(KeyCode.Space)) {
if (this.triggerTime > 600) {
keySpace = true;
}
else {
this.triggerTime += Time.deltaTime;
}
}
if (keySpace) {
if (OBJ.take(Food).length < Grid.mid.c * Grid.mid.r) {
const n = OBJ.create(Food);
const b = Grid.get(n.c, n.r);
Emitter.setArea(b.x, b.x, b.y, b.y);
Emitter.preset('sparkle');
Emitter.emit(10);
Emitter.preset('puff');
Emitter.emit(10);
}
View.shake(8, 300);
}
}
render() {
Draw.setColor(C.black);
for (let c = 0; c < Grid.c; c++) {
for (let r = 0; r < Grid.r; r++) {
const b = Grid.get(c, r);
Grid.tilePath(b);
Draw.draw(true);
}
}
}
}
OBJ.add(Manager);
OBJ.add(Food);
OBJ.add(Snake);
OBJ.add(BranthParticle);
BRANTH.start();
OBJ.create(Manager); | mid | identifier_name |
app.js | Math.range = (min, max) => min + Math.random() * (max - min);
Math.degtorad = (d) => d * Math.PI / 180;
Math.lendirx = (l, d) => l * Math.cos(Math.degtorad(d));
Math.lendiry = (l, d) => l * Math.sin(Math.degtorad(d));
const CANVAS = document.createElement('canvas');
const CTX = CANVAS.getContext('2d');
const CANVAS_SCALER = 2;
const SCALER = {
get w() {
return CANVAS.width / 960;
},
get h() {
return CANVAS.height / 540;
}
};
CANVAS.style.backgroundImage = 'radial-gradient(darkorchid 33%, darkslateblue)';
const Time = {
time: 0,
lastTime: 0,
deltaTime: 0,
fixedDeltaTime: 1000 / 60,
update(t) {
this.lastTime = this.time || 0;
this.time = t || 0;
this.deltaTime = this.time - this.lastTime || this.fixedDeltaTime;
}
};
const KeyCode = {
Space: 32,
Left: 37,
Up: 38,
Right: 39,
Down: 40
};
class BranthKey {
constructor(keyCode) {
this.keyCode = keyCode;
this.hold = false;
this.pressed = false;
this.released = false;
}
up() {
this.hold = false;
this.released = true;
}
down() {
this.hold = true;
this.pressed = true;
}
reset() {
this.pressed = false;
this.released = false;
}
}
const Input = {
list: [[]],
reset() {
for (const i of this.list) {
for (const j of i) {
j.reset();
}
}
},
getKey(keyCode) {
for (const k of this.list[0]) {
if (k.keyCode === keyCode) {
return k;
}
}
},
keyUp(keyCode) {
return this.getKey(keyCode).released;
},
keyDown(keyCode) {
return this.getKey(keyCode).pressed;
},
keyHold(keyCode) {
return this.getKey(keyCode).hold;
},
eventkeyup(e) {
for (const k of this.list[0]) {
if (k.keyCode == e.which || k.keyCode == e.keyCode) {
k.up();
}
}
},
eventkeydown(e) {
for (const k of this.list[0]) {
if (k.keyCode == e.which || k.keyCode == e.keyCode) {
if (!k.hold) k.down();
}
}
}
};
for (const keyCode of Object.values(KeyCode)) {
Input.list[0].push(new BranthKey(keyCode));
}
const C = {
black: 'black',
darkGreen: 'darkgreen',
fireBrick: 'firebrick',
green: 'green',
indianRed: 'indianred',
limeGreen: 'limegreen',
mediumSeaGreen: 'mediumseagreen',
red: 'red',
white: 'white'
};
const Font = {
get s() {
return `${10 * SCALER.w}px`;
},
get m() {
return `${16 * SCALER.w}px`;
},
get l() {
return `${24 * SCALER.w}px`;
},
get xl() {
return `${36 * SCALER.w}px`;
},
get xxl() {
return `${48 * SCALER.w}px`;
},
get size() {
return +CTX.font.split(' ').filter(v => v.includes('px')).shift().replace('px', '');
},
get sb() {
return `bold ${this.s}`;
},
get mb() {
return `bold ${this.m}`;
},
get lb() {
return `bold ${this.l}`;
},
get xlb() {
return `bold ${this.xl}`;
},
get xxlb() {
return `bold ${this.xxl}`;
}
};
const Align = {
l: 'left',
r: 'right',
c: 'center',
t: 'top',
m: 'middle',
b: 'bottom'
};
const Draw = {
setFont(f) {
CTX.font = `${f} sans-serif`;
},
setAlpha(a) {
CTX.globalAlpha = a;
},
setColor(c) {
CTX.fillStyle = c;
CTX.strokeStyle = c;
},
setHAlign(a) {
CTX.textAlign = a;
},
setVAlign(a) {
CTX.textBaseline = a;
},
setHVAlign(h, v) {
this.setHAlign(h);
this.setVAlign(v);
},
text(x, y, text) {
CTX.fillText(text, x, y);
},
draw(outline) {
if (outline === true) {
CTX.stroke();
}
else {
CTX.fill();
}
},
rect(x, y, w, h, outline) {
CTX.beginPath();
CTX.rect(x, y, w, h);
this.draw(outline);
},
circle(x, y, r, outline) {
CTX.beginPath();
CTX.arc(x, y, r, 0, 2 * Math.PI);
this.draw(outline);
}
};
const OBJ = {
ID: 0,
list: [],
classes: [],
add(cls) {
this.list.push([]);
this.classes.push(cls);
},
get(id) {
for (const o of this.list) {
for (const i of o) {
if (i) {
if (i.id === id) {
return i;
}
}
}
}
},
take(cls) {
return this.list[this.classes.indexOf(cls)];
},
push(cls, i) {
if (this.classes.includes(cls)) {
this.list[this.classes.indexOf(cls)].push(i);
i.start();
}
},
create(cls, x, y) {
const n = new cls(x, y);
this.list[this.classes.indexOf(cls)].push(n);
n.start();
return n;
},
update() {
for (const o of this.list) {
for (const i of o) {
if (i) {
if (i.active) {
i.earlyUpdate();
i.update();
}
}
}
}
},
render() {
for (const o of this.list) {
for (const i of o) {
if (i) {
if (i.visible) {
i.render();
}
}
}
}
},
destroy(id) {
for (const o of this.list) {
for (const i in o) {
if (o[i].id === id) {
delete o[i];
}
}
}
},
clear(cls) {
this.list[this.classes.indexOf(cls)] = [];
},
clearAll() {
for (const i in this.list) {
this.list[i] = [];
}
}
};
class BranthObject {
constructor(x, y) |
start() {}
earlyUpdate() {}
update() {}
render() {}
renderUI() {}
}
class BranthParticle extends BranthObject {
constructor(x, y, spd, spdinc, size, sizeinc, d, dinc, r, rinc, a, c, life, grav) {
super(x, y);
this.spd = spd;
this.spdinc = spdinc;
this.size = size;
this.sizeinc = sizeinc;
this.d = d;
this.dinc = dinc;
this.r = r;
this.rinc = rinc;
this.a = a;
this.c = c;
this.life = life;
this.grav = grav;
this.g = grav;
}
update() {
this.a = Math.max(0, this.a - Time.deltaTime / this.life);
if (this.a <= 0) {
OBJ.destroy(this.id);
}
this.x += Math.lendirx(this.spd, this.d);
this.y += Math.lendiry(this.spd, this.d) + Math.lendiry(this.g, 90);
this.size = Math.max(this.size + this.sizeinc, 0);
this.spd += this.spdinc;
this.g += this.grav;
this.d += this.dinc;
this.r += this.rinc;
}
render() {
Draw.setAlpha(this.a);
Draw.setColor(this.c);
Draw.circle(this.x, this.y, this.size);
Draw.setAlpha(1);
}
}
const Emitter = {
depth: 0,
x: {
min: 0,
max: 100
},
y: {
min: 0,
max: 100
},
spd: {
min: 1,
max: 2
},
spdinc: {
min: 0,
max: 0
},
size: {
min: 2,
max: 8
},
sizeinc: {
min: 0,
max: 0
},
d: {
min: 0,
max: 360
},
dinc: {
min: 5,
max: 10
},
r: {
min: 0,
max: 360
},
rinc: {
min: 5,
max: 10
},
a: {
min: 1,
max: 1
},
c: C.fireBrick,
life: {
min: 3000,
max: 4000
},
grav: {
min: 0.01,
max: 0.01
},
setDepth(depth) {
this.depth = depth;
},
setArea(xmin, xmax, ymin, ymax) {
this.x.min = xmin;
this.x.max = xmax;
this.y.min = ymin;
this.y.max = ymax;
},
setSpeed(min, max) {
this.spd.min = min * SCALER.w * 0.5;
this.spd.max = max * SCALER.w * 0.5;
},
setSpeedInc(min, max) {
this.spdinc.min = min * SCALER.w * 0.5;
this.spdinc.max = max * SCALER.w * 0.5;
},
setSize(min, max) {
this.size.min = min * SCALER.w * 0.5;
this.size.max = max * SCALER.w * 0.5;
},
setSizeInc(min, max) {
this.sizeinc.min = min * SCALER.w * 0.5;
this.sizeinc.max = max * SCALER.w * 0.5;
},
setDirection(min, max) {
this.d.min = min;
this.d.max = max;
},
setDirectionInc(min, max) {
this.dinc.min = min;
this.dinc.max = max;
},
setRotation(min, max) {
this.r.min = min;
this.r.max = max;
},
setRotationInc(min, max) {
this.rinc.min = min;
this.rinc.max = max;
},
setAlpha(min, max) {
this.a.min = min;
this.a.max = max;
},
setColor(c) {
this.c = c;
},
setLife(min, max) {
this.life.min = min;
this.life.max = max;
},
setGravity(min, max) {
this.grav.min = min;
this.grav.max = max;
},
preset(s) {
switch (s) {
case 'bigstar':
this.setSpeed(4, 7);
this.setSpeedInc(-0.05, -0.05);
this.setSize(15, 22);
this.setSizeInc(-0.1, -0.1);
this.setDirection(180, 360);
this.setDirectionInc(0, 0);
this.setRotation(0, 0);
this.setRotationInc(0, 0);
this.setAlpha(0.2, 0.2);
this.setColor(C.fireBrick);
this.setLife(3000, 4000);
this.setGravity(0, 0);
break;
case 'sparkle':
this.setSpeed(2, 5);
this.setSpeedInc(-0.1, -0.1);
this.setSize(5, 10);
this.setSizeInc(-0.1, -0.1);
this.setDirection(0, 360);
this.setDirectionInc(0, 0);
this.setRotation(0, 0);
this.setRotationInc(0, 0);
this.setAlpha(1, 1);
this.setColor(C.fireBrick);
this.setLife(1000, 2000);
this.setGravity(0, 0);
break;
case 'puff':
this.setSize(3, 5);
this.setColor(C.indianRed);
break;
}
},
emit(n) {
for (let i = 0; i < n; i++) {
const n = new BranthParticle(
Math.range(this.x.min, this.x.max),
Math.range(this.y.min, this.y.max),
Math.range(this.spd.min, this.spd.max),
Math.range(this.spdinc.min, this.spdinc.max),
Math.range(this.size.min, this.size.max),
Math.range(this.sizeinc.min, this.sizeinc.max),
Math.range(this.d.min, this.d.max),
Math.range(this.dinc.min, this.dinc.max),
Math.range(this.r.min, this.r.max),
Math.range(this.rinc.min, this.rinc.max),
Math.range(this.a.min, this.a.max),
this.c,
Math.range(this.life.min, this.life.max),
Math.range(this.grav.min, this.grav.max)
);
n.depth = this.depth;
OBJ.push(BranthParticle, n);
}
}
};
const Room = {
get w() {
return CANVAS.width / CANVAS_SCALER;
},
get h() {
return CANVAS.height / CANVAS_SCALER;
},
get mid() {
return {
w: this.w * 0.5,
h: this.h * 0.5
};
}
};
const View = {
x: 0,
y: 0,
xto: 0,
yto: 0,
alarm: -1,
interval: 0,
magnitude: 0,
shake(mag, int) {
this.magnitude = mag;
this.interval = int;
this.alarm = this.interval;
},
update() {
if (this.alarm > 0) {
const mag = this.magnitude * this.alarm / this.interval;
this.xto = Math.range(mag * 0.5, mag * 0.6) * (Math.random() > 0.5? -1 : 1);
this.yto = Math.range(mag * 0.8, mag) * (Math.random() > 0.5? -1 : 1);
this.alarm -= Time.deltaTime;
if (this.alarm <= 0) {
this.xto = 0;
this.yto = 0;
}
}
const t = 0.2;
this.x += t * (this.xto - this.x);
this.y += t * (this.yto - this.y);
}
};
const UI = {
render() {
for (const o of OBJ.list) {
for (const i of o) {
if (i) {
if (i.visible) {
i.renderUI();
}
}
}
}
}
};
const RAF = window.requestAnimationFrame
|| window.msRequestAnimationFrame
|| window.mozRequestAnimationFrame
|| window.webkitRequestAnimationFrame
|| function(f) { return setTimeout(f, Time.fixedDeltaTime) }
const BRANTH = {
start() {
document.body.appendChild(CANVAS);
window.onkeyup = (e) => Input.eventkeyup(e);
window.onkeydown = (e) => {
const keyCodes = [32, 37, 38, 39, 40];
if (keyCodes.includes(e.keyCode)) {
e.preventDefault();
}
Input.eventkeydown(e);
}
window.onresize = () => BRANTH.resize();
BRANTH.resize();
BRANTH.update();
},
update(t) {
Time.update(t);
View.update();
OBJ.update();
CTX.clearRect(0, 0, Room.w, Room.h);
OBJ.render();
UI.render();
Input.reset();
RAF(BRANTH.update);
},
resize() {
CANVAS.width = CANVAS.getBoundingClientRect().width * CANVAS_SCALER;
CANVAS.height = CANVAS.getBoundingClientRect().height * CANVAS_SCALER;
CTX.resetTransform();
CTX.scale(CANVAS_SCALER, CANVAS_SCALER);
}
};
const Tile = {
w: 20,
get h() {
return this.w * 0.5;
},
get mid() {
return {
w: this.w * 0.5,
h: this.h * 0.5
};
}
};
const World = {
get x() {
return Room.mid.w;
},
get y() {
return Room.mid.h - this.mid.h;
},
get w() {
return Grid.c * Tile.w;
},
get h() {
return Grid.r * Tile.h;
},
get mid() {
return {
w: this.w * 0.5,
h: this.h * 0.5
};
}
};
class Point {
constructor(x, y) {
this.x = x || 0;
this.y = y || 0;
}
}
class Line {
constructor(p1, p2) {
this.p = [p1, p2];
}
intersect(line) {
const p1 = this.p[0], p2 = this.p[1], p3 = line.p[0], p4 = line.p[1];
const s1 = new Point(p2.x - p1.x, p2.y - p1.y);
const s2 = new Point(p4.x - p3.x, p4.y - p3.y);
const s = (-s1.y * (p1.x - p3.x) + s1.x * (p1.y - p3.y)) / (-s2.x * s1.y + s1.x * s2.y);
const t = (s2.x * (p1.y - p3.y) - s2.y * (p1.x - p3.x)) / (-s2.x * s1.y + s1.x * s2.y);
if (s >= 0 && s <= 1 && t >= 0 && t <= 1) {
return new Point(p1.x + (t * s1.x), p1.y + (t * s1.y));
}
return null;
}
}
const Grid = {
g: [],
c: 30,
r: 30,
get mid() {
return {
c: this.c * 0.5,
r: this.r * 0.5
};
},
get(c, r) {
const g = new Point(
c * Tile.mid.w - r * Tile.mid.w,
r * Tile.mid.h + c * Tile.mid.h
);
return new Point(View.x + World.x + g.x, View.y + World.y + g.y);
},
tilePath(x, y) {
if (x instanceof Point) {
y = x.y;
x = x.x;
}
CTX.beginPath();
CTX.moveTo(x, y - Tile.mid.h);
CTX.lineTo(x + Tile.mid.w, y);
CTX.lineTo(x, y + Tile.mid.h);
CTX.lineTo(x - Tile.mid.w, y);
CTX.closePath();
}
};
for (let c = 0; c < Grid.c; c++) {
Grid.g.push([]);
for (let r = 0; r < Grid.r; r++) {
Grid.g[c].push(0);
}
}
class BranthGrid extends BranthObject {
constructor(c, r) {
super(0, 0);
this.c = c;
this.r = r;
}
meet(c, r) {
if (r === undefined) {
r = c.r;
c = c.c;
}
return c === this.c && r === this.r;
}
earlyUpdate() {
const b = Grid.get(this.c, this.r);
this.x = b.x;
this.y = b.y;
}
}
class Food extends BranthGrid {
start() {
this.respawn();
}
move() {
this.c = Math.floor(Math.random() * Grid.c);
this.r = Math.floor(Math.random() * Grid.r);
}
respawn() {
const s = OBJ.take(Snake)[0];
if (s) {
let i = 50;
let isMeet = true;
while (isMeet) {
this.move();
isMeet = false;
for (let i = 0; i < s.tails.length; i++) {
const t = s.tails[i];
if (this.meet(t)) {
isMeet = true;
}
for (let i = 0; i < OBJ.take(Food).length; i++) {
const a = OBJ.take(Food)[i];
this.meet(a.c, a.r);
}
}
i--;
if (i < 0) {
break;
}
}
if (isMeet) {
this.c = -3;
this.r = -3;
this.visible = false;
}
}
else {
this.move();
}
}
}
class Snake extends BranthGrid {
start() {
this.dc = 0;
this.dr = 0;
this.idle = true;
this.tails = [{
c: this.c,
r: this.r
}];
this.tailCount = 3;
this.isPressed = false;
this.moveInterval = 100;
this.alarm = this.moveInterval;
}
update() {
const keyUp = Input.keyDown(KeyCode.Up);
const keyLeft = Input.keyDown(KeyCode.Left);
const keyDown = Input.keyDown(KeyCode.Down);
const keyRight = Input.keyDown(KeyCode.Right);
if (this.idle || !this.isPressed) {
if (keyUp && this.dr === 0) {
this.dc = 0;
this.dr = -1;
this.isPressed = true;
}
else if (keyLeft && this.dc === 0) {
this.dc = -1;
this.dr = 0;
this.isPressed = true;
}
else if (keyDown && this.dr === 0) {
this.dc = 0;
this.dr = 1;
this.isPressed = true;
}
else if (keyRight && this.dc === 0) {
this.dc = 1;
this.dr = 0;
this.isPressed = true;
}
}
if (this.alarm <= 0 && this.alarm !== -1) {
if (!this.idle) {
this.c += this.dc;
this.r += this.dr;
if (this.c < 0) this.c = Grid.c - 1;
if (this.r < 0) this.r = Grid.r - 1;
if (this.c > Grid.c - 1) this.c = 0;
if (this.r > Grid.r - 1) this.r = 0;
const b = Grid.get(this.c, this.r);
for (let i = 0; i < this.tails.length; i++) {
const t = this.tails[i];
if (this.meet(t)) {
this.dc = 0;
this.dr = 0;
this.tailCount = 3;
this.idle = true;
Emitter.setArea(b.x, b.x, b.y, b.y);
Emitter.preset('sparkle');
Emitter.setColor(C.mediumSeaGreen);
Emitter.emit(10);
Emitter.preset('puff');
Emitter.setColor(C.limeGreen);
Emitter.emit(10);
View.shake(20, 1000);
}
}
if (this.tails.length < this.tailCount + 5) {
this.tails.push({
c: this.c,
r: this.r
});
}
if (!(this.dc === 0 && this.dr === 0)) {
while (this.tails.length > this.tailCount) {
this.tails.shift();
}
}
for (let i = 0; i < OBJ.take(Food).length; i++) {
const a = OBJ.take(Food)[i];
if (a.meet(this.c, this.r)) {
this.tailCount++;
a.respawn();
Emitter.setArea(b.x, b.x, b.y, b.y);
Emitter.preset('sparkle');
Emitter.emit(10);
Emitter.preset('puff');
Emitter.emit(10);
View.shake(8, 300);
}
}
}
else {
if (this.isPressed) {
this.tails = [{
c: this.c,
r: this.r
}];
this.idle = false;
const b = Grid.get(this.c, this.r);
Emitter.setArea(b.x, b.x, b.y, b.y);
Emitter.preset('sparkle');
Emitter.setColor(C.mediumSeaGreen);
Emitter.emit(10);
Emitter.preset('puff');
Emitter.setColor(C.limeGreen);
Emitter.emit(10);
View.shake(8, 300);
}
}
this.isPressed = false;
this.alarm = this.moveInterval;
}
else {
this.alarm -= Time.deltaTime;
}
}
render() {
const tailsSorted = this.tails.slice();
for (let i = 0; i < OBJ.take(Food).length; i++) {
tailsSorted.push(OBJ.take(Food)[i]);
}
tailsSorted.sort((a, b) => a.r < b.r || (a.r === b.r && a.c < b.c)? -1 : 1);
for (let i = 0; i < tailsSorted.length; i++) {
const t = tailsSorted[i];
const b = Grid.get(t.c, t.r);
for (let j = 0; j < Tile.mid.h; j++) {
Grid.tilePath(b.x, b.y - j);
if (t instanceof Food) {
if (t.visible) {
Draw.setColor(j === Tile.mid.h - 1? C.indianRed : C.fireBrick);
Draw.draw();
}
}
else {
Draw.setColor(j === Tile.mid.h - 1? (this.meet(t)? 'springgreen' : C.limeGreen) : C.mediumSeaGreen);
Draw.draw();
}
}
}
}
}
class Manager extends BranthObject {
start() {
const n = new Snake(Grid.mid.c, Grid.mid.r);
OBJ.push(Snake, n);
for (let i = 0; i < 3; i++) {
OBJ.create(Food);
}
this.triggerTime = 0;
}
update() {
let keySpace = Input.keyDown(KeyCode.Space);
if (Input.keyUp(KeyCode.Space)) {
this.triggerTime = 0;
}
if (Input.keyHold(KeyCode.Space)) {
if (this.triggerTime > 600) {
keySpace = true;
}
else {
this.triggerTime += Time.deltaTime;
}
}
if (keySpace) {
if (OBJ.take(Food).length < Grid.mid.c * Grid.mid.r) {
const n = OBJ.create(Food);
const b = Grid.get(n.c, n.r);
Emitter.setArea(b.x, b.x, b.y, b.y);
Emitter.preset('sparkle');
Emitter.emit(10);
Emitter.preset('puff');
Emitter.emit(10);
}
View.shake(8, 300);
}
}
render() {
Draw.setColor(C.black);
for (let c = 0; c < Grid.c; c++) {
for (let r = 0; r < Grid.r; r++) {
const b = Grid.get(c, r);
Grid.tilePath(b);
Draw.draw(true);
}
}
}
}
OBJ.add(Manager);
OBJ.add(Food);
OBJ.add(Snake);
OBJ.add(BranthParticle);
BRANTH.start();
OBJ.create(Manager); | {
this.id = OBJ.ID++;
this.active = true;
this.visible = true;
this.x = x;
this.y = y;
} | identifier_body |
app.js | Math.range = (min, max) => min + Math.random() * (max - min);
Math.degtorad = (d) => d * Math.PI / 180;
Math.lendirx = (l, d) => l * Math.cos(Math.degtorad(d));
Math.lendiry = (l, d) => l * Math.sin(Math.degtorad(d));
const CANVAS = document.createElement('canvas');
const CTX = CANVAS.getContext('2d');
const CANVAS_SCALER = 2;
const SCALER = {
get w() {
return CANVAS.width / 960;
},
get h() {
return CANVAS.height / 540;
}
};
CANVAS.style.backgroundImage = 'radial-gradient(darkorchid 33%, darkslateblue)';
const Time = {
time: 0,
lastTime: 0,
deltaTime: 0,
fixedDeltaTime: 1000 / 60,
update(t) {
this.lastTime = this.time || 0;
this.time = t || 0;
this.deltaTime = this.time - this.lastTime || this.fixedDeltaTime;
}
};
const KeyCode = {
Space: 32,
Left: 37,
Up: 38,
Right: 39,
Down: 40
};
class BranthKey {
constructor(keyCode) {
this.keyCode = keyCode;
this.hold = false;
this.pressed = false;
this.released = false;
}
up() {
this.hold = false;
this.released = true;
}
down() {
this.hold = true;
this.pressed = true;
}
reset() {
this.pressed = false;
this.released = false;
}
}
const Input = {
list: [[]],
reset() {
for (const i of this.list) {
for (const j of i) {
j.reset();
}
}
},
getKey(keyCode) {
for (const k of this.list[0]) {
if (k.keyCode === keyCode) {
return k;
}
}
},
keyUp(keyCode) {
return this.getKey(keyCode).released;
},
keyDown(keyCode) {
return this.getKey(keyCode).pressed;
},
keyHold(keyCode) {
return this.getKey(keyCode).hold;
},
eventkeyup(e) {
for (const k of this.list[0]) {
if (k.keyCode == e.which || k.keyCode == e.keyCode) {
k.up();
}
}
},
eventkeydown(e) {
for (const k of this.list[0]) {
if (k.keyCode == e.which || k.keyCode == e.keyCode) {
if (!k.hold) k.down();
}
}
}
};
for (const keyCode of Object.values(KeyCode)) {
Input.list[0].push(new BranthKey(keyCode));
}
const C = {
black: 'black',
darkGreen: 'darkgreen',
fireBrick: 'firebrick',
green: 'green',
indianRed: 'indianred',
limeGreen: 'limegreen',
mediumSeaGreen: 'mediumseagreen',
red: 'red',
white: 'white'
};
const Font = {
get s() {
return `${10 * SCALER.w}px`;
},
get m() {
return `${16 * SCALER.w}px`;
},
get l() {
return `${24 * SCALER.w}px`;
},
get xl() {
return `${36 * SCALER.w}px`;
},
get xxl() {
return `${48 * SCALER.w}px`;
},
get size() {
return +CTX.font.split(' ').filter(v => v.includes('px')).shift().replace('px', '');
},
get sb() {
return `bold ${this.s}`;
},
get mb() {
return `bold ${this.m}`;
},
get lb() {
return `bold ${this.l}`;
},
get xlb() {
return `bold ${this.xl}`;
},
get xxlb() {
return `bold ${this.xxl}`;
}
};
const Align = {
l: 'left',
r: 'right',
c: 'center',
t: 'top',
m: 'middle',
b: 'bottom'
};
const Draw = {
setFont(f) {
CTX.font = `${f} sans-serif`;
},
setAlpha(a) {
CTX.globalAlpha = a;
},
setColor(c) {
CTX.fillStyle = c;
CTX.strokeStyle = c;
},
setHAlign(a) {
CTX.textAlign = a;
},
setVAlign(a) {
CTX.textBaseline = a;
},
setHVAlign(h, v) {
this.setHAlign(h);
this.setVAlign(v);
},
text(x, y, text) {
CTX.fillText(text, x, y);
},
draw(outline) {
if (outline === true) {
CTX.stroke();
}
else {
CTX.fill();
}
},
rect(x, y, w, h, outline) {
CTX.beginPath();
CTX.rect(x, y, w, h);
this.draw(outline);
},
circle(x, y, r, outline) {
CTX.beginPath();
CTX.arc(x, y, r, 0, 2 * Math.PI);
this.draw(outline);
}
};
const OBJ = {
ID: 0,
list: [],
classes: [],
add(cls) {
this.list.push([]);
this.classes.push(cls);
},
get(id) {
for (const o of this.list) {
for (const i of o) {
if (i) {
if (i.id === id) {
return i;
}
}
}
}
},
take(cls) {
return this.list[this.classes.indexOf(cls)];
},
push(cls, i) {
if (this.classes.includes(cls)) {
this.list[this.classes.indexOf(cls)].push(i);
i.start();
}
},
create(cls, x, y) {
const n = new cls(x, y);
this.list[this.classes.indexOf(cls)].push(n);
n.start();
return n;
},
update() {
for (const o of this.list) {
for (const i of o) {
if (i) {
if (i.active) {
i.earlyUpdate();
i.update();
}
}
}
}
},
render() {
for (const o of this.list) {
for (const i of o) {
if (i) {
if (i.visible) {
i.render();
}
}
}
}
},
destroy(id) {
for (const o of this.list) {
for (const i in o) {
if (o[i].id === id) {
delete o[i];
}
}
}
},
clear(cls) {
this.list[this.classes.indexOf(cls)] = [];
},
clearAll() {
for (const i in this.list) {
this.list[i] = [];
}
}
};
class BranthObject {
constructor(x, y) {
this.id = OBJ.ID++;
this.active = true;
this.visible = true;
this.x = x;
this.y = y;
}
start() {}
earlyUpdate() {}
update() {}
render() {}
renderUI() {}
}
class BranthParticle extends BranthObject {
constructor(x, y, spd, spdinc, size, sizeinc, d, dinc, r, rinc, a, c, life, grav) {
super(x, y);
this.spd = spd;
this.spdinc = spdinc;
this.size = size;
this.sizeinc = sizeinc;
this.d = d;
this.dinc = dinc;
this.r = r;
this.rinc = rinc;
this.a = a;
this.c = c;
this.life = life;
this.grav = grav;
this.g = grav;
}
update() {
this.a = Math.max(0, this.a - Time.deltaTime / this.life);
if (this.a <= 0) {
OBJ.destroy(this.id);
}
this.x += Math.lendirx(this.spd, this.d);
this.y += Math.lendiry(this.spd, this.d) + Math.lendiry(this.g, 90);
this.size = Math.max(this.size + this.sizeinc, 0);
this.spd += this.spdinc;
this.g += this.grav;
this.d += this.dinc;
this.r += this.rinc;
}
render() {
Draw.setAlpha(this.a);
Draw.setColor(this.c);
Draw.circle(this.x, this.y, this.size);
Draw.setAlpha(1);
}
}
const Emitter = {
depth: 0,
x: {
min: 0,
max: 100
},
y: {
min: 0,
max: 100
},
spd: {
min: 1,
max: 2
},
spdinc: {
min: 0,
max: 0
},
size: {
min: 2,
max: 8
},
sizeinc: {
min: 0,
max: 0
},
d: {
min: 0,
max: 360
},
dinc: {
min: 5,
max: 10
},
r: {
min: 0,
max: 360
},
rinc: {
min: 5,
max: 10
},
a: {
min: 1,
max: 1
},
c: C.fireBrick,
life: {
min: 3000,
max: 4000
},
grav: {
min: 0.01,
max: 0.01
},
setDepth(depth) {
this.depth = depth;
},
setArea(xmin, xmax, ymin, ymax) {
this.x.min = xmin;
this.x.max = xmax;
this.y.min = ymin;
this.y.max = ymax;
},
setSpeed(min, max) {
this.spd.min = min * SCALER.w * 0.5;
this.spd.max = max * SCALER.w * 0.5;
},
setSpeedInc(min, max) {
this.spdinc.min = min * SCALER.w * 0.5;
this.spdinc.max = max * SCALER.w * 0.5;
},
setSize(min, max) {
this.size.min = min * SCALER.w * 0.5;
this.size.max = max * SCALER.w * 0.5;
},
setSizeInc(min, max) {
this.sizeinc.min = min * SCALER.w * 0.5;
this.sizeinc.max = max * SCALER.w * 0.5;
},
setDirection(min, max) {
this.d.min = min;
this.d.max = max;
},
setDirectionInc(min, max) {
this.dinc.min = min;
this.dinc.max = max;
},
setRotation(min, max) {
this.r.min = min;
this.r.max = max;
},
setRotationInc(min, max) {
this.rinc.min = min;
this.rinc.max = max;
},
setAlpha(min, max) {
this.a.min = min;
this.a.max = max;
},
setColor(c) {
this.c = c;
},
setLife(min, max) {
this.life.min = min;
this.life.max = max;
},
setGravity(min, max) {
this.grav.min = min;
this.grav.max = max;
},
preset(s) {
switch (s) {
case 'bigstar':
this.setSpeed(4, 7);
this.setSpeedInc(-0.05, -0.05);
this.setSize(15, 22);
this.setSizeInc(-0.1, -0.1);
this.setDirection(180, 360);
this.setDirectionInc(0, 0);
this.setRotation(0, 0);
this.setRotationInc(0, 0);
this.setAlpha(0.2, 0.2);
this.setColor(C.fireBrick);
this.setLife(3000, 4000);
this.setGravity(0, 0);
break;
case 'sparkle':
this.setSpeed(2, 5);
this.setSpeedInc(-0.1, -0.1);
this.setSize(5, 10);
this.setSizeInc(-0.1, -0.1);
this.setDirection(0, 360);
this.setDirectionInc(0, 0);
this.setRotation(0, 0);
this.setRotationInc(0, 0);
this.setAlpha(1, 1);
this.setColor(C.fireBrick);
this.setLife(1000, 2000);
this.setGravity(0, 0);
break;
case 'puff':
this.setSize(3, 5);
this.setColor(C.indianRed);
break;
}
},
emit(n) {
for (let i = 0; i < n; i++) {
const n = new BranthParticle(
Math.range(this.x.min, this.x.max),
Math.range(this.y.min, this.y.max),
Math.range(this.spd.min, this.spd.max),
Math.range(this.spdinc.min, this.spdinc.max),
Math.range(this.size.min, this.size.max),
Math.range(this.sizeinc.min, this.sizeinc.max),
Math.range(this.d.min, this.d.max),
Math.range(this.dinc.min, this.dinc.max),
Math.range(this.r.min, this.r.max),
Math.range(this.rinc.min, this.rinc.max),
Math.range(this.a.min, this.a.max),
this.c,
Math.range(this.life.min, this.life.max),
Math.range(this.grav.min, this.grav.max)
);
n.depth = this.depth;
OBJ.push(BranthParticle, n);
}
}
};
const Room = {
get w() {
return CANVAS.width / CANVAS_SCALER;
},
get h() {
return CANVAS.height / CANVAS_SCALER;
},
get mid() {
return {
w: this.w * 0.5,
h: this.h * 0.5
};
}
};
const View = {
x: 0,
y: 0,
xto: 0,
yto: 0,
alarm: -1,
interval: 0,
magnitude: 0,
shake(mag, int) {
this.magnitude = mag;
this.interval = int;
this.alarm = this.interval;
},
update() {
if (this.alarm > 0) {
const mag = this.magnitude * this.alarm / this.interval;
this.xto = Math.range(mag * 0.5, mag * 0.6) * (Math.random() > 0.5? -1 : 1);
this.yto = Math.range(mag * 0.8, mag) * (Math.random() > 0.5? -1 : 1);
this.alarm -= Time.deltaTime;
if (this.alarm <= 0) {
this.xto = 0;
this.yto = 0;
}
}
const t = 0.2;
this.x += t * (this.xto - this.x);
this.y += t * (this.yto - this.y);
}
};
const UI = {
render() {
for (const o of OBJ.list) {
for (const i of o) {
if (i) {
if (i.visible) |
}
}
}
}
};
const RAF = window.requestAnimationFrame
|| window.msRequestAnimationFrame
|| window.mozRequestAnimationFrame
|| window.webkitRequestAnimationFrame
|| function(f) { return setTimeout(f, Time.fixedDeltaTime) }
const BRANTH = {
start() {
document.body.appendChild(CANVAS);
window.onkeyup = (e) => Input.eventkeyup(e);
window.onkeydown = (e) => {
const keyCodes = [32, 37, 38, 39, 40];
if (keyCodes.includes(e.keyCode)) {
e.preventDefault();
}
Input.eventkeydown(e);
}
window.onresize = () => BRANTH.resize();
BRANTH.resize();
BRANTH.update();
},
update(t) {
Time.update(t);
View.update();
OBJ.update();
CTX.clearRect(0, 0, Room.w, Room.h);
OBJ.render();
UI.render();
Input.reset();
RAF(BRANTH.update);
},
resize() {
CANVAS.width = CANVAS.getBoundingClientRect().width * CANVAS_SCALER;
CANVAS.height = CANVAS.getBoundingClientRect().height * CANVAS_SCALER;
CTX.resetTransform();
CTX.scale(CANVAS_SCALER, CANVAS_SCALER);
}
};
const Tile = {
w: 20,
get h() {
return this.w * 0.5;
},
get mid() {
return {
w: this.w * 0.5,
h: this.h * 0.5
};
}
};
const World = {
get x() {
return Room.mid.w;
},
get y() {
return Room.mid.h - this.mid.h;
},
get w() {
return Grid.c * Tile.w;
},
get h() {
return Grid.r * Tile.h;
},
get mid() {
return {
w: this.w * 0.5,
h: this.h * 0.5
};
}
};
class Point {
constructor(x, y) {
this.x = x || 0;
this.y = y || 0;
}
}
class Line {
constructor(p1, p2) {
this.p = [p1, p2];
}
intersect(line) {
const p1 = this.p[0], p2 = this.p[1], p3 = line.p[0], p4 = line.p[1];
const s1 = new Point(p2.x - p1.x, p2.y - p1.y);
const s2 = new Point(p4.x - p3.x, p4.y - p3.y);
const s = (-s1.y * (p1.x - p3.x) + s1.x * (p1.y - p3.y)) / (-s2.x * s1.y + s1.x * s2.y);
const t = (s2.x * (p1.y - p3.y) - s2.y * (p1.x - p3.x)) / (-s2.x * s1.y + s1.x * s2.y);
if (s >= 0 && s <= 1 && t >= 0 && t <= 1) {
return new Point(p1.x + (t * s1.x), p1.y + (t * s1.y));
}
return null;
}
}
const Grid = {
g: [],
c: 30,
r: 30,
get mid() {
return {
c: this.c * 0.5,
r: this.r * 0.5
};
},
get(c, r) {
const g = new Point(
c * Tile.mid.w - r * Tile.mid.w,
r * Tile.mid.h + c * Tile.mid.h
);
return new Point(View.x + World.x + g.x, View.y + World.y + g.y);
},
tilePath(x, y) {
if (x instanceof Point) {
y = x.y;
x = x.x;
}
CTX.beginPath();
CTX.moveTo(x, y - Tile.mid.h);
CTX.lineTo(x + Tile.mid.w, y);
CTX.lineTo(x, y + Tile.mid.h);
CTX.lineTo(x - Tile.mid.w, y);
CTX.closePath();
}
};
for (let c = 0; c < Grid.c; c++) {
Grid.g.push([]);
for (let r = 0; r < Grid.r; r++) {
Grid.g[c].push(0);
}
}
class BranthGrid extends BranthObject {
constructor(c, r) {
super(0, 0);
this.c = c;
this.r = r;
}
meet(c, r) {
if (r === undefined) {
r = c.r;
c = c.c;
}
return c === this.c && r === this.r;
}
earlyUpdate() {
const b = Grid.get(this.c, this.r);
this.x = b.x;
this.y = b.y;
}
}
class Food extends BranthGrid {
start() {
this.respawn();
}
move() {
this.c = Math.floor(Math.random() * Grid.c);
this.r = Math.floor(Math.random() * Grid.r);
}
respawn() {
const s = OBJ.take(Snake)[0];
if (s) {
let i = 50;
let isMeet = true;
while (isMeet) {
this.move();
isMeet = false;
for (let i = 0; i < s.tails.length; i++) {
const t = s.tails[i];
if (this.meet(t)) {
isMeet = true;
}
for (let i = 0; i < OBJ.take(Food).length; i++) {
const a = OBJ.take(Food)[i];
this.meet(a.c, a.r);
}
}
i--;
if (i < 0) {
break;
}
}
if (isMeet) {
this.c = -3;
this.r = -3;
this.visible = false;
}
}
else {
this.move();
}
}
}
class Snake extends BranthGrid {
start() {
this.dc = 0;
this.dr = 0;
this.idle = true;
this.tails = [{
c: this.c,
r: this.r
}];
this.tailCount = 3;
this.isPressed = false;
this.moveInterval = 100;
this.alarm = this.moveInterval;
}
update() {
const keyUp = Input.keyDown(KeyCode.Up);
const keyLeft = Input.keyDown(KeyCode.Left);
const keyDown = Input.keyDown(KeyCode.Down);
const keyRight = Input.keyDown(KeyCode.Right);
if (this.idle || !this.isPressed) {
if (keyUp && this.dr === 0) {
this.dc = 0;
this.dr = -1;
this.isPressed = true;
}
else if (keyLeft && this.dc === 0) {
this.dc = -1;
this.dr = 0;
this.isPressed = true;
}
else if (keyDown && this.dr === 0) {
this.dc = 0;
this.dr = 1;
this.isPressed = true;
}
else if (keyRight && this.dc === 0) {
this.dc = 1;
this.dr = 0;
this.isPressed = true;
}
}
if (this.alarm <= 0 && this.alarm !== -1) {
if (!this.idle) {
this.c += this.dc;
this.r += this.dr;
if (this.c < 0) this.c = Grid.c - 1;
if (this.r < 0) this.r = Grid.r - 1;
if (this.c > Grid.c - 1) this.c = 0;
if (this.r > Grid.r - 1) this.r = 0;
const b = Grid.get(this.c, this.r);
for (let i = 0; i < this.tails.length; i++) {
const t = this.tails[i];
if (this.meet(t)) {
this.dc = 0;
this.dr = 0;
this.tailCount = 3;
this.idle = true;
Emitter.setArea(b.x, b.x, b.y, b.y);
Emitter.preset('sparkle');
Emitter.setColor(C.mediumSeaGreen);
Emitter.emit(10);
Emitter.preset('puff');
Emitter.setColor(C.limeGreen);
Emitter.emit(10);
View.shake(20, 1000);
}
}
if (this.tails.length < this.tailCount + 5) {
this.tails.push({
c: this.c,
r: this.r
});
}
if (!(this.dc === 0 && this.dr === 0)) {
while (this.tails.length > this.tailCount) {
this.tails.shift();
}
}
for (let i = 0; i < OBJ.take(Food).length; i++) {
const a = OBJ.take(Food)[i];
if (a.meet(this.c, this.r)) {
this.tailCount++;
a.respawn();
Emitter.setArea(b.x, b.x, b.y, b.y);
Emitter.preset('sparkle');
Emitter.emit(10);
Emitter.preset('puff');
Emitter.emit(10);
View.shake(8, 300);
}
}
}
else {
if (this.isPressed) {
this.tails = [{
c: this.c,
r: this.r
}];
this.idle = false;
const b = Grid.get(this.c, this.r);
Emitter.setArea(b.x, b.x, b.y, b.y);
Emitter.preset('sparkle');
Emitter.setColor(C.mediumSeaGreen);
Emitter.emit(10);
Emitter.preset('puff');
Emitter.setColor(C.limeGreen);
Emitter.emit(10);
View.shake(8, 300);
}
}
this.isPressed = false;
this.alarm = this.moveInterval;
}
else {
this.alarm -= Time.deltaTime;
}
}
render() {
const tailsSorted = this.tails.slice();
for (let i = 0; i < OBJ.take(Food).length; i++) {
tailsSorted.push(OBJ.take(Food)[i]);
}
tailsSorted.sort((a, b) => a.r < b.r || (a.r === b.r && a.c < b.c)? -1 : 1);
for (let i = 0; i < tailsSorted.length; i++) {
const t = tailsSorted[i];
const b = Grid.get(t.c, t.r);
for (let j = 0; j < Tile.mid.h; j++) {
Grid.tilePath(b.x, b.y - j);
if (t instanceof Food) {
if (t.visible) {
Draw.setColor(j === Tile.mid.h - 1? C.indianRed : C.fireBrick);
Draw.draw();
}
}
else {
Draw.setColor(j === Tile.mid.h - 1? (this.meet(t)? 'springgreen' : C.limeGreen) : C.mediumSeaGreen);
Draw.draw();
}
}
}
}
}
class Manager extends BranthObject {
start() {
const n = new Snake(Grid.mid.c, Grid.mid.r);
OBJ.push(Snake, n);
for (let i = 0; i < 3; i++) {
OBJ.create(Food);
}
this.triggerTime = 0;
}
update() {
let keySpace = Input.keyDown(KeyCode.Space);
if (Input.keyUp(KeyCode.Space)) {
this.triggerTime = 0;
}
if (Input.keyHold(KeyCode.Space)) {
if (this.triggerTime > 600) {
keySpace = true;
}
else {
this.triggerTime += Time.deltaTime;
}
}
if (keySpace) {
if (OBJ.take(Food).length < Grid.mid.c * Grid.mid.r) {
const n = OBJ.create(Food);
const b = Grid.get(n.c, n.r);
Emitter.setArea(b.x, b.x, b.y, b.y);
Emitter.preset('sparkle');
Emitter.emit(10);
Emitter.preset('puff');
Emitter.emit(10);
}
View.shake(8, 300);
}
}
render() {
Draw.setColor(C.black);
for (let c = 0; c < Grid.c; c++) {
for (let r = 0; r < Grid.r; r++) {
const b = Grid.get(c, r);
Grid.tilePath(b);
Draw.draw(true);
}
}
}
}
OBJ.add(Manager);
OBJ.add(Food);
OBJ.add(Snake);
OBJ.add(BranthParticle);
BRANTH.start();
OBJ.create(Manager); | {
i.renderUI();
} | conditional_block |
app.js | Math.range = (min, max) => min + Math.random() * (max - min);
Math.degtorad = (d) => d * Math.PI / 180;
Math.lendirx = (l, d) => l * Math.cos(Math.degtorad(d));
Math.lendiry = (l, d) => l * Math.sin(Math.degtorad(d));
const CANVAS = document.createElement('canvas');
const CTX = CANVAS.getContext('2d');
const CANVAS_SCALER = 2;
const SCALER = {
get w() {
return CANVAS.width / 960;
},
get h() {
return CANVAS.height / 540;
}
};
CANVAS.style.backgroundImage = 'radial-gradient(darkorchid 33%, darkslateblue)';
const Time = {
time: 0,
lastTime: 0,
deltaTime: 0,
fixedDeltaTime: 1000 / 60,
update(t) {
this.lastTime = this.time || 0;
this.time = t || 0;
this.deltaTime = this.time - this.lastTime || this.fixedDeltaTime;
}
};
const KeyCode = {
Space: 32,
Left: 37,
Up: 38,
Right: 39,
Down: 40
};
class BranthKey {
constructor(keyCode) {
this.keyCode = keyCode;
this.hold = false;
this.pressed = false;
this.released = false;
}
up() {
this.hold = false;
this.released = true;
}
down() {
this.hold = true;
this.pressed = true;
}
reset() {
this.pressed = false;
this.released = false;
}
}
const Input = {
list: [[]],
reset() {
for (const i of this.list) {
for (const j of i) {
j.reset();
}
}
},
getKey(keyCode) {
for (const k of this.list[0]) {
if (k.keyCode === keyCode) {
return k;
}
}
},
keyUp(keyCode) {
return this.getKey(keyCode).released;
},
keyDown(keyCode) {
return this.getKey(keyCode).pressed;
},
keyHold(keyCode) {
return this.getKey(keyCode).hold;
},
eventkeyup(e) {
for (const k of this.list[0]) {
if (k.keyCode == e.which || k.keyCode == e.keyCode) {
k.up();
}
}
},
eventkeydown(e) {
for (const k of this.list[0]) {
if (k.keyCode == e.which || k.keyCode == e.keyCode) {
if (!k.hold) k.down();
}
}
}
};
for (const keyCode of Object.values(KeyCode)) {
Input.list[0].push(new BranthKey(keyCode));
}
const C = {
black: 'black',
darkGreen: 'darkgreen',
fireBrick: 'firebrick',
green: 'green',
indianRed: 'indianred',
limeGreen: 'limegreen',
mediumSeaGreen: 'mediumseagreen',
red: 'red',
white: 'white'
};
const Font = {
get s() {
return `${10 * SCALER.w}px`;
},
get m() {
return `${16 * SCALER.w}px`;
},
get l() {
return `${24 * SCALER.w}px`;
},
get xl() {
return `${36 * SCALER.w}px`;
},
get xxl() {
return `${48 * SCALER.w}px`;
},
get size() {
return +CTX.font.split(' ').filter(v => v.includes('px')).shift().replace('px', '');
},
get sb() {
return `bold ${this.s}`;
},
get mb() {
return `bold ${this.m}`;
},
get lb() {
return `bold ${this.l}`;
},
get xlb() {
return `bold ${this.xl}`;
},
get xxlb() {
return `bold ${this.xxl}`;
}
};
const Align = {
l: 'left',
r: 'right',
c: 'center',
t: 'top',
m: 'middle',
b: 'bottom'
};
const Draw = {
setFont(f) {
CTX.font = `${f} sans-serif`;
},
setAlpha(a) {
CTX.globalAlpha = a;
},
setColor(c) {
CTX.fillStyle = c;
CTX.strokeStyle = c;
},
setHAlign(a) {
CTX.textAlign = a;
},
setVAlign(a) {
CTX.textBaseline = a;
},
setHVAlign(h, v) {
this.setHAlign(h);
this.setVAlign(v);
},
text(x, y, text) {
CTX.fillText(text, x, y);
},
draw(outline) {
if (outline === true) {
CTX.stroke();
}
else {
CTX.fill();
}
},
rect(x, y, w, h, outline) {
CTX.beginPath();
CTX.rect(x, y, w, h);
this.draw(outline);
},
circle(x, y, r, outline) {
CTX.beginPath();
CTX.arc(x, y, r, 0, 2 * Math.PI);
this.draw(outline);
}
};
const OBJ = {
ID: 0,
list: [],
classes: [],
add(cls) {
this.list.push([]);
this.classes.push(cls);
},
get(id) {
for (const o of this.list) {
for (const i of o) {
if (i) {
if (i.id === id) {
return i;
}
}
}
}
},
take(cls) {
return this.list[this.classes.indexOf(cls)];
},
push(cls, i) {
if (this.classes.includes(cls)) {
this.list[this.classes.indexOf(cls)].push(i);
i.start();
}
},
create(cls, x, y) {
const n = new cls(x, y);
this.list[this.classes.indexOf(cls)].push(n);
n.start();
return n;
},
update() {
for (const o of this.list) {
for (const i of o) {
if (i) {
if (i.active) {
i.earlyUpdate();
i.update();
}
}
}
}
},
render() {
for (const o of this.list) {
for (const i of o) {
if (i) {
if (i.visible) {
i.render();
}
}
}
}
},
destroy(id) {
for (const o of this.list) {
for (const i in o) {
if (o[i].id === id) {
delete o[i];
}
}
}
},
clear(cls) {
this.list[this.classes.indexOf(cls)] = [];
},
clearAll() {
for (const i in this.list) {
this.list[i] = [];
}
}
};
class BranthObject {
constructor(x, y) {
this.id = OBJ.ID++;
this.active = true;
this.visible = true;
this.x = x;
this.y = y;
}
start() {}
earlyUpdate() {}
update() {}
render() {}
renderUI() {}
}
class BranthParticle extends BranthObject {
constructor(x, y, spd, spdinc, size, sizeinc, d, dinc, r, rinc, a, c, life, grav) {
super(x, y);
this.spd = spd;
this.spdinc = spdinc;
this.size = size;
this.sizeinc = sizeinc;
this.d = d;
this.dinc = dinc;
this.r = r;
this.rinc = rinc;
this.a = a;
this.c = c;
this.life = life;
this.grav = grav;
this.g = grav;
}
update() {
this.a = Math.max(0, this.a - Time.deltaTime / this.life);
if (this.a <= 0) {
OBJ.destroy(this.id);
}
this.x += Math.lendirx(this.spd, this.d);
this.y += Math.lendiry(this.spd, this.d) + Math.lendiry(this.g, 90);
this.size = Math.max(this.size + this.sizeinc, 0);
this.spd += this.spdinc;
this.g += this.grav;
this.d += this.dinc;
this.r += this.rinc;
}
render() {
Draw.setAlpha(this.a);
Draw.setColor(this.c);
Draw.circle(this.x, this.y, this.size);
Draw.setAlpha(1);
}
}
const Emitter = {
depth: 0,
x: {
min: 0,
max: 100
},
y: {
min: 0,
max: 100
},
spd: {
min: 1,
max: 2
},
spdinc: {
min: 0,
max: 0
},
size: {
min: 2,
max: 8
},
sizeinc: {
min: 0,
max: 0
},
d: {
min: 0,
max: 360
},
dinc: {
min: 5,
max: 10
},
r: {
min: 0,
max: 360
},
rinc: {
min: 5,
max: 10
},
a: {
min: 1,
max: 1
},
c: C.fireBrick,
life: {
min: 3000,
max: 4000
},
grav: {
min: 0.01,
max: 0.01
},
setDepth(depth) {
this.depth = depth;
},
setArea(xmin, xmax, ymin, ymax) {
this.x.min = xmin;
this.x.max = xmax;
this.y.min = ymin;
this.y.max = ymax;
},
setSpeed(min, max) {
this.spd.min = min * SCALER.w * 0.5;
this.spd.max = max * SCALER.w * 0.5;
},
setSpeedInc(min, max) {
this.spdinc.min = min * SCALER.w * 0.5;
this.spdinc.max = max * SCALER.w * 0.5;
},
setSize(min, max) {
this.size.min = min * SCALER.w * 0.5;
this.size.max = max * SCALER.w * 0.5;
},
setSizeInc(min, max) {
this.sizeinc.min = min * SCALER.w * 0.5;
this.sizeinc.max = max * SCALER.w * 0.5;
},
setDirection(min, max) {
this.d.min = min;
this.d.max = max;
},
setDirectionInc(min, max) {
this.dinc.min = min;
this.dinc.max = max;
},
setRotation(min, max) {
this.r.min = min;
this.r.max = max;
},
setRotationInc(min, max) {
this.rinc.min = min;
this.rinc.max = max;
},
setAlpha(min, max) {
this.a.min = min;
this.a.max = max;
},
setColor(c) {
this.c = c;
},
setLife(min, max) {
this.life.min = min;
this.life.max = max;
},
setGravity(min, max) {
this.grav.min = min;
this.grav.max = max;
},
preset(s) {
switch (s) {
case 'bigstar':
this.setSpeed(4, 7);
this.setSpeedInc(-0.05, -0.05);
this.setSize(15, 22);
this.setSizeInc(-0.1, -0.1);
this.setDirection(180, 360);
this.setDirectionInc(0, 0);
this.setRotation(0, 0);
this.setRotationInc(0, 0);
this.setAlpha(0.2, 0.2);
this.setColor(C.fireBrick);
this.setLife(3000, 4000);
this.setGravity(0, 0);
break;
case 'sparkle':
this.setSpeed(2, 5);
this.setSpeedInc(-0.1, -0.1);
this.setSize(5, 10);
this.setSizeInc(-0.1, -0.1);
this.setDirection(0, 360);
this.setDirectionInc(0, 0);
this.setRotation(0, 0);
this.setRotationInc(0, 0);
this.setAlpha(1, 1);
this.setColor(C.fireBrick);
this.setLife(1000, 2000);
this.setGravity(0, 0);
break;
case 'puff':
this.setSize(3, 5);
this.setColor(C.indianRed);
break;
}
},
emit(n) {
for (let i = 0; i < n; i++) {
const n = new BranthParticle(
Math.range(this.x.min, this.x.max),
Math.range(this.y.min, this.y.max),
Math.range(this.spd.min, this.spd.max),
Math.range(this.spdinc.min, this.spdinc.max),
Math.range(this.size.min, this.size.max),
Math.range(this.sizeinc.min, this.sizeinc.max),
Math.range(this.d.min, this.d.max),
Math.range(this.dinc.min, this.dinc.max),
Math.range(this.r.min, this.r.max),
Math.range(this.rinc.min, this.rinc.max),
Math.range(this.a.min, this.a.max),
this.c,
Math.range(this.life.min, this.life.max),
Math.range(this.grav.min, this.grav.max)
);
n.depth = this.depth;
OBJ.push(BranthParticle, n);
}
}
};
// Logical room size: the canvas backing store divided by the global
// scale factor, plus the centre point derived from it.
const Room = {
	get w() {
		return CANVAS.width / CANVAS_SCALER;
	},
	get h() {
		return CANVAS.height / CANVAS_SCALER;
	},
	// Centre of the room in logical coordinates.
	get mid() {
		const { w, h } = this;
		return { w: w * 0.5, h: h * 0.5 };
	}
};
const View = {
	// Camera offset (x, y), eased toward its target (xto, yto) each frame.
	x: 0,
	y: 0,
	xto: 0,
	yto: 0,
	// Screen-shake state: alarm counts down from interval (ms);
	// magnitude is the peak displacement at the start of the shake.
	alarm: -1,
	interval: 0,
	magnitude: 0,
	// Start a screen shake of strength `mag` lasting `int` milliseconds.
	shake(mag, int) {
		this.magnitude = mag;
		this.interval = int;
		this.alarm = this.interval;
	},
	update() {
		if (this.alarm > 0) {
			// Shake strength decays linearly as the alarm runs out.
			const mag = this.magnitude * this.alarm / this.interval;
			// Random target offset with an independent random sign per axis
			// (vertical shake is deliberately stronger than horizontal).
			this.xto = Math.range(mag * 0.5, mag * 0.6) * (Math.random() > 0.5? -1 : 1);
			this.yto = Math.range(mag * 0.8, mag) * (Math.random() > 0.5? -1 : 1);
			this.alarm -= Time.deltaTime;
			if (this.alarm <= 0) {
				// Shake finished: re-centre the camera target.
				this.xto = 0;
				this.yto = 0;
			}
		}
		// Ease the camera 20% of the way toward its target each update.
		const t = 0.2;
		this.x += t * (this.xto - this.x);
		this.y += t * (this.yto - this.y);
	}
};
// UI pass: after the world render, give every live, visible object a
// chance to draw overlay elements via its renderUI() hook.
const UI = {
	render() {
		for (const layer of OBJ.list) {
			for (const inst of layer) {
				// skip freed slots and hidden instances
				if (inst && inst.visible) {
					inst.renderUI();
				}
			}
		}
	}
};
// Cross-browser requestAnimationFrame with a setTimeout fallback.
// Fix: bind to window — calling an unbound reference to the native
// requestAnimationFrame (as BRANTH.update does via `RAF(...)`) throws
// "Illegal invocation" in browsers because `this` must be the window.
const RAF = (window.requestAnimationFrame
	|| window.msRequestAnimationFrame
	|| window.mozRequestAnimationFrame
	|| window.webkitRequestAnimationFrame
	|| function(f) { return setTimeout(f, Time.fixedDeltaTime); }).bind(window);
const BRANTH = {
	// Boot the engine: attach the canvas to the document, hook keyboard
	// and resize handlers, size the canvas, then enter the frame loop.
	start() {
		document.body.appendChild(CANVAS);
		window.onkeyup = (e) => Input.eventkeyup(e);
		window.onkeydown = (e) => {
			// Prevent page scrolling for space and the arrow keys.
			const keyCodes = [32, 37, 38, 39, 40];
			if (keyCodes.includes(e.keyCode)) {
				e.preventDefault();
			}
			Input.eventkeydown(e);
		}
		window.onresize = () => BRANTH.resize();
		BRANTH.resize();
		BRANTH.update();
	},
	// One frame: advance time, camera, and objects; clear and redraw;
	// flush per-frame input state; schedule the next frame.
	// `t` is the requestAnimationFrame timestamp forwarded to Time.update.
	update(t) {
		Time.update(t);
		View.update();
		OBJ.update();
		CTX.clearRect(0, 0, Room.w, Room.h);
		OBJ.render();
		UI.render();
		Input.reset();
		RAF(BRANTH.update);
	},
	// Match the backing store to the displayed size times CANVAS_SCALER,
	// then reapply the scale transform (resizing resets context state).
	resize() {
		CANVAS.width = CANVAS.getBoundingClientRect().width * CANVAS_SCALER;
		CANVAS.height = CANVAS.getBoundingClientRect().height * CANVAS_SCALER;
		CTX.resetTransform();
		CTX.scale(CANVAS_SCALER, CANVAS_SCALER);
	}
};
// Isometric tile footprint. The diamond is twice as wide as it is
// tall, so height and the half-extents are all derived from `w`.
const Tile = {
	w: 20,
	get h() {
		return this.w / 2;
	},
	// Half-extents of the diamond, used for hit-paths and projection.
	get mid() {
		return { w: this.w / 2, h: this.h / 2 };
	}
};
const World = {
	// Playfield placement and size in room coordinates, derived from
	// the grid dimensions and the tile footprint.
	get x() {
		// horizontally centred in the room
		return Room.mid.w;
	},
	get y() {
		// vertically centred: room middle shifted up by half the world height
		return Room.mid.h - this.mid.h;
	},
	get w() {
		return Grid.c * Tile.w;
	},
	get h() {
		return Grid.r * Tile.h;
	},
	get mid() {
		return {
			w: this.w * 0.5,
			h: this.h * 0.5
		};
	}
};
// Simple 2-D point. Falsy coordinates (undefined, null, 0, NaN)
// normalise to 0, preserving the original `x || 0` semantics.
class Point {
	constructor(x, y) {
		this.x = x || 0;
		this.y = y || 0;
	}
}
// A segment between two Points, stored as this.p = [start, end].
class Line {
	constructor(p1, p2) {
		this.p = [p1, p2];
	}
	// Segment/segment intersection test.
	// Returns the intersection Point, or null when the segments do not
	// cross. Parallel segments divide by zero, yielding NaN/Infinity
	// parameters that fail the 0..1 range check and also return null.
	intersect(line) {
		const [a0, a1] = this.p;
		const [b0, b1] = line.p;
		const u = new Point(a1.x - a0.x, a1.y - a0.y);
		const v = new Point(b1.x - b0.x, b1.y - b0.y);
		const denom = -v.x * u.y + u.x * v.y;
		const s = (-u.y * (a0.x - b0.x) + u.x * (a0.y - b0.y)) / denom;
		const t = (v.x * (a0.y - b0.y) - v.y * (a0.x - b0.x)) / denom;
		if (s >= 0 && s <= 1 && t >= 0 && t <= 1) {
			return new Point(a0.x + (t * u.x), a0.y + (t * u.y));
		}
		return null;
	}
}
const Grid = {
	// g: 2-D cell store (column-major: g[c][r]), filled by the loop below.
	g: [],
	c: 30,
	r: 30,
	get mid() {
		return {
			c: this.c * 0.5,
			r: this.r * 0.5
		};
	},
	// Project a (column, row) cell to screen coordinates: isometric
	// shear by the tile half-extents, then the world and camera offsets.
	get(c, r) {
		const g = new Point(
			c * Tile.mid.w - r * Tile.mid.w,
			r * Tile.mid.h + c * Tile.mid.h
		);
		return new Point(View.x + World.x + g.x, View.y + World.y + g.y);
	},
	// Trace the diamond outline of one tile centred at (x, y) onto CTX.
	// Accepts either two numbers or a single Point as the first argument.
	tilePath(x, y) {
		if (x instanceof Point) {
			y = x.y;
			x = x.x;
		}
		CTX.beginPath();
		CTX.moveTo(x, y - Tile.mid.h);
		CTX.lineTo(x + Tile.mid.w, y);
		CTX.lineTo(x, y + Tile.mid.h);
		CTX.lineTo(x - Tile.mid.w, y);
		CTX.closePath();
	}
};
// Pre-fill the grid with zero cells, column-major (Grid.g[c][r]).
for (let col = 0; col < Grid.c; col++) {
	const column = [];
	for (let row = 0; row < Grid.r; row++) {
		column.push(0);
	}
	Grid.g.push(column);
}
// A game object that lives on a grid cell (c, r) rather than at free
// x/y coordinates; its screen position is derived every frame.
class BranthGrid extends BranthObject {
	constructor(c, r) {
		super(0, 0);
		this.c = c;
		this.r = r;
	}
	// True when this object occupies cell (c, r). Also callable with a
	// single object exposing .c/.r (e.g. a tail segment or another
	// BranthGrid) as the only argument.
	meet(c, r) {
		if (r === undefined) {
			r = c.r;
			c = c.c;
		}
		return c === this.c && r === this.r;
	}
	// Project the grid cell to screen coordinates before updates run.
	earlyUpdate() {
		const b = Grid.get(this.c, this.r);
		this.x = b.x;
		this.y = b.y;
	}
}
class Food extends BranthGrid {
start() {
this.respawn();
}
move() {
this.c = Math.floor(Math.random() * Grid.c);
this.r = Math.floor(Math.random() * Grid.r);
}
respawn() {
const s = OBJ.take(Snake)[0];
if (s) {
let i = 50; | while (isMeet) {
this.move();
isMeet = false;
for (let i = 0; i < s.tails.length; i++) {
const t = s.tails[i];
if (this.meet(t)) {
isMeet = true;
}
for (let i = 0; i < OBJ.take(Food).length; i++) {
const a = OBJ.take(Food)[i];
this.meet(a.c, a.r);
}
}
i--;
if (i < 0) {
break;
}
}
if (isMeet) {
this.c = -3;
this.r = -3;
this.visible = false;
}
}
else {
this.move();
}
}
}
// Player-controlled snake on the isometric grid. Starts idle in place
// until a direction key is pressed, then advances one cell every
// moveInterval ms, wrapping at the grid edges.
class Snake extends BranthGrid {
	start() {
		// movement direction in grid cells per step
		this.dc = 0;
		this.dr = 0;
		this.idle = true;
		// tail segments as {c, r}, oldest first; head is the last entry
		this.tails = [{
			c: this.c,
			r: this.r
		}];
		this.tailCount = 3;
		// true once a turn has been chosen for the current step
		this.isPressed = false;
		this.moveInterval = 100;
		this.alarm = this.moveInterval;
	}
	update() {
		const keyUp = Input.keyDown(KeyCode.Up);
		const keyLeft = Input.keyDown(KeyCode.Left);
		const keyDown = Input.keyDown(KeyCode.Down);
		const keyRight = Input.keyDown(KeyCode.Right);
		// Accept at most one turn per movement step; requiring the
		// perpendicular axis to be idle blocks 180° reversals.
		if (this.idle || !this.isPressed) {
			if (keyUp && this.dr === 0) {
				this.dc = 0;
				this.dr = -1;
				this.isPressed = true;
			}
			else if (keyLeft && this.dc === 0) {
				this.dc = -1;
				this.dr = 0;
				this.isPressed = true;
			}
			else if (keyDown && this.dr === 0) {
				this.dc = 0;
				this.dr = 1;
				this.isPressed = true;
			}
			else if (keyRight && this.dc === 0) {
				this.dc = 1;
				this.dr = 0;
				this.isPressed = true;
			}
		}
		// A movement step fires when the countdown expires
		// (-1 is treated as a disabled alarm).
		if (this.alarm <= 0 && this.alarm !== -1) {
			if (!this.idle) {
				// Advance one cell, wrapping around the grid edges.
				this.c += this.dc;
				this.r += this.dr;
				if (this.c < 0) this.c = Grid.c - 1;
				if (this.r < 0) this.r = Grid.r - 1;
				if (this.c > Grid.c - 1) this.c = 0;
				if (this.r > Grid.r - 1) this.r = 0;
				const b = Grid.get(this.c, this.r);
				// Self-collision: reset to idle with a green burst + shake.
				for (let i = 0; i < this.tails.length; i++) {
					const t = this.tails[i];
					if (this.meet(t)) {
						this.dc = 0;
						this.dr = 0;
						this.tailCount = 3;
						this.idle = true;
						Emitter.setArea(b.x, b.x, b.y, b.y);
						Emitter.preset('sparkle');
						Emitter.setColor(C.mediumSeaGreen);
						Emitter.emit(10);
						Emitter.preset('puff');
						Emitter.setColor(C.limeGreen);
						Emitter.emit(10);
						View.shake(20, 1000);
					}
				}
				// Record the new head position (capped slightly above
				// tailCount so the trim below always has slack).
				if (this.tails.length < this.tailCount + 5) {
					this.tails.push({
						c: this.c,
						r: this.r
					});
				}
				// While actually moving, trim to the target length.
				if (!(this.dc === 0 && this.dr === 0)) {
					while (this.tails.length > this.tailCount) {
						this.tails.shift();
					}
				}
				// Eating food: grow by one and respawn that food.
				for (let i = 0; i < OBJ.take(Food).length; i++) {
					const a = OBJ.take(Food)[i];
					if (a.meet(this.c, this.r)) {
						this.tailCount++;
						a.respawn();
						Emitter.setArea(b.x, b.x, b.y, b.y);
						Emitter.preset('sparkle');
						Emitter.emit(10);
						Emitter.preset('puff');
						Emitter.emit(10);
						View.shake(8, 300);
					}
				}
			}
			else {
				// First key press after idling: restart the tail at the
				// current cell and play the launch effect.
				if (this.isPressed) {
					this.tails = [{
						c: this.c,
						r: this.r
					}];
					this.idle = false;
					const b = Grid.get(this.c, this.r);
					Emitter.setArea(b.x, b.x, b.y, b.y);
					Emitter.preset('sparkle');
					Emitter.setColor(C.mediumSeaGreen);
					Emitter.emit(10);
					Emitter.preset('puff');
					Emitter.setColor(C.limeGreen);
					Emitter.emit(10);
					View.shake(8, 300);
				}
			}
			this.isPressed = false;
			this.alarm = this.moveInterval;
		}
		else {
			this.alarm -= Time.deltaTime;
		}
	}
	render() {
		// Depth-sort tail segments and food back-to-front (by row,
		// then column) so nearer tiles are drawn over farther ones.
		const tailsSorted = this.tails.slice();
		for (let i = 0; i < OBJ.take(Food).length; i++) {
			tailsSorted.push(OBJ.take(Food)[i]);
		}
		tailsSorted.sort((a, b) => a.r < b.r || (a.r === b.r && a.c < b.c)? -1 : 1);
		for (let i = 0; i < tailsSorted.length; i++) {
			const t = tailsSorted[i];
			const b = Grid.get(t.c, t.r);
			// Stack tile diamonds upward to fake an extruded block;
			// only the top layer gets the brighter colour.
			for (let j = 0; j < Tile.mid.h; j++) {
				Grid.tilePath(b.x, b.y - j);
				if (t instanceof Food) {
					if (t.visible) {
						Draw.setColor(j === Tile.mid.h - 1? C.indianRed : C.fireBrick);
						Draw.draw();
					}
				}
				else {
					// the head (cell matching this.c/this.r) is highlighted
					Draw.setColor(j === Tile.mid.h - 1? (this.meet(t)? 'springgreen' : C.limeGreen) : C.mediumSeaGreen);
					Draw.draw();
				}
			}
		}
	}
}
// Bootstraps the game (one snake, three foods) and handles the space
// key: tap to spawn one food, hold (>600 ms) to spawn continuously.
class Manager extends BranthObject {
	start() {
		const n = new Snake(Grid.mid.c, Grid.mid.r);
		OBJ.push(Snake, n);
		for (let i = 0; i < 3; i++) {
			OBJ.create(Food);
		}
		// accumulated hold time for the space-key auto-repeat
		this.triggerTime = 0;
	}
	update() {
		let keySpace = Input.keyDown(KeyCode.Space);
		if (Input.keyUp(KeyCode.Space)) {
			this.triggerTime = 0;
		}
		// Holding space past 600 ms forces a spawn every frame.
		if (Input.keyHold(KeyCode.Space)) {
			if (this.triggerTime > 600) {
				keySpace = true;
			}
			else {
				this.triggerTime += Time.deltaTime;
			}
		}
		if (keySpace) {
			// Cap the food count at a quarter of the grid area
			// (mid.c * mid.r); the shake still plays when at the cap.
			if (OBJ.take(Food).length < Grid.mid.c * Grid.mid.r) {
				const n = OBJ.create(Food);
				const b = Grid.get(n.c, n.r);
				Emitter.setArea(b.x, b.x, b.y, b.y);
				Emitter.preset('sparkle');
				Emitter.emit(10);
				Emitter.preset('puff');
				Emitter.emit(10);
			}
			View.shake(8, 300);
		}
	}
	render() {
		// Draw the empty board as outlined tile diamonds.
		Draw.setColor(C.black);
		for (let c = 0; c < Grid.c; c++) {
			for (let r = 0; r < Grid.r; r++) {
				const b = Grid.get(c, r);
				Grid.tilePath(b);
				Draw.draw(true);
			}
		}
	}
}
OBJ.add(Manager);
OBJ.add(Food);
OBJ.add(Snake);
OBJ.add(BranthParticle);
BRANTH.start();
OBJ.create(Manager); | let isMeet = true; | random_line_split |
app.go | // Copyright ©2017 The go-hep Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fwk
import (
"context"
"fmt"
"io"
"math"
"reflect"
"runtime"
"sort"
"time"
"go-hep.org/x/hep/fwk/fsm"
)
type appmgr struct {
state fsm.State
name string
props map[string]map[string]interface{}
dflow *dflowsvc
store *datastore
msg msgstream
evtmax int64
nprocs int
comps map[string]Component
tsks []Task
svcs []Svc
istream Task
ctxs [2][]ctxType
}
// NewApp creates a (default) fwk application with (default and) sensible options.
func NewApp() App {
var err error
var app *appmgr
const appname = "app"
app = &appmgr{
state: fsm.Undefined,
name: appname,
props: make(map[string]map[string]interface{}),
dflow: nil,
store: nil,
msg: newMsgStream(
appname,
LvlInfo,
//LvlDebug,
//LvlError,
nil,
),
evtmax: -1,
nprocs: -1,
comps: make(map[string]Component),
tsks: make([]Task, 0),
svcs: make([]Svc, 0),
}
svc, err := app.New("go-hep.org/x/hep/fwk.datastore", "evtstore")
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create evtstore: %w\n", err)
return nil
}
app.store = svc.(*datastore)
err = app.AddSvc(app.store)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create evtstore: %w\n", err)
return nil
}
svc, err = app.New("go-hep.org/x/hep/fwk.dflowsvc", "dataflow")
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create dataflow svc: %w\n", err)
return nil
}
app.dflow = svc.(*dflowsvc)
err = app.AddSvc(app.dflow)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create dataflow svc: %w\n", err)
return nil
}
err = app.DeclProp(app, "EvtMax", &app.evtmax)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not declare property 'EvtMax': %w\n", err)
return nil
}
err = app.DeclProp(app, "NProcs", &app.nprocs)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not declare property 'NProcs': %w\n", err)
return nil
}
err = app.DeclProp(app, "MsgLevel", &app.msg.lvl)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not declare property 'MsgLevel': %w\n", err)
return nil
}
return app
}
// Type returns the fully qualified type of this application
func (app *appmgr) Type() string {
return "go-hep.org/x/hep/fwk.appmgr"
}
// Name returns the name of this application
func (app *appmgr) Name() string {
return app.name
}
// Component returns the component registered under name n, or nil when
// no such component exists.
func (app *appmgr) Component(n string) Component {
	if c, ok := app.comps[n]; ok {
		return c
	}
	return nil
}
func (app *appmgr) addComponent(c Component) error {
app.comps[c.Name()] = c
return nil
}
func (app *appmgr) HasComponent(n string) bool {
_, ok := app.comps[n]
return ok
}
func (app *appmgr) Components() []Component {
comps := make([]Component, 0, len(app.comps))
for _, c := range app.comps {
comps = append(comps, c)
}
return comps
}
// AddTask appends tsk to the application's task list and registers it
// in the component table under its name. It never fails; the error
// return is kept for interface compatibility.
func (app *appmgr) AddTask(tsk Task) error {
	app.tsks = append(app.tsks, tsk)
	app.comps[tsk.Name()] = tsk
	return nil
}
// DelTask removes every task whose name matches tsk's from the task
// list. It never fails; the error return is kept for interface
// compatibility.
// NOTE(review): the app.comps entry is left in place, mirroring the
// original behaviour — confirm whether it should be deleted as well.
func (app *appmgr) DelTask(tsk Task) error {
	tsks := make([]Task, 0, len(app.tsks))
	for _, t := range app.tsks {
		if t.Name() != tsk.Name() {
			tsks = append(tsks, t)
		}
	}
	app.tsks = tsks
	return nil
}
func (app *appmgr) HasTask(n string) bool {
for _, t := range app.tsks {
if t.Name() == n {
return true
}
}
return false
}
func (app *appmgr) GetTask(n string) Task {
for _, t := range app.tsks {
if t.Name() == n {
return t
}
}
return nil
}
func (app *appmgr) Tasks() []Task {
return app.tsks
}
// AddSvc appends svc to the application's service list and registers it
// in the component table under its name. It never fails; the error
// return is kept for interface compatibility.
func (app *appmgr) AddSvc(svc Svc) error {
	app.svcs = append(app.svcs, svc)
	app.comps[svc.Name()] = svc
	return nil
}
// DelSvc removes every service whose name matches svc's from the
// service list. It never fails; the error return is kept for interface
// compatibility.
// NOTE(review): the app.comps entry is left in place, mirroring the
// original behaviour — confirm whether it should be deleted as well.
func (app *appmgr) DelSvc(svc Svc) error {
	svcs := make([]Svc, 0, len(app.svcs))
	for _, s := range app.svcs {
		if s.Name() != svc.Name() {
			svcs = append(svcs, s)
		}
	}
	app.svcs = svcs
	return nil
}
func (app *appmgr) HasSvc(n string) bool {
for _, s := range app.svcs {
if s.Name() == n {
return true
}
}
return false
}
func (app *appmgr) GetSvc(n string) Svc {
for _, s := range app.svcs {
if s.Name() == n {
return s
}
}
return nil
}
func (app *appmgr) Svcs() []Svc {
return app.svcs
}
func (app *appmgr) DeclProp(c Component, name string, ptr interface{}) error {
cname := c.Name()
_, ok := app.props[cname]
if !ok {
app.props[cname] = make(map[string]interface{})
}
switch reflect.TypeOf(ptr).Kind() {
case reflect.Ptr:
// ok
default:
return fmt.Errorf(
"fwk.DeclProp: component [%s] didn't pass a pointer for the property [%s] (type=%T)",
c.Name(),
name,
ptr,
)
}
app.props[cname][name] = ptr
return nil
}
// SetProp assigns value to the property `name` previously declared by
// component c via DeclProp.
// It returns an error when c declared no properties, when `name` was
// never declared, or when value's type is not assignable to the
// declared property's type.
func (app *appmgr) SetProp(c Component, name string, value interface{}) error {
	cname := c.Name()
	m, ok := app.props[cname]
	if !ok {
		return fmt.Errorf(
			"fwk.SetProp: component [%s] didn't declare any property",
			c.Name(),
		)
	}
	// Fix: guard against undeclared names. The original passed the nil
	// interface from m[name] to reflect.ValueOf, and the subsequent
	// .Elem() call panicked instead of reporting a usable error
	// (GetProp already performs this same check).
	prop, ok := m[name]
	if !ok {
		return fmt.Errorf(
			"fwk.SetProp: component [%s] didn't declare any property with name [%s]",
			c.Name(),
			name,
		)
	}
	rv := reflect.ValueOf(value)
	rt := rv.Type()
	ptr := reflect.ValueOf(prop)
	dst := ptr.Elem().Type()
	if !rt.AssignableTo(dst) {
		return fmt.Errorf(
			"fwk.SetProp: component [%s] has property [%s] with type [%s]. got value=%v (type=%s)",
			c.Name(),
			name,
			dst.Name(),
			value,
			rt.Name(),
		)
	}
	ptr.Elem().Set(rv)
	return nil
}
func (app *appmgr) GetProp(c Component, name string) (interface{}, error) {
cname := c.Name()
m, ok := app.props[cname]
if !ok {
return nil, fmt.Errorf(
"fwk.GetProp: component [%s] didn't declare any property",
c.Name(),
)
}
ptr, ok := m[name]
if !ok {
return nil, fmt.Errorf(
"fwk.GetProp: component [%s] didn't declare any property with name [%s]",
c.Name(),
name,
)
}
v := reflect.Indirect(reflect.ValueOf(ptr)).Interface()
return v, nil
}
// HasProp reports whether component c has declared a property called
// name via DeclProp.
func (app *appmgr) HasProp(c Component, name string) bool {
	props, ok := app.props[c.Name()]
	if !ok {
		return false
	}
	_, ok = props[name]
	return ok
}
func (app *appmgr) DeclInPort(c Component, name string, t reflect.Type) error {
if app.state < fsm.Configuring {
return fmt.Errorf(
"fwk.DeclInPort: invalid App state (%s). put the DeclInPort in Configure() of %s:%s",
app.state,
c.Type(),
c.Name(),
)
}
return app.dflow.addInNode(c.Name(), name, t)
}
// DeclOutPort declares that component c produces an output named `name`
// of type t on the data-flow service. It must be called while the app
// is in (or past) the Configuring state.
func (app *appmgr) DeclOutPort(c Component, name string, t reflect.Type) error {
	if app.state < fsm.Configuring {
		// Fix: the message previously referred to DeclInPort,
		// copy-pasted from the sibling method.
		return fmt.Errorf(
			"fwk.DeclOutPort: invalid App state (%s). put the DeclOutPort in Configure() of %s:%s",
			app.state,
			c.Type(),
			c.Name(),
		)
	}
	return app.dflow.addOutNode(c.Name(), name, t)
}
func (app *appmgr) FSMState() fsm.State {
return app.state
}
func (app *appmgr) Run() error {
var err error
ctx := ctxType{
id: 0,
slot: 0,
store: nil,
msg: newMsgStream("<root>", app.msg.lvl, nil),
mgr: app,
}
start := time.Now()
var mstart runtime.MemStats
runtime.ReadMemStats(&mstart)
if app.state == fsm.Undefined {
err = app.configure(ctx)
if err != nil {
return err
}
}
if app.state == fsm.Configured {
err = app.start(ctx)
if err != nil {
return err
}
}
if app.state == fsm.Started {
err = app.run(ctx)
if err != nil && err != io.EOF {
return err
}
}
if app.state == fsm.Running {
err = app.stop(ctx)
if err != nil {
return err
}
}
if app.state == fsm.Stopped {
err = app.shutdown(ctx)
if err != nil {
return err
}
}
app.msg.Infof("cpu: %v\n", time.Since(start))
var mdone runtime.MemStats
runtime.ReadMemStats(&mdone)
diff := func(v1, v0 uint64) int64 {
if v0 > v1 {
return -int64(v0 - v1)
}
return int64(v1 - v0)
}
app.msg.Infof("mem: alloc: %10d kB\n", diff(mdone.Alloc, mstart.Alloc)/1024)
app.msg.Infof("mem: tot-alloc: %10d kB\n", diff(mdone.TotalAlloc, mstart.TotalAlloc)/1024)
app.msg.Infof("mem: n-mallocs: %10d\n", diff(mdone.Mallocs, mstart.Mallocs))
app.msg.Infof("mem: n-frees: %10d\n", diff(mdone.Frees, mstart.Frees))
app.msg.Infof("mem: gc-pauses: %10d ms\n", diff(mdone.PauseTotalNs, mstart.PauseTotalNs)/1000000)
return err
}
func (app *appmgr) Scripter() Scripter {
return &irunner{app}
}
func (app *appmgr) configure(ctx Context) error {
var err error
defer app.msg.flush()
app.msg.Debugf("configure...\n")
app.state = fsm.Configuring
if app.evtmax == -1 {
app.evtmax = math.MaxInt64
}
if app.nprocs < 0 {
app.nprocs = runtime.NumCPU()
}
tsks := make([]ctxType, len(app.tsks))
for j, tsk := range app.tsks {
tsks[j] = ctxType{
id: -1,
slot: 0,
store: app.store,
msg: newMsgStream(tsk.Name(), app.msg.lvl, nil),
mgr: app,
}
}
svcs := make([]ctxType, len(app.svcs))
for j, svc := range app.svcs {
svcs[j] = ctxType{
id: -1,
slot: 0,
store: app.store,
msg: newMsgStream(svc.Name(), app.msg.lvl, nil),
mgr: app,
}
}
for i, svc := range app.svcs {
app.msg.Debugf("configuring [%s]...\n", svc.Name())
cfg, ok := svc.(Configurer)
if !ok {
continue
}
err = cfg.Configure(svcs[i])
if err != nil {
return err
}
}
for i, tsk := range app.tsks {
app.msg.Debugf("configuring [%s]...\n", tsk.Name())
cfg, ok := tsk.(Configurer)
if !ok {
continue
}
err = cfg.Configure(tsks[i])
if err != nil {
return err
}
}
err = app.printDataFlow()
if err != nil {
return err
}
app.ctxs[0] = tsks
app.ctxs[1] = svcs
app.state = fsm.Configured
app.msg.Debugf("configure... [done]\n")
return err
}
func (app *appmgr) start(ctx Context) error {
var err error
defer app.msg.flush()
app.state = fsm.Starting
for i, svc := range app.svcs {
app.msg.Debugf("starting [%s]...\n", svc.Name())
err = svc.StartSvc(app.ctxs[1][i])
if err != nil {
return err
}
}
for i, tsk := range app.tsks {
app.msg.Debugf("starting [%s]...\n", tsk.Name())
err = tsk.StartTask(app.ctxs[0][i])
if err != nil {
return err
}
}
app.state = fsm.Started
return err
}
// run drives the event loop: sequential when NProcs == 0, concurrent
// otherwise. GOMAXPROCS is set to nprocs for the duration of the loop
// and restored to its previous value afterwards.
func (app *appmgr) run(ctx Context) error {
	var err error
	defer app.msg.flush()
	app.state = fsm.Running
	maxprocs := runtime.GOMAXPROCS(app.nprocs)
	switch app.nprocs {
	case 0:
		// single-goroutine-per-event processing
		err = app.runSequential(ctx)
	default:
		// nprocs < 0 was normalised to runtime.NumCPU() in configure
		err = app.runConcurrent(ctx)
	}
	runtime.GOMAXPROCS(maxprocs)
	return err
}
func (app *appmgr) runSequential(ctx Context) error {
var err error
runctx, runCancel := context.WithCancel(context.Background())
defer runCancel()
keys := app.dflow.keys()
ctxs := make([]ctxType, len(app.tsks))
store := *app.store
for j, tsk := range app.tsks {
ctxs[j] = ctxType{
id: -1,
slot: 0,
store: &store,
msg: newMsgStream(tsk.Name(), app.msg.lvl, nil),
mgr: app,
}
}
ictrl, err := app.startInputStream()
if err != nil {
return err
}
defer close(ictrl.Quit)
octrl, err := app.startOutputStreams()
if err != nil {
return err
}
defer close(octrl.Quit)
for ievt := int64(0); ievt < app.evtmax; ievt++ {
evtctx, evtCancel := context.WithCancel(runctx)
app.msg.Infof(">>> running evt=%d...\n", ievt)
err = store.reset(keys)
if err != nil {
evtCancel()
return err
}
err = app.istream.Process(ctxs[0])
if err != nil {
evtCancel()
store.close()
app.msg.flush()
return err
}
run := taskrunner{
ievt: ievt,
errc: make(chan error, len(app.tsks)),
evtctx: evtctx,
}
for i, tsk := range app.tsks {
go run.run(i, ctxs[i], tsk)
}
ndone := 0
errloop:
for err = range run.errc {
ndone++
if err != nil {
evtCancel()
store.close()
app.msg.flush()
return err
}
if ndone == len(app.tsks) {
break errloop
}
}
evtCancel()
store.close()
app.msg.flush()
}
return err
}
func (app *appmgr) runConcurrent(ctx Context) error {
var err error
runctx, runCancel := context.WithCancel(context.Background())
defer runCancel()
ctrl := workercontrol{
evts: make(chan ctxType, 2*app.nprocs),
done: make(chan struct{}),
errc: make(chan error),
runctx: runctx,
}
istream, err := app.startInputStream()
if err != nil {
return err
}
defer close(istream.Quit)
ostream, err := app.startOutputStreams()
if err != nil {
return err
}
defer close(ostream.Quit)
workers := make([]worker, app.nprocs)
for i := 0; i < app.nprocs; i++ {
workers[i] = *newWorker(i, app, &ctrl)
}
go func() {
keys := app.dflow.keys()
msg := newMsgStream(app.istream.Name(), app.msg.lvl, nil)
for ievt := int64(0); ievt < app.evtmax; ievt++ {
evtctx, evtCancel := context.WithCancel(runctx)
store := *app.store
store.store = make(map[string]achan, len(keys))
err := store.reset(keys)
if err != nil {
evtCancel()
close(ctrl.evts)
ctrl.errc <- err
return
}
ctx := ctxType{
id: ievt,
slot: 0,
store: &store,
msg: msg,
mgr: nil, // nobody's supposed to access mgr's state during event-loop
ctx: evtctx,
}
err = app.istream.Process(ctx)
if err != nil {
if err != io.EOF {
ctrl.errc <- err
}
close(ctrl.evts)
evtCancel()
return
}
ctrl.evts <- ctx
evtCancel()
}
close(ctrl.evts)
}()
ndone := 0
ctrl:
for {
select {
case eworker, ok := <-ctrl.errc:
if !ok {
continue
}
if eworker != nil && err == nil {
// only record first error.
// FIXME(sbinet) record all of them (errstack)
err = eworker
}
case <-runctx.Done():
return runctx.Err()
case <-ctrl.done:
ndone++
app.msg.Infof("workers done: %d/%d\n", ndone, app.nprocs)
if ndone == len(workers) {
break ctrl
}
}
}
return err
}
func (app *appmgr) startInputStream() (StreamControl, error) {
var err error
ctrl := StreamControl{
Ctx: make(chan Context),
Err: make(chan error), // FIXME: impl. back-pressure
Quit: make(chan struct{}),
}
idx := -1
inputs := make([]*InputStream, 0, len(app.tsks))
// collect input streams
for i, tsk := range app.tsks {
in, ok := tsk.(*InputStream)
if !ok {
continue
}
inputs = append(inputs, in)
idx = i
}
switch len(inputs) {
case 0:
// create an event "pumper"
tsk := &inputStream{NewTask("fwk.inputStream", "app-evtloop", app)}
app.istream = tsk
case 1:
app.istream = inputs[0]
app.tsks = append(app.tsks[:idx], app.tsks[idx+1:]...)
err := inputs[0].connect(ctrl)
if err != nil {
return ctrl, err
}
default:
return ctrl, fmt.Errorf("found more than one InputStream! (n=%d)", len(inputs))
}
return ctrl, err
}
// startOutputStreams wires every *OutputStream task to a fresh
// StreamControl and returns that control structure.
func (app *appmgr) startOutputStreams() (StreamControl, error) {
	ctrl := StreamControl{
		Ctx:  make(chan Context),
		Err:  make(chan error), // FIXME: impl. back-pressure
		Quit: make(chan struct{}),
	}
	for _, tsk := range app.tsks {
		// only OutputStream tasks participate; everything else is skipped
		ostream, ok := tsk.(*OutputStream)
		if !ok {
			continue
		}
		if err := ostream.connect(ctrl); err != nil {
			return ctrl, err
		}
	}
	return ctrl, nil
}
func (app *appmgr) stop(ctx Context) error {
var err error
defer app.msg.flush()
app.state = fsm.Stopping
if app.istream != nil {
err = app.istream.StopTask(ctx)
if err != nil {
return err
}
}
for i, tsk := range app.tsks {
err = tsk.StopTask(app.ctxs[0][i])
if err != nil {
return err
}
}
for i, svc := range app.svcs {
err = svc.StopSvc(app.ctxs[1][i])
if err != nil {
return err
}
}
app.state = fsm.Stopped
return err
}
func (app *appmgr) shutdown(ctx Context) error { |
func (app *appmgr) Msg() MsgStream {
return app.msg
}
// printDataFlow dumps the declared data-flow graph — each task's in/out
// ports plus the sorted list of edge names — at debug level.
// It never fails; the error return is kept for call-site compatibility.
func (app *appmgr) printDataFlow() error {
	app.msg.Debugf(">>> --- [data flow] --- nodes...\n")
	for tsk, node := range app.dflow.nodes {
		app.msg.Debugf(">>> ---[%s]---\n", tsk)
		app.msg.Debugf(" in: %v\n", node.in)
		app.msg.Debugf(" out: %v\n", node.out)
	}
	app.msg.Debugf(">>> --- [data flow] --- edges...\n")
	// collect and sort edge names so the debug dump is deterministic
	edges := make([]string, 0, len(app.dflow.edges))
	for n := range app.dflow.edges {
		edges = append(edges, n)
	}
	sort.Strings(edges)
	app.msg.Debugf(" edges: %v\n", edges)
	return nil
}
func init() {
Register(
reflect.TypeOf(appmgr{}),
func(t, name string, mgr App) (Component, error) {
app := NewApp().(*appmgr)
app.name = name
return app, nil
},
)
}
// EOF
|
var err error
defer app.msg.flush()
app.comps = nil
app.tsks = nil
app.svcs = nil
app.state = fsm.Offline
app.props = nil
app.dflow = nil
app.store = nil
return err
}
| identifier_body |
app.go | // Copyright ©2017 The go-hep Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fwk
import (
"context"
"fmt"
"io"
"math"
"reflect"
"runtime"
"sort"
"time"
|
type appmgr struct {
state fsm.State
name string
props map[string]map[string]interface{}
dflow *dflowsvc
store *datastore
msg msgstream
evtmax int64
nprocs int
comps map[string]Component
tsks []Task
svcs []Svc
istream Task
ctxs [2][]ctxType
}
// NewApp creates a (default) fwk application with (default and) sensible options.
func NewApp() App {
var err error
var app *appmgr
const appname = "app"
app = &appmgr{
state: fsm.Undefined,
name: appname,
props: make(map[string]map[string]interface{}),
dflow: nil,
store: nil,
msg: newMsgStream(
appname,
LvlInfo,
//LvlDebug,
//LvlError,
nil,
),
evtmax: -1,
nprocs: -1,
comps: make(map[string]Component),
tsks: make([]Task, 0),
svcs: make([]Svc, 0),
}
svc, err := app.New("go-hep.org/x/hep/fwk.datastore", "evtstore")
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create evtstore: %w\n", err)
return nil
}
app.store = svc.(*datastore)
err = app.AddSvc(app.store)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create evtstore: %w\n", err)
return nil
}
svc, err = app.New("go-hep.org/x/hep/fwk.dflowsvc", "dataflow")
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create dataflow svc: %w\n", err)
return nil
}
app.dflow = svc.(*dflowsvc)
err = app.AddSvc(app.dflow)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create dataflow svc: %w\n", err)
return nil
}
err = app.DeclProp(app, "EvtMax", &app.evtmax)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not declare property 'EvtMax': %w\n", err)
return nil
}
err = app.DeclProp(app, "NProcs", &app.nprocs)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not declare property 'NProcs': %w\n", err)
return nil
}
err = app.DeclProp(app, "MsgLevel", &app.msg.lvl)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not declare property 'MsgLevel': %w\n", err)
return nil
}
return app
}
// Type returns the fully qualified type of this application
func (app *appmgr) Type() string {
return "go-hep.org/x/hep/fwk.appmgr"
}
// Name returns the name of this application
func (app *appmgr) Name() string {
return app.name
}
func (app *appmgr) Component(n string) Component {
c, ok := app.comps[n]
if !ok {
return nil
}
return c
}
func (app *appmgr) addComponent(c Component) error {
app.comps[c.Name()] = c
return nil
}
func (app *appmgr) HasComponent(n string) bool {
_, ok := app.comps[n]
return ok
}
func (app *appmgr) Components() []Component {
comps := make([]Component, 0, len(app.comps))
for _, c := range app.comps {
comps = append(comps, c)
}
return comps
}
func (app *appmgr) AddTask(tsk Task) error {
var err error
app.tsks = append(app.tsks, tsk)
app.comps[tsk.Name()] = tsk
return err
}
func (app *appmgr) DelTask(tsk Task) error {
var err error
tsks := make([]Task, 0, len(app.tsks))
for _, t := range app.tsks {
if t.Name() != tsk.Name() {
tsks = append(tsks, t)
}
}
app.tsks = tsks
return err
}
func (app *appmgr) HasTask(n string) bool {
for _, t := range app.tsks {
if t.Name() == n {
return true
}
}
return false
}
func (app *appmgr) GetTask(n string) Task {
for _, t := range app.tsks {
if t.Name() == n {
return t
}
}
return nil
}
func (app *appmgr) Tasks() []Task {
return app.tsks
}
func (app *appmgr) AddSvc(svc Svc) error {
var err error
app.svcs = append(app.svcs, svc)
app.comps[svc.Name()] = svc
return err
}
func (app *appmgr) DelSvc(svc Svc) error {
var err error
svcs := make([]Svc, 0, len(app.svcs))
for _, s := range app.svcs {
if s.Name() != svc.Name() {
svcs = append(svcs, s)
}
}
app.svcs = svcs
return err
}
func (app *appmgr) HasSvc(n string) bool {
for _, s := range app.svcs {
if s.Name() == n {
return true
}
}
return false
}
func (app *appmgr) GetSvc(n string) Svc {
for _, s := range app.svcs {
if s.Name() == n {
return s
}
}
return nil
}
func (app *appmgr) Svcs() []Svc {
return app.svcs
}
func (app *appmgr) DeclProp(c Component, name string, ptr interface{}) error {
cname := c.Name()
_, ok := app.props[cname]
if !ok {
app.props[cname] = make(map[string]interface{})
}
switch reflect.TypeOf(ptr).Kind() {
case reflect.Ptr:
// ok
default:
return fmt.Errorf(
"fwk.DeclProp: component [%s] didn't pass a pointer for the property [%s] (type=%T)",
c.Name(),
name,
ptr,
)
}
app.props[cname][name] = ptr
return nil
}
func (app *appmgr) SetProp(c Component, name string, value interface{}) error {
cname := c.Name()
m, ok := app.props[cname]
if !ok {
return fmt.Errorf(
"fwk.SetProp: component [%s] didn't declare any property",
c.Name(),
)
}
rv := reflect.ValueOf(value)
rt := rv.Type()
ptr := reflect.ValueOf(m[name])
dst := ptr.Elem().Type()
if !rt.AssignableTo(dst) {
return fmt.Errorf(
"fwk.SetProp: component [%s] has property [%s] with type [%s]. got value=%v (type=%s)",
c.Name(),
name,
dst.Name(),
value,
rt.Name(),
)
}
ptr.Elem().Set(rv)
return nil
}
func (app *appmgr) GetProp(c Component, name string) (interface{}, error) {
cname := c.Name()
m, ok := app.props[cname]
if !ok {
return nil, fmt.Errorf(
"fwk.GetProp: component [%s] didn't declare any property",
c.Name(),
)
}
ptr, ok := m[name]
if !ok {
return nil, fmt.Errorf(
"fwk.GetProp: component [%s] didn't declare any property with name [%s]",
c.Name(),
name,
)
}
v := reflect.Indirect(reflect.ValueOf(ptr)).Interface()
return v, nil
}
func (app *appmgr) HasProp(c Component, name string) bool {
cname := c.Name()
_, ok := app.props[cname]
if !ok {
return ok
}
_, ok = app.props[cname][name]
return ok
}
func (app *appmgr) DeclInPort(c Component, name string, t reflect.Type) error {
if app.state < fsm.Configuring {
return fmt.Errorf(
"fwk.DeclInPort: invalid App state (%s). put the DeclInPort in Configure() of %s:%s",
app.state,
c.Type(),
c.Name(),
)
}
return app.dflow.addInNode(c.Name(), name, t)
}
func (app *appmgr) DeclOutPort(c Component, name string, t reflect.Type) error {
if app.state < fsm.Configuring {
return fmt.Errorf(
"fwk.DeclOutPort: invalid App state (%s). put the DeclInPort in Configure() of %s:%s",
app.state,
c.Type(),
c.Name(),
)
}
return app.dflow.addOutNode(c.Name(), name, t)
}
func (app *appmgr) FSMState() fsm.State {
return app.state
}
func (app *appmgr) Run() error {
var err error
ctx := ctxType{
id: 0,
slot: 0,
store: nil,
msg: newMsgStream("<root>", app.msg.lvl, nil),
mgr: app,
}
start := time.Now()
var mstart runtime.MemStats
runtime.ReadMemStats(&mstart)
if app.state == fsm.Undefined {
err = app.configure(ctx)
if err != nil {
return err
}
}
if app.state == fsm.Configured {
err = app.start(ctx)
if err != nil {
return err
}
}
if app.state == fsm.Started {
err = app.run(ctx)
if err != nil && err != io.EOF {
return err
}
}
if app.state == fsm.Running {
err = app.stop(ctx)
if err != nil {
return err
}
}
if app.state == fsm.Stopped {
err = app.shutdown(ctx)
if err != nil {
return err
}
}
app.msg.Infof("cpu: %v\n", time.Since(start))
var mdone runtime.MemStats
runtime.ReadMemStats(&mdone)
diff := func(v1, v0 uint64) int64 {
if v0 > v1 {
return -int64(v0 - v1)
}
return int64(v1 - v0)
}
app.msg.Infof("mem: alloc: %10d kB\n", diff(mdone.Alloc, mstart.Alloc)/1024)
app.msg.Infof("mem: tot-alloc: %10d kB\n", diff(mdone.TotalAlloc, mstart.TotalAlloc)/1024)
app.msg.Infof("mem: n-mallocs: %10d\n", diff(mdone.Mallocs, mstart.Mallocs))
app.msg.Infof("mem: n-frees: %10d\n", diff(mdone.Frees, mstart.Frees))
app.msg.Infof("mem: gc-pauses: %10d ms\n", diff(mdone.PauseTotalNs, mstart.PauseTotalNs)/1000000)
return err
}
func (app *appmgr) Scripter() Scripter {
return &irunner{app}
}
func (app *appmgr) configure(ctx Context) error {
var err error
defer app.msg.flush()
app.msg.Debugf("configure...\n")
app.state = fsm.Configuring
if app.evtmax == -1 {
app.evtmax = math.MaxInt64
}
if app.nprocs < 0 {
app.nprocs = runtime.NumCPU()
}
tsks := make([]ctxType, len(app.tsks))
for j, tsk := range app.tsks {
tsks[j] = ctxType{
id: -1,
slot: 0,
store: app.store,
msg: newMsgStream(tsk.Name(), app.msg.lvl, nil),
mgr: app,
}
}
svcs := make([]ctxType, len(app.svcs))
for j, svc := range app.svcs {
svcs[j] = ctxType{
id: -1,
slot: 0,
store: app.store,
msg: newMsgStream(svc.Name(), app.msg.lvl, nil),
mgr: app,
}
}
for i, svc := range app.svcs {
app.msg.Debugf("configuring [%s]...\n", svc.Name())
cfg, ok := svc.(Configurer)
if !ok {
continue
}
err = cfg.Configure(svcs[i])
if err != nil {
return err
}
}
for i, tsk := range app.tsks {
app.msg.Debugf("configuring [%s]...\n", tsk.Name())
cfg, ok := tsk.(Configurer)
if !ok {
continue
}
err = cfg.Configure(tsks[i])
if err != nil {
return err
}
}
err = app.printDataFlow()
if err != nil {
return err
}
app.ctxs[0] = tsks
app.ctxs[1] = svcs
app.state = fsm.Configured
app.msg.Debugf("configure... [done]\n")
return err
}
func (app *appmgr) start(ctx Context) error {
var err error
defer app.msg.flush()
app.state = fsm.Starting
for i, svc := range app.svcs {
app.msg.Debugf("starting [%s]...\n", svc.Name())
err = svc.StartSvc(app.ctxs[1][i])
if err != nil {
return err
}
}
for i, tsk := range app.tsks {
app.msg.Debugf("starting [%s]...\n", tsk.Name())
err = tsk.StartTask(app.ctxs[0][i])
if err != nil {
return err
}
}
app.state = fsm.Started
return err
}
func (app *appmgr) run(ctx Context) error {
var err error
defer app.msg.flush()
app.state = fsm.Running
maxprocs := runtime.GOMAXPROCS(app.nprocs)
switch app.nprocs {
case 0:
err = app.runSequential(ctx)
default:
err = app.runConcurrent(ctx)
}
runtime.GOMAXPROCS(maxprocs)
return err
}
func (app *appmgr) runSequential(ctx Context) error {
var err error
runctx, runCancel := context.WithCancel(context.Background())
defer runCancel()
keys := app.dflow.keys()
ctxs := make([]ctxType, len(app.tsks))
store := *app.store
for j, tsk := range app.tsks {
ctxs[j] = ctxType{
id: -1,
slot: 0,
store: &store,
msg: newMsgStream(tsk.Name(), app.msg.lvl, nil),
mgr: app,
}
}
ictrl, err := app.startInputStream()
if err != nil {
return err
}
defer close(ictrl.Quit)
octrl, err := app.startOutputStreams()
if err != nil {
return err
}
defer close(octrl.Quit)
for ievt := int64(0); ievt < app.evtmax; ievt++ {
evtctx, evtCancel := context.WithCancel(runctx)
app.msg.Infof(">>> running evt=%d...\n", ievt)
err = store.reset(keys)
if err != nil {
evtCancel()
return err
}
err = app.istream.Process(ctxs[0])
if err != nil {
evtCancel()
store.close()
app.msg.flush()
return err
}
run := taskrunner{
ievt: ievt,
errc: make(chan error, len(app.tsks)),
evtctx: evtctx,
}
for i, tsk := range app.tsks {
go run.run(i, ctxs[i], tsk)
}
ndone := 0
errloop:
for err = range run.errc {
ndone++
if err != nil {
evtCancel()
store.close()
app.msg.flush()
return err
}
if ndone == len(app.tsks) {
break errloop
}
}
evtCancel()
store.close()
app.msg.flush()
}
return err
}
func (app *appmgr) runConcurrent(ctx Context) error {
var err error
runctx, runCancel := context.WithCancel(context.Background())
defer runCancel()
ctrl := workercontrol{
evts: make(chan ctxType, 2*app.nprocs),
done: make(chan struct{}),
errc: make(chan error),
runctx: runctx,
}
istream, err := app.startInputStream()
if err != nil {
return err
}
defer close(istream.Quit)
ostream, err := app.startOutputStreams()
if err != nil {
return err
}
defer close(ostream.Quit)
workers := make([]worker, app.nprocs)
for i := 0; i < app.nprocs; i++ {
workers[i] = *newWorker(i, app, &ctrl)
}
go func() {
keys := app.dflow.keys()
msg := newMsgStream(app.istream.Name(), app.msg.lvl, nil)
for ievt := int64(0); ievt < app.evtmax; ievt++ {
evtctx, evtCancel := context.WithCancel(runctx)
store := *app.store
store.store = make(map[string]achan, len(keys))
err := store.reset(keys)
if err != nil {
evtCancel()
close(ctrl.evts)
ctrl.errc <- err
return
}
ctx := ctxType{
id: ievt,
slot: 0,
store: &store,
msg: msg,
mgr: nil, // nobody's supposed to access mgr's state during event-loop
ctx: evtctx,
}
err = app.istream.Process(ctx)
if err != nil {
if err != io.EOF {
ctrl.errc <- err
}
close(ctrl.evts)
evtCancel()
return
}
ctrl.evts <- ctx
evtCancel()
}
close(ctrl.evts)
}()
ndone := 0
ctrl:
for {
select {
case eworker, ok := <-ctrl.errc:
if !ok {
continue
}
if eworker != nil && err == nil {
// only record first error.
// FIXME(sbinet) record all of them (errstack)
err = eworker
}
case <-runctx.Done():
return runctx.Err()
case <-ctrl.done:
ndone++
app.msg.Infof("workers done: %d/%d\n", ndone, app.nprocs)
if ndone == len(workers) {
break ctrl
}
}
}
return err
}
func (app *appmgr) startInputStream() (StreamControl, error) {
var err error
ctrl := StreamControl{
Ctx: make(chan Context),
Err: make(chan error), // FIXME: impl. back-pressure
Quit: make(chan struct{}),
}
idx := -1
inputs := make([]*InputStream, 0, len(app.tsks))
// collect input streams
for i, tsk := range app.tsks {
in, ok := tsk.(*InputStream)
if !ok {
continue
}
inputs = append(inputs, in)
idx = i
}
switch len(inputs) {
case 0:
// create an event "pumper"
tsk := &inputStream{NewTask("fwk.inputStream", "app-evtloop", app)}
app.istream = tsk
case 1:
app.istream = inputs[0]
app.tsks = append(app.tsks[:idx], app.tsks[idx+1:]...)
err := inputs[0].connect(ctrl)
if err != nil {
return ctrl, err
}
default:
return ctrl, fmt.Errorf("found more than one InputStream! (n=%d)", len(inputs))
}
return ctrl, err
}
func (app *appmgr) startOutputStreams() (StreamControl, error) {
var err error
ctrl := StreamControl{
Ctx: make(chan Context),
Err: make(chan error), // FIXME: impl. back-pressure
Quit: make(chan struct{}),
}
// start output streams
for _, tsk := range app.tsks {
in, ok := tsk.(*OutputStream)
if !ok {
continue
}
err = in.connect(ctrl)
if err != nil {
return ctrl, err
}
}
return ctrl, err
}
func (app *appmgr) stop(ctx Context) error {
var err error
defer app.msg.flush()
app.state = fsm.Stopping
if app.istream != nil {
err = app.istream.StopTask(ctx)
if err != nil {
return err
}
}
for i, tsk := range app.tsks {
err = tsk.StopTask(app.ctxs[0][i])
if err != nil {
return err
}
}
for i, svc := range app.svcs {
err = svc.StopSvc(app.ctxs[1][i])
if err != nil {
return err
}
}
app.state = fsm.Stopped
return err
}
func (app *appmgr) shutdown(ctx Context) error {
var err error
defer app.msg.flush()
app.comps = nil
app.tsks = nil
app.svcs = nil
app.state = fsm.Offline
app.props = nil
app.dflow = nil
app.store = nil
return err
}
func (app *appmgr) Msg() MsgStream {
return app.msg
}
func (app *appmgr) printDataFlow() error {
var err error
app.msg.Debugf(">>> --- [data flow] --- nodes...\n")
for tsk, node := range app.dflow.nodes {
app.msg.Debugf(">>> ---[%s]---\n", tsk)
app.msg.Debugf(" in: %v\n", node.in)
app.msg.Debugf(" out: %v\n", node.out)
}
app.msg.Debugf(">>> --- [data flow] --- edges...\n")
edges := make([]string, 0, len(app.dflow.edges))
for n := range app.dflow.edges {
edges = append(edges, n)
}
sort.Strings(edges)
app.msg.Debugf(" edges: %v\n", edges)
return err
}
func init() {
Register(
reflect.TypeOf(appmgr{}),
func(t, name string, mgr App) (Component, error) {
app := NewApp().(*appmgr)
app.name = name
return app, nil
},
)
}
// EOF | "go-hep.org/x/hep/fwk/fsm"
) | random_line_split |
app.go | // Copyright ©2017 The go-hep Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fwk
import (
"context"
"fmt"
"io"
"math"
"reflect"
"runtime"
"sort"
"time"
"go-hep.org/x/hep/fwk/fsm"
)
type appmgr struct {
state fsm.State
name string
props map[string]map[string]interface{}
dflow *dflowsvc
store *datastore
msg msgstream
evtmax int64
nprocs int
comps map[string]Component
tsks []Task
svcs []Svc
istream Task
ctxs [2][]ctxType
}
// NewApp creates a (default) fwk application with (default and) sensible options.
func NewApp() App {
var err error
var app *appmgr
const appname = "app"
app = &appmgr{
state: fsm.Undefined,
name: appname,
props: make(map[string]map[string]interface{}),
dflow: nil,
store: nil,
msg: newMsgStream(
appname,
LvlInfo,
//LvlDebug,
//LvlError,
nil,
),
evtmax: -1,
nprocs: -1,
comps: make(map[string]Component),
tsks: make([]Task, 0),
svcs: make([]Svc, 0),
}
svc, err := app.New("go-hep.org/x/hep/fwk.datastore", "evtstore")
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create evtstore: %w\n", err)
return nil
}
app.store = svc.(*datastore)
err = app.AddSvc(app.store)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create evtstore: %w\n", err)
return nil
}
svc, err = app.New("go-hep.org/x/hep/fwk.dflowsvc", "dataflow")
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create dataflow svc: %w\n", err)
return nil
}
app.dflow = svc.(*dflowsvc)
err = app.AddSvc(app.dflow)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create dataflow svc: %w\n", err)
return nil
}
err = app.DeclProp(app, "EvtMax", &app.evtmax)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not declare property 'EvtMax': %w\n", err)
return nil
}
err = app.DeclProp(app, "NProcs", &app.nprocs)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not declare property 'NProcs': %w\n", err)
return nil
}
err = app.DeclProp(app, "MsgLevel", &app.msg.lvl)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not declare property 'MsgLevel': %w\n", err)
return nil
}
return app
}
// Type returns the fully qualified type of this application
func (app *appmgr) Type() string {
return "go-hep.org/x/hep/fwk.appmgr"
}
// Name returns the name of this application
func (app *appmgr) Name() string {
return app.name
}
func (app *appmgr) Component(n string) Component {
c, ok := app.comps[n]
if !ok {
return nil
}
return c
}
func (app *appmgr) addComponent(c Component) error {
app.comps[c.Name()] = c
return nil
}
func (app *appmgr) HasComponent(n string) bool {
_, ok := app.comps[n]
return ok
}
func (app *appmgr) C | ) []Component {
comps := make([]Component, 0, len(app.comps))
for _, c := range app.comps {
comps = append(comps, c)
}
return comps
}
func (app *appmgr) AddTask(tsk Task) error {
var err error
app.tsks = append(app.tsks, tsk)
app.comps[tsk.Name()] = tsk
return err
}
func (app *appmgr) DelTask(tsk Task) error {
var err error
tsks := make([]Task, 0, len(app.tsks))
for _, t := range app.tsks {
if t.Name() != tsk.Name() {
tsks = append(tsks, t)
}
}
app.tsks = tsks
return err
}
func (app *appmgr) HasTask(n string) bool {
for _, t := range app.tsks {
if t.Name() == n {
return true
}
}
return false
}
func (app *appmgr) GetTask(n string) Task {
for _, t := range app.tsks {
if t.Name() == n {
return t
}
}
return nil
}
func (app *appmgr) Tasks() []Task {
return app.tsks
}
func (app *appmgr) AddSvc(svc Svc) error {
var err error
app.svcs = append(app.svcs, svc)
app.comps[svc.Name()] = svc
return err
}
func (app *appmgr) DelSvc(svc Svc) error {
var err error
svcs := make([]Svc, 0, len(app.svcs))
for _, s := range app.svcs {
if s.Name() != svc.Name() {
svcs = append(svcs, s)
}
}
app.svcs = svcs
return err
}
func (app *appmgr) HasSvc(n string) bool {
for _, s := range app.svcs {
if s.Name() == n {
return true
}
}
return false
}
func (app *appmgr) GetSvc(n string) Svc {
for _, s := range app.svcs {
if s.Name() == n {
return s
}
}
return nil
}
func (app *appmgr) Svcs() []Svc {
return app.svcs
}
func (app *appmgr) DeclProp(c Component, name string, ptr interface{}) error {
cname := c.Name()
_, ok := app.props[cname]
if !ok {
app.props[cname] = make(map[string]interface{})
}
switch reflect.TypeOf(ptr).Kind() {
case reflect.Ptr:
// ok
default:
return fmt.Errorf(
"fwk.DeclProp: component [%s] didn't pass a pointer for the property [%s] (type=%T)",
c.Name(),
name,
ptr,
)
}
app.props[cname][name] = ptr
return nil
}
func (app *appmgr) SetProp(c Component, name string, value interface{}) error {
cname := c.Name()
m, ok := app.props[cname]
if !ok {
return fmt.Errorf(
"fwk.SetProp: component [%s] didn't declare any property",
c.Name(),
)
}
rv := reflect.ValueOf(value)
rt := rv.Type()
ptr := reflect.ValueOf(m[name])
dst := ptr.Elem().Type()
if !rt.AssignableTo(dst) {
return fmt.Errorf(
"fwk.SetProp: component [%s] has property [%s] with type [%s]. got value=%v (type=%s)",
c.Name(),
name,
dst.Name(),
value,
rt.Name(),
)
}
ptr.Elem().Set(rv)
return nil
}
func (app *appmgr) GetProp(c Component, name string) (interface{}, error) {
cname := c.Name()
m, ok := app.props[cname]
if !ok {
return nil, fmt.Errorf(
"fwk.GetProp: component [%s] didn't declare any property",
c.Name(),
)
}
ptr, ok := m[name]
if !ok {
return nil, fmt.Errorf(
"fwk.GetProp: component [%s] didn't declare any property with name [%s]",
c.Name(),
name,
)
}
v := reflect.Indirect(reflect.ValueOf(ptr)).Interface()
return v, nil
}
func (app *appmgr) HasProp(c Component, name string) bool {
cname := c.Name()
_, ok := app.props[cname]
if !ok {
return ok
}
_, ok = app.props[cname][name]
return ok
}
func (app *appmgr) DeclInPort(c Component, name string, t reflect.Type) error {
if app.state < fsm.Configuring {
return fmt.Errorf(
"fwk.DeclInPort: invalid App state (%s). put the DeclInPort in Configure() of %s:%s",
app.state,
c.Type(),
c.Name(),
)
}
return app.dflow.addInNode(c.Name(), name, t)
}
func (app *appmgr) DeclOutPort(c Component, name string, t reflect.Type) error {
if app.state < fsm.Configuring {
return fmt.Errorf(
"fwk.DeclOutPort: invalid App state (%s). put the DeclInPort in Configure() of %s:%s",
app.state,
c.Type(),
c.Name(),
)
}
return app.dflow.addOutNode(c.Name(), name, t)
}
func (app *appmgr) FSMState() fsm.State {
return app.state
}
func (app *appmgr) Run() error {
var err error
ctx := ctxType{
id: 0,
slot: 0,
store: nil,
msg: newMsgStream("<root>", app.msg.lvl, nil),
mgr: app,
}
start := time.Now()
var mstart runtime.MemStats
runtime.ReadMemStats(&mstart)
if app.state == fsm.Undefined {
err = app.configure(ctx)
if err != nil {
return err
}
}
if app.state == fsm.Configured {
err = app.start(ctx)
if err != nil {
return err
}
}
if app.state == fsm.Started {
err = app.run(ctx)
if err != nil && err != io.EOF {
return err
}
}
if app.state == fsm.Running {
err = app.stop(ctx)
if err != nil {
return err
}
}
if app.state == fsm.Stopped {
err = app.shutdown(ctx)
if err != nil {
return err
}
}
app.msg.Infof("cpu: %v\n", time.Since(start))
var mdone runtime.MemStats
runtime.ReadMemStats(&mdone)
diff := func(v1, v0 uint64) int64 {
if v0 > v1 {
return -int64(v0 - v1)
}
return int64(v1 - v0)
}
app.msg.Infof("mem: alloc: %10d kB\n", diff(mdone.Alloc, mstart.Alloc)/1024)
app.msg.Infof("mem: tot-alloc: %10d kB\n", diff(mdone.TotalAlloc, mstart.TotalAlloc)/1024)
app.msg.Infof("mem: n-mallocs: %10d\n", diff(mdone.Mallocs, mstart.Mallocs))
app.msg.Infof("mem: n-frees: %10d\n", diff(mdone.Frees, mstart.Frees))
app.msg.Infof("mem: gc-pauses: %10d ms\n", diff(mdone.PauseTotalNs, mstart.PauseTotalNs)/1000000)
return err
}
func (app *appmgr) Scripter() Scripter {
return &irunner{app}
}
func (app *appmgr) configure(ctx Context) error {
var err error
defer app.msg.flush()
app.msg.Debugf("configure...\n")
app.state = fsm.Configuring
if app.evtmax == -1 {
app.evtmax = math.MaxInt64
}
if app.nprocs < 0 {
app.nprocs = runtime.NumCPU()
}
tsks := make([]ctxType, len(app.tsks))
for j, tsk := range app.tsks {
tsks[j] = ctxType{
id: -1,
slot: 0,
store: app.store,
msg: newMsgStream(tsk.Name(), app.msg.lvl, nil),
mgr: app,
}
}
svcs := make([]ctxType, len(app.svcs))
for j, svc := range app.svcs {
svcs[j] = ctxType{
id: -1,
slot: 0,
store: app.store,
msg: newMsgStream(svc.Name(), app.msg.lvl, nil),
mgr: app,
}
}
for i, svc := range app.svcs {
app.msg.Debugf("configuring [%s]...\n", svc.Name())
cfg, ok := svc.(Configurer)
if !ok {
continue
}
err = cfg.Configure(svcs[i])
if err != nil {
return err
}
}
for i, tsk := range app.tsks {
app.msg.Debugf("configuring [%s]...\n", tsk.Name())
cfg, ok := tsk.(Configurer)
if !ok {
continue
}
err = cfg.Configure(tsks[i])
if err != nil {
return err
}
}
err = app.printDataFlow()
if err != nil {
return err
}
app.ctxs[0] = tsks
app.ctxs[1] = svcs
app.state = fsm.Configured
app.msg.Debugf("configure... [done]\n")
return err
}
func (app *appmgr) start(ctx Context) error {
var err error
defer app.msg.flush()
app.state = fsm.Starting
for i, svc := range app.svcs {
app.msg.Debugf("starting [%s]...\n", svc.Name())
err = svc.StartSvc(app.ctxs[1][i])
if err != nil {
return err
}
}
for i, tsk := range app.tsks {
app.msg.Debugf("starting [%s]...\n", tsk.Name())
err = tsk.StartTask(app.ctxs[0][i])
if err != nil {
return err
}
}
app.state = fsm.Started
return err
}
func (app *appmgr) run(ctx Context) error {
var err error
defer app.msg.flush()
app.state = fsm.Running
maxprocs := runtime.GOMAXPROCS(app.nprocs)
switch app.nprocs {
case 0:
err = app.runSequential(ctx)
default:
err = app.runConcurrent(ctx)
}
runtime.GOMAXPROCS(maxprocs)
return err
}
func (app *appmgr) runSequential(ctx Context) error {
var err error
runctx, runCancel := context.WithCancel(context.Background())
defer runCancel()
keys := app.dflow.keys()
ctxs := make([]ctxType, len(app.tsks))
store := *app.store
for j, tsk := range app.tsks {
ctxs[j] = ctxType{
id: -1,
slot: 0,
store: &store,
msg: newMsgStream(tsk.Name(), app.msg.lvl, nil),
mgr: app,
}
}
ictrl, err := app.startInputStream()
if err != nil {
return err
}
defer close(ictrl.Quit)
octrl, err := app.startOutputStreams()
if err != nil {
return err
}
defer close(octrl.Quit)
for ievt := int64(0); ievt < app.evtmax; ievt++ {
evtctx, evtCancel := context.WithCancel(runctx)
app.msg.Infof(">>> running evt=%d...\n", ievt)
err = store.reset(keys)
if err != nil {
evtCancel()
return err
}
err = app.istream.Process(ctxs[0])
if err != nil {
evtCancel()
store.close()
app.msg.flush()
return err
}
run := taskrunner{
ievt: ievt,
errc: make(chan error, len(app.tsks)),
evtctx: evtctx,
}
for i, tsk := range app.tsks {
go run.run(i, ctxs[i], tsk)
}
ndone := 0
errloop:
for err = range run.errc {
ndone++
if err != nil {
evtCancel()
store.close()
app.msg.flush()
return err
}
if ndone == len(app.tsks) {
break errloop
}
}
evtCancel()
store.close()
app.msg.flush()
}
return err
}
func (app *appmgr) runConcurrent(ctx Context) error {
var err error
runctx, runCancel := context.WithCancel(context.Background())
defer runCancel()
ctrl := workercontrol{
evts: make(chan ctxType, 2*app.nprocs),
done: make(chan struct{}),
errc: make(chan error),
runctx: runctx,
}
istream, err := app.startInputStream()
if err != nil {
return err
}
defer close(istream.Quit)
ostream, err := app.startOutputStreams()
if err != nil {
return err
}
defer close(ostream.Quit)
workers := make([]worker, app.nprocs)
for i := 0; i < app.nprocs; i++ {
workers[i] = *newWorker(i, app, &ctrl)
}
go func() {
keys := app.dflow.keys()
msg := newMsgStream(app.istream.Name(), app.msg.lvl, nil)
for ievt := int64(0); ievt < app.evtmax; ievt++ {
evtctx, evtCancel := context.WithCancel(runctx)
store := *app.store
store.store = make(map[string]achan, len(keys))
err := store.reset(keys)
if err != nil {
evtCancel()
close(ctrl.evts)
ctrl.errc <- err
return
}
ctx := ctxType{
id: ievt,
slot: 0,
store: &store,
msg: msg,
mgr: nil, // nobody's supposed to access mgr's state during event-loop
ctx: evtctx,
}
err = app.istream.Process(ctx)
if err != nil {
if err != io.EOF {
ctrl.errc <- err
}
close(ctrl.evts)
evtCancel()
return
}
ctrl.evts <- ctx
evtCancel()
}
close(ctrl.evts)
}()
ndone := 0
ctrl:
for {
select {
case eworker, ok := <-ctrl.errc:
if !ok {
continue
}
if eworker != nil && err == nil {
// only record first error.
// FIXME(sbinet) record all of them (errstack)
err = eworker
}
case <-runctx.Done():
return runctx.Err()
case <-ctrl.done:
ndone++
app.msg.Infof("workers done: %d/%d\n", ndone, app.nprocs)
if ndone == len(workers) {
break ctrl
}
}
}
return err
}
func (app *appmgr) startInputStream() (StreamControl, error) {
var err error
ctrl := StreamControl{
Ctx: make(chan Context),
Err: make(chan error), // FIXME: impl. back-pressure
Quit: make(chan struct{}),
}
idx := -1
inputs := make([]*InputStream, 0, len(app.tsks))
// collect input streams
for i, tsk := range app.tsks {
in, ok := tsk.(*InputStream)
if !ok {
continue
}
inputs = append(inputs, in)
idx = i
}
switch len(inputs) {
case 0:
// create an event "pumper"
tsk := &inputStream{NewTask("fwk.inputStream", "app-evtloop", app)}
app.istream = tsk
case 1:
app.istream = inputs[0]
app.tsks = append(app.tsks[:idx], app.tsks[idx+1:]...)
err := inputs[0].connect(ctrl)
if err != nil {
return ctrl, err
}
default:
return ctrl, fmt.Errorf("found more than one InputStream! (n=%d)", len(inputs))
}
return ctrl, err
}
func (app *appmgr) startOutputStreams() (StreamControl, error) {
var err error
ctrl := StreamControl{
Ctx: make(chan Context),
Err: make(chan error), // FIXME: impl. back-pressure
Quit: make(chan struct{}),
}
// start output streams
for _, tsk := range app.tsks {
in, ok := tsk.(*OutputStream)
if !ok {
continue
}
err = in.connect(ctrl)
if err != nil {
return ctrl, err
}
}
return ctrl, err
}
func (app *appmgr) stop(ctx Context) error {
var err error
defer app.msg.flush()
app.state = fsm.Stopping
if app.istream != nil {
err = app.istream.StopTask(ctx)
if err != nil {
return err
}
}
for i, tsk := range app.tsks {
err = tsk.StopTask(app.ctxs[0][i])
if err != nil {
return err
}
}
for i, svc := range app.svcs {
err = svc.StopSvc(app.ctxs[1][i])
if err != nil {
return err
}
}
app.state = fsm.Stopped
return err
}
func (app *appmgr) shutdown(ctx Context) error {
var err error
defer app.msg.flush()
app.comps = nil
app.tsks = nil
app.svcs = nil
app.state = fsm.Offline
app.props = nil
app.dflow = nil
app.store = nil
return err
}
func (app *appmgr) Msg() MsgStream {
return app.msg
}
func (app *appmgr) printDataFlow() error {
var err error
app.msg.Debugf(">>> --- [data flow] --- nodes...\n")
for tsk, node := range app.dflow.nodes {
app.msg.Debugf(">>> ---[%s]---\n", tsk)
app.msg.Debugf(" in: %v\n", node.in)
app.msg.Debugf(" out: %v\n", node.out)
}
app.msg.Debugf(">>> --- [data flow] --- edges...\n")
edges := make([]string, 0, len(app.dflow.edges))
for n := range app.dflow.edges {
edges = append(edges, n)
}
sort.Strings(edges)
app.msg.Debugf(" edges: %v\n", edges)
return err
}
func init() {
Register(
reflect.TypeOf(appmgr{}),
func(t, name string, mgr App) (Component, error) {
app := NewApp().(*appmgr)
app.name = name
return app, nil
},
)
}
// EOF
| omponents( | identifier_name |
app.go | // Copyright ©2017 The go-hep Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fwk
import (
"context"
"fmt"
"io"
"math"
"reflect"
"runtime"
"sort"
"time"
"go-hep.org/x/hep/fwk/fsm"
)
type appmgr struct {
state fsm.State
name string
props map[string]map[string]interface{}
dflow *dflowsvc
store *datastore
msg msgstream
evtmax int64
nprocs int
comps map[string]Component
tsks []Task
svcs []Svc
istream Task
ctxs [2][]ctxType
}
// NewApp creates a (default) fwk application with (default and) sensible options.
func NewApp() App {
var err error
var app *appmgr
const appname = "app"
app = &appmgr{
state: fsm.Undefined,
name: appname,
props: make(map[string]map[string]interface{}),
dflow: nil,
store: nil,
msg: newMsgStream(
appname,
LvlInfo,
//LvlDebug,
//LvlError,
nil,
),
evtmax: -1,
nprocs: -1,
comps: make(map[string]Component),
tsks: make([]Task, 0),
svcs: make([]Svc, 0),
}
svc, err := app.New("go-hep.org/x/hep/fwk.datastore", "evtstore")
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create evtstore: %w\n", err)
return nil
}
app.store = svc.(*datastore)
err = app.AddSvc(app.store)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create evtstore: %w\n", err)
return nil
}
svc, err = app.New("go-hep.org/x/hep/fwk.dflowsvc", "dataflow")
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create dataflow svc: %w\n", err)
return nil
}
app.dflow = svc.(*dflowsvc)
err = app.AddSvc(app.dflow)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not create dataflow svc: %w\n", err)
return nil
}
err = app.DeclProp(app, "EvtMax", &app.evtmax)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not declare property 'EvtMax': %w\n", err)
return nil
}
err = app.DeclProp(app, "NProcs", &app.nprocs)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not declare property 'NProcs': %w\n", err)
return nil
}
err = app.DeclProp(app, "MsgLevel", &app.msg.lvl)
if err != nil {
app.msg.Errorf("fwk.NewApp: could not declare property 'MsgLevel': %w\n", err)
return nil
}
return app
}
// Type returns the fully qualified type of this application
func (app *appmgr) Type() string {
return "go-hep.org/x/hep/fwk.appmgr"
}
// Name returns the name of this application
func (app *appmgr) Name() string {
return app.name
}
func (app *appmgr) Component(n string) Component {
c, ok := app.comps[n]
if !ok {
return nil
}
return c
}
func (app *appmgr) addComponent(c Component) error {
app.comps[c.Name()] = c
return nil
}
func (app *appmgr) HasComponent(n string) bool {
_, ok := app.comps[n]
return ok
}
func (app *appmgr) Components() []Component {
comps := make([]Component, 0, len(app.comps))
for _, c := range app.comps {
comps = append(comps, c)
}
return comps
}
func (app *appmgr) AddTask(tsk Task) error {
var err error
app.tsks = append(app.tsks, tsk)
app.comps[tsk.Name()] = tsk
return err
}
func (app *appmgr) DelTask(tsk Task) error {
var err error
tsks := make([]Task, 0, len(app.tsks))
for _, t := range app.tsks {
if t.Name() != tsk.Name() {
tsks = append(tsks, t)
}
}
app.tsks = tsks
return err
}
func (app *appmgr) HasTask(n string) bool {
for _, t := range app.tsks {
if t.Name() == n {
return true
}
}
return false
}
func (app *appmgr) GetTask(n string) Task {
for _, t := range app.tsks {
if t.Name() == n {
return t
}
}
return nil
}
func (app *appmgr) Tasks() []Task {
return app.tsks
}
func (app *appmgr) AddSvc(svc Svc) error {
var err error
app.svcs = append(app.svcs, svc)
app.comps[svc.Name()] = svc
return err
}
func (app *appmgr) DelSvc(svc Svc) error {
var err error
svcs := make([]Svc, 0, len(app.svcs))
for _, s := range app.svcs {
if s.Name() != svc.Name() {
svcs = append(svcs, s)
}
}
app.svcs = svcs
return err
}
func (app *appmgr) HasSvc(n string) bool {
for _, s := range app.svcs {
if s.Name() == n {
return true
}
}
return false
}
func (app *appmgr) GetSvc(n string) Svc {
for _, s := range app.svcs {
if s.Name() == n {
return s
}
}
return nil
}
func (app *appmgr) Svcs() []Svc {
return app.svcs
}
func (app *appmgr) DeclProp(c Component, name string, ptr interface{}) error {
cname := c.Name()
_, ok := app.props[cname]
if !ok {
app.props[cname] = make(map[string]interface{})
}
switch reflect.TypeOf(ptr).Kind() {
case reflect.Ptr:
// ok
default:
return fmt.Errorf(
"fwk.DeclProp: component [%s] didn't pass a pointer for the property [%s] (type=%T)",
c.Name(),
name,
ptr,
)
}
app.props[cname][name] = ptr
return nil
}
func (app *appmgr) SetProp(c Component, name string, value interface{}) error {
cname := c.Name()
m, ok := app.props[cname]
if !ok {
return fmt.Errorf(
"fwk.SetProp: component [%s] didn't declare any property",
c.Name(),
)
}
rv := reflect.ValueOf(value)
rt := rv.Type()
ptr := reflect.ValueOf(m[name])
dst := ptr.Elem().Type()
if !rt.AssignableTo(dst) {
return fmt.Errorf(
"fwk.SetProp: component [%s] has property [%s] with type [%s]. got value=%v (type=%s)",
c.Name(),
name,
dst.Name(),
value,
rt.Name(),
)
}
ptr.Elem().Set(rv)
return nil
}
func (app *appmgr) GetProp(c Component, name string) (interface{}, error) {
cname := c.Name()
m, ok := app.props[cname]
if !ok {
return nil, fmt.Errorf(
"fwk.GetProp: component [%s] didn't declare any property",
c.Name(),
)
}
ptr, ok := m[name]
if !ok {
return nil, fmt.Errorf(
"fwk.GetProp: component [%s] didn't declare any property with name [%s]",
c.Name(),
name,
)
}
v := reflect.Indirect(reflect.ValueOf(ptr)).Interface()
return v, nil
}
func (app *appmgr) HasProp(c Component, name string) bool {
cname := c.Name()
_, ok := app.props[cname]
if !ok {
return ok
}
_, ok = app.props[cname][name]
return ok
}
func (app *appmgr) DeclInPort(c Component, name string, t reflect.Type) error {
if app.state < fsm.Configuring {
return fmt.Errorf(
"fwk.DeclInPort: invalid App state (%s). put the DeclInPort in Configure() of %s:%s",
app.state,
c.Type(),
c.Name(),
)
}
return app.dflow.addInNode(c.Name(), name, t)
}
func (app *appmgr) DeclOutPort(c Component, name string, t reflect.Type) error {
if app.state < fsm.Configuring {
return fmt.Errorf(
"fwk.DeclOutPort: invalid App state (%s). put the DeclInPort in Configure() of %s:%s",
app.state,
c.Type(),
c.Name(),
)
}
return app.dflow.addOutNode(c.Name(), name, t)
}
func (app *appmgr) FSMState() fsm.State {
return app.state
}
func (app *appmgr) Run() error {
var err error
ctx := ctxType{
id: 0,
slot: 0,
store: nil,
msg: newMsgStream("<root>", app.msg.lvl, nil),
mgr: app,
}
start := time.Now()
var mstart runtime.MemStats
runtime.ReadMemStats(&mstart)
if app.state == fsm.Undefined {
err = app.configure(ctx)
if err != nil {
return err
}
}
if app.state == fsm.Configured { |
if app.state == fsm.Started {
err = app.run(ctx)
if err != nil && err != io.EOF {
return err
}
}
if app.state == fsm.Running {
err = app.stop(ctx)
if err != nil {
return err
}
}
if app.state == fsm.Stopped {
err = app.shutdown(ctx)
if err != nil {
return err
}
}
app.msg.Infof("cpu: %v\n", time.Since(start))
var mdone runtime.MemStats
runtime.ReadMemStats(&mdone)
diff := func(v1, v0 uint64) int64 {
if v0 > v1 {
return -int64(v0 - v1)
}
return int64(v1 - v0)
}
app.msg.Infof("mem: alloc: %10d kB\n", diff(mdone.Alloc, mstart.Alloc)/1024)
app.msg.Infof("mem: tot-alloc: %10d kB\n", diff(mdone.TotalAlloc, mstart.TotalAlloc)/1024)
app.msg.Infof("mem: n-mallocs: %10d\n", diff(mdone.Mallocs, mstart.Mallocs))
app.msg.Infof("mem: n-frees: %10d\n", diff(mdone.Frees, mstart.Frees))
app.msg.Infof("mem: gc-pauses: %10d ms\n", diff(mdone.PauseTotalNs, mstart.PauseTotalNs)/1000000)
return err
}
func (app *appmgr) Scripter() Scripter {
return &irunner{app}
}
func (app *appmgr) configure(ctx Context) error {
var err error
defer app.msg.flush()
app.msg.Debugf("configure...\n")
app.state = fsm.Configuring
if app.evtmax == -1 {
app.evtmax = math.MaxInt64
}
if app.nprocs < 0 {
app.nprocs = runtime.NumCPU()
}
tsks := make([]ctxType, len(app.tsks))
for j, tsk := range app.tsks {
tsks[j] = ctxType{
id: -1,
slot: 0,
store: app.store,
msg: newMsgStream(tsk.Name(), app.msg.lvl, nil),
mgr: app,
}
}
svcs := make([]ctxType, len(app.svcs))
for j, svc := range app.svcs {
svcs[j] = ctxType{
id: -1,
slot: 0,
store: app.store,
msg: newMsgStream(svc.Name(), app.msg.lvl, nil),
mgr: app,
}
}
for i, svc := range app.svcs {
app.msg.Debugf("configuring [%s]...\n", svc.Name())
cfg, ok := svc.(Configurer)
if !ok {
continue
}
err = cfg.Configure(svcs[i])
if err != nil {
return err
}
}
for i, tsk := range app.tsks {
app.msg.Debugf("configuring [%s]...\n", tsk.Name())
cfg, ok := tsk.(Configurer)
if !ok {
continue
}
err = cfg.Configure(tsks[i])
if err != nil {
return err
}
}
err = app.printDataFlow()
if err != nil {
return err
}
app.ctxs[0] = tsks
app.ctxs[1] = svcs
app.state = fsm.Configured
app.msg.Debugf("configure... [done]\n")
return err
}
func (app *appmgr) start(ctx Context) error {
var err error
defer app.msg.flush()
app.state = fsm.Starting
for i, svc := range app.svcs {
app.msg.Debugf("starting [%s]...\n", svc.Name())
err = svc.StartSvc(app.ctxs[1][i])
if err != nil {
return err
}
}
for i, tsk := range app.tsks {
app.msg.Debugf("starting [%s]...\n", tsk.Name())
err = tsk.StartTask(app.ctxs[0][i])
if err != nil {
return err
}
}
app.state = fsm.Started
return err
}
func (app *appmgr) run(ctx Context) error {
var err error
defer app.msg.flush()
app.state = fsm.Running
maxprocs := runtime.GOMAXPROCS(app.nprocs)
switch app.nprocs {
case 0:
err = app.runSequential(ctx)
default:
err = app.runConcurrent(ctx)
}
runtime.GOMAXPROCS(maxprocs)
return err
}
// runSequential processes events one at a time. For every event it resets
// the shared store, feeds one event from the input stream, then launches all
// tasks as goroutines and waits for each of them to report on the
// taskrunner's error channel before moving to the next event.
func (app *appmgr) runSequential(ctx Context) error {
	var err error
	runctx, runCancel := context.WithCancel(context.Background())
	defer runCancel()
	keys := app.dflow.keys()
	ctxs := make([]ctxType, len(app.tsks))
	// NOTE(review): shallow value-copy of the store; in this mode all task
	// contexts share this single copy — confirm the store's copy semantics.
	store := *app.store
	for j, tsk := range app.tsks {
		ctxs[j] = ctxType{
			id:    -1,
			slot:  0,
			store: &store,
			msg:   newMsgStream(tsk.Name(), app.msg.lvl, nil),
			mgr:   app,
		}
	}
	ictrl, err := app.startInputStream()
	if err != nil {
		return err
	}
	defer close(ictrl.Quit)
	octrl, err := app.startOutputStreams()
	if err != nil {
		return err
	}
	defer close(octrl.Quit)
	for ievt := int64(0); ievt < app.evtmax; ievt++ {
		evtctx, evtCancel := context.WithCancel(runctx)
		app.msg.Infof(">>> running evt=%d...\n", ievt)
		err = store.reset(keys)
		if err != nil {
			evtCancel()
			return err
		}
		// NOTE(review): uses ctxs[0] — assumes at least one task is
		// registered; an empty task list would panic here.
		err = app.istream.Process(ctxs[0])
		if err != nil {
			evtCancel()
			store.close()
			app.msg.flush()
			return err
		}
		run := taskrunner{
			ievt:   ievt,
			errc:   make(chan error, len(app.tsks)),
			evtctx: evtctx,
		}
		// launch every task for this event...
		for i, tsk := range app.tsks {
			go run.run(i, ctxs[i], tsk)
		}
		// ...then wait until all of them report, bailing out on the
		// first non-nil error.
		ndone := 0
	errloop:
		for err = range run.errc {
			ndone++
			if err != nil {
				evtCancel()
				store.close()
				app.msg.flush()
				return err
			}
			if ndone == len(app.tsks) {
				break errloop
			}
		}
		evtCancel()
		store.close()
		app.msg.flush()
	}
	return err
}
// runConcurrent processes events with a pool of nprocs workers. A producer
// goroutine pumps events from the input stream into ctrl.evts (each event
// with its own store copy); workers consume them and report errors and
// completion on the ctrl channels, drained by the select loop below.
func (app *appmgr) runConcurrent(ctx Context) error {
	var err error
	runctx, runCancel := context.WithCancel(context.Background())
	defer runCancel()
	ctrl := workercontrol{
		evts:   make(chan ctxType, 2*app.nprocs), // buffered to keep workers fed
		done:   make(chan struct{}),
		errc:   make(chan error),
		runctx: runctx,
	}
	istream, err := app.startInputStream()
	if err != nil {
		return err
	}
	defer close(istream.Quit)
	ostream, err := app.startOutputStreams()
	if err != nil {
		return err
	}
	defer close(ostream.Quit)
	workers := make([]worker, app.nprocs)
	for i := 0; i < app.nprocs; i++ {
		workers[i] = *newWorker(i, app, &ctrl)
	}
	// event producer: one store copy per event; stops at evtmax or when the
	// input stream is exhausted.
	go func() {
		keys := app.dflow.keys()
		msg := newMsgStream(app.istream.Name(), app.msg.lvl, nil)
		for ievt := int64(0); ievt < app.evtmax; ievt++ {
			evtctx, evtCancel := context.WithCancel(runctx)
			store := *app.store
			store.store = make(map[string]achan, len(keys))
			err := store.reset(keys)
			if err != nil {
				evtCancel()
				close(ctrl.evts)
				ctrl.errc <- err
				return
			}
			ctx := ctxType{
				id:    ievt,
				slot:  0,
				store: &store,
				msg:   msg,
				mgr:   nil, // nobody's supposed to access mgr's state during event-loop
				ctx:   evtctx,
			}
			err = app.istream.Process(ctx)
			if err != nil {
				// io.EOF just means the input stream dried up: not an error.
				if err != io.EOF {
					ctrl.errc <- err
				}
				close(ctrl.evts)
				evtCancel()
				return
			}
			ctrl.evts <- ctx
			evtCancel()
		}
		close(ctrl.evts)
	}()
	// wait for every worker to signal completion on ctrl.done.
	ndone := 0
ctrl:
	for {
		select {
		case eworker, ok := <-ctrl.errc:
			if !ok {
				continue
			}
			if eworker != nil && err == nil {
				// only record first error.
				// FIXME(sbinet) record all of them (errstack)
				err = eworker
			}
		case <-runctx.Done():
			return runctx.Err()
		case <-ctrl.done:
			ndone++
			app.msg.Infof("workers done: %d/%d\n", ndone, app.nprocs)
			if ndone == len(workers) {
				break ctrl
			}
		}
	}
	return err
}
// startInputStream locates the (at most one) *InputStream among the tasks,
// removes it from the regular task list and connects it to a fresh
// StreamControl. With no InputStream registered, a synthetic event "pumper"
// is installed instead. More than one InputStream is an error.
func (app *appmgr) startInputStream() (StreamControl, error) {
	var err error
	ctrl := StreamControl{
		Ctx:  make(chan Context),
		Err:  make(chan error), // FIXME: impl. back-pressure
		Quit: make(chan struct{}),
	}
	idx := -1 // index in app.tsks of the last *InputStream found
	inputs := make([]*InputStream, 0, len(app.tsks))
	// collect input streams
	for i, tsk := range app.tsks {
		in, ok := tsk.(*InputStream)
		if !ok {
			continue
		}
		inputs = append(inputs, in)
		idx = i
	}
	switch len(inputs) {
	case 0:
		// create an event "pumper"
		tsk := &inputStream{NewTask("fwk.inputStream", "app-evtloop", app)}
		app.istream = tsk
	case 1:
		app.istream = inputs[0]
		// detach the input stream from the regular task list.
		app.tsks = append(app.tsks[:idx], app.tsks[idx+1:]...)
		err := inputs[0].connect(ctrl)
		if err != nil {
			return ctrl, err
		}
	default:
		return ctrl, fmt.Errorf("found more than one InputStream! (n=%d)", len(inputs))
	}
	return ctrl, err
}
// startOutputStreams connects every *OutputStream task to a freshly built
// stream-control channel set and hands that StreamControl back to the caller.
func (app *appmgr) startOutputStreams() (StreamControl, error) {
	ctrl := StreamControl{
		Ctx:  make(chan Context),
		Err:  make(chan error), // FIXME: impl. back-pressure
		Quit: make(chan struct{}),
	}
	// start output streams
	for _, tsk := range app.tsks {
		out, ok := tsk.(*OutputStream)
		if !ok {
			continue
		}
		if err := out.connect(ctrl); err != nil {
			return ctrl, err
		}
	}
	return ctrl, nil
}
// stop moves the FSM to Stopping and stops, in order: the input stream (if
// any), every task (StopTask) and every service (StopSvc), using the
// contexts cached by configure. The first error aborts the sequence.
func (app *appmgr) stop(ctx Context) error {
	var err error
	defer app.msg.flush()
	app.state = fsm.Stopping
	if app.istream != nil {
		err = app.istream.StopTask(ctx)
		if err != nil {
			return err
		}
	}
	for i, tsk := range app.tsks {
		err = tsk.StopTask(app.ctxs[0][i])
		if err != nil {
			return err
		}
	}
	for i, svc := range app.svcs {
		err = svc.StopSvc(app.ctxs[1][i])
		if err != nil {
			return err
		}
	}
	app.state = fsm.Stopped
	return err
}
// shutdown drops every component reference held by the application manager
// and puts the finite-state machine back into the Offline state.
func (app *appmgr) shutdown(ctx Context) error {
	defer app.msg.flush()
	app.state = fsm.Offline
	// release everything so the garbage collector can reclaim it.
	app.comps = nil
	app.tsks = nil
	app.svcs = nil
	app.props = nil
	app.dflow = nil
	app.store = nil
	return nil
}
// Msg returns the message stream of the application manager.
func (app *appmgr) Msg() MsgStream {
	return app.msg
}
// printDataFlow dumps the data-flow graph to the debug stream: each node
// with its inputs and outputs, followed by the sorted list of edge names.
func (app *appmgr) printDataFlow() error {
	app.msg.Debugf(">>> --- [data flow] --- nodes...\n")
	for tsk, node := range app.dflow.nodes {
		app.msg.Debugf(">>> ---[%s]---\n", tsk)
		app.msg.Debugf(" in: %v\n", node.in)
		app.msg.Debugf(" out: %v\n", node.out)
	}
	app.msg.Debugf(">>> --- [data flow] --- edges...\n")
	// collect and sort edge names for a deterministic printout.
	names := make([]string, 0, len(app.dflow.edges))
	for name := range app.dflow.edges {
		names = append(names, name)
	}
	sort.Strings(names)
	app.msg.Debugf(" edges: %v\n", names)
	return nil
}
// init registers the appmgr type with the framework's component factory so
// an appmgr can be created by name through the Register/New machinery.
func init() {
	Register(
		reflect.TypeOf(appmgr{}),
		func(t, name string, mgr App) (Component, error) {
			// t and mgr belong to the factory signature but are unused here.
			app := NewApp().(*appmgr)
			app.name = name
			return app, nil
		},
	)
}
// EOF
|
err = app.start(ctx)
if err != nil {
return err
}
}
| conditional_block |
Step.py | from pySDC import Level as levclass
from pySDC import Stats as statclass
from pySDC import Hooks as hookclass
import copy as cp
import sys
class step():
"""
Step class, referencing most of the structure needed for the time-stepping
This class bundles multiple levels and the corresponding transfer operators and is used by the methods
(e.g. SDC and MLSDC). Status variables like the current time are hidden via properties and setters methods.
Attributes:
__t: current time (property time)
__dt: current step size (property dt)
__k: current iteration (property iter)
__transfer_dict: data structure to couple levels and transfer operators
levels: list of levels
params: parameters given by the user
__slots__: list of attributes to avoid accidential creation of new class attributes
"""
__slots__ = ('params','levels','__transfer_dict','status','__prev')
def __init__(self, params):
"""
Initialization routine
Args:
params: parameters given by the user, will be added as attributes
"""
# short helper class to add params as attributes
class pars():
def __init__(self,params):
defaults = dict()
defaults['maxiter'] = 20
defaults['fine_comm'] = True
defaults['predict'] = True
for k,v in defaults.items():
setattr(self,k,v)
for k,v in params.items():
setattr(self,k,v)
pass
# short helper class to bundle all status variables
class status():
__slots__ = ('iter','stage','slot','first','last','pred_cnt','done','time','dt','step')
def __init__(self):
self.iter = None
self.stage = None
self.slot = None
self.first = None
self.last = None
self.pred_cnt = None
self.done = None
self.time = None
self.dt = None
self.step = None
# set params and status
self.params = pars(params)
self.status = status()
# empty attributes
self.__transfer_dict = {}
self.levels = []
self.__prev = None
def generate_hierarchy(self,descr):
"""
Routine to generate the level hierarchy for a single step
This makes the explicit generation of levels in the frontend obsolete and hides a few dirty hacks here and
there.
Args:
descr: dictionary containing the description of the levels as list per key
"""
# assert the existence of all the keys we need to set up at least on level
assert 'problem_class' in descr
assert 'problem_params' in descr
assert 'dtype_u' in descr
assert 'dtype_f' in descr
assert 'sweeper_class' in descr
assert 'level_params' in descr
# convert problem-dependent parameters consisting of dictionary of lists to a list of dictionaries with only a
# single entry per key, one dict per level
pparams_list = self.__dict_to_list(descr['problem_params'])
# put this newly generated list into the description dictionary (copy to avoid changing the original one)
descr_new = cp.deepcopy(descr)
descr_new['problem_params'] = pparams_list | # generate list of dictionaries out of the description
descr_list = self.__dict_to_list(descr_new)
# sanity check: is there a transfer class? is there one even if only a single level is specified?
if len(descr_list) > 1:
assert 'transfer_class' in descr_new
assert 'transfer_params' in descr_new
elif 'transfer_class' in descr_new:
print('WARNING: you have specified transfer classes, but only a single level...')
# generate levels, register and connect if needed
for l in range(len(descr_list)):
# check if we have a hook on this list. if not, use default class.
if 'hook_class' in descr_list[l]:
hook = descr_list[l]['hook_class']
else:
hook = hookclass.hooks
if 'sweeper_params' in descr_list[l]:
swparams = descr_list[l]['sweeper_params']
else:
swparams = {}
if not 'collocation_class' in swparams:
assert 'collocation_class' in descr_list[l]
swparams['collocation_class'] = descr_list[l]['collocation_class']
if not 'num_nodes' in swparams:
assert 'num_nodes' in descr_list[l]
swparams['num_nodes'] = descr_list[l]['num_nodes']
L = levclass.level(problem_class = descr_list[l]['problem_class'],
problem_params = descr_list[l]['problem_params'],
dtype_u = descr_list[l]['dtype_u'],
dtype_f = descr_list[l]['dtype_f'],
sweeper_class = descr_list[l]['sweeper_class'],
sweeper_params = swparams,
level_params = descr_list[l]['level_params'],
hook_class = hook,
id = 'L'+str(l))
self.register_level(L)
if l > 0:
self.connect_levels(transfer_class = descr_list[l]['transfer_class'],
transfer_params = descr_list[l]['transfer_params'],
fine_level = self.levels[l-1],
coarse_level = self.levels[l])
@staticmethod
def __dict_to_list(dict):
"""
Straightforward helper function to convert dictionary of list to list of dictionaries
Args:
dict: dictionary of lists
Returns:
list of dictionaries
"""
max_val = 1
for k,v in dict.items():
if type(v) is list:
if len(v) > 1 and (max_val > 1 and len(v) is not max_val):
#FIXME: get a real error here
sys.exit('All lists in cparams need to be of length 1 or %i.. key %s has this list: %s' %(max_val,k,v))
max_val = max(max_val,len(v))
ld = [{} for l in range(max_val)]
for d in range(len(ld)):
for k,v in dict.items():
if type(v) is not list:
ld[d][k] = v
else:
if len(v) == 1:
ld[d][k] = v[0]
else:
ld[d][k] = v[d]
return ld
def register_level(self,L):
"""
Routine to register levels
This routine will append levels to the level list of the step instance and link the step to the newly
registered level (Level 0 will be considered as the finest level). It will also allocate the tau correction,
if this level is not the finest one.
Args:
L: level to be registered
"""
assert isinstance(L,levclass.level)
# add level to level list
self.levels.append(L)
# pass this step to the registered level
self.levels[-1]._level__set_step(self)
# if this is not the finest level, allocate tau correction
if len(self.levels) > 1:
L._level__add_tau()
def connect_levels(self, transfer_class, transfer_params, fine_level, coarse_level):
"""
Routine to couple levels with transfer operators
Args:
transfer_class: the class which can transfer between the two levels
transfer_params: parameters for the transfer class
fine_level: the fine level
coarse_level: the coarse level
"""
# create new instance of the specific transfer class
T = transfer_class(fine_level,coarse_level,transfer_params)
# use transfer dictionary twice to set restrict and prolong operator
self.__transfer_dict[tuple([fine_level,coarse_level])] = T.restrict
if T.params.finter:
self.__transfer_dict[tuple([coarse_level,fine_level])] = T.prolong_f
else:
self.__transfer_dict[tuple([coarse_level,fine_level])] = T.prolong
    def transfer(self, source, target):
        """
        Wrapper routine to ease the call of the transfer functions

        This function can be called in the multilevel stepper (e.g. MLSDC), passing a source and a target level.
        Using the transfer dictionary, the calling stepper does not need to specify whether to use restrict of prolong.

        Args:
            source: source level
            target: target level
        """
        # the (source, target) ordering selects restrict vs. prolong,
        # as registered by connect_levels
        self.__transfer_dict[tuple([source, target])]()
    def reset_step(self):
        """
        Routine so clean-up step structure and the corresp. levels for further uses
        """
        # reset all levels (delegates the actual clean-up to each level)
        for l in self.levels:
            l.reset_level()
    def init_step(self, u0):
        """
        Initialization routine for a new step.

        This routine uses initial values u0 to set up the u[0] values at the finest level

        Args:
            u0: initial values
        """
        # a hierarchy must exist and the finest level must provide a u slot
        assert len(self.levels) >= 1
        assert len(self.levels[0].u) >= 1

        # pass u0 to u[0] on the finest level 0; wrap in the problem's
        # data type so the level owns an independent copy
        P = self.levels[0].prob
        self.levels[0].u[0] = P.dtype_u(u0)
    @property
    def prev(self):
        """
        Getter for previous step

        Returns:
            prev
        """
        # name-mangled private attribute, assigned through the prev setter
        return self.__prev
@prev.setter
def prev(self,p):
"""
Setter for previous step
Args:
p: new previous step
"""
assert type(p) is type(self)
self.__prev = p | random_line_split | |
Step.py | from pySDC import Level as levclass
from pySDC import Stats as statclass
from pySDC import Hooks as hookclass
import copy as cp
import sys
class step():
"""
Step class, referencing most of the structure needed for the time-stepping
This class bundles multiple levels and the corresponding transfer operators and is used by the methods
(e.g. SDC and MLSDC). Status variables like the current time are hidden via properties and setters methods.
Attributes:
__t: current time (property time)
__dt: current step size (property dt)
__k: current iteration (property iter)
__transfer_dict: data structure to couple levels and transfer operators
levels: list of levels
params: parameters given by the user
__slots__: list of attributes to avoid accidential creation of new class attributes
"""
__slots__ = ('params','levels','__transfer_dict','status','__prev')
def __init__(self, params):
"""
Initialization routine
Args:
params: parameters given by the user, will be added as attributes
"""
# short helper class to add params as attributes
class pars():
def __init__(self,params):
defaults = dict()
defaults['maxiter'] = 20
defaults['fine_comm'] = True
defaults['predict'] = True
for k,v in defaults.items():
setattr(self,k,v)
for k,v in params.items():
setattr(self,k,v)
pass
# short helper class to bundle all status variables
class status():
__slots__ = ('iter','stage','slot','first','last','pred_cnt','done','time','dt','step')
def __init__(self):
|
# set params and status
self.params = pars(params)
self.status = status()
# empty attributes
self.__transfer_dict = {}
self.levels = []
self.__prev = None
def generate_hierarchy(self,descr):
"""
Routine to generate the level hierarchy for a single step
This makes the explicit generation of levels in the frontend obsolete and hides a few dirty hacks here and
there.
Args:
descr: dictionary containing the description of the levels as list per key
"""
# assert the existence of all the keys we need to set up at least on level
assert 'problem_class' in descr
assert 'problem_params' in descr
assert 'dtype_u' in descr
assert 'dtype_f' in descr
assert 'sweeper_class' in descr
assert 'level_params' in descr
# convert problem-dependent parameters consisting of dictionary of lists to a list of dictionaries with only a
# single entry per key, one dict per level
pparams_list = self.__dict_to_list(descr['problem_params'])
# put this newly generated list into the description dictionary (copy to avoid changing the original one)
descr_new = cp.deepcopy(descr)
descr_new['problem_params'] = pparams_list
# generate list of dictionaries out of the description
descr_list = self.__dict_to_list(descr_new)
# sanity check: is there a transfer class? is there one even if only a single level is specified?
if len(descr_list) > 1:
assert 'transfer_class' in descr_new
assert 'transfer_params' in descr_new
elif 'transfer_class' in descr_new:
print('WARNING: you have specified transfer classes, but only a single level...')
# generate levels, register and connect if needed
for l in range(len(descr_list)):
# check if we have a hook on this list. if not, use default class.
if 'hook_class' in descr_list[l]:
hook = descr_list[l]['hook_class']
else:
hook = hookclass.hooks
if 'sweeper_params' in descr_list[l]:
swparams = descr_list[l]['sweeper_params']
else:
swparams = {}
if not 'collocation_class' in swparams:
assert 'collocation_class' in descr_list[l]
swparams['collocation_class'] = descr_list[l]['collocation_class']
if not 'num_nodes' in swparams:
assert 'num_nodes' in descr_list[l]
swparams['num_nodes'] = descr_list[l]['num_nodes']
L = levclass.level(problem_class = descr_list[l]['problem_class'],
problem_params = descr_list[l]['problem_params'],
dtype_u = descr_list[l]['dtype_u'],
dtype_f = descr_list[l]['dtype_f'],
sweeper_class = descr_list[l]['sweeper_class'],
sweeper_params = swparams,
level_params = descr_list[l]['level_params'],
hook_class = hook,
id = 'L'+str(l))
self.register_level(L)
if l > 0:
self.connect_levels(transfer_class = descr_list[l]['transfer_class'],
transfer_params = descr_list[l]['transfer_params'],
fine_level = self.levels[l-1],
coarse_level = self.levels[l])
@staticmethod
def __dict_to_list(dict):
"""
Straightforward helper function to convert dictionary of list to list of dictionaries
Args:
dict: dictionary of lists
Returns:
list of dictionaries
"""
max_val = 1
for k,v in dict.items():
if type(v) is list:
if len(v) > 1 and (max_val > 1 and len(v) is not max_val):
#FIXME: get a real error here
sys.exit('All lists in cparams need to be of length 1 or %i.. key %s has this list: %s' %(max_val,k,v))
max_val = max(max_val,len(v))
ld = [{} for l in range(max_val)]
for d in range(len(ld)):
for k,v in dict.items():
if type(v) is not list:
ld[d][k] = v
else:
if len(v) == 1:
ld[d][k] = v[0]
else:
ld[d][k] = v[d]
return ld
def register_level(self,L):
"""
Routine to register levels
This routine will append levels to the level list of the step instance and link the step to the newly
registered level (Level 0 will be considered as the finest level). It will also allocate the tau correction,
if this level is not the finest one.
Args:
L: level to be registered
"""
assert isinstance(L,levclass.level)
# add level to level list
self.levels.append(L)
# pass this step to the registered level
self.levels[-1]._level__set_step(self)
# if this is not the finest level, allocate tau correction
if len(self.levels) > 1:
L._level__add_tau()
def connect_levels(self, transfer_class, transfer_params, fine_level, coarse_level):
"""
Routine to couple levels with transfer operators
Args:
transfer_class: the class which can transfer between the two levels
transfer_params: parameters for the transfer class
fine_level: the fine level
coarse_level: the coarse level
"""
# create new instance of the specific transfer class
T = transfer_class(fine_level,coarse_level,transfer_params)
# use transfer dictionary twice to set restrict and prolong operator
self.__transfer_dict[tuple([fine_level,coarse_level])] = T.restrict
if T.params.finter:
self.__transfer_dict[tuple([coarse_level,fine_level])] = T.prolong_f
else:
self.__transfer_dict[tuple([coarse_level,fine_level])] = T.prolong
def transfer(self,source,target):
"""
Wrapper routine to ease the call of the transfer functions
This function can be called in the multilevel stepper (e.g. MLSDC), passing a source and a target level.
Using the transfer dictionary, the calling stepper does not need to specify whether to use restrict of prolong.
Args:
source: source level
target: target level
"""
self.__transfer_dict[tuple([source,target])]()
def reset_step(self):
"""
Routine so clean-up step structure and the corresp. levels for further uses
"""
# reset all levels
for l in self.levels:
l.reset_level()
def init_step(self,u0):
"""
Initialization routine for a new step.
This routine uses initial values u0 to set up the u[0] values at the finest level
Args:
u0: initial values
"""
assert len(self.levels) >=1
assert len(self.levels[0].u) >=1
# pass u0 to u[0] on the finest level 0
P = self.levels[0].prob
self.levels[0].u[0] = P.dtype_u(u0)
@property
def prev(self):
"""
Getter for previous step
Returns:
prev
"""
return self.__prev
@prev.setter
def prev(self,p):
"""
Setter for previous step
Args:
p: new previous step
"""
assert type(p) is type(self)
self.__prev = p | self.iter = None
self.stage = None
self.slot = None
self.first = None
self.last = None
self.pred_cnt = None
self.done = None
self.time = None
self.dt = None
self.step = None | identifier_body |
Step.py | from pySDC import Level as levclass
from pySDC import Stats as statclass
from pySDC import Hooks as hookclass
import copy as cp
import sys
class step():
"""
Step class, referencing most of the structure needed for the time-stepping
This class bundles multiple levels and the corresponding transfer operators and is used by the methods
(e.g. SDC and MLSDC). Status variables like the current time are hidden via properties and setters methods.
Attributes:
__t: current time (property time)
__dt: current step size (property dt)
__k: current iteration (property iter)
__transfer_dict: data structure to couple levels and transfer operators
levels: list of levels
params: parameters given by the user
__slots__: list of attributes to avoid accidential creation of new class attributes
"""
__slots__ = ('params','levels','__transfer_dict','status','__prev')
def __init__(self, params):
"""
Initialization routine
Args:
params: parameters given by the user, will be added as attributes
"""
# short helper class to add params as attributes
class pars():
def __init__(self,params):
defaults = dict()
defaults['maxiter'] = 20
defaults['fine_comm'] = True
defaults['predict'] = True
for k,v in defaults.items():
setattr(self,k,v)
for k,v in params.items():
setattr(self,k,v)
pass
# short helper class to bundle all status variables
class status():
__slots__ = ('iter','stage','slot','first','last','pred_cnt','done','time','dt','step')
def __init__(self):
self.iter = None
self.stage = None
self.slot = None
self.first = None
self.last = None
self.pred_cnt = None
self.done = None
self.time = None
self.dt = None
self.step = None
# set params and status
self.params = pars(params)
self.status = status()
# empty attributes
self.__transfer_dict = {}
self.levels = []
self.__prev = None
def generate_hierarchy(self,descr):
"""
Routine to generate the level hierarchy for a single step
This makes the explicit generation of levels in the frontend obsolete and hides a few dirty hacks here and
there.
Args:
descr: dictionary containing the description of the levels as list per key
"""
# assert the existence of all the keys we need to set up at least on level
assert 'problem_class' in descr
assert 'problem_params' in descr
assert 'dtype_u' in descr
assert 'dtype_f' in descr
assert 'sweeper_class' in descr
assert 'level_params' in descr
# convert problem-dependent parameters consisting of dictionary of lists to a list of dictionaries with only a
# single entry per key, one dict per level
pparams_list = self.__dict_to_list(descr['problem_params'])
# put this newly generated list into the description dictionary (copy to avoid changing the original one)
descr_new = cp.deepcopy(descr)
descr_new['problem_params'] = pparams_list
# generate list of dictionaries out of the description
descr_list = self.__dict_to_list(descr_new)
# sanity check: is there a transfer class? is there one even if only a single level is specified?
if len(descr_list) > 1:
assert 'transfer_class' in descr_new
assert 'transfer_params' in descr_new
elif 'transfer_class' in descr_new:
print('WARNING: you have specified transfer classes, but only a single level...')
# generate levels, register and connect if needed
for l in range(len(descr_list)):
# check if we have a hook on this list. if not, use default class.
if 'hook_class' in descr_list[l]:
hook = descr_list[l]['hook_class']
else:
hook = hookclass.hooks
if 'sweeper_params' in descr_list[l]:
swparams = descr_list[l]['sweeper_params']
else:
swparams = {}
if not 'collocation_class' in swparams:
assert 'collocation_class' in descr_list[l]
swparams['collocation_class'] = descr_list[l]['collocation_class']
if not 'num_nodes' in swparams:
assert 'num_nodes' in descr_list[l]
swparams['num_nodes'] = descr_list[l]['num_nodes']
L = levclass.level(problem_class = descr_list[l]['problem_class'],
problem_params = descr_list[l]['problem_params'],
dtype_u = descr_list[l]['dtype_u'],
dtype_f = descr_list[l]['dtype_f'],
sweeper_class = descr_list[l]['sweeper_class'],
sweeper_params = swparams,
level_params = descr_list[l]['level_params'],
hook_class = hook,
id = 'L'+str(l))
self.register_level(L)
if l > 0:
self.connect_levels(transfer_class = descr_list[l]['transfer_class'],
transfer_params = descr_list[l]['transfer_params'],
fine_level = self.levels[l-1],
coarse_level = self.levels[l])
@staticmethod
def __dict_to_list(dict):
"""
Straightforward helper function to convert dictionary of list to list of dictionaries
Args:
dict: dictionary of lists
Returns:
list of dictionaries
"""
max_val = 1
for k,v in dict.items():
if type(v) is list:
if len(v) > 1 and (max_val > 1 and len(v) is not max_val):
#FIXME: get a real error here
sys.exit('All lists in cparams need to be of length 1 or %i.. key %s has this list: %s' %(max_val,k,v))
max_val = max(max_val,len(v))
ld = [{} for l in range(max_val)]
for d in range(len(ld)):
for k,v in dict.items():
if type(v) is not list:
ld[d][k] = v
else:
if len(v) == 1:
ld[d][k] = v[0]
else:
|
return ld
def register_level(self,L):
"""
Routine to register levels
This routine will append levels to the level list of the step instance and link the step to the newly
registered level (Level 0 will be considered as the finest level). It will also allocate the tau correction,
if this level is not the finest one.
Args:
L: level to be registered
"""
assert isinstance(L,levclass.level)
# add level to level list
self.levels.append(L)
# pass this step to the registered level
self.levels[-1]._level__set_step(self)
# if this is not the finest level, allocate tau correction
if len(self.levels) > 1:
L._level__add_tau()
def connect_levels(self, transfer_class, transfer_params, fine_level, coarse_level):
"""
Routine to couple levels with transfer operators
Args:
transfer_class: the class which can transfer between the two levels
transfer_params: parameters for the transfer class
fine_level: the fine level
coarse_level: the coarse level
"""
# create new instance of the specific transfer class
T = transfer_class(fine_level,coarse_level,transfer_params)
# use transfer dictionary twice to set restrict and prolong operator
self.__transfer_dict[tuple([fine_level,coarse_level])] = T.restrict
if T.params.finter:
self.__transfer_dict[tuple([coarse_level,fine_level])] = T.prolong_f
else:
self.__transfer_dict[tuple([coarse_level,fine_level])] = T.prolong
def transfer(self,source,target):
"""
Wrapper routine to ease the call of the transfer functions
This function can be called in the multilevel stepper (e.g. MLSDC), passing a source and a target level.
Using the transfer dictionary, the calling stepper does not need to specify whether to use restrict of prolong.
Args:
source: source level
target: target level
"""
self.__transfer_dict[tuple([source,target])]()
def reset_step(self):
"""
Routine so clean-up step structure and the corresp. levels for further uses
"""
# reset all levels
for l in self.levels:
l.reset_level()
def init_step(self,u0):
"""
Initialization routine for a new step.
This routine uses initial values u0 to set up the u[0] values at the finest level
Args:
u0: initial values
"""
assert len(self.levels) >=1
assert len(self.levels[0].u) >=1
# pass u0 to u[0] on the finest level 0
P = self.levels[0].prob
self.levels[0].u[0] = P.dtype_u(u0)
@property
def prev(self):
"""
Getter for previous step
Returns:
prev
"""
return self.__prev
@prev.setter
def prev(self,p):
"""
Setter for previous step
Args:
p: new previous step
"""
assert type(p) is type(self)
self.__prev = p | ld[d][k] = v[d] | conditional_block |
Step.py | from pySDC import Level as levclass
from pySDC import Stats as statclass
from pySDC import Hooks as hookclass
import copy as cp
import sys
class step():
"""
Step class, referencing most of the structure needed for the time-stepping
This class bundles multiple levels and the corresponding transfer operators and is used by the methods
(e.g. SDC and MLSDC). Status variables like the current time are hidden via properties and setters methods.
Attributes:
__t: current time (property time)
__dt: current step size (property dt)
__k: current iteration (property iter)
__transfer_dict: data structure to couple levels and transfer operators
levels: list of levels
params: parameters given by the user
__slots__: list of attributes to avoid accidential creation of new class attributes
"""
__slots__ = ('params','levels','__transfer_dict','status','__prev')
    def __init__(self, params):
        """
        Initialization routine

        Args:
            params: parameters given by the user, will be added as attributes
        """

        # short helper class to add params as attributes
        class pars():
            # defaults are applied first, then overridden by user params
            def __init__(self, params):
                defaults = dict()
                defaults['maxiter'] = 20
                defaults['fine_comm'] = True
                defaults['predict'] = True
                for k, v in defaults.items():
                    setattr(self, k, v)
                for k, v in params.items():
                    setattr(self, k, v)
                pass

        # short helper class to bundle all status variables
        class status():
            # __slots__ prevents accidental creation of new status fields
            __slots__ = ('iter', 'stage', 'slot', 'first', 'last', 'pred_cnt', 'done', 'time', 'dt', 'step')

            def __init__(self):
                # all status fields start unset and are filled by the stepper
                self.iter = None
                self.stage = None
                self.slot = None
                self.first = None
                self.last = None
                self.pred_cnt = None
                self.done = None
                self.time = None
                self.dt = None
                self.step = None

        # set params and status
        self.params = pars(params)
        self.status = status()

        # empty attributes: transfer operators, level list, previous step
        self.__transfer_dict = {}
        self.levels = []
        self.__prev = None
    def generate_hierarchy(self, descr):
        """
        Routine to generate the level hierarchy for a single step

        This makes the explicit generation of levels in the frontend obsolete and hides a few dirty hacks here and
        there.

        Args:
            descr: dictionary containing the description of the levels as list per key
        """

        # assert the existence of all the keys we need to set up at least on level
        assert 'problem_class' in descr
        assert 'problem_params' in descr
        assert 'dtype_u' in descr
        assert 'dtype_f' in descr
        assert 'sweeper_class' in descr
        assert 'level_params' in descr

        # convert problem-dependent parameters consisting of dictionary of lists to a list of dictionaries with only a
        # single entry per key, one dict per level
        pparams_list = self.__dict_to_list(descr['problem_params'])
        # put this newly generated list into the description dictionary (copy to avoid changing the original one)
        descr_new = cp.deepcopy(descr)
        descr_new['problem_params'] = pparams_list
        # generate list of dictionaries out of the description
        descr_list = self.__dict_to_list(descr_new)

        # sanity check: is there a transfer class? is there one even if only a single level is specified?
        if len(descr_list) > 1:
            assert 'transfer_class' in descr_new
            assert 'transfer_params' in descr_new
        elif 'transfer_class' in descr_new:
            print('WARNING: you have specified transfer classes, but only a single level...')

        # generate levels, register and connect if needed
        for l in range(len(descr_list)):
            # check if we have a hook on this list. if not, use default class.
            if 'hook_class' in descr_list[l]:
                hook = descr_list[l]['hook_class']
            else:
                hook = hookclass.hooks
            # sweeper parameters may be given explicitly, or assembled
            # from top-level collocation_class / num_nodes entries
            if 'sweeper_params' in descr_list[l]:
                swparams = descr_list[l]['sweeper_params']
            else:
                swparams = {}
            if not 'collocation_class' in swparams:
                assert 'collocation_class' in descr_list[l]
                swparams['collocation_class'] = descr_list[l]['collocation_class']
            if not 'num_nodes' in swparams:
                assert 'num_nodes' in descr_list[l]
                swparams['num_nodes'] = descr_list[l]['num_nodes']
            # build the level; ids are 'L0' (finest) .. 'Ln' (coarsest)
            L = levclass.level(problem_class = descr_list[l]['problem_class'],
                               problem_params = descr_list[l]['problem_params'],
                               dtype_u = descr_list[l]['dtype_u'],
                               dtype_f = descr_list[l]['dtype_f'],
                               sweeper_class = descr_list[l]['sweeper_class'],
                               sweeper_params = swparams,
                               level_params = descr_list[l]['level_params'],
                               hook_class = hook,
                               id = 'L'+str(l))
            self.register_level(L)
            # connect each coarser level to the next finer one
            if l > 0:
                self.connect_levels(transfer_class = descr_list[l]['transfer_class'],
                                    transfer_params = descr_list[l]['transfer_params'],
                                    fine_level = self.levels[l-1],
                                    coarse_level = self.levels[l])
@staticmethod
def __dict_to_list(dict):
"""
Straightforward helper function to convert dictionary of list to list of dictionaries
Args:
dict: dictionary of lists
Returns:
list of dictionaries
"""
max_val = 1
for k,v in dict.items():
if type(v) is list:
if len(v) > 1 and (max_val > 1 and len(v) is not max_val):
#FIXME: get a real error here
sys.exit('All lists in cparams need to be of length 1 or %i.. key %s has this list: %s' %(max_val,k,v))
max_val = max(max_val,len(v))
ld = [{} for l in range(max_val)]
for d in range(len(ld)):
for k,v in dict.items():
if type(v) is not list:
ld[d][k] = v
else:
if len(v) == 1:
ld[d][k] = v[0]
else:
ld[d][k] = v[d]
return ld
def register_level(self,L):
"""
Routine to register levels
This routine will append levels to the level list of the step instance and link the step to the newly
registered level (Level 0 will be considered as the finest level). It will also allocate the tau correction,
if this level is not the finest one.
Args:
L: level to be registered
"""
assert isinstance(L,levclass.level)
# add level to level list
self.levels.append(L)
# pass this step to the registered level
self.levels[-1]._level__set_step(self)
# if this is not the finest level, allocate tau correction
if len(self.levels) > 1:
L._level__add_tau()
def connect_levels(self, transfer_class, transfer_params, fine_level, coarse_level):
"""
Routine to couple levels with transfer operators
Args:
transfer_class: the class which can transfer between the two levels
transfer_params: parameters for the transfer class
fine_level: the fine level
coarse_level: the coarse level
"""
# create new instance of the specific transfer class
T = transfer_class(fine_level,coarse_level,transfer_params)
# use transfer dictionary twice to set restrict and prolong operator
self.__transfer_dict[tuple([fine_level,coarse_level])] = T.restrict
if T.params.finter:
self.__transfer_dict[tuple([coarse_level,fine_level])] = T.prolong_f
else:
self.__transfer_dict[tuple([coarse_level,fine_level])] = T.prolong
def transfer(self,source,target):
"""
Wrapper routine to ease the call of the transfer functions
This function can be called in the multilevel stepper (e.g. MLSDC), passing a source and a target level.
Using the transfer dictionary, the calling stepper does not need to specify whether to use restrict of prolong.
Args:
source: source level
target: target level
"""
self.__transfer_dict[tuple([source,target])]()
def reset_step(self):
"""
Routine so clean-up step structure and the corresp. levels for further uses
"""
# reset all levels
for l in self.levels:
l.reset_level()
def init_step(self,u0):
"""
Initialization routine for a new step.
This routine uses initial values u0 to set up the u[0] values at the finest level
Args:
u0: initial values
"""
assert len(self.levels) >=1
assert len(self.levels[0].u) >=1
# pass u0 to u[0] on the finest level 0
P = self.levels[0].prob
self.levels[0].u[0] = P.dtype_u(u0)
@property
def | (self):
"""
Getter for previous step
Returns:
prev
"""
return self.__prev
@prev.setter
def prev(self,p):
"""
Setter for previous step
Args:
p: new previous step
"""
assert type(p) is type(self)
self.__prev = p | prev | identifier_name |
cubicbez.rs | //! Cubic Bézier segments.
use libm; ////
use core::ops::{Mul, Range}; ////
////use std::ops::{Mul, Range};
use introsort; ////
use crate::MAX_EXTREMA;
use arrayvec::ArrayVec;
use crate::common::solve_quadratic;
use crate::common::GAUSS_LEGENDRE_COEFFS_9;
use crate::{
Affine, ParamCurve, ParamCurveArclen, ParamCurveArea, ParamCurveCurvature, ParamCurveDeriv,
ParamCurveExtrema, ParamCurveNearest, Point, QuadBez,
};
/// A single cubic Bézier segment.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct CubicBez {
pub p0: Point,
pub p1: Point,
pub p2: Point,
pub p3: Point,
}
/// An iterator which produces quadratic Bézier segments.
struct ToQuads {
c: CubicBez,
max_hypot2: f64,
t: f64,
}
impl CubicBez {
/// Create a new cubic Bézier segment.
#[inline]
pub fn new<P: Into<Point>>(p0: P, p1: P, p2: P, p3: P) -> CubicBez {
CubicBez {
p0: p0.into(),
p1: p1.into(),
p2: p2.into(),
p3: p3.into(),
}
}
/// Convert to quadratic Béziers.
///
/// The iterator returns the start and end parameter in the cubic of each quadratic
/// segment, along with the quadratic.
///
/// Note that the resulting quadratic Béziers are not in general G1 continuous;
/// they are optimized for minimizing distance error.
#[inline]
pub fn to_quads(&self, accuracy: f64) -> impl Iterator<Item = (f64, f64, QuadBez)> {
// This magic number is the square of 36 / sqrt(3).
// See: http://caffeineowl.com/graphics/2d/vectorial/cubic2quad01.html
let max_hypot2 = 432.0 * accuracy * accuracy;
ToQuads {
c: *self,
max_hypot2,
t: 0.0,
}
}
}
impl ParamCurve for CubicBez {
#[inline]
fn eval(&self, t: f64) -> Point {
let mt = 1.0 - t;
let v = self.p0.to_vec2() * (mt * mt * mt)
+ (self.p1.to_vec2() * (mt * mt * 3.0)
+ (self.p2.to_vec2() * (mt * 3.0) + self.p3.to_vec2() * t) * t)
* t;
v.to_point()
}
#[inline]
fn start(&self) -> Point {
self.p0
}
#[inline]
fn end(&self) -> Point {
self.p3
}
fn subsegment(&self, range: Range<f64>) -> CubicBez {
let (t0, t1) = (range.start, range.end);
let p0 = self.eval(t0);
let p3 = self.eval(t1);
let d = self.deriv();
let scale = (t1 - t0) * (1.0 / 3.0);
let p1 = p0 + scale * d.eval(t0).to_vec2();
let p2 = p3 - scale * d.eval(t1).to_vec2();
CubicBez { p0, p1, p2, p3 }
}
/// Subdivide into halves, using de Casteljau.
#[inline]
fn subdivide(&self) -> (CubicBez, CubicBez) {
let pm = self.eval(0.5);
(
CubicBez::new(
self.p0,
self.p0.midpoint(self.p1),
((self.p0.to_vec2() + self.p1.to_vec2() * 2.0 + self.p2.to_vec2()) * 0.25)
.to_point(),
pm,
),
CubicBez::new(
pm,
((self.p1.to_vec2() + self.p2.to_vec2() * 2.0 + self.p3.to_vec2()) * 0.25)
.to_point(),
self.p2.midpoint(self.p3),
self.p3,
),
)
}
}
impl ParamCurveDeriv for CubicBez {
type DerivResult = QuadBez;
#[inline]
fn deriv(&self) -> QuadBez {
QuadBez::new(
(3.0 * (self.p1 - self.p0)).to_point(),
(3.0 * (self.p2 - self.p1)).to_point(),
(3.0 * (self.p3 - self.p2)).to_point(),
)
}
}
impl ParamCurveArclen for CubicBez {
/// Arclength of a cubic Bézier segment.
///
/// This is an adaptive subdivision approach using Legendre-Gauss quadrature
/// in the base case, and an error estimate to decide when to subdivide.
fn arclen(&self, accuracy: f64) -> f64 {
// Squared L2 norm of the second derivative of the cubic.
fn cubic_errnorm(c: &CubicBez) -> f64 {
let d = c.deriv().deriv();
let dd = d.end() - d.start();
d.start().to_vec2().hypot2() + d.start().to_vec2().dot(dd) + dd.hypot2() * (1.0 / 3.0)
}
fn est_gauss9_error(c: &CubicBez) -> f64 {
let lc = (c.p3 - c.p0).hypot();
let lp = (c.p1 - c.p0).hypot() + (c.p2 - c.p1).hypot() + (c.p3 - c.p2).hypot();
2.56e-8 * libm::pow(cubic_errnorm(c) / (lc * lc), 8 as f64) * lp ////
////2.56e-8 * (cubic_errnorm(c) / (lc * lc)).powi(8) * lp
}
const MAX_DEPTH: usize = 16;
fn rec(c: &CubicBez, accuracy: f64, depth: usize) -> f64 {
if depth == MAX_DEPTH || est_gauss9_error(c) < accuracy {
c.gauss_arclen(GAUSS_LEGENDRE_COEFFS_9)
} else {
let (c0, c1) = c.subdivide();
rec(&c0, accuracy * 0.5, depth + 1) + rec(&c1, accuracy * 0.5, depth + 1)
}
}
rec(self, accuracy, 0)
}
}
impl ParamCurveArea for CubicBez {
#[inline]
fn signed_area(&self) -> f64 {
(self.p0.x * (6.0 * self.p1.y + 3.0 * self.p2.y + self.p3.y)
+ 3.0
* (self.p1.x * (-2.0 * self.p0.y + self.p2.y + self.p3.y)
- self.p2.x * (self.p0.y + self.p1.y - 2.0 * self.p3.y))
- self.p3.x * (self.p0.y + 3.0 * self.p1.y + 6.0 * self.p2.y))
* (1.0 / 20.0)
}
}
impl ParamCurveNearest for CubicBez {
/// Find nearest point, using subdivision.
fn nearest(&self, p: Point, accuracy: f64) -> (f64, f64) {
let mut best_r = None;
let mut best_t = 0.0;
for (t0, t1, q) in self.to_quads(accuracy) {
let (t, r) = q.nearest(p, accuracy);
if best_r.map(|best_r| r < best_r).unwrap_or(true) {
best_t = t0 + t * (t1 - t0);
best_r = Some(r);
}
}
(best_t, best_r.unwrap())
}
}
impl ParamCurveCurvature for CubicBez {}
impl ParamCurveExtrema for CubicBez {
fn extrema(&self) -> ArrayVec<[f64; MAX_EXTREMA]> {
fn one_coord(result: &mut ArrayVec<[f64; MAX_EXTREMA]>, d0: f64, d1: f64, d2: f64) {
let a = d0 - 2.0 * d1 + d2;
let b = 2.0 * (d1 - d0);
let c = d0;
let roots = solve_quadratic(c, b, a);
for &t in &roots {
if t > 0.0 && t < 1.0 {
result.push(t);
}
}
}
let mut result = ArrayVec::<[f64; MAX_EXTREMA]>::new(); ////
////let mut result = ArrayVec::new();
let d0 = self.p1 - self.p0;
let d1 = self.p2 - self.p1;
let d2 = self.p3 - self.p2;
one_coord(&mut result, d0.x, d1.x, d2.x);
one_coord(&mut result, d0.y, d1.y, d2.y);
introsort::sort_by(&mut result, &|a, b| a.partial_cmp(b).unwrap()); ////
////result.sort_by(|a, b| a.partial_cmp(b).unwrap());
result
}
}
impl Mul<CubicBez> for Affine {
type Output = CubicBez;
#[inline]
fn mul(self, c: CubicBez) -> CubicBez {
CubicBez {
p0: self * c.p0,
p1: self * c.p1,
p2: self * c.p2,
p3: self * c.p3,
}
}
}
impl Iterator for ToQuads {
type Item = (f64, f64, QuadBez);
fn next(&mut self) -> Option<(f64, f64, QuadBez)> {
let t0 = self.t;
let mut t1 = 1.0;
if t0 == t1 {
return None;
}
loop {
let seg = self.c.subsegment(t0..t1);
// Compute error for candidate quadratic.
let p1x2 = 3.0 * seg.p1.to_vec2() - seg.p0.to_vec2();
let p2x2 = 3.0 * seg.p2.to_vec2() - seg.p3.to_vec2();
let err = (p2x2 - p1x2).hypot2();
//println!("{:?} {} {}", t0..t1, err, if err < self.max_hypot2 { "ok" } else { "" });
if err < self.max_hypot2 {
let result = QuadBez::new(seg.p0, ((p1x2 + p2x2) / 4.0).to_point(), seg.p3);
self.t = t1;
return Some((t0, t1, result));
} else {
let shrink = if t1 == 1.0 && err < 64.0 * self.max_hypot2 {
0.5
} else {
0.999_999 * libm::pow(self.max_hypot2 / err, 1. / 6.0) ////
////0.999_999 * (self.max_hypot2 / err).powf(1. / 6.0)
};
t1 = t0 + shrink * (t1 - t0);
}
}
}
}
#[cfg(test)]
mod tests {
use crate::{
Affine, CubicBez, ParamCurve, ParamCurveArclen, ParamCurveArea, ParamCurveDeriv,
ParamCurveExtrema, ParamCurveNearest, Point,
};
#[test]
fn cubicbez_deriv() {
// y = x^2
let c = CubicBez::new(
(0.0, 0.0),
(1.0 / 3.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0, 1.0),
);
let deriv = c.deriv();
let n = 10;
for i in 0..=n {
let t = (i as f64) * (n as f64).recip();
let delta = 1e-6;
let p = c.eval(t);
let p1 = c.eval(t + delta);
let d_approx = (p1 - p) * delta.recip();
let d = deriv.eval(t).to_vec2();
assert!((d - d_approx).hypot() < delta * 2.0);
}
}
#[test]
fn cubicbe | // y = x^2
let c = CubicBez::new(
(0.0, 0.0),
(1.0 / 3.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0, 1.0),
);
let true_arclen = 0.5 * libm::sqrt(5.0f64) + 0.25 * (2.0 + libm::sqrt(5.0f64)).ln();
for i in 0..12 {
let accuracy = 0.1f64.powi(i);
let error = c.arclen(accuracy) - true_arclen;
//println!("{:e}: {:e}", accuracy, error);
assert!(error.abs() < accuracy);
}
}
#[test]
fn cubicbez_inv_arclen() {
// y = x^2
let c = CubicBez::new(
(0.0, 0.0),
(1.0 / 3.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0, 1.0),
);
let true_arclen = 0.5 * libm::sqrt(5.0f64) + 0.25 * (2.0 + libm::sqrt(5.0f64)).ln();
for i in 0..12 {
let accuracy = 0.1f64.powi(i);
let n = 10;
for j in 0..=n {
let arc = (j as f64) * ((n as f64).recip() * true_arclen);
let t = c.inv_arclen(arc, accuracy * 0.5);
let actual_arc = c.subsegment(0.0..t).arclen(accuracy * 0.5);
assert!(
(arc - actual_arc).abs() < accuracy,
"at accuracy {:e}, wanted {} got {}",
accuracy,
actual_arc,
arc
);
}
}
}
#[test]
fn cubicbez_signed_area_linear() {
// y = 1 - x
let c = CubicBez::new(
(1.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0 / 3.0, 2.0 / 3.0),
(0.0, 1.0),
);
let epsilon = 1e-12;
assert_eq!((Affine::rotate(0.5) * c).signed_area(), 0.5);
assert!(((Affine::rotate(0.5) * c).signed_area() - 0.5).abs() < epsilon);
assert!(((Affine::translate((0.0, 1.0)) * c).signed_area() - 1.0).abs() < epsilon);
assert!(((Affine::translate((1.0, 0.0)) * c).signed_area() - 1.0).abs() < epsilon);
}
#[test]
fn cubicbez_signed_area() {
// y = 1 - x^3
let c = CubicBez::new((1.0, 0.0), (2.0 / 3.0, 1.0), (1.0 / 3.0, 1.0), (0.0, 1.0));
let epsilon = 1e-12;
assert!((c.signed_area() - 0.75).abs() < epsilon);
assert!(((Affine::rotate(0.5) * c).signed_area() - 0.75).abs() < epsilon);
assert!(((Affine::translate((0.0, 1.0)) * c).signed_area() - 1.25).abs() < epsilon);
assert!(((Affine::translate((1.0, 0.0)) * c).signed_area() - 1.25).abs() < epsilon);
}
#[test]
fn cubicbez_nearest() {
fn verify(result: (f64, f64), expected: f64) {
assert!(
(result.0 - expected).abs() < 1e-6,
"got {:?} expected {}",
result,
expected
);
}
// y = x^3
let c = CubicBez::new((0.0, 0.0), (1.0 / 3.0, 0.0), (2.0 / 3.0, 0.0), (1.0, 1.0));
verify(c.nearest((0.1, 0.001).into(), 1e-6), 0.1);
verify(c.nearest((0.2, 0.008).into(), 1e-6), 0.2);
verify(c.nearest((0.3, 0.027).into(), 1e-6), 0.3);
verify(c.nearest((0.4, 0.064).into(), 1e-6), 0.4);
verify(c.nearest((0.5, 0.125).into(), 1e-6), 0.5);
verify(c.nearest((0.6, 0.216).into(), 1e-6), 0.6);
verify(c.nearest((0.7, 0.343).into(), 1e-6), 0.7);
verify(c.nearest((0.8, 0.512).into(), 1e-6), 0.8);
verify(c.nearest((0.9, 0.729).into(), 1e-6), 0.9);
verify(c.nearest((1.0, 1.0).into(), 1e-6), 1.0);
verify(c.nearest((1.1, 1.1).into(), 1e-6), 1.0);
verify(c.nearest((-0.1, 0.0).into(), 1e-6), 0.0);
let a = Affine::rotate(0.5);
verify((a * c).nearest(a * Point::new(0.1, 0.001), 1e-6), 0.1);
}
#[test]
fn cubicbez_extrema() {
// y = x^2
let q = CubicBez::new((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0));
let extrema = q.extrema();
assert_eq!(extrema.len(), 1);
assert!((extrema[0] - 0.5).abs() < 1e-6);
let q = CubicBez::new((0.4, 0.5), (0.0, 1.0), (1.0, 0.0), (0.5, 0.4));
let extrema = q.extrema();
assert_eq!(extrema.len(), 4);
}
#[test]
fn cubicbez_toquads() {
// y = x^3
let c = CubicBez::new((0.0, 0.0), (1.0 / 3.0, 0.0), (2.0 / 3.0, 0.0), (1.0, 1.0));
for i in 0..10 {
let accuracy = 0.1f64.powi(i);
let mut _count = 0;
let mut worst: f64 = 0.0;
for (t0, t1, q) in c.to_quads(accuracy) {
_count += 1;
let epsilon = 1e-12;
assert!((q.start() - c.eval(t0)).hypot() < epsilon);
assert!((q.end() - c.eval(t1)).hypot() < epsilon);
let n = 4;
for j in 0..=n {
let t = (j as f64) * (n as f64).recip();
let p = q.eval(t);
let err = (p.y - p.x.powi(3)).abs();
worst = worst.max(err);
assert!(err < accuracy, "got {} wanted {}", err, accuracy);
}
}
//println!("accuracy {:e}: got {:e}, {} quads", accuracy, worst, _count);
}
}
}
| z_arclen() {
| identifier_name |
cubicbez.rs | //! Cubic Bézier segments.
use libm; ////
use core::ops::{Mul, Range}; ////
////use std::ops::{Mul, Range};
use introsort; ////
use crate::MAX_EXTREMA;
use arrayvec::ArrayVec;
use crate::common::solve_quadratic;
use crate::common::GAUSS_LEGENDRE_COEFFS_9;
use crate::{
Affine, ParamCurve, ParamCurveArclen, ParamCurveArea, ParamCurveCurvature, ParamCurveDeriv,
ParamCurveExtrema, ParamCurveNearest, Point, QuadBez,
};
/// A single cubic Bézier segment.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct CubicBez {
pub p0: Point,
pub p1: Point,
pub p2: Point,
pub p3: Point,
}
/// An iterator which produces quadratic Bézier segments.
struct ToQuads {
c: CubicBez,
max_hypot2: f64,
t: f64,
}
impl CubicBez {
/// Create a new cubic Bézier segment.
#[inline]
pub fn new<P: Into<Point>>(p0: P, p1: P, p2: P, p3: P) -> CubicBez {
CubicBez {
p0: p0.into(),
p1: p1.into(),
p2: p2.into(),
p3: p3.into(),
}
}
/// Convert to quadratic Béziers.
///
/// The iterator returns the start and end parameter in the cubic of each quadratic
/// segment, along with the quadratic.
///
/// Note that the resulting quadratic Béziers are not in general G1 continuous;
/// they are optimized for minimizing distance error.
#[inline]
pub fn to_quads(&self, accuracy: f64) -> impl Iterator<Item = (f64, f64, QuadBez)> {
// This magic number is the square of 36 / sqrt(3).
// See: http://caffeineowl.com/graphics/2d/vectorial/cubic2quad01.html
let max_hypot2 = 432.0 * accuracy * accuracy;
ToQuads {
c: *self,
max_hypot2,
t: 0.0,
}
}
}
impl ParamCurve for CubicBez {
#[inline]
fn eval(&self, t: f64) -> Point {
let mt = 1.0 - t;
let v = self.p0.to_vec2() * (mt * mt * mt)
+ (self.p1.to_vec2() * (mt * mt * 3.0)
+ (self.p2.to_vec2() * (mt * 3.0) + self.p3.to_vec2() * t) * t)
* t;
v.to_point()
}
#[inline]
fn start(&self) -> Point {
self.p0
}
#[inline]
fn end(&self) -> Point {
self.p3
} | let p0 = self.eval(t0);
let p3 = self.eval(t1);
let d = self.deriv();
let scale = (t1 - t0) * (1.0 / 3.0);
let p1 = p0 + scale * d.eval(t0).to_vec2();
let p2 = p3 - scale * d.eval(t1).to_vec2();
CubicBez { p0, p1, p2, p3 }
}
/// Subdivide into halves, using de Casteljau.
#[inline]
fn subdivide(&self) -> (CubicBez, CubicBez) {
let pm = self.eval(0.5);
(
CubicBez::new(
self.p0,
self.p0.midpoint(self.p1),
((self.p0.to_vec2() + self.p1.to_vec2() * 2.0 + self.p2.to_vec2()) * 0.25)
.to_point(),
pm,
),
CubicBez::new(
pm,
((self.p1.to_vec2() + self.p2.to_vec2() * 2.0 + self.p3.to_vec2()) * 0.25)
.to_point(),
self.p2.midpoint(self.p3),
self.p3,
),
)
}
}
impl ParamCurveDeriv for CubicBez {
type DerivResult = QuadBez;
#[inline]
fn deriv(&self) -> QuadBez {
QuadBez::new(
(3.0 * (self.p1 - self.p0)).to_point(),
(3.0 * (self.p2 - self.p1)).to_point(),
(3.0 * (self.p3 - self.p2)).to_point(),
)
}
}
impl ParamCurveArclen for CubicBez {
/// Arclength of a cubic Bézier segment.
///
/// This is an adaptive subdivision approach using Legendre-Gauss quadrature
/// in the base case, and an error estimate to decide when to subdivide.
fn arclen(&self, accuracy: f64) -> f64 {
// Squared L2 norm of the second derivative of the cubic.
fn cubic_errnorm(c: &CubicBez) -> f64 {
let d = c.deriv().deriv();
let dd = d.end() - d.start();
d.start().to_vec2().hypot2() + d.start().to_vec2().dot(dd) + dd.hypot2() * (1.0 / 3.0)
}
fn est_gauss9_error(c: &CubicBez) -> f64 {
let lc = (c.p3 - c.p0).hypot();
let lp = (c.p1 - c.p0).hypot() + (c.p2 - c.p1).hypot() + (c.p3 - c.p2).hypot();
2.56e-8 * libm::pow(cubic_errnorm(c) / (lc * lc), 8 as f64) * lp ////
////2.56e-8 * (cubic_errnorm(c) / (lc * lc)).powi(8) * lp
}
const MAX_DEPTH: usize = 16;
fn rec(c: &CubicBez, accuracy: f64, depth: usize) -> f64 {
if depth == MAX_DEPTH || est_gauss9_error(c) < accuracy {
c.gauss_arclen(GAUSS_LEGENDRE_COEFFS_9)
} else {
let (c0, c1) = c.subdivide();
rec(&c0, accuracy * 0.5, depth + 1) + rec(&c1, accuracy * 0.5, depth + 1)
}
}
rec(self, accuracy, 0)
}
}
impl ParamCurveArea for CubicBez {
#[inline]
fn signed_area(&self) -> f64 {
(self.p0.x * (6.0 * self.p1.y + 3.0 * self.p2.y + self.p3.y)
+ 3.0
* (self.p1.x * (-2.0 * self.p0.y + self.p2.y + self.p3.y)
- self.p2.x * (self.p0.y + self.p1.y - 2.0 * self.p3.y))
- self.p3.x * (self.p0.y + 3.0 * self.p1.y + 6.0 * self.p2.y))
* (1.0 / 20.0)
}
}
impl ParamCurveNearest for CubicBez {
/// Find nearest point, using subdivision.
fn nearest(&self, p: Point, accuracy: f64) -> (f64, f64) {
let mut best_r = None;
let mut best_t = 0.0;
for (t0, t1, q) in self.to_quads(accuracy) {
let (t, r) = q.nearest(p, accuracy);
if best_r.map(|best_r| r < best_r).unwrap_or(true) {
best_t = t0 + t * (t1 - t0);
best_r = Some(r);
}
}
(best_t, best_r.unwrap())
}
}
impl ParamCurveCurvature for CubicBez {}
impl ParamCurveExtrema for CubicBez {
fn extrema(&self) -> ArrayVec<[f64; MAX_EXTREMA]> {
fn one_coord(result: &mut ArrayVec<[f64; MAX_EXTREMA]>, d0: f64, d1: f64, d2: f64) {
let a = d0 - 2.0 * d1 + d2;
let b = 2.0 * (d1 - d0);
let c = d0;
let roots = solve_quadratic(c, b, a);
for &t in &roots {
if t > 0.0 && t < 1.0 {
result.push(t);
}
}
}
let mut result = ArrayVec::<[f64; MAX_EXTREMA]>::new(); ////
////let mut result = ArrayVec::new();
let d0 = self.p1 - self.p0;
let d1 = self.p2 - self.p1;
let d2 = self.p3 - self.p2;
one_coord(&mut result, d0.x, d1.x, d2.x);
one_coord(&mut result, d0.y, d1.y, d2.y);
introsort::sort_by(&mut result, &|a, b| a.partial_cmp(b).unwrap()); ////
////result.sort_by(|a, b| a.partial_cmp(b).unwrap());
result
}
}
impl Mul<CubicBez> for Affine {
type Output = CubicBez;
#[inline]
fn mul(self, c: CubicBez) -> CubicBez {
CubicBez {
p0: self * c.p0,
p1: self * c.p1,
p2: self * c.p2,
p3: self * c.p3,
}
}
}
impl Iterator for ToQuads {
type Item = (f64, f64, QuadBez);
fn next(&mut self) -> Option<(f64, f64, QuadBez)> {
let t0 = self.t;
let mut t1 = 1.0;
if t0 == t1 {
return None;
}
loop {
let seg = self.c.subsegment(t0..t1);
// Compute error for candidate quadratic.
let p1x2 = 3.0 * seg.p1.to_vec2() - seg.p0.to_vec2();
let p2x2 = 3.0 * seg.p2.to_vec2() - seg.p3.to_vec2();
let err = (p2x2 - p1x2).hypot2();
//println!("{:?} {} {}", t0..t1, err, if err < self.max_hypot2 { "ok" } else { "" });
if err < self.max_hypot2 {
let result = QuadBez::new(seg.p0, ((p1x2 + p2x2) / 4.0).to_point(), seg.p3);
self.t = t1;
return Some((t0, t1, result));
} else {
let shrink = if t1 == 1.0 && err < 64.0 * self.max_hypot2 {
0.5
} else {
0.999_999 * libm::pow(self.max_hypot2 / err, 1. / 6.0) ////
////0.999_999 * (self.max_hypot2 / err).powf(1. / 6.0)
};
t1 = t0 + shrink * (t1 - t0);
}
}
}
}
#[cfg(test)]
mod tests {
use crate::{
Affine, CubicBez, ParamCurve, ParamCurveArclen, ParamCurveArea, ParamCurveDeriv,
ParamCurveExtrema, ParamCurveNearest, Point,
};
#[test]
fn cubicbez_deriv() {
// y = x^2
let c = CubicBez::new(
(0.0, 0.0),
(1.0 / 3.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0, 1.0),
);
let deriv = c.deriv();
let n = 10;
for i in 0..=n {
let t = (i as f64) * (n as f64).recip();
let delta = 1e-6;
let p = c.eval(t);
let p1 = c.eval(t + delta);
let d_approx = (p1 - p) * delta.recip();
let d = deriv.eval(t).to_vec2();
assert!((d - d_approx).hypot() < delta * 2.0);
}
}
#[test]
fn cubicbez_arclen() {
// y = x^2
let c = CubicBez::new(
(0.0, 0.0),
(1.0 / 3.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0, 1.0),
);
let true_arclen = 0.5 * libm::sqrt(5.0f64) + 0.25 * (2.0 + libm::sqrt(5.0f64)).ln();
for i in 0..12 {
let accuracy = 0.1f64.powi(i);
let error = c.arclen(accuracy) - true_arclen;
//println!("{:e}: {:e}", accuracy, error);
assert!(error.abs() < accuracy);
}
}
#[test]
fn cubicbez_inv_arclen() {
// y = x^2
let c = CubicBez::new(
(0.0, 0.0),
(1.0 / 3.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0, 1.0),
);
let true_arclen = 0.5 * libm::sqrt(5.0f64) + 0.25 * (2.0 + libm::sqrt(5.0f64)).ln();
for i in 0..12 {
let accuracy = 0.1f64.powi(i);
let n = 10;
for j in 0..=n {
let arc = (j as f64) * ((n as f64).recip() * true_arclen);
let t = c.inv_arclen(arc, accuracy * 0.5);
let actual_arc = c.subsegment(0.0..t).arclen(accuracy * 0.5);
assert!(
(arc - actual_arc).abs() < accuracy,
"at accuracy {:e}, wanted {} got {}",
accuracy,
actual_arc,
arc
);
}
}
}
#[test]
fn cubicbez_signed_area_linear() {
// y = 1 - x
let c = CubicBez::new(
(1.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0 / 3.0, 2.0 / 3.0),
(0.0, 1.0),
);
let epsilon = 1e-12;
assert_eq!((Affine::rotate(0.5) * c).signed_area(), 0.5);
assert!(((Affine::rotate(0.5) * c).signed_area() - 0.5).abs() < epsilon);
assert!(((Affine::translate((0.0, 1.0)) * c).signed_area() - 1.0).abs() < epsilon);
assert!(((Affine::translate((1.0, 0.0)) * c).signed_area() - 1.0).abs() < epsilon);
}
#[test]
fn cubicbez_signed_area() {
// y = 1 - x^3
let c = CubicBez::new((1.0, 0.0), (2.0 / 3.0, 1.0), (1.0 / 3.0, 1.0), (0.0, 1.0));
let epsilon = 1e-12;
assert!((c.signed_area() - 0.75).abs() < epsilon);
assert!(((Affine::rotate(0.5) * c).signed_area() - 0.75).abs() < epsilon);
assert!(((Affine::translate((0.0, 1.0)) * c).signed_area() - 1.25).abs() < epsilon);
assert!(((Affine::translate((1.0, 0.0)) * c).signed_area() - 1.25).abs() < epsilon);
}
#[test]
fn cubicbez_nearest() {
fn verify(result: (f64, f64), expected: f64) {
assert!(
(result.0 - expected).abs() < 1e-6,
"got {:?} expected {}",
result,
expected
);
}
// y = x^3
let c = CubicBez::new((0.0, 0.0), (1.0 / 3.0, 0.0), (2.0 / 3.0, 0.0), (1.0, 1.0));
verify(c.nearest((0.1, 0.001).into(), 1e-6), 0.1);
verify(c.nearest((0.2, 0.008).into(), 1e-6), 0.2);
verify(c.nearest((0.3, 0.027).into(), 1e-6), 0.3);
verify(c.nearest((0.4, 0.064).into(), 1e-6), 0.4);
verify(c.nearest((0.5, 0.125).into(), 1e-6), 0.5);
verify(c.nearest((0.6, 0.216).into(), 1e-6), 0.6);
verify(c.nearest((0.7, 0.343).into(), 1e-6), 0.7);
verify(c.nearest((0.8, 0.512).into(), 1e-6), 0.8);
verify(c.nearest((0.9, 0.729).into(), 1e-6), 0.9);
verify(c.nearest((1.0, 1.0).into(), 1e-6), 1.0);
verify(c.nearest((1.1, 1.1).into(), 1e-6), 1.0);
verify(c.nearest((-0.1, 0.0).into(), 1e-6), 0.0);
let a = Affine::rotate(0.5);
verify((a * c).nearest(a * Point::new(0.1, 0.001), 1e-6), 0.1);
}
#[test]
fn cubicbez_extrema() {
// y = x^2
let q = CubicBez::new((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0));
let extrema = q.extrema();
assert_eq!(extrema.len(), 1);
assert!((extrema[0] - 0.5).abs() < 1e-6);
let q = CubicBez::new((0.4, 0.5), (0.0, 1.0), (1.0, 0.0), (0.5, 0.4));
let extrema = q.extrema();
assert_eq!(extrema.len(), 4);
}
#[test]
fn cubicbez_toquads() {
// y = x^3
let c = CubicBez::new((0.0, 0.0), (1.0 / 3.0, 0.0), (2.0 / 3.0, 0.0), (1.0, 1.0));
for i in 0..10 {
let accuracy = 0.1f64.powi(i);
let mut _count = 0;
let mut worst: f64 = 0.0;
for (t0, t1, q) in c.to_quads(accuracy) {
_count += 1;
let epsilon = 1e-12;
assert!((q.start() - c.eval(t0)).hypot() < epsilon);
assert!((q.end() - c.eval(t1)).hypot() < epsilon);
let n = 4;
for j in 0..=n {
let t = (j as f64) * (n as f64).recip();
let p = q.eval(t);
let err = (p.y - p.x.powi(3)).abs();
worst = worst.max(err);
assert!(err < accuracy, "got {} wanted {}", err, accuracy);
}
}
//println!("accuracy {:e}: got {:e}, {} quads", accuracy, worst, _count);
}
}
} |
fn subsegment(&self, range: Range<f64>) -> CubicBez {
let (t0, t1) = (range.start, range.end); | random_line_split |
cubicbez.rs | //! Cubic Bézier segments.
use libm; ////
use core::ops::{Mul, Range}; ////
////use std::ops::{Mul, Range};
use introsort; ////
use crate::MAX_EXTREMA;
use arrayvec::ArrayVec;
use crate::common::solve_quadratic;
use crate::common::GAUSS_LEGENDRE_COEFFS_9;
use crate::{
Affine, ParamCurve, ParamCurveArclen, ParamCurveArea, ParamCurveCurvature, ParamCurveDeriv,
ParamCurveExtrema, ParamCurveNearest, Point, QuadBez,
};
/// A single cubic Bézier segment.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct CubicBez {
pub p0: Point,
pub p1: Point,
pub p2: Point,
pub p3: Point,
}
/// An iterator which produces quadratic Bézier segments.
struct ToQuads {
c: CubicBez,
max_hypot2: f64,
t: f64,
}
impl CubicBez {
/// Create a new cubic Bézier segment.
#[inline]
pub fn new<P: Into<Point>>(p0: P, p1: P, p2: P, p3: P) -> CubicBez {
CubicBez {
p0: p0.into(),
p1: p1.into(),
p2: p2.into(),
p3: p3.into(),
}
}
/// Convert to quadratic Béziers.
///
/// The iterator returns the start and end parameter in the cubic of each quadratic
/// segment, along with the quadratic.
///
/// Note that the resulting quadratic Béziers are not in general G1 continuous;
/// they are optimized for minimizing distance error.
#[inline]
pub fn to_quads(&self, accuracy: f64) -> impl Iterator<Item = (f64, f64, QuadBez)> {
// This magic number is the square of 36 / sqrt(3).
// See: http://caffeineowl.com/graphics/2d/vectorial/cubic2quad01.html
let max_hypot2 = 432.0 * accuracy * accuracy;
ToQuads {
c: *self,
max_hypot2,
t: 0.0,
}
}
}
impl ParamCurve for CubicBez {
#[inline]
fn eval(&self, t: f64) -> Point {
let mt = 1.0 - t;
let v = self.p0.to_vec2() * (mt * mt * mt)
+ (self.p1.to_vec2() * (mt * mt * 3.0)
+ (self.p2.to_vec2() * (mt * 3.0) + self.p3.to_vec2() * t) * t)
* t;
v.to_point()
}
#[inline]
fn start(&self) -> Point {
self.p0
}
#[inline]
fn end(&self) -> Point {
self.p3
}
fn subsegment(&self, range: Range<f64>) -> CubicBez {
let (t0, t1) = (range.start, range.end);
let p0 = self.eval(t0);
let p3 = self.eval(t1);
let d = self.deriv();
let scale = (t1 - t0) * (1.0 / 3.0);
let p1 = p0 + scale * d.eval(t0).to_vec2();
let p2 = p3 - scale * d.eval(t1).to_vec2();
CubicBez { p0, p1, p2, p3 }
}
/// Subdivide into halves, using de Casteljau.
#[inline]
fn subdivide(&self) -> (CubicBez, CubicBez) {
let pm = self.eval(0.5);
(
CubicBez::new(
self.p0,
self.p0.midpoint(self.p1),
((self.p0.to_vec2() + self.p1.to_vec2() * 2.0 + self.p2.to_vec2()) * 0.25)
.to_point(),
pm,
),
CubicBez::new(
pm,
((self.p1.to_vec2() + self.p2.to_vec2() * 2.0 + self.p3.to_vec2()) * 0.25)
.to_point(),
self.p2.midpoint(self.p3),
self.p3,
),
)
}
}
impl ParamCurveDeriv for CubicBez {
type DerivResult = QuadBez;
#[inline]
fn deriv(&self) -> QuadBez {
QuadBez::new(
(3.0 * (self.p1 - self.p0)).to_point(),
(3.0 * (self.p2 - self.p1)).to_point(),
(3.0 * (self.p3 - self.p2)).to_point(),
)
}
}
impl ParamCurveArclen for CubicBez {
/// Arclength of a cubic Bézier segment.
///
/// This is an adaptive subdivision approach using Legendre-Gauss quadrature
/// in the base case, and an error estimate to decide when to subdivide.
fn arclen(&self, accuracy: f64) -> f64 {
// Squared L2 norm of the second derivative of the cubic.
fn cubic_errnorm(c: &CubicBez) -> f64 {
let d = c.deriv().deriv();
let dd = d.end() - d.start();
d.start().to_vec2().hypot2() + d.start().to_vec2().dot(dd) + dd.hypot2() * (1.0 / 3.0)
}
fn est_gauss9_error(c: &CubicBez) -> f64 {
let lc = (c.p3 - c.p0).hypot();
let lp = (c.p1 - c.p0).hypot() + (c.p2 - c.p1).hypot() + (c.p3 - c.p2).hypot();
2.56e-8 * libm::pow(cubic_errnorm(c) / (lc * lc), 8 as f64) * lp ////
////2.56e-8 * (cubic_errnorm(c) / (lc * lc)).powi(8) * lp
}
const MAX_DEPTH: usize = 16;
fn rec(c: &CubicBez, accuracy: f64, depth: usize) -> f64 {
if depth == MAX_DEPTH || est_gauss9_error(c) < accuracy {
c.gauss_arclen(GAUSS_LEGENDRE_COEFFS_9)
} else {
let (c0, c1) = c.subdivide();
rec(&c0, accuracy * 0.5, depth + 1) + rec(&c1, accuracy * 0.5, depth + 1)
}
}
rec(self, accuracy, 0)
}
}
impl ParamCurveArea for CubicBez {
#[inline]
fn signed_area(&self) -> f64 {
(self.p0.x * (6.0 * self.p1.y + 3.0 * self.p2.y + self.p3.y)
+ 3.0
* (self.p1.x * (-2.0 * self.p0.y + self.p2.y + self.p3.y)
- self.p2.x * (self.p0.y + self.p1.y - 2.0 * self.p3.y))
- self.p3.x * (self.p0.y + 3.0 * self.p1.y + 6.0 * self.p2.y))
* (1.0 / 20.0)
}
}
impl ParamCurveNearest for CubicBez {
/// Find nearest point, using subdivision.
fn nearest(&self, p: Point, accuracy: f64) -> (f64, f64) {
let mut best_r = None;
let mut best_t = 0.0;
for (t0, t1, q) in self.to_quads(accuracy) {
let (t, r) = q.nearest(p, accuracy);
if best_r.map(|best_r| r < best_r).unwrap_or(true) {
best_t = t0 + t * (t1 - t0);
best_r = Some(r);
}
}
(best_t, best_r.unwrap())
}
}
impl ParamCurveCurvature for CubicBez {}
impl ParamCurveExtrema for CubicBez {
fn extrema(&self) -> ArrayVec<[f64; MAX_EXTREMA]> {
fn one_coord(result: &mut ArrayVec<[f64; MAX_EXTREMA]>, d0: f64, d1: f64, d2: f64) {
let a = d0 - 2.0 * d1 + d2;
let b = 2.0 * (d1 - d0);
let c = d0;
let roots = solve_quadratic(c, b, a);
for &t in &roots {
if t > 0.0 && t < 1.0 {
result.push(t);
}
}
}
let mut result = ArrayVec::<[f64; MAX_EXTREMA]>::new(); ////
////let mut result = ArrayVec::new();
let d0 = self.p1 - self.p0;
let d1 = self.p2 - self.p1;
let d2 = self.p3 - self.p2;
one_coord(&mut result, d0.x, d1.x, d2.x);
one_coord(&mut result, d0.y, d1.y, d2.y);
introsort::sort_by(&mut result, &|a, b| a.partial_cmp(b).unwrap()); ////
////result.sort_by(|a, b| a.partial_cmp(b).unwrap());
result
}
}
impl Mul<CubicBez> for Affine {
type Output = CubicBez;
#[inline]
fn mul(self, c: CubicBez) -> CubicBez {
CubicBez {
p0: self * c.p0,
p1: self * c.p1,
p2: self * c.p2,
p3: self * c.p3,
}
}
}
impl Iterator for ToQuads {
type Item = (f64, f64, QuadBez);
fn next(&mut self) -> Option<(f64, f64, QuadBez)> {
let t0 = self.t;
let mut t1 = 1.0;
if t0 == t1 {
return None;
}
loop {
let seg = self.c.subsegment(t0..t1);
// Compute error for candidate quadratic.
let p1x2 = 3.0 * seg.p1.to_vec2() - seg.p0.to_vec2();
let p2x2 = 3.0 * seg.p2.to_vec2() - seg.p3.to_vec2();
let err = (p2x2 - p1x2).hypot2();
//println!("{:?} {} {}", t0..t1, err, if err < self.max_hypot2 { "ok" } else { "" });
if err < self.max_hypot2 {
let result = QuadBez::new(seg.p0, ((p1x2 + p2x2) / 4.0).to_point(), seg.p3);
self.t = t1;
return Some((t0, t1, result));
} else {
let shrink = if t1 == 1.0 && err < 64.0 * self.max_hypot2 {
|
0.999_999 * libm::pow(self.max_hypot2 / err, 1. / 6.0) ////
////0.999_999 * (self.max_hypot2 / err).powf(1. / 6.0)
};
t1 = t0 + shrink * (t1 - t0);
}
}
}
}
#[cfg(test)]
mod tests {
use crate::{
Affine, CubicBez, ParamCurve, ParamCurveArclen, ParamCurveArea, ParamCurveDeriv,
ParamCurveExtrema, ParamCurveNearest, Point,
};
#[test]
fn cubicbez_deriv() {
// y = x^2
let c = CubicBez::new(
(0.0, 0.0),
(1.0 / 3.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0, 1.0),
);
let deriv = c.deriv();
let n = 10;
for i in 0..=n {
let t = (i as f64) * (n as f64).recip();
let delta = 1e-6;
let p = c.eval(t);
let p1 = c.eval(t + delta);
let d_approx = (p1 - p) * delta.recip();
let d = deriv.eval(t).to_vec2();
assert!((d - d_approx).hypot() < delta * 2.0);
}
}
#[test]
fn cubicbez_arclen() {
// y = x^2
let c = CubicBez::new(
(0.0, 0.0),
(1.0 / 3.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0, 1.0),
);
let true_arclen = 0.5 * libm::sqrt(5.0f64) + 0.25 * (2.0 + libm::sqrt(5.0f64)).ln();
for i in 0..12 {
let accuracy = 0.1f64.powi(i);
let error = c.arclen(accuracy) - true_arclen;
//println!("{:e}: {:e}", accuracy, error);
assert!(error.abs() < accuracy);
}
}
#[test]
fn cubicbez_inv_arclen() {
// y = x^2
let c = CubicBez::new(
(0.0, 0.0),
(1.0 / 3.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0, 1.0),
);
let true_arclen = 0.5 * libm::sqrt(5.0f64) + 0.25 * (2.0 + libm::sqrt(5.0f64)).ln();
for i in 0..12 {
let accuracy = 0.1f64.powi(i);
let n = 10;
for j in 0..=n {
let arc = (j as f64) * ((n as f64).recip() * true_arclen);
let t = c.inv_arclen(arc, accuracy * 0.5);
let actual_arc = c.subsegment(0.0..t).arclen(accuracy * 0.5);
assert!(
(arc - actual_arc).abs() < accuracy,
"at accuracy {:e}, wanted {} got {}",
accuracy,
actual_arc,
arc
);
}
}
}
#[test]
fn cubicbez_signed_area_linear() {
// y = 1 - x
let c = CubicBez::new(
(1.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0 / 3.0, 2.0 / 3.0),
(0.0, 1.0),
);
let epsilon = 1e-12;
assert_eq!((Affine::rotate(0.5) * c).signed_area(), 0.5);
assert!(((Affine::rotate(0.5) * c).signed_area() - 0.5).abs() < epsilon);
assert!(((Affine::translate((0.0, 1.0)) * c).signed_area() - 1.0).abs() < epsilon);
assert!(((Affine::translate((1.0, 0.0)) * c).signed_area() - 1.0).abs() < epsilon);
}
#[test]
fn cubicbez_signed_area() {
// y = 1 - x^3
let c = CubicBez::new((1.0, 0.0), (2.0 / 3.0, 1.0), (1.0 / 3.0, 1.0), (0.0, 1.0));
let epsilon = 1e-12;
assert!((c.signed_area() - 0.75).abs() < epsilon);
assert!(((Affine::rotate(0.5) * c).signed_area() - 0.75).abs() < epsilon);
assert!(((Affine::translate((0.0, 1.0)) * c).signed_area() - 1.25).abs() < epsilon);
assert!(((Affine::translate((1.0, 0.0)) * c).signed_area() - 1.25).abs() < epsilon);
}
#[test]
fn cubicbez_nearest() {
fn verify(result: (f64, f64), expected: f64) {
assert!(
(result.0 - expected).abs() < 1e-6,
"got {:?} expected {}",
result,
expected
);
}
// y = x^3
let c = CubicBez::new((0.0, 0.0), (1.0 / 3.0, 0.0), (2.0 / 3.0, 0.0), (1.0, 1.0));
verify(c.nearest((0.1, 0.001).into(), 1e-6), 0.1);
verify(c.nearest((0.2, 0.008).into(), 1e-6), 0.2);
verify(c.nearest((0.3, 0.027).into(), 1e-6), 0.3);
verify(c.nearest((0.4, 0.064).into(), 1e-6), 0.4);
verify(c.nearest((0.5, 0.125).into(), 1e-6), 0.5);
verify(c.nearest((0.6, 0.216).into(), 1e-6), 0.6);
verify(c.nearest((0.7, 0.343).into(), 1e-6), 0.7);
verify(c.nearest((0.8, 0.512).into(), 1e-6), 0.8);
verify(c.nearest((0.9, 0.729).into(), 1e-6), 0.9);
verify(c.nearest((1.0, 1.0).into(), 1e-6), 1.0);
verify(c.nearest((1.1, 1.1).into(), 1e-6), 1.0);
verify(c.nearest((-0.1, 0.0).into(), 1e-6), 0.0);
let a = Affine::rotate(0.5);
verify((a * c).nearest(a * Point::new(0.1, 0.001), 1e-6), 0.1);
}
#[test]
fn cubicbez_extrema() {
// y = x^2
let q = CubicBez::new((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0));
let extrema = q.extrema();
assert_eq!(extrema.len(), 1);
assert!((extrema[0] - 0.5).abs() < 1e-6);
let q = CubicBez::new((0.4, 0.5), (0.0, 1.0), (1.0, 0.0), (0.5, 0.4));
let extrema = q.extrema();
assert_eq!(extrema.len(), 4);
}
#[test]
fn cubicbez_toquads() {
// y = x^3
let c = CubicBez::new((0.0, 0.0), (1.0 / 3.0, 0.0), (2.0 / 3.0, 0.0), (1.0, 1.0));
for i in 0..10 {
let accuracy = 0.1f64.powi(i);
let mut _count = 0;
let mut worst: f64 = 0.0;
for (t0, t1, q) in c.to_quads(accuracy) {
_count += 1;
let epsilon = 1e-12;
assert!((q.start() - c.eval(t0)).hypot() < epsilon);
assert!((q.end() - c.eval(t1)).hypot() < epsilon);
let n = 4;
for j in 0..=n {
let t = (j as f64) * (n as f64).recip();
let p = q.eval(t);
let err = (p.y - p.x.powi(3)).abs();
worst = worst.max(err);
assert!(err < accuracy, "got {} wanted {}", err, accuracy);
}
}
//println!("accuracy {:e}: got {:e}, {} quads", accuracy, worst, _count);
}
}
}
| 0.5
} else { | conditional_block |
cubicbez.rs | //! Cubic Bézier segments.
use libm; ////
use core::ops::{Mul, Range}; ////
////use std::ops::{Mul, Range};
use introsort; ////
use crate::MAX_EXTREMA;
use arrayvec::ArrayVec;
use crate::common::solve_quadratic;
use crate::common::GAUSS_LEGENDRE_COEFFS_9;
use crate::{
Affine, ParamCurve, ParamCurveArclen, ParamCurveArea, ParamCurveCurvature, ParamCurveDeriv,
ParamCurveExtrema, ParamCurveNearest, Point, QuadBez,
};
/// A single cubic Bézier segment.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct CubicBez {
pub p0: Point,
pub p1: Point,
pub p2: Point,
pub p3: Point,
}
/// An iterator which produces quadratic Bézier segments.
struct ToQuads {
c: CubicBez,
max_hypot2: f64,
t: f64,
}
impl CubicBez {
/// Create a new cubic Bézier segment.
#[inline]
pub fn new<P: Into<Point>>(p0: P, p1: P, p2: P, p3: P) -> CubicBez {
CubicBez {
p0: p0.into(),
p1: p1.into(),
p2: p2.into(),
p3: p3.into(),
}
}
/// Convert to quadratic Béziers.
///
/// The iterator returns the start and end parameter in the cubic of each quadratic
/// segment, along with the quadratic.
///
/// Note that the resulting quadratic Béziers are not in general G1 continuous;
/// they are optimized for minimizing distance error.
#[inline]
pub fn to_quads(&self, accuracy: f64) -> impl Iterator<Item = (f64, f64, QuadBez)> {
// This magic number is the square of 36 / sqrt(3).
// See: http://caffeineowl.com/graphics/2d/vectorial/cubic2quad01.html
let max_hypot2 = 432.0 * accuracy * accuracy;
ToQuads {
c: *self,
max_hypot2,
t: 0.0,
}
}
}
impl ParamCurve for CubicBez {
#[inline]
fn eval(&self, t: f64) -> Point {
let mt = 1.0 - t;
let v = self.p0.to_vec2() * (mt * mt * mt)
+ (self.p1.to_vec2() * (mt * mt * 3.0)
+ (self.p2.to_vec2() * (mt * 3.0) + self.p3.to_vec2() * t) * t)
* t;
v.to_point()
}
#[inline]
fn start(&self) -> Point {
self.p0
}
#[inline]
fn end(&self) -> Point {
self.p3
}
fn subsegment(&self, range: Range<f64>) -> CubicBez {
let (t0, t1) = (range.start, range.end);
let p0 = self.eval(t0);
let p3 = self.eval(t1);
let d = self.deriv();
let scale = (t1 - t0) * (1.0 / 3.0);
let p1 = p0 + scale * d.eval(t0).to_vec2();
let p2 = p3 - scale * d.eval(t1).to_vec2();
CubicBez { p0, p1, p2, p3 }
}
/// Subdivide into halves, using de Casteljau.
#[inline]
fn subdivide(&self) -> (CubicBez, CubicBez) {
let pm = self.eval(0.5);
(
CubicBez::new(
self.p0,
self.p0.midpoint(self.p1),
((self.p0.to_vec2() + self.p1.to_vec2() * 2.0 + self.p2.to_vec2()) * 0.25)
.to_point(),
pm,
),
CubicBez::new(
pm,
((self.p1.to_vec2() + self.p2.to_vec2() * 2.0 + self.p3.to_vec2()) * 0.25)
.to_point(),
self.p2.midpoint(self.p3),
self.p3,
),
)
}
}
impl ParamCurveDeriv for CubicBez {
type DerivResult = QuadBez;
#[inline]
fn deriv(&self) -> QuadBez {
QuadBez::new(
(3.0 * (self.p1 - self.p0)).to_point(),
(3.0 * (self.p2 - self.p1)).to_point(),
(3.0 * (self.p3 - self.p2)).to_point(),
)
}
}
impl ParamCurveArclen for CubicBez {
/// Arclength of a cubic Bézier segment.
///
/// This is an adaptive subdivision approach using Legendre-Gauss quadrature
/// in the base case, and an error estimate to decide when to subdivide.
fn arclen(&self, accuracy: f64) -> f64 {
// Squared L2 norm of the second derivative of the cubic.
fn cubic_errnorm(c: &CubicBez) -> f64 {
let d = c.deriv().deriv();
let dd = d.end() - d.start();
d.start().to_vec2().hypot2() + d.start().to_vec2().dot(dd) + dd.hypot2() * (1.0 / 3.0)
}
fn est_gauss9_error(c: &CubicBez) -> f64 {
let lc = (c.p3 - c.p0).hypot();
let lp = (c.p1 - c.p0).hypot() + (c.p2 - c.p1).hypot() + (c.p3 - c.p2).hypot();
2.56e-8 * libm::pow(cubic_errnorm(c) / (lc * lc), 8 as f64) * lp ////
////2.56e-8 * (cubic_errnorm(c) / (lc * lc)).powi(8) * lp
}
const MAX_DEPTH: usize = 16;
fn rec(c: &CubicBez, accuracy: f64, depth: usize) -> f64 {
if depth == MAX_DEPTH || est_gauss9_error(c) < accuracy {
c.gauss_arclen(GAUSS_LEGENDRE_COEFFS_9)
} else {
let (c0, c1) = c.subdivide();
rec(&c0, accuracy * 0.5, depth + 1) + rec(&c1, accuracy * 0.5, depth + 1)
}
}
rec(self, accuracy, 0)
}
}
impl ParamCurveArea for CubicBez {
#[inline]
fn signed_area(&self) -> f64 {
| l ParamCurveNearest for CubicBez {
/// Find nearest point, using subdivision.
fn nearest(&self, p: Point, accuracy: f64) -> (f64, f64) {
let mut best_r = None;
let mut best_t = 0.0;
for (t0, t1, q) in self.to_quads(accuracy) {
let (t, r) = q.nearest(p, accuracy);
if best_r.map(|best_r| r < best_r).unwrap_or(true) {
best_t = t0 + t * (t1 - t0);
best_r = Some(r);
}
}
(best_t, best_r.unwrap())
}
}
impl ParamCurveCurvature for CubicBez {}
impl ParamCurveExtrema for CubicBez {
fn extrema(&self) -> ArrayVec<[f64; MAX_EXTREMA]> {
fn one_coord(result: &mut ArrayVec<[f64; MAX_EXTREMA]>, d0: f64, d1: f64, d2: f64) {
let a = d0 - 2.0 * d1 + d2;
let b = 2.0 * (d1 - d0);
let c = d0;
let roots = solve_quadratic(c, b, a);
for &t in &roots {
if t > 0.0 && t < 1.0 {
result.push(t);
}
}
}
let mut result = ArrayVec::<[f64; MAX_EXTREMA]>::new(); ////
////let mut result = ArrayVec::new();
let d0 = self.p1 - self.p0;
let d1 = self.p2 - self.p1;
let d2 = self.p3 - self.p2;
one_coord(&mut result, d0.x, d1.x, d2.x);
one_coord(&mut result, d0.y, d1.y, d2.y);
introsort::sort_by(&mut result, &|a, b| a.partial_cmp(b).unwrap()); ////
////result.sort_by(|a, b| a.partial_cmp(b).unwrap());
result
}
}
impl Mul<CubicBez> for Affine {
type Output = CubicBez;
#[inline]
fn mul(self, c: CubicBez) -> CubicBez {
CubicBez {
p0: self * c.p0,
p1: self * c.p1,
p2: self * c.p2,
p3: self * c.p3,
}
}
}
impl Iterator for ToQuads {
type Item = (f64, f64, QuadBez);
fn next(&mut self) -> Option<(f64, f64, QuadBez)> {
let t0 = self.t;
let mut t1 = 1.0;
if t0 == t1 {
return None;
}
loop {
let seg = self.c.subsegment(t0..t1);
// Compute error for candidate quadratic.
let p1x2 = 3.0 * seg.p1.to_vec2() - seg.p0.to_vec2();
let p2x2 = 3.0 * seg.p2.to_vec2() - seg.p3.to_vec2();
let err = (p2x2 - p1x2).hypot2();
//println!("{:?} {} {}", t0..t1, err, if err < self.max_hypot2 { "ok" } else { "" });
if err < self.max_hypot2 {
let result = QuadBez::new(seg.p0, ((p1x2 + p2x2) / 4.0).to_point(), seg.p3);
self.t = t1;
return Some((t0, t1, result));
} else {
let shrink = if t1 == 1.0 && err < 64.0 * self.max_hypot2 {
0.5
} else {
0.999_999 * libm::pow(self.max_hypot2 / err, 1. / 6.0) ////
////0.999_999 * (self.max_hypot2 / err).powf(1. / 6.0)
};
t1 = t0 + shrink * (t1 - t0);
}
}
}
}
#[cfg(test)]
mod tests {
use crate::{
Affine, CubicBez, ParamCurve, ParamCurveArclen, ParamCurveArea, ParamCurveDeriv,
ParamCurveExtrema, ParamCurveNearest, Point,
};
#[test]
fn cubicbez_deriv() {
// y = x^2
let c = CubicBez::new(
(0.0, 0.0),
(1.0 / 3.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0, 1.0),
);
let deriv = c.deriv();
let n = 10;
for i in 0..=n {
let t = (i as f64) * (n as f64).recip();
let delta = 1e-6;
let p = c.eval(t);
let p1 = c.eval(t + delta);
let d_approx = (p1 - p) * delta.recip();
let d = deriv.eval(t).to_vec2();
assert!((d - d_approx).hypot() < delta * 2.0);
}
}
#[test]
fn cubicbez_arclen() {
// y = x^2
let c = CubicBez::new(
(0.0, 0.0),
(1.0 / 3.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0, 1.0),
);
let true_arclen = 0.5 * libm::sqrt(5.0f64) + 0.25 * (2.0 + libm::sqrt(5.0f64)).ln();
for i in 0..12 {
let accuracy = 0.1f64.powi(i);
let error = c.arclen(accuracy) - true_arclen;
//println!("{:e}: {:e}", accuracy, error);
assert!(error.abs() < accuracy);
}
}
#[test]
fn cubicbez_inv_arclen() {
// y = x^2
let c = CubicBez::new(
(0.0, 0.0),
(1.0 / 3.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0, 1.0),
);
let true_arclen = 0.5 * libm::sqrt(5.0f64) + 0.25 * (2.0 + libm::sqrt(5.0f64)).ln();
for i in 0..12 {
let accuracy = 0.1f64.powi(i);
let n = 10;
for j in 0..=n {
let arc = (j as f64) * ((n as f64).recip() * true_arclen);
let t = c.inv_arclen(arc, accuracy * 0.5);
let actual_arc = c.subsegment(0.0..t).arclen(accuracy * 0.5);
assert!(
(arc - actual_arc).abs() < accuracy,
"at accuracy {:e}, wanted {} got {}",
accuracy,
actual_arc,
arc
);
}
}
}
#[test]
fn cubicbez_signed_area_linear() {
// y = 1 - x
let c = CubicBez::new(
(1.0, 0.0),
(2.0 / 3.0, 1.0 / 3.0),
(1.0 / 3.0, 2.0 / 3.0),
(0.0, 1.0),
);
let epsilon = 1e-12;
assert_eq!((Affine::rotate(0.5) * c).signed_area(), 0.5);
assert!(((Affine::rotate(0.5) * c).signed_area() - 0.5).abs() < epsilon);
assert!(((Affine::translate((0.0, 1.0)) * c).signed_area() - 1.0).abs() < epsilon);
assert!(((Affine::translate((1.0, 0.0)) * c).signed_area() - 1.0).abs() < epsilon);
}
#[test]
fn cubicbez_signed_area() {
// y = 1 - x^3
let c = CubicBez::new((1.0, 0.0), (2.0 / 3.0, 1.0), (1.0 / 3.0, 1.0), (0.0, 1.0));
let epsilon = 1e-12;
assert!((c.signed_area() - 0.75).abs() < epsilon);
assert!(((Affine::rotate(0.5) * c).signed_area() - 0.75).abs() < epsilon);
assert!(((Affine::translate((0.0, 1.0)) * c).signed_area() - 1.25).abs() < epsilon);
assert!(((Affine::translate((1.0, 0.0)) * c).signed_area() - 1.25).abs() < epsilon);
}
#[test]
fn cubicbez_nearest() {
fn verify(result: (f64, f64), expected: f64) {
assert!(
(result.0 - expected).abs() < 1e-6,
"got {:?} expected {}",
result,
expected
);
}
// y = x^3
let c = CubicBez::new((0.0, 0.0), (1.0 / 3.0, 0.0), (2.0 / 3.0, 0.0), (1.0, 1.0));
verify(c.nearest((0.1, 0.001).into(), 1e-6), 0.1);
verify(c.nearest((0.2, 0.008).into(), 1e-6), 0.2);
verify(c.nearest((0.3, 0.027).into(), 1e-6), 0.3);
verify(c.nearest((0.4, 0.064).into(), 1e-6), 0.4);
verify(c.nearest((0.5, 0.125).into(), 1e-6), 0.5);
verify(c.nearest((0.6, 0.216).into(), 1e-6), 0.6);
verify(c.nearest((0.7, 0.343).into(), 1e-6), 0.7);
verify(c.nearest((0.8, 0.512).into(), 1e-6), 0.8);
verify(c.nearest((0.9, 0.729).into(), 1e-6), 0.9);
verify(c.nearest((1.0, 1.0).into(), 1e-6), 1.0);
verify(c.nearest((1.1, 1.1).into(), 1e-6), 1.0);
verify(c.nearest((-0.1, 0.0).into(), 1e-6), 0.0);
let a = Affine::rotate(0.5);
verify((a * c).nearest(a * Point::new(0.1, 0.001), 1e-6), 0.1);
}
#[test]
fn cubicbez_extrema() {
// y = x^2
let q = CubicBez::new((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0));
let extrema = q.extrema();
assert_eq!(extrema.len(), 1);
assert!((extrema[0] - 0.5).abs() < 1e-6);
let q = CubicBez::new((0.4, 0.5), (0.0, 1.0), (1.0, 0.0), (0.5, 0.4));
let extrema = q.extrema();
assert_eq!(extrema.len(), 4);
}
#[test]
fn cubicbez_toquads() {
// y = x^3
let c = CubicBez::new((0.0, 0.0), (1.0 / 3.0, 0.0), (2.0 / 3.0, 0.0), (1.0, 1.0));
for i in 0..10 {
let accuracy = 0.1f64.powi(i);
let mut _count = 0;
let mut worst: f64 = 0.0;
for (t0, t1, q) in c.to_quads(accuracy) {
_count += 1;
let epsilon = 1e-12;
assert!((q.start() - c.eval(t0)).hypot() < epsilon);
assert!((q.end() - c.eval(t1)).hypot() < epsilon);
let n = 4;
for j in 0..=n {
let t = (j as f64) * (n as f64).recip();
let p = q.eval(t);
let err = (p.y - p.x.powi(3)).abs();
worst = worst.max(err);
assert!(err < accuracy, "got {} wanted {}", err, accuracy);
}
}
//println!("accuracy {:e}: got {:e}, {} quads", accuracy, worst, _count);
}
}
}
| (self.p0.x * (6.0 * self.p1.y + 3.0 * self.p2.y + self.p3.y)
+ 3.0
* (self.p1.x * (-2.0 * self.p0.y + self.p2.y + self.p3.y)
- self.p2.x * (self.p0.y + self.p1.y - 2.0 * self.p3.y))
- self.p3.x * (self.p0.y + 3.0 * self.p1.y + 6.0 * self.p2.y))
* (1.0 / 20.0)
}
}
imp | identifier_body |
logs.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package awscloudwatchreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscloudwatchreceiver"
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.uber.org/zap"
)
const (
noStreamName = "THIS IS INVALID STREAM"
)
type logsReceiver struct {
region string
profile string
imdsEndpoint string
pollInterval time.Duration
maxEventsPerRequest int
nextStartTime time.Time
groupRequests []groupRequest
autodiscover *AutodiscoverConfig
logger *zap.Logger
client client
consumer consumer.Logs
wg *sync.WaitGroup
doneChan chan bool
}
const maxLogGroupsPerDiscovery = int64(50)
type client interface {
DescribeLogGroupsWithContext(ctx context.Context, input *cloudwatchlogs.DescribeLogGroupsInput, opts ...request.Option) (*cloudwatchlogs.DescribeLogGroupsOutput, error)
FilterLogEventsWithContext(ctx context.Context, input *cloudwatchlogs.FilterLogEventsInput, opts ...request.Option) (*cloudwatchlogs.FilterLogEventsOutput, error)
}
type streamNames struct {
group string
names []*string
}
func (sn *streamNames) request(limit int, nextToken string, st, et *time.Time) *cloudwatchlogs.FilterLogEventsInput {
base := &cloudwatchlogs.FilterLogEventsInput{
LogGroupName: &sn.group,
StartTime: aws.Int64(st.UnixMilli()),
EndTime: aws.Int64(et.UnixMilli()),
Limit: aws.Int64(int64(limit)),
}
if len(sn.names) > 0 |
if nextToken != "" {
base.NextToken = aws.String(nextToken)
}
return base
}
func (sn *streamNames) groupName() string {
return sn.group
}
type streamPrefix struct {
group string
prefix *string
}
func (sp *streamPrefix) request(limit int, nextToken string, st, et *time.Time) *cloudwatchlogs.FilterLogEventsInput {
base := &cloudwatchlogs.FilterLogEventsInput{
LogGroupName: &sp.group,
StartTime: aws.Int64(st.UnixMilli()),
EndTime: aws.Int64(et.UnixMilli()),
Limit: aws.Int64(int64(limit)),
LogStreamNamePrefix: sp.prefix,
}
if nextToken != "" {
base.NextToken = aws.String(nextToken)
}
return base
}
func (sp *streamPrefix) groupName() string {
return sp.group
}
type groupRequest interface {
request(limit int, nextToken string, st, et *time.Time) *cloudwatchlogs.FilterLogEventsInput
groupName() string
}
func newLogsReceiver(cfg *Config, logger *zap.Logger, consumer consumer.Logs) *logsReceiver {
groups := []groupRequest{}
for logGroupName, sc := range cfg.Logs.Groups.NamedConfigs {
for _, prefix := range sc.Prefixes {
groups = append(groups, &streamPrefix{group: logGroupName, prefix: prefix})
}
if len(sc.Names) > 0 {
groups = append(groups, &streamNames{group: logGroupName, names: sc.Names})
}
}
// safeguard from using both
autodiscover := cfg.Logs.Groups.AutodiscoverConfig
if len(cfg.Logs.Groups.NamedConfigs) > 0 {
autodiscover = nil
}
return &logsReceiver{
region: cfg.Region,
profile: cfg.Profile,
consumer: consumer,
maxEventsPerRequest: cfg.Logs.MaxEventsPerRequest,
imdsEndpoint: cfg.IMDSEndpoint,
autodiscover: autodiscover,
pollInterval: cfg.Logs.PollInterval,
nextStartTime: time.Now().Add(-cfg.Logs.PollInterval),
groupRequests: groups,
logger: logger,
wg: &sync.WaitGroup{},
doneChan: make(chan bool),
}
}
func (l *logsReceiver) Start(ctx context.Context, _ component.Host) error {
l.logger.Debug("starting to poll for Cloudwatch logs")
l.wg.Add(1)
go l.startPolling(ctx)
return nil
}
func (l *logsReceiver) Shutdown(_ context.Context) error {
l.logger.Debug("shutting down logs receiver")
close(l.doneChan)
l.wg.Wait()
return nil
}
func (l *logsReceiver) startPolling(ctx context.Context) {
defer l.wg.Done()
t := time.NewTicker(l.pollInterval)
for {
select {
case <-ctx.Done():
return
case <-l.doneChan:
return
case <-t.C:
if l.autodiscover != nil {
group, err := l.discoverGroups(ctx, l.autodiscover)
if err != nil {
l.logger.Error("unable to perform discovery of log groups", zap.Error(err))
continue
}
l.groupRequests = group
}
err := l.poll(ctx)
if err != nil {
l.logger.Error("there was an error during the poll", zap.Error(err))
}
}
}
}
func (l *logsReceiver) poll(ctx context.Context) error {
var errs error
startTime := l.nextStartTime
endTime := time.Now()
for _, r := range l.groupRequests {
if err := l.pollForLogs(ctx, r, startTime, endTime); err != nil {
errs = errors.Join(errs, err)
}
}
l.nextStartTime = endTime
return errs
}
func (l *logsReceiver) pollForLogs(ctx context.Context, pc groupRequest, startTime, endTime time.Time) error {
err := l.ensureSession()
if err != nil {
return err
}
nextToken := aws.String("")
for nextToken != nil {
select {
// if done, we want to stop processing paginated stream of events
case _, ok := <-l.doneChan:
if !ok {
return nil
}
default:
input := pc.request(l.maxEventsPerRequest, *nextToken, &startTime, &endTime)
resp, err := l.client.FilterLogEventsWithContext(ctx, input)
if err != nil {
l.logger.Error("unable to retrieve logs from cloudwatch", zap.String("log group", pc.groupName()), zap.Error(err))
break
}
observedTime := pcommon.NewTimestampFromTime(time.Now())
logs := l.processEvents(observedTime, pc.groupName(), resp)
if logs.LogRecordCount() > 0 {
if err = l.consumer.ConsumeLogs(ctx, logs); err != nil {
l.logger.Error("unable to consume logs", zap.Error(err))
break
}
}
nextToken = resp.NextToken
}
}
return nil
}
func (l *logsReceiver) processEvents(now pcommon.Timestamp, logGroupName string, output *cloudwatchlogs.FilterLogEventsOutput) plog.Logs {
logs := plog.NewLogs()
resourceMap := map[string](map[string]*plog.ResourceLogs){}
for _, e := range output.Events {
if e.Timestamp == nil {
l.logger.Error("unable to determine timestamp of event as the timestamp is nil")
continue
}
if e.EventId == nil {
l.logger.Error("no event ID was present on the event, skipping entry")
continue
}
if e.Message == nil {
l.logger.Error("no message was present on the event", zap.String("event.id", *e.EventId))
continue
}
group, ok := resourceMap[logGroupName]
if !ok {
group = map[string]*plog.ResourceLogs{}
resourceMap[logGroupName] = group
}
logStreamName := noStreamName
if e.LogStreamName != nil {
logStreamName = *e.LogStreamName
}
resourceLogs, ok := group[logStreamName]
if !ok {
rl := logs.ResourceLogs().AppendEmpty()
resourceLogs = &rl
resourceAttributes := resourceLogs.Resource().Attributes()
resourceAttributes.PutStr("aws.region", l.region)
resourceAttributes.PutStr("cloudwatch.log.group.name", logGroupName)
resourceAttributes.PutStr("cloudwatch.log.stream", logStreamName)
group[logStreamName] = resourceLogs
// Ensure one scopeLogs is initialized so we can handle in standardized way going forward.
_ = resourceLogs.ScopeLogs().AppendEmpty()
}
// Now we know resourceLogs is initialized and has one scopeLogs so we don't have to handle any special cases.
logRecord := resourceLogs.ScopeLogs().At(0).LogRecords().AppendEmpty()
logRecord.SetObservedTimestamp(now)
ts := time.UnixMilli(*e.Timestamp)
logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts))
logRecord.Body().SetStr(*e.Message)
logRecord.Attributes().PutStr("id", *e.EventId)
}
return logs
}
func (l *logsReceiver) discoverGroups(ctx context.Context, auto *AutodiscoverConfig) ([]groupRequest, error) {
l.logger.Debug("attempting to discover log groups.", zap.Int("limit", auto.Limit))
groups := []groupRequest{}
err := l.ensureSession()
if err != nil {
return groups, fmt.Errorf("unable to establish a session to auto discover log groups: %w", err)
}
numGroups := 0
var nextToken = aws.String("")
for nextToken != nil {
if numGroups >= auto.Limit {
break
}
req := &cloudwatchlogs.DescribeLogGroupsInput{
Limit: aws.Int64(maxLogGroupsPerDiscovery),
}
if auto.Prefix != "" {
req.LogGroupNamePrefix = &auto.Prefix
}
dlgResults, err := l.client.DescribeLogGroupsWithContext(ctx, req)
if err != nil {
return groups, fmt.Errorf("unable to list log groups: %w", err)
}
for _, lg := range dlgResults.LogGroups {
if numGroups == auto.Limit {
l.logger.Debug("reached limit of the number of log groups to discover."+
"To increase the number of groups able to be discovered, please increase the autodiscover limit field.",
zap.Int("groups_discovered", numGroups), zap.Int("limit", auto.Limit))
break
}
numGroups++
l.logger.Debug("discovered log group", zap.String("log group", lg.GoString()))
// default behavior is to collect all if not stream filtered
if len(auto.Streams.Names) == 0 && len(auto.Streams.Prefixes) == 0 {
groups = append(groups, &streamNames{group: *lg.LogGroupName})
continue
}
for _, prefix := range auto.Streams.Prefixes {
groups = append(groups, &streamPrefix{group: *lg.LogGroupName, prefix: prefix})
}
if len(auto.Streams.Names) > 0 {
groups = append(groups, &streamNames{group: *lg.LogGroupName, names: auto.Streams.Names})
}
}
nextToken = dlgResults.NextToken
}
return groups, nil
}
func (l *logsReceiver) ensureSession() error {
if l.client != nil {
return nil
}
awsConfig := aws.NewConfig().WithRegion(l.region)
options := session.Options{
Config: *awsConfig,
}
if l.imdsEndpoint != "" {
options.EC2IMDSEndpoint = l.imdsEndpoint
}
if l.profile != "" {
options.Profile = l.profile
}
s, err := session.NewSessionWithOptions(options)
l.client = cloudwatchlogs.New(s)
return err
}
| {
base.LogStreamNames = sn.names
} | conditional_block |
logs.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package awscloudwatchreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscloudwatchreceiver"
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.uber.org/zap"
)
const (
noStreamName = "THIS IS INVALID STREAM"
)
type logsReceiver struct {
region string
profile string
imdsEndpoint string
pollInterval time.Duration
maxEventsPerRequest int
nextStartTime time.Time
groupRequests []groupRequest
autodiscover *AutodiscoverConfig
logger *zap.Logger
client client
consumer consumer.Logs
wg *sync.WaitGroup
doneChan chan bool
}
const maxLogGroupsPerDiscovery = int64(50)
type client interface {
DescribeLogGroupsWithContext(ctx context.Context, input *cloudwatchlogs.DescribeLogGroupsInput, opts ...request.Option) (*cloudwatchlogs.DescribeLogGroupsOutput, error)
FilterLogEventsWithContext(ctx context.Context, input *cloudwatchlogs.FilterLogEventsInput, opts ...request.Option) (*cloudwatchlogs.FilterLogEventsOutput, error)
}
type streamNames struct {
group string
names []*string
}
func (sn *streamNames) request(limit int, nextToken string, st, et *time.Time) *cloudwatchlogs.FilterLogEventsInput {
base := &cloudwatchlogs.FilterLogEventsInput{
LogGroupName: &sn.group,
StartTime: aws.Int64(st.UnixMilli()),
EndTime: aws.Int64(et.UnixMilli()),
Limit: aws.Int64(int64(limit)),
}
if len(sn.names) > 0 {
base.LogStreamNames = sn.names
}
if nextToken != "" {
base.NextToken = aws.String(nextToken)
}
return base
}
func (sn *streamNames) groupName() string {
return sn.group
}
type streamPrefix struct {
group string
prefix *string
}
func (sp *streamPrefix) request(limit int, nextToken string, st, et *time.Time) *cloudwatchlogs.FilterLogEventsInput |
func (sp *streamPrefix) groupName() string {
return sp.group
}
type groupRequest interface {
request(limit int, nextToken string, st, et *time.Time) *cloudwatchlogs.FilterLogEventsInput
groupName() string
}
func newLogsReceiver(cfg *Config, logger *zap.Logger, consumer consumer.Logs) *logsReceiver {
groups := []groupRequest{}
for logGroupName, sc := range cfg.Logs.Groups.NamedConfigs {
for _, prefix := range sc.Prefixes {
groups = append(groups, &streamPrefix{group: logGroupName, prefix: prefix})
}
if len(sc.Names) > 0 {
groups = append(groups, &streamNames{group: logGroupName, names: sc.Names})
}
}
// safeguard from using both
autodiscover := cfg.Logs.Groups.AutodiscoverConfig
if len(cfg.Logs.Groups.NamedConfigs) > 0 {
autodiscover = nil
}
return &logsReceiver{
region: cfg.Region,
profile: cfg.Profile,
consumer: consumer,
maxEventsPerRequest: cfg.Logs.MaxEventsPerRequest,
imdsEndpoint: cfg.IMDSEndpoint,
autodiscover: autodiscover,
pollInterval: cfg.Logs.PollInterval,
nextStartTime: time.Now().Add(-cfg.Logs.PollInterval),
groupRequests: groups,
logger: logger,
wg: &sync.WaitGroup{},
doneChan: make(chan bool),
}
}
func (l *logsReceiver) Start(ctx context.Context, _ component.Host) error {
l.logger.Debug("starting to poll for Cloudwatch logs")
l.wg.Add(1)
go l.startPolling(ctx)
return nil
}
func (l *logsReceiver) Shutdown(_ context.Context) error {
l.logger.Debug("shutting down logs receiver")
close(l.doneChan)
l.wg.Wait()
return nil
}
func (l *logsReceiver) startPolling(ctx context.Context) {
defer l.wg.Done()
t := time.NewTicker(l.pollInterval)
for {
select {
case <-ctx.Done():
return
case <-l.doneChan:
return
case <-t.C:
if l.autodiscover != nil {
group, err := l.discoverGroups(ctx, l.autodiscover)
if err != nil {
l.logger.Error("unable to perform discovery of log groups", zap.Error(err))
continue
}
l.groupRequests = group
}
err := l.poll(ctx)
if err != nil {
l.logger.Error("there was an error during the poll", zap.Error(err))
}
}
}
}
func (l *logsReceiver) poll(ctx context.Context) error {
var errs error
startTime := l.nextStartTime
endTime := time.Now()
for _, r := range l.groupRequests {
if err := l.pollForLogs(ctx, r, startTime, endTime); err != nil {
errs = errors.Join(errs, err)
}
}
l.nextStartTime = endTime
return errs
}
func (l *logsReceiver) pollForLogs(ctx context.Context, pc groupRequest, startTime, endTime time.Time) error {
err := l.ensureSession()
if err != nil {
return err
}
nextToken := aws.String("")
for nextToken != nil {
select {
// if done, we want to stop processing paginated stream of events
case _, ok := <-l.doneChan:
if !ok {
return nil
}
default:
input := pc.request(l.maxEventsPerRequest, *nextToken, &startTime, &endTime)
resp, err := l.client.FilterLogEventsWithContext(ctx, input)
if err != nil {
l.logger.Error("unable to retrieve logs from cloudwatch", zap.String("log group", pc.groupName()), zap.Error(err))
break
}
observedTime := pcommon.NewTimestampFromTime(time.Now())
logs := l.processEvents(observedTime, pc.groupName(), resp)
if logs.LogRecordCount() > 0 {
if err = l.consumer.ConsumeLogs(ctx, logs); err != nil {
l.logger.Error("unable to consume logs", zap.Error(err))
break
}
}
nextToken = resp.NextToken
}
}
return nil
}
func (l *logsReceiver) processEvents(now pcommon.Timestamp, logGroupName string, output *cloudwatchlogs.FilterLogEventsOutput) plog.Logs {
logs := plog.NewLogs()
resourceMap := map[string](map[string]*plog.ResourceLogs){}
for _, e := range output.Events {
if e.Timestamp == nil {
l.logger.Error("unable to determine timestamp of event as the timestamp is nil")
continue
}
if e.EventId == nil {
l.logger.Error("no event ID was present on the event, skipping entry")
continue
}
if e.Message == nil {
l.logger.Error("no message was present on the event", zap.String("event.id", *e.EventId))
continue
}
group, ok := resourceMap[logGroupName]
if !ok {
group = map[string]*plog.ResourceLogs{}
resourceMap[logGroupName] = group
}
logStreamName := noStreamName
if e.LogStreamName != nil {
logStreamName = *e.LogStreamName
}
resourceLogs, ok := group[logStreamName]
if !ok {
rl := logs.ResourceLogs().AppendEmpty()
resourceLogs = &rl
resourceAttributes := resourceLogs.Resource().Attributes()
resourceAttributes.PutStr("aws.region", l.region)
resourceAttributes.PutStr("cloudwatch.log.group.name", logGroupName)
resourceAttributes.PutStr("cloudwatch.log.stream", logStreamName)
group[logStreamName] = resourceLogs
// Ensure one scopeLogs is initialized so we can handle in standardized way going forward.
_ = resourceLogs.ScopeLogs().AppendEmpty()
}
// Now we know resourceLogs is initialized and has one scopeLogs so we don't have to handle any special cases.
logRecord := resourceLogs.ScopeLogs().At(0).LogRecords().AppendEmpty()
logRecord.SetObservedTimestamp(now)
ts := time.UnixMilli(*e.Timestamp)
logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts))
logRecord.Body().SetStr(*e.Message)
logRecord.Attributes().PutStr("id", *e.EventId)
}
return logs
}
func (l *logsReceiver) discoverGroups(ctx context.Context, auto *AutodiscoverConfig) ([]groupRequest, error) {
l.logger.Debug("attempting to discover log groups.", zap.Int("limit", auto.Limit))
groups := []groupRequest{}
err := l.ensureSession()
if err != nil {
return groups, fmt.Errorf("unable to establish a session to auto discover log groups: %w", err)
}
numGroups := 0
var nextToken = aws.String("")
for nextToken != nil {
if numGroups >= auto.Limit {
break
}
req := &cloudwatchlogs.DescribeLogGroupsInput{
Limit: aws.Int64(maxLogGroupsPerDiscovery),
}
if auto.Prefix != "" {
req.LogGroupNamePrefix = &auto.Prefix
}
dlgResults, err := l.client.DescribeLogGroupsWithContext(ctx, req)
if err != nil {
return groups, fmt.Errorf("unable to list log groups: %w", err)
}
for _, lg := range dlgResults.LogGroups {
if numGroups == auto.Limit {
l.logger.Debug("reached limit of the number of log groups to discover."+
"To increase the number of groups able to be discovered, please increase the autodiscover limit field.",
zap.Int("groups_discovered", numGroups), zap.Int("limit", auto.Limit))
break
}
numGroups++
l.logger.Debug("discovered log group", zap.String("log group", lg.GoString()))
// default behavior is to collect all if not stream filtered
if len(auto.Streams.Names) == 0 && len(auto.Streams.Prefixes) == 0 {
groups = append(groups, &streamNames{group: *lg.LogGroupName})
continue
}
for _, prefix := range auto.Streams.Prefixes {
groups = append(groups, &streamPrefix{group: *lg.LogGroupName, prefix: prefix})
}
if len(auto.Streams.Names) > 0 {
groups = append(groups, &streamNames{group: *lg.LogGroupName, names: auto.Streams.Names})
}
}
nextToken = dlgResults.NextToken
}
return groups, nil
}
func (l *logsReceiver) ensureSession() error {
if l.client != nil {
return nil
}
awsConfig := aws.NewConfig().WithRegion(l.region)
options := session.Options{
Config: *awsConfig,
}
if l.imdsEndpoint != "" {
options.EC2IMDSEndpoint = l.imdsEndpoint
}
if l.profile != "" {
options.Profile = l.profile
}
s, err := session.NewSessionWithOptions(options)
l.client = cloudwatchlogs.New(s)
return err
}
| {
base := &cloudwatchlogs.FilterLogEventsInput{
LogGroupName: &sp.group,
StartTime: aws.Int64(st.UnixMilli()),
EndTime: aws.Int64(et.UnixMilli()),
Limit: aws.Int64(int64(limit)),
LogStreamNamePrefix: sp.prefix,
}
if nextToken != "" {
base.NextToken = aws.String(nextToken)
}
return base
} | identifier_body |
logs.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package awscloudwatchreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscloudwatchreceiver"
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.uber.org/zap"
)
const (
noStreamName = "THIS IS INVALID STREAM"
)
type logsReceiver struct {
region string
profile string
imdsEndpoint string
pollInterval time.Duration
maxEventsPerRequest int
nextStartTime time.Time
groupRequests []groupRequest
autodiscover *AutodiscoverConfig
logger *zap.Logger
client client
consumer consumer.Logs
wg *sync.WaitGroup
doneChan chan bool
}
const maxLogGroupsPerDiscovery = int64(50)
type client interface {
DescribeLogGroupsWithContext(ctx context.Context, input *cloudwatchlogs.DescribeLogGroupsInput, opts ...request.Option) (*cloudwatchlogs.DescribeLogGroupsOutput, error)
FilterLogEventsWithContext(ctx context.Context, input *cloudwatchlogs.FilterLogEventsInput, opts ...request.Option) (*cloudwatchlogs.FilterLogEventsOutput, error)
}
type streamNames struct {
group string
names []*string
}
func (sn *streamNames) request(limit int, nextToken string, st, et *time.Time) *cloudwatchlogs.FilterLogEventsInput {
base := &cloudwatchlogs.FilterLogEventsInput{
LogGroupName: &sn.group,
StartTime: aws.Int64(st.UnixMilli()),
EndTime: aws.Int64(et.UnixMilli()),
Limit: aws.Int64(int64(limit)),
}
if len(sn.names) > 0 {
base.LogStreamNames = sn.names
}
if nextToken != "" {
base.NextToken = aws.String(nextToken)
}
return base
}
func (sn *streamNames) groupName() string {
return sn.group
}
type streamPrefix struct {
group string
prefix *string
}
func (sp *streamPrefix) request(limit int, nextToken string, st, et *time.Time) *cloudwatchlogs.FilterLogEventsInput {
base := &cloudwatchlogs.FilterLogEventsInput{
LogGroupName: &sp.group,
StartTime: aws.Int64(st.UnixMilli()),
EndTime: aws.Int64(et.UnixMilli()),
Limit: aws.Int64(int64(limit)),
LogStreamNamePrefix: sp.prefix,
}
if nextToken != "" {
base.NextToken = aws.String(nextToken)
}
return base
}
func (sp *streamPrefix) groupName() string {
return sp.group
}
type groupRequest interface {
request(limit int, nextToken string, st, et *time.Time) *cloudwatchlogs.FilterLogEventsInput
groupName() string
}
func newLogsReceiver(cfg *Config, logger *zap.Logger, consumer consumer.Logs) *logsReceiver {
groups := []groupRequest{}
for logGroupName, sc := range cfg.Logs.Groups.NamedConfigs {
for _, prefix := range sc.Prefixes {
groups = append(groups, &streamPrefix{group: logGroupName, prefix: prefix})
}
if len(sc.Names) > 0 {
groups = append(groups, &streamNames{group: logGroupName, names: sc.Names})
}
}
// safeguard from using both
autodiscover := cfg.Logs.Groups.AutodiscoverConfig
if len(cfg.Logs.Groups.NamedConfigs) > 0 {
autodiscover = nil
}
return &logsReceiver{
region: cfg.Region,
profile: cfg.Profile,
consumer: consumer,
maxEventsPerRequest: cfg.Logs.MaxEventsPerRequest,
imdsEndpoint: cfg.IMDSEndpoint,
autodiscover: autodiscover,
pollInterval: cfg.Logs.PollInterval,
nextStartTime: time.Now().Add(-cfg.Logs.PollInterval),
groupRequests: groups,
logger: logger,
wg: &sync.WaitGroup{},
doneChan: make(chan bool),
}
}
func (l *logsReceiver) Start(ctx context.Context, _ component.Host) error {
l.logger.Debug("starting to poll for Cloudwatch logs")
l.wg.Add(1)
go l.startPolling(ctx)
return nil
}
func (l *logsReceiver) Shutdown(_ context.Context) error {
l.logger.Debug("shutting down logs receiver")
close(l.doneChan)
l.wg.Wait()
return nil
}
func (l *logsReceiver) startPolling(ctx context.Context) {
defer l.wg.Done()
t := time.NewTicker(l.pollInterval)
for {
select {
case <-ctx.Done():
return
case <-l.doneChan:
return
case <-t.C:
if l.autodiscover != nil {
group, err := l.discoverGroups(ctx, l.autodiscover)
if err != nil {
l.logger.Error("unable to perform discovery of log groups", zap.Error(err))
continue
}
l.groupRequests = group
}
err := l.poll(ctx)
if err != nil {
l.logger.Error("there was an error during the poll", zap.Error(err))
}
}
}
} |
func (l *logsReceiver) poll(ctx context.Context) error {
var errs error
startTime := l.nextStartTime
endTime := time.Now()
for _, r := range l.groupRequests {
if err := l.pollForLogs(ctx, r, startTime, endTime); err != nil {
errs = errors.Join(errs, err)
}
}
l.nextStartTime = endTime
return errs
}
func (l *logsReceiver) pollForLogs(ctx context.Context, pc groupRequest, startTime, endTime time.Time) error {
err := l.ensureSession()
if err != nil {
return err
}
nextToken := aws.String("")
for nextToken != nil {
select {
// if done, we want to stop processing paginated stream of events
case _, ok := <-l.doneChan:
if !ok {
return nil
}
default:
input := pc.request(l.maxEventsPerRequest, *nextToken, &startTime, &endTime)
resp, err := l.client.FilterLogEventsWithContext(ctx, input)
if err != nil {
l.logger.Error("unable to retrieve logs from cloudwatch", zap.String("log group", pc.groupName()), zap.Error(err))
break
}
observedTime := pcommon.NewTimestampFromTime(time.Now())
logs := l.processEvents(observedTime, pc.groupName(), resp)
if logs.LogRecordCount() > 0 {
if err = l.consumer.ConsumeLogs(ctx, logs); err != nil {
l.logger.Error("unable to consume logs", zap.Error(err))
break
}
}
nextToken = resp.NextToken
}
}
return nil
}
func (l *logsReceiver) processEvents(now pcommon.Timestamp, logGroupName string, output *cloudwatchlogs.FilterLogEventsOutput) plog.Logs {
logs := plog.NewLogs()
resourceMap := map[string](map[string]*plog.ResourceLogs){}
for _, e := range output.Events {
if e.Timestamp == nil {
l.logger.Error("unable to determine timestamp of event as the timestamp is nil")
continue
}
if e.EventId == nil {
l.logger.Error("no event ID was present on the event, skipping entry")
continue
}
if e.Message == nil {
l.logger.Error("no message was present on the event", zap.String("event.id", *e.EventId))
continue
}
group, ok := resourceMap[logGroupName]
if !ok {
group = map[string]*plog.ResourceLogs{}
resourceMap[logGroupName] = group
}
logStreamName := noStreamName
if e.LogStreamName != nil {
logStreamName = *e.LogStreamName
}
resourceLogs, ok := group[logStreamName]
if !ok {
rl := logs.ResourceLogs().AppendEmpty()
resourceLogs = &rl
resourceAttributes := resourceLogs.Resource().Attributes()
resourceAttributes.PutStr("aws.region", l.region)
resourceAttributes.PutStr("cloudwatch.log.group.name", logGroupName)
resourceAttributes.PutStr("cloudwatch.log.stream", logStreamName)
group[logStreamName] = resourceLogs
// Ensure one scopeLogs is initialized so we can handle in standardized way going forward.
_ = resourceLogs.ScopeLogs().AppendEmpty()
}
// Now we know resourceLogs is initialized and has one scopeLogs so we don't have to handle any special cases.
logRecord := resourceLogs.ScopeLogs().At(0).LogRecords().AppendEmpty()
logRecord.SetObservedTimestamp(now)
ts := time.UnixMilli(*e.Timestamp)
logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts))
logRecord.Body().SetStr(*e.Message)
logRecord.Attributes().PutStr("id", *e.EventId)
}
return logs
}
func (l *logsReceiver) discoverGroups(ctx context.Context, auto *AutodiscoverConfig) ([]groupRequest, error) {
l.logger.Debug("attempting to discover log groups.", zap.Int("limit", auto.Limit))
groups := []groupRequest{}
err := l.ensureSession()
if err != nil {
return groups, fmt.Errorf("unable to establish a session to auto discover log groups: %w", err)
}
numGroups := 0
var nextToken = aws.String("")
for nextToken != nil {
if numGroups >= auto.Limit {
break
}
req := &cloudwatchlogs.DescribeLogGroupsInput{
Limit: aws.Int64(maxLogGroupsPerDiscovery),
}
if auto.Prefix != "" {
req.LogGroupNamePrefix = &auto.Prefix
}
dlgResults, err := l.client.DescribeLogGroupsWithContext(ctx, req)
if err != nil {
return groups, fmt.Errorf("unable to list log groups: %w", err)
}
for _, lg := range dlgResults.LogGroups {
if numGroups == auto.Limit {
l.logger.Debug("reached limit of the number of log groups to discover."+
"To increase the number of groups able to be discovered, please increase the autodiscover limit field.",
zap.Int("groups_discovered", numGroups), zap.Int("limit", auto.Limit))
break
}
numGroups++
l.logger.Debug("discovered log group", zap.String("log group", lg.GoString()))
// default behavior is to collect all if not stream filtered
if len(auto.Streams.Names) == 0 && len(auto.Streams.Prefixes) == 0 {
groups = append(groups, &streamNames{group: *lg.LogGroupName})
continue
}
for _, prefix := range auto.Streams.Prefixes {
groups = append(groups, &streamPrefix{group: *lg.LogGroupName, prefix: prefix})
}
if len(auto.Streams.Names) > 0 {
groups = append(groups, &streamNames{group: *lg.LogGroupName, names: auto.Streams.Names})
}
}
nextToken = dlgResults.NextToken
}
return groups, nil
}
func (l *logsReceiver) ensureSession() error {
if l.client != nil {
return nil
}
awsConfig := aws.NewConfig().WithRegion(l.region)
options := session.Options{
Config: *awsConfig,
}
if l.imdsEndpoint != "" {
options.EC2IMDSEndpoint = l.imdsEndpoint
}
if l.profile != "" {
options.Profile = l.profile
}
s, err := session.NewSessionWithOptions(options)
l.client = cloudwatchlogs.New(s)
return err
} | random_line_split | |
logs.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package awscloudwatchreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscloudwatchreceiver"
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.uber.org/zap"
)
const (
noStreamName = "THIS IS INVALID STREAM"
)
type logsReceiver struct {
region string
profile string
imdsEndpoint string
pollInterval time.Duration
maxEventsPerRequest int
nextStartTime time.Time
groupRequests []groupRequest
autodiscover *AutodiscoverConfig
logger *zap.Logger
client client
consumer consumer.Logs
wg *sync.WaitGroup
doneChan chan bool
}
const maxLogGroupsPerDiscovery = int64(50)
type client interface {
DescribeLogGroupsWithContext(ctx context.Context, input *cloudwatchlogs.DescribeLogGroupsInput, opts ...request.Option) (*cloudwatchlogs.DescribeLogGroupsOutput, error)
FilterLogEventsWithContext(ctx context.Context, input *cloudwatchlogs.FilterLogEventsInput, opts ...request.Option) (*cloudwatchlogs.FilterLogEventsOutput, error)
}
type streamNames struct {
group string
names []*string
}
func (sn *streamNames) request(limit int, nextToken string, st, et *time.Time) *cloudwatchlogs.FilterLogEventsInput {
base := &cloudwatchlogs.FilterLogEventsInput{
LogGroupName: &sn.group,
StartTime: aws.Int64(st.UnixMilli()),
EndTime: aws.Int64(et.UnixMilli()),
Limit: aws.Int64(int64(limit)),
}
if len(sn.names) > 0 {
base.LogStreamNames = sn.names
}
if nextToken != "" {
base.NextToken = aws.String(nextToken)
}
return base
}
func (sn *streamNames) groupName() string {
return sn.group
}
type streamPrefix struct {
group string
prefix *string
}
func (sp *streamPrefix) request(limit int, nextToken string, st, et *time.Time) *cloudwatchlogs.FilterLogEventsInput {
base := &cloudwatchlogs.FilterLogEventsInput{
LogGroupName: &sp.group,
StartTime: aws.Int64(st.UnixMilli()),
EndTime: aws.Int64(et.UnixMilli()),
Limit: aws.Int64(int64(limit)),
LogStreamNamePrefix: sp.prefix,
}
if nextToken != "" {
base.NextToken = aws.String(nextToken)
}
return base
}
func (sp *streamPrefix) groupName() string {
return sp.group
}
type groupRequest interface {
request(limit int, nextToken string, st, et *time.Time) *cloudwatchlogs.FilterLogEventsInput
groupName() string
}
func newLogsReceiver(cfg *Config, logger *zap.Logger, consumer consumer.Logs) *logsReceiver {
groups := []groupRequest{}
for logGroupName, sc := range cfg.Logs.Groups.NamedConfigs {
for _, prefix := range sc.Prefixes {
groups = append(groups, &streamPrefix{group: logGroupName, prefix: prefix})
}
if len(sc.Names) > 0 {
groups = append(groups, &streamNames{group: logGroupName, names: sc.Names})
}
}
// safeguard from using both
autodiscover := cfg.Logs.Groups.AutodiscoverConfig
if len(cfg.Logs.Groups.NamedConfigs) > 0 {
autodiscover = nil
}
return &logsReceiver{
region: cfg.Region,
profile: cfg.Profile,
consumer: consumer,
maxEventsPerRequest: cfg.Logs.MaxEventsPerRequest,
imdsEndpoint: cfg.IMDSEndpoint,
autodiscover: autodiscover,
pollInterval: cfg.Logs.PollInterval,
nextStartTime: time.Now().Add(-cfg.Logs.PollInterval),
groupRequests: groups,
logger: logger,
wg: &sync.WaitGroup{},
doneChan: make(chan bool),
}
}
func (l *logsReceiver) Start(ctx context.Context, _ component.Host) error {
l.logger.Debug("starting to poll for Cloudwatch logs")
l.wg.Add(1)
go l.startPolling(ctx)
return nil
}
func (l *logsReceiver) Shutdown(_ context.Context) error {
l.logger.Debug("shutting down logs receiver")
close(l.doneChan)
l.wg.Wait()
return nil
}
func (l *logsReceiver) startPolling(ctx context.Context) {
defer l.wg.Done()
t := time.NewTicker(l.pollInterval)
for {
select {
case <-ctx.Done():
return
case <-l.doneChan:
return
case <-t.C:
if l.autodiscover != nil {
group, err := l.discoverGroups(ctx, l.autodiscover)
if err != nil {
l.logger.Error("unable to perform discovery of log groups", zap.Error(err))
continue
}
l.groupRequests = group
}
err := l.poll(ctx)
if err != nil {
l.logger.Error("there was an error during the poll", zap.Error(err))
}
}
}
}
func (l *logsReceiver) | (ctx context.Context) error {
var errs error
startTime := l.nextStartTime
endTime := time.Now()
for _, r := range l.groupRequests {
if err := l.pollForLogs(ctx, r, startTime, endTime); err != nil {
errs = errors.Join(errs, err)
}
}
l.nextStartTime = endTime
return errs
}
func (l *logsReceiver) pollForLogs(ctx context.Context, pc groupRequest, startTime, endTime time.Time) error {
err := l.ensureSession()
if err != nil {
return err
}
nextToken := aws.String("")
for nextToken != nil {
select {
// if done, we want to stop processing paginated stream of events
case _, ok := <-l.doneChan:
if !ok {
return nil
}
default:
input := pc.request(l.maxEventsPerRequest, *nextToken, &startTime, &endTime)
resp, err := l.client.FilterLogEventsWithContext(ctx, input)
if err != nil {
l.logger.Error("unable to retrieve logs from cloudwatch", zap.String("log group", pc.groupName()), zap.Error(err))
break
}
observedTime := pcommon.NewTimestampFromTime(time.Now())
logs := l.processEvents(observedTime, pc.groupName(), resp)
if logs.LogRecordCount() > 0 {
if err = l.consumer.ConsumeLogs(ctx, logs); err != nil {
l.logger.Error("unable to consume logs", zap.Error(err))
break
}
}
nextToken = resp.NextToken
}
}
return nil
}
func (l *logsReceiver) processEvents(now pcommon.Timestamp, logGroupName string, output *cloudwatchlogs.FilterLogEventsOutput) plog.Logs {
logs := plog.NewLogs()
resourceMap := map[string](map[string]*plog.ResourceLogs){}
for _, e := range output.Events {
if e.Timestamp == nil {
l.logger.Error("unable to determine timestamp of event as the timestamp is nil")
continue
}
if e.EventId == nil {
l.logger.Error("no event ID was present on the event, skipping entry")
continue
}
if e.Message == nil {
l.logger.Error("no message was present on the event", zap.String("event.id", *e.EventId))
continue
}
group, ok := resourceMap[logGroupName]
if !ok {
group = map[string]*plog.ResourceLogs{}
resourceMap[logGroupName] = group
}
logStreamName := noStreamName
if e.LogStreamName != nil {
logStreamName = *e.LogStreamName
}
resourceLogs, ok := group[logStreamName]
if !ok {
rl := logs.ResourceLogs().AppendEmpty()
resourceLogs = &rl
resourceAttributes := resourceLogs.Resource().Attributes()
resourceAttributes.PutStr("aws.region", l.region)
resourceAttributes.PutStr("cloudwatch.log.group.name", logGroupName)
resourceAttributes.PutStr("cloudwatch.log.stream", logStreamName)
group[logStreamName] = resourceLogs
// Ensure one scopeLogs is initialized so we can handle in standardized way going forward.
_ = resourceLogs.ScopeLogs().AppendEmpty()
}
// Now we know resourceLogs is initialized and has one scopeLogs so we don't have to handle any special cases.
logRecord := resourceLogs.ScopeLogs().At(0).LogRecords().AppendEmpty()
logRecord.SetObservedTimestamp(now)
ts := time.UnixMilli(*e.Timestamp)
logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts))
logRecord.Body().SetStr(*e.Message)
logRecord.Attributes().PutStr("id", *e.EventId)
}
return logs
}
func (l *logsReceiver) discoverGroups(ctx context.Context, auto *AutodiscoverConfig) ([]groupRequest, error) {
l.logger.Debug("attempting to discover log groups.", zap.Int("limit", auto.Limit))
groups := []groupRequest{}
err := l.ensureSession()
if err != nil {
return groups, fmt.Errorf("unable to establish a session to auto discover log groups: %w", err)
}
numGroups := 0
var nextToken = aws.String("")
for nextToken != nil {
if numGroups >= auto.Limit {
break
}
req := &cloudwatchlogs.DescribeLogGroupsInput{
Limit: aws.Int64(maxLogGroupsPerDiscovery),
}
if auto.Prefix != "" {
req.LogGroupNamePrefix = &auto.Prefix
}
dlgResults, err := l.client.DescribeLogGroupsWithContext(ctx, req)
if err != nil {
return groups, fmt.Errorf("unable to list log groups: %w", err)
}
for _, lg := range dlgResults.LogGroups {
if numGroups == auto.Limit {
l.logger.Debug("reached limit of the number of log groups to discover."+
"To increase the number of groups able to be discovered, please increase the autodiscover limit field.",
zap.Int("groups_discovered", numGroups), zap.Int("limit", auto.Limit))
break
}
numGroups++
l.logger.Debug("discovered log group", zap.String("log group", lg.GoString()))
// default behavior is to collect all if not stream filtered
if len(auto.Streams.Names) == 0 && len(auto.Streams.Prefixes) == 0 {
groups = append(groups, &streamNames{group: *lg.LogGroupName})
continue
}
for _, prefix := range auto.Streams.Prefixes {
groups = append(groups, &streamPrefix{group: *lg.LogGroupName, prefix: prefix})
}
if len(auto.Streams.Names) > 0 {
groups = append(groups, &streamNames{group: *lg.LogGroupName, names: auto.Streams.Names})
}
}
nextToken = dlgResults.NextToken
}
return groups, nil
}
func (l *logsReceiver) ensureSession() error {
if l.client != nil {
return nil
}
awsConfig := aws.NewConfig().WithRegion(l.region)
options := session.Options{
Config: *awsConfig,
}
if l.imdsEndpoint != "" {
options.EC2IMDSEndpoint = l.imdsEndpoint
}
if l.profile != "" {
options.Profile = l.profile
}
s, err := session.NewSessionWithOptions(options)
l.client = cloudwatchlogs.New(s)
return err
}
| poll | identifier_name |
compliancescan_types.go | package v1alpha1
import (
"errors"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// ComplianceScanRescanAnnotation indicates that a ComplianceScan
// should be re-run
const ComplianceScanRescanAnnotation = "compliance.openshift.io/rescan"
// ComplianceScanLabel serves as an indicator for which ComplianceScan
// owns the referenced object
const ComplianceScanLabel = "compliance.openshift.io/scan-name"
// ScriptLabel defines that the object is a script for a scan object
const ScriptLabel = "complianceoperator.openshift.io/scan-script"
// ResultLabel defines that the object is a result of a scan
const ResultLabel = "complianceoperator.openshift.io/scan-result"
// ScanFinalizer is a finalizer for ComplianceScans. It gets automatically
// added by the ComplianceScan controller in order to delete resources.
const ScanFinalizer = "scan.finalizers.compliance.openshift.io"
// DefaultRawStorageSize specifies the default storage size where the raw
// results will be stored at
const DefaultRawStorageSize = "1Gi"
const DefaultStorageRotation = 3
var ErrUnkownScanType = errors.New("Unknown scan type")
// Represents the status of the compliance scan run.
type ComplianceScanStatusPhase string
const (
// PhasePending represents the scan pending to be scheduled
PhasePending ComplianceScanStatusPhase = "PENDING"
// PhaseLaunching represents being scheduled and launching pods to run the scans
PhaseLaunching ComplianceScanStatusPhase = "LAUNCHING"
// PhaseRunning represents the scan being ran by the pods and waiting for the results
PhaseRunning ComplianceScanStatusPhase = "RUNNING"
// PhaseAggregating represents the scan aggregating the results
PhaseAggregating ComplianceScanStatusPhase = "AGGREGATING"
// PhaseDone represents the scan pods being done and the results being available
PhaseDone ComplianceScanStatusPhase = "DONE"
)
func stateCompare(lowPhase ComplianceScanStatusPhase, scanPhase ComplianceScanStatusPhase) ComplianceScanStatusPhase {
orderedStates := make(map[ComplianceScanStatusPhase]int)
orderedStates[PhasePending] = 0
orderedStates[PhaseLaunching] = 1
orderedStates[PhaseRunning] = 2
orderedStates[PhaseAggregating] = 3
orderedStates[PhaseDone] = 4
if orderedStates[lowPhase] > orderedStates[scanPhase] {
return scanPhase
}
return lowPhase
}
// Represents the result of the compliance scan
type ComplianceScanStatusResult string
// CmScanResultAnnotation holds the processed scanner result
const CmScanResultAnnotation = "compliance.openshift.io/scan-result"
// CmScanResultErrMsg holds the processed scanner error message
const CmScanResultErrMsg = "compliance.openshift.io/scan-error-msg"
const (
// ResultNot available represents the compliance scan not having finished yet
ResultNotAvailable ComplianceScanStatusResult = "NOT-AVAILABLE"
// ResultCompliant represents the compliance scan having succeeded
ResultCompliant ComplianceScanStatusResult = "COMPLIANT"
// ResultNotApplicable represents the compliance scan having no useful results after finished
ResultNotApplicable ComplianceScanStatusResult = "NOT-APPLICABLE"
// ResultError represents a compliance scan pod having failed to run the scan or encountered an error
ResultError ComplianceScanStatusResult = "ERROR"
// ResultNonCompliant represents the compliance scan having found a gap
ResultNonCompliant ComplianceScanStatusResult = "NON-COMPLIANT"
// ResultInconsistent represents checks differing across the machines
ResultInconsistent ComplianceScanStatusResult = "INCONSISTENT"
ScanTypeNode ComplianceScanType = "Node"
ScanTypePlatform ComplianceScanType = "Platform"
)
func resultCompare(lowResult ComplianceScanStatusResult, scanResult ComplianceScanStatusResult) ComplianceScanStatusResult {
orderedResults := make(map[ComplianceScanStatusResult]int)
orderedResults[ResultNotAvailable] = 0
orderedResults[ResultError] = 1
orderedResults[ResultInconsistent] = 2
orderedResults[ResultNonCompliant] = 3
orderedResults[ResultNotApplicable] = 4
orderedResults[ResultCompliant] = 5
if orderedResults[lowResult] > orderedResults[scanResult] {
return scanResult
}
return lowResult
}
// TailoringConfigMapRef is a reference to a ConfigMap that contains the
// tailoring file. It assumes a key called `tailoring.xml` which will
// have the tailoring contents.
type TailoringConfigMapRef struct {
// Name of the ConfigMap being referenced
Name string `json:"name"`
}
// ComplianceScanType
// +k8s:openapi-gen=true
type ComplianceScanType string
// When changing the defaults, remember to change also the DefaultRawStorageSize and
// DefaultStorageRotation constants
type RawResultStorageSettings struct {
// Specifies the amount of storage to ask for storing the raw results. Note that
// if re-scans happen, the new results will also need to be stored. Defaults to 1Gi.
// +kubebuilder:validation:Default=1Gi
// +kubebuilder:default="1Gi"
Size string `json:"size,omitempty"`
// Specifies the amount of scans for which the raw results will be stored.
// Older results will get rotated, and it's the responsibility of administrators
// to store these results elsewhere before rotation happens. Note that a rotation
// policy of '0' disables rotation entirely. Defaults to 3.
// +kubebuilder:default=3
Rotation uint16 `json:"rotation,omitempty"`
// Specifies the StorageClassName to use when creating the PersistentVolumeClaim
// to hold the raw results. By default this is null, which will attempt to use the
// default storage class configured in the cluster. If there is no default class specified
// then this needs to be set.
// +nullable
StorageClassName *string `json:"storageClassName,omitempty"`
// Specifies the access modes that the PersistentVolume will be created with.
// The persistent volume will hold the raw results of the scan.
// +kubebuilder:default={"ReadWriteOnce"}
PVAccessModes []corev1.PersistentVolumeAccessMode `json:"pvAccessModes,omitempty"`
// By setting this, it's possible to configure where the result server instances
// are run. These instances will mount a Persistent Volume to store the raw
// results, so special care should be taken to schedule these in trusted nodes.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Specifies tolerations needed for the result server to run on the nodes. This is useful
// in case the target set of nodes have custom taints that don't allow certain
// workloads to run. Defaults to allowing scheduling on master nodes.
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
}
// ComplianceScanSettings groups together settings of a ComplianceScan
// +k8s:openapi-gen=true
type ComplianceScanSettings struct {
// Enable debug logging of workloads and OpenSCAP
Debug bool `json:"debug,omitempty"`
// Specifies settings that pertain to raw result storage.
RawResultStorage RawResultStorageSettings `json:"rawResultStorage,omitempty"`
// Defines that no external resources in the Data Stream should be used. External
// resources could be, for instance, CVE feeds. This is useful for disconnected
// installations without access to a proxy.
NoExternalResources bool `json:"noExternalResources,omitempty"`
// It is recommended to set the proxy via the config.openshift.io/Proxy object
// Defines a proxy for the scan to get external resources from. This is useful for
// disconnected installations with access to a proxy.
HTTPSProxy string `json:"httpsProxy,omitempty"`
// Specifies tolerations needed for the scan to run on the nodes. This is useful
// in case the target set of nodes have custom taints that don't allow certain
// workloads to run. Defaults to allowing scheduling on all nodes.
// +kubebuilder:default={{operator: "Exists"}}
ScanTolerations []corev1.Toleration `json:"scanTolerations,omitempty"`
// Defines whether the scan should proceed if we're not able to
// scan all the nodes or not. `true` means that the operator
// should be strict and error out. `false` means that we don't
// need to be strict and we can proceed.
// +kubebuilder:default=true
StrictNodeScan *bool `json:"strictNodeScan,omitempty"`
// Specifies what to do with remediations of Enforcement type. If left empty,
// this defaults to "off" which doesn't create nor apply any enforcement remediations.
// If set to "all" this creates any enforcement remediations it encounters.
// Subsequently, this can also be set to a specific type. e.g. setting it to
// "gatekeeper" will apply any enforcement remediations relevant to the
// Gatekeeper OPA system.
// These objects will annotated in the content itself with:
// complianceascode.io/enforcement-type: <type>
RemediationEnforcement string `json:"remediationEnforcement,omitempty"`
// Determines whether to hide or show results that are not applicable.
// +kubebuilder:default=false
ShowNotApplicable bool `json:"showNotApplicable,omitempty"`
}
// ComplianceScanSpec defines the desired state of ComplianceScan
// +k8s:openapi-gen=true
type ComplianceScanSpec struct {
// The type of Compliance scan.
// +kubebuilder:default=Node
ScanType ComplianceScanType `json:"scanType,omitempty"`
// Is the image with the content (Data Stream), that will be used to run
// OpenSCAP.
ContentImage string `json:"contentImage,omitempty"`
// Is the profile in the data stream to be used. This is the collection of
// rules that will be checked for.
Profile string `json:"profile,omitempty"`
// A Rule can be specified if the scan should check only for a specific
// rule. Note that when leaving this empty, the scan will check for all the
// rules for a specific profile.
Rule string `json:"rule,omitempty"`
// Is the path to the file that contains the content (the data stream).
// Note that the path needs to be relative to the `/` (root) directory, as
// it is in the ContentImage
Content string `json:"content,omitempty"`
// By setting this, it's possible to only run the scan on certain nodes in
// the cluster. Note that when applying remediations generated from the
// scan, this should match the selector of the MachineConfigPool you want
// to apply the remediations to.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Is a reference to a ConfigMap that contains the
// tailoring file. It assumes a key called `tailoring.xml` which will
// have the tailoring contents.
TailoringConfigMap *TailoringConfigMapRef `json:"tailoringConfigMap,omitempty"`
ComplianceScanSettings `json:",inline"`
}
// ComplianceScanStatus defines the observed state of ComplianceScan
// +k8s:openapi-gen=true
type ComplianceScanStatus struct {
// Is the phase where the scan is at. Normally, one must wait for the scan
// to reach the phase DONE.
Phase ComplianceScanStatusPhase `json:"phase,omitempty"`
// Once the scan reaches the phase DONE, this will contain the result of
// the scan. Where COMPLIANT means that the scan succeeded; NON-COMPLIANT
// means that there were rule violations; and ERROR means that the scan
// couldn't complete due to an issue.
Result ComplianceScanStatusResult `json:"result,omitempty"`
// If there are issues on the scan, this will be filled up with an error
// message.
ErrorMessage string `json:"errormsg,omitempty"`
// Specifies the current index of the scan. Given multiple scans, this marks the
// amount that have been executed.
CurrentIndex int64 `json:"currentIndex,omitempty"`
// Specifies the object that's storing the raw results for the scan.
ResultsStorage StorageReference `json:"resultsStorage,omitempty"`
// If there are warnings on the scan, this will be filled up with warning
// messages.
Warnings string `json:"warnings,omitempty"`
}
// StorageReference stores a reference to where certain objects are being stored
type StorageReference struct {
// Kind of the referent.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
Kind string `json:"kind,omitempty"`
// Namespace of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
// +optional
Namespace string `json:"namespace,omitempty"`
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// +optional
Name string `json:"name,omitempty"`
// API version of the referent.
// +optional
APIVersion string `json:"apiVersion,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComplianceScan represents a scan with a certain configuration that will be
// applied to objects of a certain entity in the host. These could be nodes
// that apply to a certain nodeSelector, or the cluster itself.
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=compliancescans,scope=Namespaced,shortName=scans;scan
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=`.status.phase`
// +kubebuilder:printcolumn:name="Result",type="string",JSONPath=`.status.result`
type ComplianceScan struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// The spec is the configuration for the compliance scan.
Spec ComplianceScanSpec `json:"spec,omitempty"`
// The status will give valuable information on what's going on with the
// scan; and, more importantly, if the scan is successful (compliant) or
// not (non-compliant)
Status ComplianceScanStatus `json:"status,omitempty"`
}
// NeedsRescan indicates whether a ComplianceScan needs to
// rescan or not
func (cs *ComplianceScan) NeedsRescan() bool {
annotations := cs.GetAnnotations()
if annotations == nil {
return false
}
_, needsRescan := annotations[ComplianceScanRescanAnnotation]
return needsRescan
}
// GetScanTypeIfValid returns scan type if the scan has a valid one, else it returns
// an error
func (cs *ComplianceScan) GetScanTypeIfValid() (ComplianceScanType, error) {
if strings.ToLower(string(cs.Spec.ScanType)) == strings.ToLower(string(ScanTypePlatform)) {
return ScanTypePlatform, nil
}
if strings.ToLower(string(cs.Spec.ScanType)) == strings.ToLower(string(ScanTypeNode)) {
return ScanTypeNode, nil
}
return "", ErrUnkownScanType
}
// GetScanType get's the scan type for a scan
func (cs *ComplianceScan) | () ComplianceScanType {
scantype, err := cs.GetScanTypeIfValid()
if err != nil {
// This shouldn't happen
panic(err)
}
return scantype
}
// Returns whether remediation enforcement is off or not
func (cs *ComplianceScan) RemediationEnforcementIsOff() bool {
return (strings.EqualFold(cs.Spec.RemediationEnforcement, RemediationEnforcementEmpty) ||
strings.EqualFold(cs.Spec.RemediationEnforcement, RemediationEnforcementOff))
}
// Returns whether remediation enforcement is off or not
func (cs *ComplianceScan) RemediationEnforcementTypeMatches(etype string) bool {
return (strings.EqualFold(cs.Spec.RemediationEnforcement, RemediationEnforcementAll) ||
strings.EqualFold(cs.Spec.RemediationEnforcement, etype))
}
// GetScanType get's the scan type for a scan
func (cs *ComplianceScan) IsStrictNodeScan() bool {
// strictNodeScan should be true by default
if cs.Spec.StrictNodeScan == nil {
return true
}
return *cs.Spec.StrictNodeScan
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComplianceScanList contains a list of ComplianceScan
type ComplianceScanList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ComplianceScan `json:"items"`
}
func init() {
SchemeBuilder.Register(&ComplianceScan{}, &ComplianceScanList{})
}
| GetScanType | identifier_name |
compliancescan_types.go | package v1alpha1
import (
"errors"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// ComplianceScanRescanAnnotation indicates that a ComplianceScan
// should be re-run
const ComplianceScanRescanAnnotation = "compliance.openshift.io/rescan"
// ComplianceScanLabel serves as an indicator for which ComplianceScan
// owns the referenced object
const ComplianceScanLabel = "compliance.openshift.io/scan-name"
// ScriptLabel defines that the object is a script for a scan object
const ScriptLabel = "complianceoperator.openshift.io/scan-script"
// ResultLabel defines that the object is a result of a scan
const ResultLabel = "complianceoperator.openshift.io/scan-result"
// ScanFinalizer is a finalizer for ComplianceScans. It gets automatically
// added by the ComplianceScan controller in order to delete resources.
const ScanFinalizer = "scan.finalizers.compliance.openshift.io"
// DefaultRawStorageSize specifies the default storage size where the raw
// results will be stored at
const DefaultRawStorageSize = "1Gi"
const DefaultStorageRotation = 3
var ErrUnkownScanType = errors.New("Unknown scan type")
// Represents the status of the compliance scan run.
type ComplianceScanStatusPhase string
const (
// PhasePending represents the scan pending to be scheduled
PhasePending ComplianceScanStatusPhase = "PENDING"
// PhaseLaunching represents being scheduled and launching pods to run the scans
PhaseLaunching ComplianceScanStatusPhase = "LAUNCHING"
// PhaseRunning represents the scan being ran by the pods and waiting for the results
PhaseRunning ComplianceScanStatusPhase = "RUNNING"
// PhaseAggregating represents the scan aggregating the results
PhaseAggregating ComplianceScanStatusPhase = "AGGREGATING"
// PhaseDone represents the scan pods being done and the results being available
PhaseDone ComplianceScanStatusPhase = "DONE"
)
func stateCompare(lowPhase ComplianceScanStatusPhase, scanPhase ComplianceScanStatusPhase) ComplianceScanStatusPhase {
orderedStates := make(map[ComplianceScanStatusPhase]int)
orderedStates[PhasePending] = 0
orderedStates[PhaseLaunching] = 1
orderedStates[PhaseRunning] = 2
orderedStates[PhaseAggregating] = 3
orderedStates[PhaseDone] = 4
if orderedStates[lowPhase] > orderedStates[scanPhase] {
return scanPhase
}
return lowPhase
}
// Represents the result of the compliance scan
type ComplianceScanStatusResult string
// CmScanResultAnnotation holds the processed scanner result
const CmScanResultAnnotation = "compliance.openshift.io/scan-result"
// CmScanResultErrMsg holds the processed scanner error message
const CmScanResultErrMsg = "compliance.openshift.io/scan-error-msg"
const (
// ResultNot available represents the compliance scan not having finished yet
ResultNotAvailable ComplianceScanStatusResult = "NOT-AVAILABLE"
// ResultCompliant represents the compliance scan having succeeded
ResultCompliant ComplianceScanStatusResult = "COMPLIANT"
// ResultNotApplicable represents the compliance scan having no useful results after finished
ResultNotApplicable ComplianceScanStatusResult = "NOT-APPLICABLE"
// ResultError represents a compliance scan pod having failed to run the scan or encountered an error
ResultError ComplianceScanStatusResult = "ERROR"
// ResultNonCompliant represents the compliance scan having found a gap
ResultNonCompliant ComplianceScanStatusResult = "NON-COMPLIANT"
// ResultInconsistent represents checks differing across the machines
ResultInconsistent ComplianceScanStatusResult = "INCONSISTENT"
ScanTypeNode ComplianceScanType = "Node"
ScanTypePlatform ComplianceScanType = "Platform"
)
func resultCompare(lowResult ComplianceScanStatusResult, scanResult ComplianceScanStatusResult) ComplianceScanStatusResult {
orderedResults := make(map[ComplianceScanStatusResult]int)
orderedResults[ResultNotAvailable] = 0
orderedResults[ResultError] = 1
orderedResults[ResultInconsistent] = 2
orderedResults[ResultNonCompliant] = 3
orderedResults[ResultNotApplicable] = 4
orderedResults[ResultCompliant] = 5
if orderedResults[lowResult] > orderedResults[scanResult] {
return scanResult
}
return lowResult
}
// TailoringConfigMapRef is a reference to a ConfigMap that contains the
// tailoring file. It assumes a key called `tailoring.xml` which will
// have the tailoring contents.
type TailoringConfigMapRef struct {
// Name of the ConfigMap being referenced
Name string `json:"name"`
}
// ComplianceScanType
// +k8s:openapi-gen=true
type ComplianceScanType string
// When changing the defaults, remember to change also the DefaultRawStorageSize and
// DefaultStorageRotation constants
type RawResultStorageSettings struct {
// Specifies the amount of storage to ask for storing the raw results. Note that
// if re-scans happen, the new results will also need to be stored. Defaults to 1Gi.
// +kubebuilder:validation:Default=1Gi
// +kubebuilder:default="1Gi"
Size string `json:"size,omitempty"`
// Specifies the amount of scans for which the raw results will be stored.
// Older results will get rotated, and it's the responsibility of administrators
// to store these results elsewhere before rotation happens. Note that a rotation
// policy of '0' disables rotation entirely. Defaults to 3.
// +kubebuilder:default=3
Rotation uint16 `json:"rotation,omitempty"`
// Specifies the StorageClassName to use when creating the PersistentVolumeClaim
// to hold the raw results. By default this is null, which will attempt to use the
// default storage class configured in the cluster. If there is no default class specified
// then this needs to be set.
// +nullable
StorageClassName *string `json:"storageClassName,omitempty"`
// Specifies the access modes that the PersistentVolume will be created with.
// The persistent volume will hold the raw results of the scan.
// +kubebuilder:default={"ReadWriteOnce"}
PVAccessModes []corev1.PersistentVolumeAccessMode `json:"pvAccessModes,omitempty"`
// By setting this, it's possible to configure where the result server instances
// are run. These instances will mount a Persistent Volume to store the raw
// results, so special care should be taken to schedule these in trusted nodes.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Specifies tolerations needed for the result server to run on the nodes. This is useful
// in case the target set of nodes have custom taints that don't allow certain
// workloads to run. Defaults to allowing scheduling on master nodes.
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
}
// ComplianceScanSettings groups together settings of a ComplianceScan
// +k8s:openapi-gen=true
type ComplianceScanSettings struct {
// Enable debug logging of workloads and OpenSCAP
Debug bool `json:"debug,omitempty"`
// Specifies settings that pertain to raw result storage.
RawResultStorage RawResultStorageSettings `json:"rawResultStorage,omitempty"`
// Defines that no external resources in the Data Stream should be used. External
// resources could be, for instance, CVE feeds. This is useful for disconnected
// installations without access to a proxy.
NoExternalResources bool `json:"noExternalResources,omitempty"`
// It is recommended to set the proxy via the config.openshift.io/Proxy object
// Defines a proxy for the scan to get external resources from. This is useful for
// disconnected installations with access to a proxy.
HTTPSProxy string `json:"httpsProxy,omitempty"`
// Specifies tolerations needed for the scan to run on the nodes. This is useful
// in case the target set of nodes have custom taints that don't allow certain
// workloads to run. Defaults to allowing scheduling on all nodes.
// +kubebuilder:default={{operator: "Exists"}}
ScanTolerations []corev1.Toleration `json:"scanTolerations,omitempty"`
// Defines whether the scan should proceed if we're not able to
// scan all the nodes or not. `true` means that the operator
// should be strict and error out. `false` means that we don't
// need to be strict and we can proceed.
// +kubebuilder:default=true
StrictNodeScan *bool `json:"strictNodeScan,omitempty"`
// Specifies what to do with remediations of Enforcement type. If left empty,
// this defaults to "off" which doesn't create nor apply any enforcement remediations.
// If set to "all" this creates any enforcement remediations it encounters.
// Subsequently, this can also be set to a specific type. e.g. setting it to
// "gatekeeper" will apply any enforcement remediations relevant to the
// Gatekeeper OPA system.
// These objects will annotated in the content itself with:
// complianceascode.io/enforcement-type: <type>
RemediationEnforcement string `json:"remediationEnforcement,omitempty"`
// Determines whether to hide or show results that are not applicable.
// +kubebuilder:default=false
ShowNotApplicable bool `json:"showNotApplicable,omitempty"`
}
// ComplianceScanSpec defines the desired state of ComplianceScan
// +k8s:openapi-gen=true
type ComplianceScanSpec struct {
// The type of Compliance scan.
// +kubebuilder:default=Node
ScanType ComplianceScanType `json:"scanType,omitempty"`
// Is the image with the content (Data Stream), that will be used to run
// OpenSCAP.
ContentImage string `json:"contentImage,omitempty"`
// Is the profile in the data stream to be used. This is the collection of
// rules that will be checked for.
Profile string `json:"profile,omitempty"`
// A Rule can be specified if the scan should check only for a specific
// rule. Note that when leaving this empty, the scan will check for all the
// rules for a specific profile.
Rule string `json:"rule,omitempty"` | // Is the path to the file that contains the content (the data stream).
// Note that the path needs to be relative to the `/` (root) directory, as
// it is in the ContentImage
Content string `json:"content,omitempty"`
// By setting this, it's possible to only run the scan on certain nodes in
// the cluster. Note that when applying remediations generated from the
// scan, this should match the selector of the MachineConfigPool you want
// to apply the remediations to.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Is a reference to a ConfigMap that contains the
// tailoring file. It assumes a key called `tailoring.xml` which will
// have the tailoring contents.
TailoringConfigMap *TailoringConfigMapRef `json:"tailoringConfigMap,omitempty"`
ComplianceScanSettings `json:",inline"`
}
// ComplianceScanStatus defines the observed state of ComplianceScan
// +k8s:openapi-gen=true
type ComplianceScanStatus struct {
// Is the phase where the scan is at. Normally, one must wait for the scan
// to reach the phase DONE.
Phase ComplianceScanStatusPhase `json:"phase,omitempty"`
// Once the scan reaches the phase DONE, this will contain the result of
// the scan. Where COMPLIANT means that the scan succeeded; NON-COMPLIANT
// means that there were rule violations; and ERROR means that the scan
// couldn't complete due to an issue.
Result ComplianceScanStatusResult `json:"result,omitempty"`
// If there are issues on the scan, this will be filled up with an error
// message.
ErrorMessage string `json:"errormsg,omitempty"`
// Specifies the current index of the scan. Given multiple scans, this marks the
// amount that have been executed.
CurrentIndex int64 `json:"currentIndex,omitempty"`
// Specifies the object that's storing the raw results for the scan.
ResultsStorage StorageReference `json:"resultsStorage,omitempty"`
// If there are warnings on the scan, this will be filled up with warning
// messages.
Warnings string `json:"warnings,omitempty"`
}
// StorageReference stores a reference to where certain objects are being stored
type StorageReference struct {
// Kind of the referent.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
Kind string `json:"kind,omitempty"`
// Namespace of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
// +optional
Namespace string `json:"namespace,omitempty"`
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// +optional
Name string `json:"name,omitempty"`
// API version of the referent.
// +optional
APIVersion string `json:"apiVersion,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComplianceScan represents a scan with a certain configuration that will be
// applied to objects of a certain entity in the host. These could be nodes
// that apply to a certain nodeSelector, or the cluster itself.
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=compliancescans,scope=Namespaced,shortName=scans;scan
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=`.status.phase`
// +kubebuilder:printcolumn:name="Result",type="string",JSONPath=`.status.result`
type ComplianceScan struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// The spec is the configuration for the compliance scan.
Spec ComplianceScanSpec `json:"spec,omitempty"`
// The status will give valuable information on what's going on with the
// scan; and, more importantly, if the scan is successful (compliant) or
// not (non-compliant)
Status ComplianceScanStatus `json:"status,omitempty"`
}
// NeedsRescan indicates whether a ComplianceScan needs to
// rescan or not
func (cs *ComplianceScan) NeedsRescan() bool {
annotations := cs.GetAnnotations()
if annotations == nil {
return false
}
_, needsRescan := annotations[ComplianceScanRescanAnnotation]
return needsRescan
}
// GetScanTypeIfValid returns scan type if the scan has a valid one, else it returns
// an error
func (cs *ComplianceScan) GetScanTypeIfValid() (ComplianceScanType, error) {
if strings.ToLower(string(cs.Spec.ScanType)) == strings.ToLower(string(ScanTypePlatform)) {
return ScanTypePlatform, nil
}
if strings.ToLower(string(cs.Spec.ScanType)) == strings.ToLower(string(ScanTypeNode)) {
return ScanTypeNode, nil
}
return "", ErrUnkownScanType
}
// GetScanType get's the scan type for a scan
func (cs *ComplianceScan) GetScanType() ComplianceScanType {
scantype, err := cs.GetScanTypeIfValid()
if err != nil {
// This shouldn't happen
panic(err)
}
return scantype
}
// Returns whether remediation enforcement is off or not
func (cs *ComplianceScan) RemediationEnforcementIsOff() bool {
return (strings.EqualFold(cs.Spec.RemediationEnforcement, RemediationEnforcementEmpty) ||
strings.EqualFold(cs.Spec.RemediationEnforcement, RemediationEnforcementOff))
}
// Returns whether remediation enforcement is off or not
func (cs *ComplianceScan) RemediationEnforcementTypeMatches(etype string) bool {
return (strings.EqualFold(cs.Spec.RemediationEnforcement, RemediationEnforcementAll) ||
strings.EqualFold(cs.Spec.RemediationEnforcement, etype))
}
// GetScanType get's the scan type for a scan
func (cs *ComplianceScan) IsStrictNodeScan() bool {
// strictNodeScan should be true by default
if cs.Spec.StrictNodeScan == nil {
return true
}
return *cs.Spec.StrictNodeScan
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComplianceScanList contains a list of ComplianceScan
type ComplianceScanList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ComplianceScan `json:"items"`
}
func init() {
SchemeBuilder.Register(&ComplianceScan{}, &ComplianceScanList{})
} | random_line_split | |
compliancescan_types.go | package v1alpha1
import (
"errors"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// ComplianceScanRescanAnnotation indicates that a ComplianceScan
// should be re-run
const ComplianceScanRescanAnnotation = "compliance.openshift.io/rescan"
// ComplianceScanLabel serves as an indicator for which ComplianceScan
// owns the referenced object
const ComplianceScanLabel = "compliance.openshift.io/scan-name"
// ScriptLabel defines that the object is a script for a scan object
const ScriptLabel = "complianceoperator.openshift.io/scan-script"
// ResultLabel defines that the object is a result of a scan
const ResultLabel = "complianceoperator.openshift.io/scan-result"
// ScanFinalizer is a finalizer for ComplianceScans. It gets automatically
// added by the ComplianceScan controller in order to delete resources.
const ScanFinalizer = "scan.finalizers.compliance.openshift.io"
// DefaultRawStorageSize specifies the default storage size where the raw
// results will be stored at
const DefaultRawStorageSize = "1Gi"
const DefaultStorageRotation = 3
var ErrUnkownScanType = errors.New("Unknown scan type")
// Represents the status of the compliance scan run.
type ComplianceScanStatusPhase string
const (
// PhasePending represents the scan pending to be scheduled
PhasePending ComplianceScanStatusPhase = "PENDING"
// PhaseLaunching represents being scheduled and launching pods to run the scans
PhaseLaunching ComplianceScanStatusPhase = "LAUNCHING"
// PhaseRunning represents the scan being ran by the pods and waiting for the results
PhaseRunning ComplianceScanStatusPhase = "RUNNING"
// PhaseAggregating represents the scan aggregating the results
PhaseAggregating ComplianceScanStatusPhase = "AGGREGATING"
// PhaseDone represents the scan pods being done and the results being available
PhaseDone ComplianceScanStatusPhase = "DONE"
)
func stateCompare(lowPhase ComplianceScanStatusPhase, scanPhase ComplianceScanStatusPhase) ComplianceScanStatusPhase {
orderedStates := make(map[ComplianceScanStatusPhase]int)
orderedStates[PhasePending] = 0
orderedStates[PhaseLaunching] = 1
orderedStates[PhaseRunning] = 2
orderedStates[PhaseAggregating] = 3
orderedStates[PhaseDone] = 4
if orderedStates[lowPhase] > orderedStates[scanPhase] {
return scanPhase
}
return lowPhase
}
// Represents the result of the compliance scan
type ComplianceScanStatusResult string
// CmScanResultAnnotation holds the processed scanner result
const CmScanResultAnnotation = "compliance.openshift.io/scan-result"
// CmScanResultErrMsg holds the processed scanner error message
const CmScanResultErrMsg = "compliance.openshift.io/scan-error-msg"
const (
// ResultNot available represents the compliance scan not having finished yet
ResultNotAvailable ComplianceScanStatusResult = "NOT-AVAILABLE"
// ResultCompliant represents the compliance scan having succeeded
ResultCompliant ComplianceScanStatusResult = "COMPLIANT"
// ResultNotApplicable represents the compliance scan having no useful results after finished
ResultNotApplicable ComplianceScanStatusResult = "NOT-APPLICABLE"
// ResultError represents a compliance scan pod having failed to run the scan or encountered an error
ResultError ComplianceScanStatusResult = "ERROR"
// ResultNonCompliant represents the compliance scan having found a gap
ResultNonCompliant ComplianceScanStatusResult = "NON-COMPLIANT"
// ResultInconsistent represents checks differing across the machines
ResultInconsistent ComplianceScanStatusResult = "INCONSISTENT"
ScanTypeNode ComplianceScanType = "Node"
ScanTypePlatform ComplianceScanType = "Platform"
)
func resultCompare(lowResult ComplianceScanStatusResult, scanResult ComplianceScanStatusResult) ComplianceScanStatusResult {
orderedResults := make(map[ComplianceScanStatusResult]int)
orderedResults[ResultNotAvailable] = 0
orderedResults[ResultError] = 1
orderedResults[ResultInconsistent] = 2
orderedResults[ResultNonCompliant] = 3
orderedResults[ResultNotApplicable] = 4
orderedResults[ResultCompliant] = 5
if orderedResults[lowResult] > orderedResults[scanResult] {
return scanResult
}
return lowResult
}
// TailoringConfigMapRef is a reference to a ConfigMap that contains the
// tailoring file. It assumes a key called `tailoring.xml` which will
// have the tailoring contents.
type TailoringConfigMapRef struct {
// Name of the ConfigMap being referenced
Name string `json:"name"`
}
// ComplianceScanType
// +k8s:openapi-gen=true
type ComplianceScanType string
// When changing the defaults, remember to change also the DefaultRawStorageSize and
// DefaultStorageRotation constants
type RawResultStorageSettings struct {
// Specifies the amount of storage to ask for storing the raw results. Note that
// if re-scans happen, the new results will also need to be stored. Defaults to 1Gi.
// +kubebuilder:validation:Default=1Gi
// +kubebuilder:default="1Gi"
Size string `json:"size,omitempty"`
// Specifies the amount of scans for which the raw results will be stored.
// Older results will get rotated, and it's the responsibility of administrators
// to store these results elsewhere before rotation happens. Note that a rotation
// policy of '0' disables rotation entirely. Defaults to 3.
// +kubebuilder:default=3
Rotation uint16 `json:"rotation,omitempty"`
// Specifies the StorageClassName to use when creating the PersistentVolumeClaim
// to hold the raw results. By default this is null, which will attempt to use the
// default storage class configured in the cluster. If there is no default class specified
// then this needs to be set.
// +nullable
StorageClassName *string `json:"storageClassName,omitempty"`
// Specifies the access modes that the PersistentVolume will be created with.
// The persistent volume will hold the raw results of the scan.
// +kubebuilder:default={"ReadWriteOnce"}
PVAccessModes []corev1.PersistentVolumeAccessMode `json:"pvAccessModes,omitempty"`
// By setting this, it's possible to configure where the result server instances
// are run. These instances will mount a Persistent Volume to store the raw
// results, so special care should be taken to schedule these in trusted nodes.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Specifies tolerations needed for the result server to run on the nodes. This is useful
// in case the target set of nodes have custom taints that don't allow certain
// workloads to run. Defaults to allowing scheduling on master nodes.
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
}
// ComplianceScanSettings groups together settings of a ComplianceScan
// +k8s:openapi-gen=true
type ComplianceScanSettings struct {
// Enable debug logging of workloads and OpenSCAP
Debug bool `json:"debug,omitempty"`
// Specifies settings that pertain to raw result storage.
RawResultStorage RawResultStorageSettings `json:"rawResultStorage,omitempty"`
// Defines that no external resources in the Data Stream should be used. External
// resources could be, for instance, CVE feeds. This is useful for disconnected
// installations without access to a proxy.
NoExternalResources bool `json:"noExternalResources,omitempty"`
// It is recommended to set the proxy via the config.openshift.io/Proxy object
// Defines a proxy for the scan to get external resources from. This is useful for
// disconnected installations with access to a proxy.
HTTPSProxy string `json:"httpsProxy,omitempty"`
// Specifies tolerations needed for the scan to run on the nodes. This is useful
// in case the target set of nodes have custom taints that don't allow certain
// workloads to run. Defaults to allowing scheduling on all nodes.
// +kubebuilder:default={{operator: "Exists"}}
ScanTolerations []corev1.Toleration `json:"scanTolerations,omitempty"`
// Defines whether the scan should proceed if we're not able to
// scan all the nodes or not. `true` means that the operator
// should be strict and error out. `false` means that we don't
// need to be strict and we can proceed.
// +kubebuilder:default=true
StrictNodeScan *bool `json:"strictNodeScan,omitempty"`
// Specifies what to do with remediations of Enforcement type. If left empty,
// this defaults to "off" which doesn't create nor apply any enforcement remediations.
// If set to "all" this creates any enforcement remediations it encounters.
// Subsequently, this can also be set to a specific type. e.g. setting it to
// "gatekeeper" will apply any enforcement remediations relevant to the
// Gatekeeper OPA system.
// These objects will annotated in the content itself with:
// complianceascode.io/enforcement-type: <type>
RemediationEnforcement string `json:"remediationEnforcement,omitempty"`
// Determines whether to hide or show results that are not applicable.
// +kubebuilder:default=false
ShowNotApplicable bool `json:"showNotApplicable,omitempty"`
}
// ComplianceScanSpec defines the desired state of ComplianceScan
// +k8s:openapi-gen=true
type ComplianceScanSpec struct {
// The type of Compliance scan.
// +kubebuilder:default=Node
ScanType ComplianceScanType `json:"scanType,omitempty"`
// Is the image with the content (Data Stream), that will be used to run
// OpenSCAP.
ContentImage string `json:"contentImage,omitempty"`
// Is the profile in the data stream to be used. This is the collection of
// rules that will be checked for.
Profile string `json:"profile,omitempty"`
// A Rule can be specified if the scan should check only for a specific
// rule. Note that when leaving this empty, the scan will check for all the
// rules for a specific profile.
Rule string `json:"rule,omitempty"`
// Is the path to the file that contains the content (the data stream).
// Note that the path needs to be relative to the `/` (root) directory, as
// it is in the ContentImage
Content string `json:"content,omitempty"`
// By setting this, it's possible to only run the scan on certain nodes in
// the cluster. Note that when applying remediations generated from the
// scan, this should match the selector of the MachineConfigPool you want
// to apply the remediations to.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Is a reference to a ConfigMap that contains the
// tailoring file. It assumes a key called `tailoring.xml` which will
// have the tailoring contents.
TailoringConfigMap *TailoringConfigMapRef `json:"tailoringConfigMap,omitempty"`
ComplianceScanSettings `json:",inline"`
}
// ComplianceScanStatus defines the observed state of ComplianceScan
// +k8s:openapi-gen=true
type ComplianceScanStatus struct {
// Is the phase where the scan is at. Normally, one must wait for the scan
// to reach the phase DONE.
Phase ComplianceScanStatusPhase `json:"phase,omitempty"`
// Once the scan reaches the phase DONE, this will contain the result of
// the scan. Where COMPLIANT means that the scan succeeded; NON-COMPLIANT
// means that there were rule violations; and ERROR means that the scan
// couldn't complete due to an issue.
Result ComplianceScanStatusResult `json:"result,omitempty"`
// If there are issues on the scan, this will be filled up with an error
// message.
ErrorMessage string `json:"errormsg,omitempty"`
// Specifies the current index of the scan. Given multiple scans, this marks the
// amount that have been executed.
CurrentIndex int64 `json:"currentIndex,omitempty"`
// Specifies the object that's storing the raw results for the scan.
ResultsStorage StorageReference `json:"resultsStorage,omitempty"`
// If there are warnings on the scan, this will be filled up with warning
// messages.
Warnings string `json:"warnings,omitempty"`
}
// StorageReference stores a reference to where certain objects are being stored
type StorageReference struct {
// Kind of the referent.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
Kind string `json:"kind,omitempty"`
// Namespace of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
// +optional
Namespace string `json:"namespace,omitempty"`
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// +optional
Name string `json:"name,omitempty"`
// API version of the referent.
// +optional
APIVersion string `json:"apiVersion,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComplianceScan represents a scan with a certain configuration that will be
// applied to objects of a certain entity in the host. These could be nodes
// that apply to a certain nodeSelector, or the cluster itself.
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=compliancescans,scope=Namespaced,shortName=scans;scan
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=`.status.phase`
// +kubebuilder:printcolumn:name="Result",type="string",JSONPath=`.status.result`
type ComplianceScan struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// The spec is the configuration for the compliance scan.
Spec ComplianceScanSpec `json:"spec,omitempty"`
// The status will give valuable information on what's going on with the
// scan; and, more importantly, if the scan is successful (compliant) or
// not (non-compliant)
Status ComplianceScanStatus `json:"status,omitempty"`
}
// NeedsRescan indicates whether a ComplianceScan needs to
// rescan or not
func (cs *ComplianceScan) NeedsRescan() bool {
annotations := cs.GetAnnotations()
if annotations == nil {
return false
}
_, needsRescan := annotations[ComplianceScanRescanAnnotation]
return needsRescan
}
// GetScanTypeIfValid returns scan type if the scan has a valid one, else it returns
// an error
func (cs *ComplianceScan) GetScanTypeIfValid() (ComplianceScanType, error) |
// GetScanType get's the scan type for a scan
func (cs *ComplianceScan) GetScanType() ComplianceScanType {
scantype, err := cs.GetScanTypeIfValid()
if err != nil {
// This shouldn't happen
panic(err)
}
return scantype
}
// Returns whether remediation enforcement is off or not
func (cs *ComplianceScan) RemediationEnforcementIsOff() bool {
return (strings.EqualFold(cs.Spec.RemediationEnforcement, RemediationEnforcementEmpty) ||
strings.EqualFold(cs.Spec.RemediationEnforcement, RemediationEnforcementOff))
}
// Returns whether remediation enforcement is off or not
func (cs *ComplianceScan) RemediationEnforcementTypeMatches(etype string) bool {
return (strings.EqualFold(cs.Spec.RemediationEnforcement, RemediationEnforcementAll) ||
strings.EqualFold(cs.Spec.RemediationEnforcement, etype))
}
// GetScanType get's the scan type for a scan
func (cs *ComplianceScan) IsStrictNodeScan() bool {
// strictNodeScan should be true by default
if cs.Spec.StrictNodeScan == nil {
return true
}
return *cs.Spec.StrictNodeScan
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComplianceScanList contains a list of ComplianceScan
type ComplianceScanList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ComplianceScan `json:"items"`
}
func init() {
SchemeBuilder.Register(&ComplianceScan{}, &ComplianceScanList{})
}
| {
if strings.ToLower(string(cs.Spec.ScanType)) == strings.ToLower(string(ScanTypePlatform)) {
return ScanTypePlatform, nil
}
if strings.ToLower(string(cs.Spec.ScanType)) == strings.ToLower(string(ScanTypeNode)) {
return ScanTypeNode, nil
}
return "", ErrUnkownScanType
} | identifier_body |
compliancescan_types.go | package v1alpha1
import (
"errors"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// ComplianceScanRescanAnnotation indicates that a ComplianceScan
// should be re-run
const ComplianceScanRescanAnnotation = "compliance.openshift.io/rescan"
// ComplianceScanLabel serves as an indicator for which ComplianceScan
// owns the referenced object
const ComplianceScanLabel = "compliance.openshift.io/scan-name"
// ScriptLabel defines that the object is a script for a scan object
const ScriptLabel = "complianceoperator.openshift.io/scan-script"
// ResultLabel defines that the object is a result of a scan
const ResultLabel = "complianceoperator.openshift.io/scan-result"
// ScanFinalizer is a finalizer for ComplianceScans. It gets automatically
// added by the ComplianceScan controller in order to delete resources.
const ScanFinalizer = "scan.finalizers.compliance.openshift.io"
// DefaultRawStorageSize specifies the default storage size where the raw
// results will be stored at
const DefaultRawStorageSize = "1Gi"
const DefaultStorageRotation = 3
var ErrUnkownScanType = errors.New("Unknown scan type")
// Represents the status of the compliance scan run.
type ComplianceScanStatusPhase string
const (
// PhasePending represents the scan pending to be scheduled
PhasePending ComplianceScanStatusPhase = "PENDING"
// PhaseLaunching represents being scheduled and launching pods to run the scans
PhaseLaunching ComplianceScanStatusPhase = "LAUNCHING"
// PhaseRunning represents the scan being ran by the pods and waiting for the results
PhaseRunning ComplianceScanStatusPhase = "RUNNING"
// PhaseAggregating represents the scan aggregating the results
PhaseAggregating ComplianceScanStatusPhase = "AGGREGATING"
// PhaseDone represents the scan pods being done and the results being available
PhaseDone ComplianceScanStatusPhase = "DONE"
)
func stateCompare(lowPhase ComplianceScanStatusPhase, scanPhase ComplianceScanStatusPhase) ComplianceScanStatusPhase {
orderedStates := make(map[ComplianceScanStatusPhase]int)
orderedStates[PhasePending] = 0
orderedStates[PhaseLaunching] = 1
orderedStates[PhaseRunning] = 2
orderedStates[PhaseAggregating] = 3
orderedStates[PhaseDone] = 4
if orderedStates[lowPhase] > orderedStates[scanPhase] {
return scanPhase
}
return lowPhase
}
// Represents the result of the compliance scan
type ComplianceScanStatusResult string
// CmScanResultAnnotation holds the processed scanner result
const CmScanResultAnnotation = "compliance.openshift.io/scan-result"
// CmScanResultErrMsg holds the processed scanner error message
const CmScanResultErrMsg = "compliance.openshift.io/scan-error-msg"
const (
// ResultNot available represents the compliance scan not having finished yet
ResultNotAvailable ComplianceScanStatusResult = "NOT-AVAILABLE"
// ResultCompliant represents the compliance scan having succeeded
ResultCompliant ComplianceScanStatusResult = "COMPLIANT"
// ResultNotApplicable represents the compliance scan having no useful results after finished
ResultNotApplicable ComplianceScanStatusResult = "NOT-APPLICABLE"
// ResultError represents a compliance scan pod having failed to run the scan or encountered an error
ResultError ComplianceScanStatusResult = "ERROR"
// ResultNonCompliant represents the compliance scan having found a gap
ResultNonCompliant ComplianceScanStatusResult = "NON-COMPLIANT"
// ResultInconsistent represents checks differing across the machines
ResultInconsistent ComplianceScanStatusResult = "INCONSISTENT"
ScanTypeNode ComplianceScanType = "Node"
ScanTypePlatform ComplianceScanType = "Platform"
)
func resultCompare(lowResult ComplianceScanStatusResult, scanResult ComplianceScanStatusResult) ComplianceScanStatusResult {
orderedResults := make(map[ComplianceScanStatusResult]int)
orderedResults[ResultNotAvailable] = 0
orderedResults[ResultError] = 1
orderedResults[ResultInconsistent] = 2
orderedResults[ResultNonCompliant] = 3
orderedResults[ResultNotApplicable] = 4
orderedResults[ResultCompliant] = 5
if orderedResults[lowResult] > orderedResults[scanResult] {
return scanResult
}
return lowResult
}
// TailoringConfigMapRef is a reference to a ConfigMap that contains the
// tailoring file. It assumes a key called `tailoring.xml` which will
// have the tailoring contents.
type TailoringConfigMapRef struct {
// Name of the ConfigMap being referenced
Name string `json:"name"`
}
// ComplianceScanType
// +k8s:openapi-gen=true
type ComplianceScanType string
// When changing the defaults, remember to change also the DefaultRawStorageSize and
// DefaultStorageRotation constants
type RawResultStorageSettings struct {
// Specifies the amount of storage to ask for storing the raw results. Note that
// if re-scans happen, the new results will also need to be stored. Defaults to 1Gi.
// +kubebuilder:validation:Default=1Gi
// +kubebuilder:default="1Gi"
Size string `json:"size,omitempty"`
// Specifies the amount of scans for which the raw results will be stored.
// Older results will get rotated, and it's the responsibility of administrators
// to store these results elsewhere before rotation happens. Note that a rotation
// policy of '0' disables rotation entirely. Defaults to 3.
// +kubebuilder:default=3
Rotation uint16 `json:"rotation,omitempty"`
// Specifies the StorageClassName to use when creating the PersistentVolumeClaim
// to hold the raw results. By default this is null, which will attempt to use the
// default storage class configured in the cluster. If there is no default class specified
// then this needs to be set.
// +nullable
StorageClassName *string `json:"storageClassName,omitempty"`
// Specifies the access modes that the PersistentVolume will be created with.
// The persistent volume will hold the raw results of the scan.
// +kubebuilder:default={"ReadWriteOnce"}
PVAccessModes []corev1.PersistentVolumeAccessMode `json:"pvAccessModes,omitempty"`
// By setting this, it's possible to configure where the result server instances
// are run. These instances will mount a Persistent Volume to store the raw
// results, so special care should be taken to schedule these in trusted nodes.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Specifies tolerations needed for the result server to run on the nodes. This is useful
// in case the target set of nodes have custom taints that don't allow certain
// workloads to run. Defaults to allowing scheduling on master nodes.
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
}
// ComplianceScanSettings groups together settings of a ComplianceScan
// +k8s:openapi-gen=true
type ComplianceScanSettings struct {
// Enable debug logging of workloads and OpenSCAP
Debug bool `json:"debug,omitempty"`
// Specifies settings that pertain to raw result storage.
RawResultStorage RawResultStorageSettings `json:"rawResultStorage,omitempty"`
// Defines that no external resources in the Data Stream should be used. External
// resources could be, for instance, CVE feeds. This is useful for disconnected
// installations without access to a proxy.
NoExternalResources bool `json:"noExternalResources,omitempty"`
// It is recommended to set the proxy via the config.openshift.io/Proxy object
// Defines a proxy for the scan to get external resources from. This is useful for
// disconnected installations with access to a proxy.
HTTPSProxy string `json:"httpsProxy,omitempty"`
// Specifies tolerations needed for the scan to run on the nodes. This is useful
// in case the target set of nodes have custom taints that don't allow certain
// workloads to run. Defaults to allowing scheduling on all nodes.
// +kubebuilder:default={{operator: "Exists"}}
ScanTolerations []corev1.Toleration `json:"scanTolerations,omitempty"`
// Defines whether the scan should proceed if we're not able to
// scan all the nodes or not. `true` means that the operator
// should be strict and error out. `false` means that we don't
// need to be strict and we can proceed.
// +kubebuilder:default=true
StrictNodeScan *bool `json:"strictNodeScan,omitempty"`
// Specifies what to do with remediations of Enforcement type. If left empty,
// this defaults to "off" which doesn't create nor apply any enforcement remediations.
// If set to "all" this creates any enforcement remediations it encounters.
// Subsequently, this can also be set to a specific type. e.g. setting it to
// "gatekeeper" will apply any enforcement remediations relevant to the
// Gatekeeper OPA system.
// These objects will annotated in the content itself with:
// complianceascode.io/enforcement-type: <type>
RemediationEnforcement string `json:"remediationEnforcement,omitempty"`
// Determines whether to hide or show results that are not applicable.
// +kubebuilder:default=false
ShowNotApplicable bool `json:"showNotApplicable,omitempty"`
}
// ComplianceScanSpec defines the desired state of ComplianceScan
// +k8s:openapi-gen=true
type ComplianceScanSpec struct {
// The type of Compliance scan.
// +kubebuilder:default=Node
ScanType ComplianceScanType `json:"scanType,omitempty"`
// Is the image with the content (Data Stream), that will be used to run
// OpenSCAP.
ContentImage string `json:"contentImage,omitempty"`
// Is the profile in the data stream to be used. This is the collection of
// rules that will be checked for.
Profile string `json:"profile,omitempty"`
// A Rule can be specified if the scan should check only for a specific
// rule. Note that when leaving this empty, the scan will check for all the
// rules for a specific profile.
Rule string `json:"rule,omitempty"`
// Is the path to the file that contains the content (the data stream).
// Note that the path needs to be relative to the `/` (root) directory, as
// it is in the ContentImage
Content string `json:"content,omitempty"`
// By setting this, it's possible to only run the scan on certain nodes in
// the cluster. Note that when applying remediations generated from the
// scan, this should match the selector of the MachineConfigPool you want
// to apply the remediations to.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Is a reference to a ConfigMap that contains the
// tailoring file. It assumes a key called `tailoring.xml` which will
// have the tailoring contents.
TailoringConfigMap *TailoringConfigMapRef `json:"tailoringConfigMap,omitempty"`
ComplianceScanSettings `json:",inline"`
}
// ComplianceScanStatus defines the observed state of ComplianceScan
// +k8s:openapi-gen=true
type ComplianceScanStatus struct {
// Is the phase where the scan is at. Normally, one must wait for the scan
// to reach the phase DONE.
Phase ComplianceScanStatusPhase `json:"phase,omitempty"`
// Once the scan reaches the phase DONE, this will contain the result of
// the scan. Where COMPLIANT means that the scan succeeded; NON-COMPLIANT
// means that there were rule violations; and ERROR means that the scan
// couldn't complete due to an issue.
Result ComplianceScanStatusResult `json:"result,omitempty"`
// If there are issues on the scan, this will be filled up with an error
// message.
ErrorMessage string `json:"errormsg,omitempty"`
// Specifies the current index of the scan. Given multiple scans, this marks the
// amount that have been executed.
CurrentIndex int64 `json:"currentIndex,omitempty"`
// Specifies the object that's storing the raw results for the scan.
ResultsStorage StorageReference `json:"resultsStorage,omitempty"`
// If there are warnings on the scan, this will be filled up with warning
// messages.
Warnings string `json:"warnings,omitempty"`
}
// StorageReference stores a reference to where certain objects are being stored
type StorageReference struct {
// Kind of the referent.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
Kind string `json:"kind,omitempty"`
// Namespace of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
// +optional
Namespace string `json:"namespace,omitempty"`
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// +optional
Name string `json:"name,omitempty"`
// API version of the referent.
// +optional
APIVersion string `json:"apiVersion,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComplianceScan represents a scan with a certain configuration that will be
// applied to objects of a certain entity in the host. These could be nodes
// that apply to a certain nodeSelector, or the cluster itself.
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=compliancescans,scope=Namespaced,shortName=scans;scan
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=`.status.phase`
// +kubebuilder:printcolumn:name="Result",type="string",JSONPath=`.status.result`
type ComplianceScan struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// The spec is the configuration for the compliance scan.
Spec ComplianceScanSpec `json:"spec,omitempty"`
// The status will give valuable information on what's going on with the
// scan; and, more importantly, if the scan is successful (compliant) or
// not (non-compliant)
Status ComplianceScanStatus `json:"status,omitempty"`
}
// NeedsRescan indicates whether a ComplianceScan needs to
// rescan or not
func (cs *ComplianceScan) NeedsRescan() bool {
annotations := cs.GetAnnotations()
if annotations == nil {
return false
}
_, needsRescan := annotations[ComplianceScanRescanAnnotation]
return needsRescan
}
// GetScanTypeIfValid returns scan type if the scan has a valid one, else it returns
// an error
func (cs *ComplianceScan) GetScanTypeIfValid() (ComplianceScanType, error) {
if strings.ToLower(string(cs.Spec.ScanType)) == strings.ToLower(string(ScanTypePlatform)) {
return ScanTypePlatform, nil
}
if strings.ToLower(string(cs.Spec.ScanType)) == strings.ToLower(string(ScanTypeNode)) |
return "", ErrUnkownScanType
}
// GetScanType get's the scan type for a scan
func (cs *ComplianceScan) GetScanType() ComplianceScanType {
scantype, err := cs.GetScanTypeIfValid()
if err != nil {
// This shouldn't happen
panic(err)
}
return scantype
}
// Returns whether remediation enforcement is off or not
func (cs *ComplianceScan) RemediationEnforcementIsOff() bool {
return (strings.EqualFold(cs.Spec.RemediationEnforcement, RemediationEnforcementEmpty) ||
strings.EqualFold(cs.Spec.RemediationEnforcement, RemediationEnforcementOff))
}
// Returns whether remediation enforcement is off or not
func (cs *ComplianceScan) RemediationEnforcementTypeMatches(etype string) bool {
return (strings.EqualFold(cs.Spec.RemediationEnforcement, RemediationEnforcementAll) ||
strings.EqualFold(cs.Spec.RemediationEnforcement, etype))
}
// GetScanType get's the scan type for a scan
func (cs *ComplianceScan) IsStrictNodeScan() bool {
// strictNodeScan should be true by default
if cs.Spec.StrictNodeScan == nil {
return true
}
return *cs.Spec.StrictNodeScan
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComplianceScanList contains a list of ComplianceScan
type ComplianceScanList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ComplianceScan `json:"items"`
}
func init() {
SchemeBuilder.Register(&ComplianceScan{}, &ComplianceScanList{})
}
| {
return ScanTypeNode, nil
} | conditional_block |
tf_train.py | import tensorflow as tf
import random
import cPickle
import numpy as np
import os
#change current directory
os.chdir("..")
ABS_PATH = os.path.abspath(os.curdir)
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ABS_PATH))
from sklearn import svm
from scipy.sparse import csr_matrix
from features.vectorizer import PolitenessFeatureVectorizer
##############################################################################
# constants
DATA_DIR = "data"
SRC_FILENAME = "training-dataExp.p"
TEST_SIZE = 3283
VAL_SIZE = 1
##############################################################################
def get_data():
filename = os.path.join(os.path.abspath(os.curdir)+"/"+DATA_DIR, SRC_FILENAME)
all_documents = cPickle.load(open(filename, "r"))
all_documents.sort(key=lambda x: x['type'])
all_documents = all_documents[::-1]
# discard test data
requests = all_documents[:-TEST_SIZE]
# For good luck
random.shuffle(requests)
print "%d documents loaded" % len(requests)
#save_to_filename(requests, "requests_data.p")
return requests
def save_to_filename(data, filename):
# Save test documents
filename = os.path.join(os.path.abspath(ABS_PATH+"/"+DATA_DIR),
filename)
cPickle.dump(data, open(filename, 'w'))
def get_features(requests):
vectorizer = PolitenessFeatureVectorizer()
fks = False
X, y = [], []
for req in requests:
# get unigram, bigram features + politeness strategy features
# in this specific document
# vectorizer returns {feature-name: bool_value} dict
# a matrix of zeros and ones
fs = vectorizer.features(req)
if not fks:
fks = sorted(fs.keys())
# get features vector
fv = [fs[k] for k in fks]
# If politeness score > 0.0,
# the doc is polite, class = 1
if req['score'] > 0.0:
l = 1
else:
l = 0
X.append(fv)
y.append(l)
# Single-row sparse matrix
# where np.asarray converts the input to an array.
#X = csr_matrix(np.asarray(X))
X = np.asarray(X)
# format
y = np.asarray(y)
y_ = np.zeros((len(y), 2))
for i in range(len(y)):
if y[i] == 1:
y_[i][1] = 1
else:
y_[i][0] = 1
y = y_
return X, y
def next_batch(X, y, CURR_BATCH, batch_size):
# get the sizes
(train_size, feature_size) = X.shape
end_batch = CURR_BATCH + batch_size
if train_size < CURR_BATCH:
|
elif train_size < CURR_BATCH + batch_size:
end_batch = train_size
batch_xs = X[CURR_BATCH:end_batch]
batch_ys = y[CURR_BATCH:end_batch]
CURR_BATCH = CURR_BATCH + batch_size
return batch_xs, batch_ys, CURR_BATCH
def hidden_layers(_X, _weights, _biases, params):
if params["n_layers"] == 1:
hidden = params["func1"](tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
else:
hidden_1 = params["func1"](tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
hidden = params["func2"](tf.add(tf.matmul(hidden_1, _weights['h2']), _biases['b2']))
return tf.matmul(hidden, _weights['out']) + _biases['out']
# return tf.nn.softmax(tf.matmul(hidden, _weights['out']) + _biases['out'])
def weights_and_biases(params):
if params["n_layers"] == 1:
weights = {
'h1': tf.Variable(tf.random_normal([params["n_input"], params["n_hidden_1"]])),
'out': tf.Variable(tf.random_normal([params["n_hidden_1"], params["n_classes"]]))
}
biases = {
'b1': tf.Variable(tf.random_normal([params["n_hidden_1"]])),
'out': tf.Variable(tf.random_normal([params["n_classes"]]))
}
else:
weights = {
'h1': tf.Variable(tf.random_normal([params["n_input"], params["n_hidden_1"]])),
'h2': tf.Variable(tf.random_normal([params["n_hidden_1"], params["n_hidden_2"]])),
'out': tf.Variable(tf.random_normal([params["n_hidden_2"], params["n_classes"]]))
}
biases = {
'b1': tf.Variable(tf.random_normal([params["n_hidden_1"]])),
'b2': tf.Variable(tf.random_normal([params["n_hidden_2"]])),
'out': tf.Variable(tf.random_normal([params["n_classes"]]))
}
return weights, biases
def tf_train(params):
learning_rate = params["learning_rate"]
training_epochs = params["training_epochs"]
batch_size = params["batch_size"]
display_step = params["display_step"]
X_train, y_train = params["X_train"], params["y_train"]
X_val, y_val = params["X_val"], params["y_val"]
# print "Train data size ", len(y_train)
# print "Validation data size ", len(y_val)
# get the sizes
(train_size, n_input) = X_train.shape
params["n_input"] = n_input
n_classes = params["n_classes"] = 2
n_hidden_1 = params["n_hidden_1"]
if params["n_layers"] == 2:
n_hidden_2 = params["n_hidden_2"]
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# Store layers weight & bias
weights, biases = weights_and_biases(params)
# Construct model
logits = hidden_layers(x, weights, biases, params)
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, y))
pred = tf.nn.softmax(logits)
cost = tf.reduce_mean(tf.nn.l2_loss(pred - y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
# AdamOptimizer GradientDescentOptimizer
# Initializing the variables
init = tf.initialize_all_variables()
errors = []
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
mse = 0.
curr_batch = 0
# Loop over all batches
while curr_batch < train_size:
batch_xs, batch_ys, curr_batch = next_batch(X_train, y_train, curr_batch, batch_size)
# Fit training using batch data
sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
# Compute average loss
mse += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/len(batch_ys)
# print sess.run(pred, feed_dict={x: batch_xs, y: batch_ys})[:10]
errors.append(mse)
# Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%03d' % (epoch+1), "cost=", "{:.9f}".format(mse)
print "Optimization Finished!"
# tf.nn.softmax(pred)
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
result = accuracy.eval({x: X_train, y: y_train})
print "Train data accuracy:", result
result = accuracy.eval({x: X_val, y: y_val})
print "*** Validation data accuracy: ", result
plot(errors)
return result
def get_subsets(data, count, fold_length):
if count == 0:
return data[fold_length:], data[0:fold_length]
end = count+fold_length
val_set = data[count:end]
train_set = np.concatenate((data[0:count], data[end:]))
return np.asarray(train_set), np.asarray(val_set)
def cross_validator(n_folds, params, requests):
X, y = get_features(requests)
total_accuracy = 0.
count = 0
fold_length = len(requests)/n_folds
curr_fold = 1
while count < len(requests)-fold_length+1:
print "Fold # %d" % curr_fold
if count == 0:
train_requests = requests[fold_length:]
else:
end = count+fold_length
train_requests = np.concatenate((
requests[0:count], requests[end:]))
PolitenessFeatureVectorizer.generate_bow_features(train_requests)
params["X_train"], params["X_val"] = get_subsets(X, count, fold_length)
params["y_train"], params["y_val"] = get_subsets(y, count, fold_length)
count += fold_length
curr_fold += 1
total_accuracy += tf_train(params)
# take average of all accuracies
print "****** Average Accuracy for all folds: ", total_accuracy/n_folds
temp = str(params["training_epochs"])
temp += "/" + str(params["n_hidden_1"])
if params["n_layers"] == 2:
temp += "/" + str(params["n_hidden_2"])
print temp
print "----------------------------------------"
return total_accuracy/n_folds
def grid_search():
print "Starting grid_search for TF in /tensorflow"
params = {}
lr_options = [ 0.0015, 0.01, 0.005, 0.001 ]
te_options = [ 10, 50, 80, 100, 150 ]
bs_options = [ 100 ]
n_hidden_1 = [ 562 ]
n_hidden_2 = [ 562 ]
l1_functions = [ tf.nn.relu ]
l2_functions = [ tf.nn.relu ]
# number of layers
params["n_layers"] = 1
# get requests
requests = get_data()
num_folds = 10
print "Running with: ", num_folds, lr_options, te_options
results = {}
if params["n_layers"] == 1:
print "1 layer"
for te in range(len(te_options)):
for lr in range(len(lr_options)):
for nh1 in range(len(n_hidden_1)):
params["func1"] = l1_functions[0]
params["n_hidden_1"] = n_hidden_1[nh1]
# set out all the hyperparameters
params["learning_rate"] = lr_options[lr]
params["training_epochs"] = te_options[te]
params["batch_size"] = bs_options[0]
params["display_step"] = params["training_epochs"]
temp = str(n_hidden_1[nh1])+ "/" + str(lr_options[lr])
temp += "/" +str(te_options[te])
results[temp] = cross_validator(num_folds, params, requests)
else:
print "2 layers"
for te in range(len(te_options)):
for nh2 in range(len(n_hidden_2)):
for nh1 in range(len(n_hidden_1)):
for lr in range(len(lr_options)):
if n_hidden_2[nh2] > n_hidden_1[nh1]:
continue
params["func1"] = l1_functions[0]
params["func2"] = l2_functions[0]
params["n_hidden_1"] = n_hidden_1[nh1]
params["n_hidden_2"] = n_hidden_2[nh2]
# set out all the hyperparameters
params["learning_rate"] = lr_options[lr]
params["training_epochs"] = te_options[te]
params["batch_size"] = bs_options[0]
params["display_step"] = params["training_epochs"]
temp = str(n_hidden_1[nh1])+ "/" + str(n_hidden_2[nh2])
temp += "/" +str(te_options[te])
temp += "/"+str(lr_options[lr])
results[temp] = cross_validator(num_folds, params, requests)
print results
import operator
best = max(results.iteritems(), key=operator.itemgetter(1))[0]
print "Best Result %s with the score = %f" % (best, results[best])
def plot(errors):
import matplotlib.pyplot as plt # for plotting
plt.plot(errors)
plt.xlabel('#epochs')
plt.ylabel('MSE')
import pylab
pylab.show()
##############################################################################
if __name__ == "__main__":
"""
train the politeness model, using tensorflow
"""
grid_search()
| return | conditional_block |
tf_train.py | import tensorflow as tf
import random
import cPickle
import numpy as np
import os
#change current directory
os.chdir("..")
ABS_PATH = os.path.abspath(os.curdir)
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ABS_PATH))
from sklearn import svm
from scipy.sparse import csr_matrix
from features.vectorizer import PolitenessFeatureVectorizer
##############################################################################
# constants
DATA_DIR = "data"
SRC_FILENAME = "training-dataExp.p"
TEST_SIZE = 3283
VAL_SIZE = 1
##############################################################################
def get_data():
filename = os.path.join(os.path.abspath(os.curdir)+"/"+DATA_DIR, SRC_FILENAME)
all_documents = cPickle.load(open(filename, "r"))
all_documents.sort(key=lambda x: x['type'])
all_documents = all_documents[::-1]
# discard test data
requests = all_documents[:-TEST_SIZE]
# For good luck
random.shuffle(requests)
print "%d documents loaded" % len(requests)
#save_to_filename(requests, "requests_data.p")
return requests
def save_to_filename(data, filename):
# Save test documents
filename = os.path.join(os.path.abspath(ABS_PATH+"/"+DATA_DIR),
filename)
cPickle.dump(data, open(filename, 'w'))
def get_features(requests):
vectorizer = PolitenessFeatureVectorizer()
fks = False
X, y = [], []
for req in requests:
# get unigram, bigram features + politeness strategy features
# in this specific document
# vectorizer returns {feature-name: bool_value} dict
# a matrix of zeros and ones
fs = vectorizer.features(req)
if not fks:
fks = sorted(fs.keys())
# get features vector
fv = [fs[k] for k in fks]
# If politeness score > 0.0,
# the doc is polite, class = 1
if req['score'] > 0.0:
l = 1
else:
l = 0
X.append(fv)
y.append(l)
# Single-row sparse matrix
# where np.asarray converts the input to an array.
#X = csr_matrix(np.asarray(X))
X = np.asarray(X)
# format
y = np.asarray(y)
y_ = np.zeros((len(y), 2))
for i in range(len(y)):
if y[i] == 1:
y_[i][1] = 1
else:
y_[i][0] = 1
y = y_
return X, y
def next_batch(X, y, CURR_BATCH, batch_size):
# get the sizes
(train_size, feature_size) = X.shape
end_batch = CURR_BATCH + batch_size
if train_size < CURR_BATCH:
return
elif train_size < CURR_BATCH + batch_size:
end_batch = train_size
batch_xs = X[CURR_BATCH:end_batch]
batch_ys = y[CURR_BATCH:end_batch]
CURR_BATCH = CURR_BATCH + batch_size
return batch_xs, batch_ys, CURR_BATCH
def hidden_layers(_X, _weights, _biases, params):
if params["n_layers"] == 1:
hidden = params["func1"](tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
else:
hidden_1 = params["func1"](tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
hidden = params["func2"](tf.add(tf.matmul(hidden_1, _weights['h2']), _biases['b2']))
return tf.matmul(hidden, _weights['out']) + _biases['out']
# return tf.nn.softmax(tf.matmul(hidden, _weights['out']) + _biases['out'])
def weights_and_biases(params):
if params["n_layers"] == 1:
weights = {
'h1': tf.Variable(tf.random_normal([params["n_input"], params["n_hidden_1"]])),
'out': tf.Variable(tf.random_normal([params["n_hidden_1"], params["n_classes"]]))
}
biases = {
'b1': tf.Variable(tf.random_normal([params["n_hidden_1"]])),
'out': tf.Variable(tf.random_normal([params["n_classes"]]))
}
else:
weights = {
'h1': tf.Variable(tf.random_normal([params["n_input"], params["n_hidden_1"]])),
'h2': tf.Variable(tf.random_normal([params["n_hidden_1"], params["n_hidden_2"]])),
'out': tf.Variable(tf.random_normal([params["n_hidden_2"], params["n_classes"]]))
}
biases = {
'b1': tf.Variable(tf.random_normal([params["n_hidden_1"]])),
'b2': tf.Variable(tf.random_normal([params["n_hidden_2"]])),
'out': tf.Variable(tf.random_normal([params["n_classes"]]))
}
return weights, biases
def tf_train(params):
learning_rate = params["learning_rate"]
training_epochs = params["training_epochs"]
batch_size = params["batch_size"]
display_step = params["display_step"]
X_train, y_train = params["X_train"], params["y_train"]
X_val, y_val = params["X_val"], params["y_val"]
# print "Train data size ", len(y_train)
# print "Validation data size ", len(y_val)
# get the sizes
(train_size, n_input) = X_train.shape
params["n_input"] = n_input
n_classes = params["n_classes"] = 2
n_hidden_1 = params["n_hidden_1"]
if params["n_layers"] == 2:
n_hidden_2 = params["n_hidden_2"]
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# Store layers weight & bias
weights, biases = weights_and_biases(params)
# Construct model
logits = hidden_layers(x, weights, biases, params)
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, y))
pred = tf.nn.softmax(logits)
cost = tf.reduce_mean(tf.nn.l2_loss(pred - y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
# AdamOptimizer GradientDescentOptimizer
# Initializing the variables
init = tf.initialize_all_variables()
errors = []
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
mse = 0.
curr_batch = 0
# Loop over all batches
while curr_batch < train_size:
batch_xs, batch_ys, curr_batch = next_batch(X_train, y_train, curr_batch, batch_size)
# Fit training using batch data
sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
# Compute average loss
mse += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/len(batch_ys)
# print sess.run(pred, feed_dict={x: batch_xs, y: batch_ys})[:10]
errors.append(mse)
# Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%03d' % (epoch+1), "cost=", "{:.9f}".format(mse)
print "Optimization Finished!"
# tf.nn.softmax(pred)
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
result = accuracy.eval({x: X_train, y: y_train})
print "Train data accuracy:", result
result = accuracy.eval({x: X_val, y: y_val})
print "*** Validation data accuracy: ", result
plot(errors)
return result
def get_subsets(data, count, fold_length):
if count == 0:
return data[fold_length:], data[0:fold_length]
end = count+fold_length
val_set = data[count:end]
train_set = np.concatenate((data[0:count], data[end:]))
return np.asarray(train_set), np.asarray(val_set)
def cross_validator(n_folds, params, requests):
X, y = get_features(requests)
total_accuracy = 0.
count = 0
fold_length = len(requests)/n_folds
curr_fold = 1
while count < len(requests)-fold_length+1:
print "Fold # %d" % curr_fold
if count == 0:
train_requests = requests[fold_length:]
else:
end = count+fold_length
train_requests = np.concatenate((
requests[0:count], requests[end:]))
PolitenessFeatureVectorizer.generate_bow_features(train_requests)
params["X_train"], params["X_val"] = get_subsets(X, count, fold_length)
params["y_train"], params["y_val"] = get_subsets(y, count, fold_length)
count += fold_length
curr_fold += 1
total_accuracy += tf_train(params)
# take average of all accuracies
print "****** Average Accuracy for all folds: ", total_accuracy/n_folds
temp = str(params["training_epochs"])
temp += "/" + str(params["n_hidden_1"])
if params["n_layers"] == 2:
temp += "/" + str(params["n_hidden_2"])
print temp
print "----------------------------------------"
return total_accuracy/n_folds
def | ():
print "Starting grid_search for TF in /tensorflow"
params = {}
lr_options = [ 0.0015, 0.01, 0.005, 0.001 ]
te_options = [ 10, 50, 80, 100, 150 ]
bs_options = [ 100 ]
n_hidden_1 = [ 562 ]
n_hidden_2 = [ 562 ]
l1_functions = [ tf.nn.relu ]
l2_functions = [ tf.nn.relu ]
# number of layers
params["n_layers"] = 1
# get requests
requests = get_data()
num_folds = 10
print "Running with: ", num_folds, lr_options, te_options
results = {}
if params["n_layers"] == 1:
print "1 layer"
for te in range(len(te_options)):
for lr in range(len(lr_options)):
for nh1 in range(len(n_hidden_1)):
params["func1"] = l1_functions[0]
params["n_hidden_1"] = n_hidden_1[nh1]
# set out all the hyperparameters
params["learning_rate"] = lr_options[lr]
params["training_epochs"] = te_options[te]
params["batch_size"] = bs_options[0]
params["display_step"] = params["training_epochs"]
temp = str(n_hidden_1[nh1])+ "/" + str(lr_options[lr])
temp += "/" +str(te_options[te])
results[temp] = cross_validator(num_folds, params, requests)
else:
print "2 layers"
for te in range(len(te_options)):
for nh2 in range(len(n_hidden_2)):
for nh1 in range(len(n_hidden_1)):
for lr in range(len(lr_options)):
if n_hidden_2[nh2] > n_hidden_1[nh1]:
continue
params["func1"] = l1_functions[0]
params["func2"] = l2_functions[0]
params["n_hidden_1"] = n_hidden_1[nh1]
params["n_hidden_2"] = n_hidden_2[nh2]
# set out all the hyperparameters
params["learning_rate"] = lr_options[lr]
params["training_epochs"] = te_options[te]
params["batch_size"] = bs_options[0]
params["display_step"] = params["training_epochs"]
temp = str(n_hidden_1[nh1])+ "/" + str(n_hidden_2[nh2])
temp += "/" +str(te_options[te])
temp += "/"+str(lr_options[lr])
results[temp] = cross_validator(num_folds, params, requests)
print results
import operator
best = max(results.iteritems(), key=operator.itemgetter(1))[0]
print "Best Result %s with the score = %f" % (best, results[best])
def plot(errors):
import matplotlib.pyplot as plt # for plotting
plt.plot(errors)
plt.xlabel('#epochs')
plt.ylabel('MSE')
import pylab
pylab.show()
##############################################################################
if __name__ == "__main__":
"""
train the politeness model, using tensorflow
"""
grid_search()
| grid_search | identifier_name |
tf_train.py | import tensorflow as tf
import random
import cPickle
import numpy as np
import os
#change current directory
os.chdir("..")
ABS_PATH = os.path.abspath(os.curdir)
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ABS_PATH))
from sklearn import svm
from scipy.sparse import csr_matrix
from features.vectorizer import PolitenessFeatureVectorizer
##############################################################################
# constants
DATA_DIR = "data"
SRC_FILENAME = "training-dataExp.p"
TEST_SIZE = 3283
VAL_SIZE = 1
##############################################################################
def get_data():
filename = os.path.join(os.path.abspath(os.curdir)+"/"+DATA_DIR, SRC_FILENAME)
all_documents = cPickle.load(open(filename, "r"))
all_documents.sort(key=lambda x: x['type'])
all_documents = all_documents[::-1]
# discard test data
requests = all_documents[:-TEST_SIZE]
# For good luck
random.shuffle(requests)
print "%d documents loaded" % len(requests)
#save_to_filename(requests, "requests_data.p")
return requests
def save_to_filename(data, filename):
# Save test documents
filename = os.path.join(os.path.abspath(ABS_PATH+"/"+DATA_DIR),
filename)
cPickle.dump(data, open(filename, 'w'))
def get_features(requests):
vectorizer = PolitenessFeatureVectorizer()
fks = False
X, y = [], []
for req in requests:
# get unigram, bigram features + politeness strategy features
# in this specific document
# vectorizer returns {feature-name: bool_value} dict
# a matrix of zeros and ones
fs = vectorizer.features(req)
if not fks:
fks = sorted(fs.keys())
# get features vector
fv = [fs[k] for k in fks]
# If politeness score > 0.0,
# the doc is polite, class = 1
if req['score'] > 0.0:
l = 1
else:
l = 0
X.append(fv)
y.append(l)
# Single-row sparse matrix
# where np.asarray converts the input to an array.
#X = csr_matrix(np.asarray(X))
X = np.asarray(X)
# format
y = np.asarray(y)
y_ = np.zeros((len(y), 2))
for i in range(len(y)):
if y[i] == 1:
y_[i][1] = 1
else:
y_[i][0] = 1
y = y_
return X, y
def next_batch(X, y, CURR_BATCH, batch_size):
# get the sizes
(train_size, feature_size) = X.shape
end_batch = CURR_BATCH + batch_size
if train_size < CURR_BATCH:
return
elif train_size < CURR_BATCH + batch_size:
end_batch = train_size
batch_xs = X[CURR_BATCH:end_batch]
batch_ys = y[CURR_BATCH:end_batch]
CURR_BATCH = CURR_BATCH + batch_size
return batch_xs, batch_ys, CURR_BATCH
def hidden_layers(_X, _weights, _biases, params):
if params["n_layers"] == 1:
hidden = params["func1"](tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
else:
hidden_1 = params["func1"](tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
hidden = params["func2"](tf.add(tf.matmul(hidden_1, _weights['h2']), _biases['b2']))
return tf.matmul(hidden, _weights['out']) + _biases['out']
# return tf.nn.softmax(tf.matmul(hidden, _weights['out']) + _biases['out'])
def weights_and_biases(params):
if params["n_layers"] == 1:
weights = {
'h1': tf.Variable(tf.random_normal([params["n_input"], params["n_hidden_1"]])),
'out': tf.Variable(tf.random_normal([params["n_hidden_1"], params["n_classes"]]))
}
biases = {
'b1': tf.Variable(tf.random_normal([params["n_hidden_1"]])),
'out': tf.Variable(tf.random_normal([params["n_classes"]]))
}
else:
weights = {
'h1': tf.Variable(tf.random_normal([params["n_input"], params["n_hidden_1"]])),
'h2': tf.Variable(tf.random_normal([params["n_hidden_1"], params["n_hidden_2"]])),
'out': tf.Variable(tf.random_normal([params["n_hidden_2"], params["n_classes"]]))
}
biases = {
'b1': tf.Variable(tf.random_normal([params["n_hidden_1"]])),
'b2': tf.Variable(tf.random_normal([params["n_hidden_2"]])),
'out': tf.Variable(tf.random_normal([params["n_classes"]]))
}
return weights, biases
def tf_train(params):
learning_rate = params["learning_rate"]
training_epochs = params["training_epochs"]
batch_size = params["batch_size"]
display_step = params["display_step"]
X_train, y_train = params["X_train"], params["y_train"]
X_val, y_val = params["X_val"], params["y_val"]
# print "Train data size ", len(y_train)
# print "Validation data size ", len(y_val)
# get the sizes
(train_size, n_input) = X_train.shape
params["n_input"] = n_input
n_classes = params["n_classes"] = 2
n_hidden_1 = params["n_hidden_1"]
if params["n_layers"] == 2:
n_hidden_2 = params["n_hidden_2"]
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# Store layers weight & bias
weights, biases = weights_and_biases(params)
# Construct model
logits = hidden_layers(x, weights, biases, params)
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, y))
pred = tf.nn.softmax(logits)
cost = tf.reduce_mean(tf.nn.l2_loss(pred - y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
# AdamOptimizer GradientDescentOptimizer
# Initializing the variables
init = tf.initialize_all_variables()
errors = []
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
mse = 0.
curr_batch = 0
# Loop over all batches
while curr_batch < train_size:
batch_xs, batch_ys, curr_batch = next_batch(X_train, y_train, curr_batch, batch_size)
# Fit training using batch data
sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
# Compute average loss
mse += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/len(batch_ys)
# print sess.run(pred, feed_dict={x: batch_xs, y: batch_ys})[:10]
errors.append(mse)
# Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%03d' % (epoch+1), "cost=", "{:.9f}".format(mse)
print "Optimization Finished!"
# tf.nn.softmax(pred)
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
result = accuracy.eval({x: X_train, y: y_train})
print "Train data accuracy:", result
result = accuracy.eval({x: X_val, y: y_val})
print "*** Validation data accuracy: ", result
plot(errors)
return result
def get_subsets(data, count, fold_length):
if count == 0:
return data[fold_length:], data[0:fold_length]
end = count+fold_length
val_set = data[count:end]
train_set = np.concatenate((data[0:count], data[end:]))
return np.asarray(train_set), np.asarray(val_set)
def cross_validator(n_folds, params, requests):
X, y = get_features(requests)
total_accuracy = 0.
count = 0
fold_length = len(requests)/n_folds
curr_fold = 1
while count < len(requests)-fold_length+1:
print "Fold # %d" % curr_fold
if count == 0:
train_requests = requests[fold_length:]
else:
end = count+fold_length
train_requests = np.concatenate((
requests[0:count], requests[end:]))
PolitenessFeatureVectorizer.generate_bow_features(train_requests)
params["X_train"], params["X_val"] = get_subsets(X, count, fold_length)
params["y_train"], params["y_val"] = get_subsets(y, count, fold_length)
count += fold_length
curr_fold += 1
total_accuracy += tf_train(params)
# take average of all accuracies
print "****** Average Accuracy for all folds: ", total_accuracy/n_folds
temp = str(params["training_epochs"])
temp += "/" + str(params["n_hidden_1"])
if params["n_layers"] == 2:
temp += "/" + str(params["n_hidden_2"])
print temp
print "----------------------------------------"
return total_accuracy/n_folds
def grid_search():
print "Starting grid_search for TF in /tensorflow"
params = {}
lr_options = [ 0.0015, 0.01, 0.005, 0.001 ]
te_options = [ 10, 50, 80, 100, 150 ]
bs_options = [ 100 ]
n_hidden_1 = [ 562 ]
n_hidden_2 = [ 562 ]
l1_functions = [ tf.nn.relu ]
l2_functions = [ tf.nn.relu ]
# number of layers
params["n_layers"] = 1
# get requests
requests = get_data()
num_folds = 10
print "Running with: ", num_folds, lr_options, te_options
results = {}
if params["n_layers"] == 1:
print "1 layer"
for te in range(len(te_options)):
for lr in range(len(lr_options)):
for nh1 in range(len(n_hidden_1)):
params["func1"] = l1_functions[0]
params["n_hidden_1"] = n_hidden_1[nh1]
# set out all the hyperparameters
params["learning_rate"] = lr_options[lr]
params["training_epochs"] = te_options[te]
params["batch_size"] = bs_options[0]
params["display_step"] = params["training_epochs"]
temp = str(n_hidden_1[nh1])+ "/" + str(lr_options[lr])
temp += "/" +str(te_options[te])
results[temp] = cross_validator(num_folds, params, requests)
else:
print "2 layers"
for te in range(len(te_options)):
for nh2 in range(len(n_hidden_2)):
for nh1 in range(len(n_hidden_1)):
for lr in range(len(lr_options)):
if n_hidden_2[nh2] > n_hidden_1[nh1]:
continue
params["func1"] = l1_functions[0]
params["func2"] = l2_functions[0]
params["n_hidden_1"] = n_hidden_1[nh1]
params["n_hidden_2"] = n_hidden_2[nh2]
# set out all the hyperparameters
params["learning_rate"] = lr_options[lr]
params["training_epochs"] = te_options[te]
params["batch_size"] = bs_options[0]
params["display_step"] = params["training_epochs"]
temp = str(n_hidden_1[nh1])+ "/" + str(n_hidden_2[nh2])
temp += "/" +str(te_options[te])
temp += "/"+str(lr_options[lr])
results[temp] = cross_validator(num_folds, params, requests)
print results
import operator
best = max(results.iteritems(), key=operator.itemgetter(1))[0]
print "Best Result %s with the score = %f" % (best, results[best])
def plot(errors):
|
##############################################################################
if __name__ == "__main__":
"""
train the politeness model, using tensorflow
"""
grid_search()
| import matplotlib.pyplot as plt # for plotting
plt.plot(errors)
plt.xlabel('#epochs')
plt.ylabel('MSE')
import pylab
pylab.show() | identifier_body |
tf_train.py | import tensorflow as tf
import random
import cPickle
import numpy as np
import os
#change current directory
os.chdir("..")
ABS_PATH = os.path.abspath(os.curdir)
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ABS_PATH))
from sklearn import svm
from scipy.sparse import csr_matrix
from features.vectorizer import PolitenessFeatureVectorizer
##############################################################################
# constants
DATA_DIR = "data"
SRC_FILENAME = "training-dataExp.p"
TEST_SIZE = 3283
VAL_SIZE = 1
##############################################################################
def get_data():
filename = os.path.join(os.path.abspath(os.curdir)+"/"+DATA_DIR, SRC_FILENAME)
all_documents = cPickle.load(open(filename, "r"))
all_documents.sort(key=lambda x: x['type'])
all_documents = all_documents[::-1]
# discard test data
requests = all_documents[:-TEST_SIZE]
# For good luck
random.shuffle(requests)
print "%d documents loaded" % len(requests)
#save_to_filename(requests, "requests_data.p")
return requests
def save_to_filename(data, filename):
# Save test documents
filename = os.path.join(os.path.abspath(ABS_PATH+"/"+DATA_DIR),
filename)
cPickle.dump(data, open(filename, 'w'))
def get_features(requests):
vectorizer = PolitenessFeatureVectorizer()
fks = False
X, y = [], []
for req in requests:
# get unigram, bigram features + politeness strategy features
# in this specific document
# vectorizer returns {feature-name: bool_value} dict
# a matrix of zeros and ones
fs = vectorizer.features(req)
if not fks:
fks = sorted(fs.keys())
# get features vector
fv = [fs[k] for k in fks]
# If politeness score > 0.0,
# the doc is polite, class = 1
if req['score'] > 0.0:
l = 1
else:
l = 0
X.append(fv)
y.append(l)
# Single-row sparse matrix
# where np.asarray converts the input to an array.
#X = csr_matrix(np.asarray(X))
X = np.asarray(X)
# format
y = np.asarray(y)
y_ = np.zeros((len(y), 2))
for i in range(len(y)):
if y[i] == 1:
y_[i][1] = 1
else:
y_[i][0] = 1
y = y_
return X, y
def next_batch(X, y, CURR_BATCH, batch_size):
# get the sizes
(train_size, feature_size) = X.shape
end_batch = CURR_BATCH + batch_size
if train_size < CURR_BATCH:
return
elif train_size < CURR_BATCH + batch_size:
end_batch = train_size
batch_xs = X[CURR_BATCH:end_batch]
batch_ys = y[CURR_BATCH:end_batch]
CURR_BATCH = CURR_BATCH + batch_size
return batch_xs, batch_ys, CURR_BATCH
def hidden_layers(_X, _weights, _biases, params):
if params["n_layers"] == 1:
hidden = params["func1"](tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
else:
hidden_1 = params["func1"](tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
hidden = params["func2"](tf.add(tf.matmul(hidden_1, _weights['h2']), _biases['b2']))
return tf.matmul(hidden, _weights['out']) + _biases['out']
# return tf.nn.softmax(tf.matmul(hidden, _weights['out']) + _biases['out'])
def weights_and_biases(params):
if params["n_layers"] == 1:
weights = {
'h1': tf.Variable(tf.random_normal([params["n_input"], params["n_hidden_1"]])),
'out': tf.Variable(tf.random_normal([params["n_hidden_1"], params["n_classes"]]))
}
biases = {
'b1': tf.Variable(tf.random_normal([params["n_hidden_1"]])),
'out': tf.Variable(tf.random_normal([params["n_classes"]]))
}
else:
weights = {
'h1': tf.Variable(tf.random_normal([params["n_input"], params["n_hidden_1"]])),
'h2': tf.Variable(tf.random_normal([params["n_hidden_1"], params["n_hidden_2"]])),
'out': tf.Variable(tf.random_normal([params["n_hidden_2"], params["n_classes"]]))
}
biases = {
'b1': tf.Variable(tf.random_normal([params["n_hidden_1"]])),
'b2': tf.Variable(tf.random_normal([params["n_hidden_2"]])),
'out': tf.Variable(tf.random_normal([params["n_classes"]]))
}
return weights, biases
def tf_train(params):
learning_rate = params["learning_rate"]
training_epochs = params["training_epochs"]
batch_size = params["batch_size"]
display_step = params["display_step"]
X_train, y_train = params["X_train"], params["y_train"]
X_val, y_val = params["X_val"], params["y_val"]
# print "Train data size ", len(y_train)
# print "Validation data size ", len(y_val)
# get the sizes
(train_size, n_input) = X_train.shape
params["n_input"] = n_input
n_classes = params["n_classes"] = 2
n_hidden_1 = params["n_hidden_1"]
if params["n_layers"] == 2:
n_hidden_2 = params["n_hidden_2"]
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# Store layers weight & bias
weights, biases = weights_and_biases(params)
# Construct model
logits = hidden_layers(x, weights, biases, params)
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, y))
pred = tf.nn.softmax(logits)
cost = tf.reduce_mean(tf.nn.l2_loss(pred - y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
# AdamOptimizer GradientDescentOptimizer
# Initializing the variables
init = tf.initialize_all_variables()
errors = []
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
mse = 0.
curr_batch = 0
# Loop over all batches
while curr_batch < train_size:
batch_xs, batch_ys, curr_batch = next_batch(X_train, y_train, curr_batch, batch_size)
# Fit training using batch data
sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
# Compute average loss
mse += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/len(batch_ys)
# print sess.run(pred, feed_dict={x: batch_xs, y: batch_ys})[:10]
errors.append(mse)
# Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%03d' % (epoch+1), "cost=", "{:.9f}".format(mse)
print "Optimization Finished!"
# tf.nn.softmax(pred)
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
result = accuracy.eval({x: X_train, y: y_train})
print "Train data accuracy:", result
result = accuracy.eval({x: X_val, y: y_val})
print "*** Validation data accuracy: ", result
plot(errors)
return result
def get_subsets(data, count, fold_length):
if count == 0:
return data[fold_length:], data[0:fold_length]
end = count+fold_length
val_set = data[count:end]
train_set = np.concatenate((data[0:count], data[end:]))
return np.asarray(train_set), np.asarray(val_set)
def cross_validator(n_folds, params, requests):
X, y = get_features(requests)
total_accuracy = 0.
count = 0
fold_length = len(requests)/n_folds
curr_fold = 1
while count < len(requests)-fold_length+1:
print "Fold # %d" % curr_fold
if count == 0:
train_requests = requests[fold_length:]
else:
end = count+fold_length
train_requests = np.concatenate((
requests[0:count], requests[end:]))
PolitenessFeatureVectorizer.generate_bow_features(train_requests)
params["X_train"], params["X_val"] = get_subsets(X, count, fold_length)
params["y_train"], params["y_val"] = get_subsets(y, count, fold_length)
count += fold_length
curr_fold += 1
total_accuracy += tf_train(params)
# take average of all accuracies
print "****** Average Accuracy for all folds: ", total_accuracy/n_folds
temp = str(params["training_epochs"])
temp += "/" + str(params["n_hidden_1"])
if params["n_layers"] == 2:
temp += "/" + str(params["n_hidden_2"])
print temp
print "----------------------------------------"
return total_accuracy/n_folds
def grid_search():
print "Starting grid_search for TF in /tensorflow"
params = {}
lr_options = [ 0.0015, 0.01, 0.005, 0.001 ]
te_options = [ 10, 50, 80, 100, 150 ]
bs_options = [ 100 ]
n_hidden_1 = [ 562 ]
n_hidden_2 = [ 562 ]
l1_functions = [ tf.nn.relu ]
l2_functions = [ tf.nn.relu ]
# number of layers
params["n_layers"] = 1
# get requests
requests = get_data()
num_folds = 10
print "Running with: ", num_folds, lr_options, te_options
results = {}
if params["n_layers"] == 1:
print "1 layer"
for te in range(len(te_options)):
for lr in range(len(lr_options)):
for nh1 in range(len(n_hidden_1)):
params["func1"] = l1_functions[0]
params["n_hidden_1"] = n_hidden_1[nh1]
# set out all the hyperparameters
params["learning_rate"] = lr_options[lr]
params["training_epochs"] = te_options[te]
params["batch_size"] = bs_options[0]
params["display_step"] = params["training_epochs"]
temp = str(n_hidden_1[nh1])+ "/" + str(lr_options[lr])
temp += "/" +str(te_options[te])
results[temp] = cross_validator(num_folds, params, requests)
else:
print "2 layers"
for te in range(len(te_options)):
for nh2 in range(len(n_hidden_2)):
for nh1 in range(len(n_hidden_1)):
for lr in range(len(lr_options)):
if n_hidden_2[nh2] > n_hidden_1[nh1]:
continue
params["func1"] = l1_functions[0]
params["func2"] = l2_functions[0]
params["n_hidden_1"] = n_hidden_1[nh1]
params["n_hidden_2"] = n_hidden_2[nh2]
# set out all the hyperparameters
params["learning_rate"] = lr_options[lr]
params["training_epochs"] = te_options[te]
params["batch_size"] = bs_options[0]
params["display_step"] = params["training_epochs"]
temp = str(n_hidden_1[nh1])+ "/" + str(n_hidden_2[nh2])
temp += "/" +str(te_options[te])
temp += "/"+str(lr_options[lr])
results[temp] = cross_validator(num_folds, params, requests)
print results
import operator
best = max(results.iteritems(), key=operator.itemgetter(1))[0]
print "Best Result %s with the score = %f" % (best, results[best])
def plot(errors):
import matplotlib.pyplot as plt # for plotting
plt.plot(errors)
plt.xlabel('#epochs')
plt.ylabel('MSE') | import pylab
pylab.show()
##############################################################################
if __name__ == "__main__":
"""
train the politeness model, using tensorflow
"""
grid_search() | random_line_split | |
core1500.js | export default [{
kan: '出口',
tran: 'でぐち',
english: 'exit',
sentences: '出口はあそこです。でぐち は あそこ です。The exit\'s over there.',
id: 1
},
{
kan: '登る',
tran: 'のぼる',
english: 'climb, ascend',
sentences: '私たちは昨年、富士山に登りました。わたし たち は さくねん、ふじさん に のぼりました。We climbed Mount Fuji last year.',
id: 2
},
{
kan: '真っ白',
tran: 'まっしろ',
english: 'pure-white',
sentences: '外は雪で真っ白だった。そと は ゆき で まっしろ だった。Everything outside was white with snow.',
id: 3
},
{
kan: '下ろす',
tran: 'おろす',
english: 'bring down, take down',
sentences: '棚からその箱を下ろしてください。たな から その はこ を おろして ください。Please take the box down from the shelf.',
id: 4
},
{
kan: '貸し出す',
tran: 'かしだす',
english: 'lend, lend out',
sentences: 'その図書館は海外の雑誌も貸し出している。その としょかん は かいがい の ざっし も かしだして いる。That library lends out foreign magazines, too.',
id: 5
},
{
kan: 'サッカー',
tran: 'null',
english: 'soccer, football',
sentences: '彼はサッカーの選手です。かれ は サッカー の せんしゅ です。He\'s a soccer player.',
id: 6
},
{
kan: '暖房',
tran: 'だんぼう',
english: 'heating',
sentences: '冬は暖房が必要です。ふゆ は だんぼう が ひつよう です。Heating is necessary in winter.',
id: 7
},
{
kan: '手袋',
tran: 'てぶくろ',
english: 'gloves',
sentences: '寒いので手袋をしました。さむい ので てぶくろ を しました。I wore gloves because it was cold.',
id: 8
},
{
kan: '留守',
tran: 'るす',
english: 'not at home',
sentences: '父は今、留守です。ちち は いま、るす です。My father isn\'t at home now.',
id: 9
},
{
kan: '嘘',
tran: 'うそ',
english: 'lie',
sentences: '嘘をついてはいけません。うそ を ついて は いけません。Do not lie.',
id: 10
},
{
kan: '遠慮',
tran: 'えんりょ',
english: 'restraint, hold back',
sentences: '私は遠慮します。わたし は えんりょ します。No thanks, I\'ll pass.',
id: 11
},
{
kan: '折る',
tran: 'おる',
english: 'break (something), fold (something)',
sentences: '祖父は足の骨を折りました。そふ は あし の ほね を おりました。My grandfather broke a bone in his foot.',
id: 12
},
{
kan: 'スープ',
tran: 'null',
english: 'soup',
sentences: '母がコーンスープを作っている。はは が コーン スープ を つくって いる。My mother is making some corn chowder.',
id: 13
},
{
kan: 'バナナ',
tran: 'null',
english: 'banana',
sentences: '私は毎朝バナナを食べます。わたし は まいあさ バナナ を たべます。I eat a banana every morning.',
id: 14
},
{
kan: '可哀相',
tran: 'かわいそう',
english: 'poor, pitiful',
sentences: 'その可哀相な子供たちは食べるものがない。その かわいそうな こども たち は たべる もの が ない。Those poor children have nothing to eat.',
id: 15
},
{
kan: 'パトカー',
tran: 'null',
english: 'police car',
sentences: 'あそこにパトカーがいる。あそこ に パトカー が いる。There\'s a police car over there.',
id: 16
},
{
kan: 'ハンバーガー',
tran: 'null',
english: 'hamburger',
sentences: '今日の昼ご飯はハンバーガーでした。きょう の ひる ごはん は ハンバーガー でした。I had a hamburger for lunch today.',
id: 17
},
{
kan: '毛布',
tran: 'もうふ',
english: 'blanket',
sentences: 'この毛布は暖かい。この もうふ は あたたかい。This blanket is warm.',
id: 18
},
{
kan: 'エスカレーター',
tran: 'null',
english: 'escalator',
sentences: '3階までエスカレーターで行きましょう。3 かい まで エスカレーター で いきましょう。Let\'s take the escalator up to the third floor.',
id: 19
},
{
kan: 'お嬢さん',
tran: 'おじょうさん',
english: 'young lady, daughter',
sentences: 'お嬢さんはおいくつですか。おじょうさん は おいくつ です か。How old is your daughter?',
id: 20
},
{
kan: '手帳',
tran: 'てちょう',
english: 'pocket notebook',
sentences: '新しい手帳を買いました。あたらしい てちょう を かいました。I bought a new pocket notebook.',
id: 21
},
{
kan: 'タオル',
tran: 'null',
english: 'towel',
sentences: '私はタオルで顔をふいた。わたし は タオル で かお を ふいた。I wiped my face with a towel.',
id: 22
},
{
kan: '売店',
tran: 'ばいてん',
english: 'booth, shop',
sentences: '駅の売店で雑誌を買った。えき の ばいてん で ざっし を かった。I bought a magazine at a shop in the station.',
id: 23
},
{
kan: 'パチンコ',
tran: 'null',
english: 'pachinko (Japanese pinball)',
sentences: '彼は毎日パチンコをしています。かれ は まいにち パチンコ を して います。He plays pachinko every day.',
id: 24
},
{
kan: '謝る',
tran: 'あやまる',
english: 'apologize',
sentences: '彼は直ぐに謝りました。かれ は すぐ に あやまりました。He apologized at once.',
id: 25
},
{
kan: 'ケーキ',
tran: 'null',
english: 'cake',
sentences: '誕生日にケーキを食べました。たんじょうび に ケーキ を たべました。We ate cake on his birthday.',
id: 26
},
{
kan: '天気予報',
tran: 'てんきよほう',
english: 'weather forecast',
sentences: '明日の天気予報は雨です。あした の てんき よほう は あめ です。Tomorrow\'s weather forecast is for rain.',
id: 27
},
{
kan: '変',
tran: 'へん',
english: 'strange, weird',
sentences: '変な音が聞こえます。へんな おと が きこえます。I hear a strange sound.',
id: 28
},
{
kan: '一生懸命',
tran: 'いっしょうけんめい',
english: 'hard-working, doing one\'s best',
sentences: '彼は毎日一生懸命働いている。かれ は まいにち いっしょうけんめい はたらいて いる。He works hard every day.',
id: 29
},
{
kan: '間違う',
tran: 'まちがう',
english: 'make a mistake, be wrong',
sentences: 'あなたは間違っている。あなた は まちがって いる。You\'re wrong.',
id: 30
},
{
kan: '事務室',
tran: 'じむしつ',
english: 'clerk\'s office, office room',
sentences: '事務室でコピーを取ってきます。じむしつ で コピー を とって きます。I\'m going to make copies in the office.',
id: 31
},
{
kan: 'スーツ',
tran: 'null',
english: 'suit',
sentences: 'あのスーツはそんなに高くない。あの スーツ は そんな に たかくない。The suit is not that pricey.',
id: 32
},
{
kan: 'チケット',
tran: 'null',
english: 'ticket (loan word)',
sentences: 'この遊園地のチケットは3000円です。この ゆうえんち の チケット は 3000 えん です。A ticket for this amusement park is 3000 yen.',
id: 33
},
{
kan: 'チョコレート',
tran: 'null',
english: 'chocolate',
sentences: '妹はチョコレートが大好きです。いもうと は チョコレート が だいすき です。My little sister loves chocolate.',
id: 34
},
{
kan: '脱ぐ',
tran: 'ぬぐ',
english: 'take off (clothes)',
sentences: '靴を脱いでください。くつ を ぬいで ください。Please take off your shoes.',
id: 35
},
{
kan: '年賀状',
tran: 'ねんがじょう',
english: 'New Year\'s card',
sentences: '昨日、年賀状を出しました。きのう、ねんがじょう を だしました。I sent my New Year\'s cards yesterday.',
id: 36
},
{
kan: '乗り物',
tran: 'のりもの',
english: 'vehicle, transportation',
sentences: '自転車は便利な乗り物です。じてんしゃ は べんりな のりもの です。Bicycles are a convenient form of transport.',
id: 37
},
{
kan: '布団',
tran: 'ふとん',
english: 'Japanese-style padded mattress',
sentences: '母が布団を干している。はは が ふとん を ほして いる。My mother is airing the futons.',
id: 38
},
{
kan: '役に立つ',
tran: 'やくにたつ',
english: 'be useful, be helpful',
sentences: '私は人々の役に立ちたいと思っています。わたし は ひとびと の やくにたちたい と おもって います。I want to be of service to others.',
id: 39
},
{
kan: '破れる',
tran: 'やぶれる',
english: 'be torn, be ripped up',
sentences: 'シャツが破れている。シャツ が やぶれて いる。My shirt is ripped up.',
id: 40
},
{
kan: '用事',
tran: 'ようじ',
english: 'errand, business',
sentences: '父は用事で出掛けています。ちち は ようじ で でかけて います。My father is out running errands.',
id: 41
},
{
kan: '写す',
tran: 'うつす',
english: 'copy, photograph',
sentences: '彼は友達の答えを写した。かれ は ともだち の こたえ を うつした。He copied his friend\'s answers.',
id: 42
},
{
kan: '腕時計',
tran: 'うでどけい',
english: 'wrist watch',
sentences: '私の腕時計は遅れている。わたし の うでどけい は おくれて いる。My watch is running slow.',
id: 43
},
{
kan: '贈り物',
tran: 'おくりもの',
english: 'present, gift',
sentences: 'すてきな贈り物をどうもありがとう。すてきな おくりもの を どうも ありがとう。Thank you for the wonderful gift.',
id: 44
},
{
kan: 'チャンネル',
tran: 'null',
english: 'channel',
sentences: 'テレビのチャンネルを変えてください。テレビ の チャンネル を かえて ください。Please change the TV channel.',
id: 45
},
{
kan: '冷やす',
tran: 'ひやす',
english: 'chill (something), cool down',
sentences: '頭を冷やしなさい。あたま を ひやしなさい。Cool down.',
id: 46
},
{
kan: '片付ける',
tran: 'かたづける',
english: 'put in order, clean up',
sentences: '早く部屋を片付けなさい。はやく へや を かたづけなさい。Hurry and clean up your room.',
id: 47
},
{
kan: '乾杯',
tran: 'かんぱい',
english: 'toast, drink (in celebration)',
sentences: 'お二人の未来に乾杯しましょう。おふたり の みらい に かんぱい しましょう。Let\'s give a toast to the couple\'s future.',
id: 48
},
{
kan: '停車',
tran: 'ていしゃ',
english: 'stop (of a train)',
sentences: 'この電車は東京まで停車いたしません。この でんしゃ は とうきょう まで ていしゃ いたしません。This train won\'t stop until it reaches Tokyo.',
id: 49
},
{
kan: '鳴く',
tran: 'なく',
english: 'sing, cry (of animals)',
sentences: 'どこかで猫が鳴いている。どこ か で ねこ が ないて いる。A cat is meowing somewhere.',
id: 50
},
{
kan: 'ガソリンスタンド',
tran: 'null',
english: 'gas station',
sentences: 'この近くにガソリンスタンドはありますか。この ちかく に ガソリン スタンド は あります か。Is there a gas station nearby?',
id: 51
},
{
kan: '片道',
tran: 'かたみち',
english: 'one way (trip)',
sentences: '東京まで片道切符を買った。とうきょう まで かたみち きっぷ を かった。I bought a one-way ticket to Tokyo.',
id: 52
},
{
kan: '交番',
tran: 'こうばん',
english: 'police box',
sentences: 'あそこの交番で道を聞きましょう。あそこ の こうばん で みち を ききましょう。Let\'s ask for directions at that police box.',
id: 53
},
{
kan: '大分',
tran: 'だいぶ',
english: 'greatly, fairly, highly',
sentences: '大分ピアノが上手くなりました。だいぶ ピアノ が うまく なりました。You\'ve become fairly good at the piano.',
id: 54
},
{
kan: 'デート',
tran: 'null',
english: 'date, romantic meeting',
sentences: 'デートで遊園地に行きました。デート で ゆうえんち に いきました。I went to an amusement park on a date.',
id: 55
},
{
kan: '梅雨',
tran: 'つゆ',
english: 'rainy season',
sentences: '梅雨は6月頃です。つゆ は 6 がつ ごろ です。The rainy season comes around June.', | tran: 'まっすぐ',
english: 'straight',
sentences: 'この道を真っ直ぐ行ってください。この みち を まっすぐ いって ください。Please go straight along this road.',
id: 57
},
{
kan: 'レモン',
tran: 'null',
english: 'lemon',
sentences: '紅茶にレモンを入れて飲んだ。こうちゃ に レモン を いれて のんだ。I put lemon in my tea and drank it.',
id: 58
},
{
kan: '上着',
tran: 'うわぎ',
english: 'outerwear, coat, jacket',
sentences: '暑いので上着を脱ぎました。あつい ので うわぎ を ぬぎました。I took my jacket off because it was hot.',
id: 59
},
{
kan: '偉い',
tran: 'えらい',
english: 'great, eminent',
sentences: '彼は偉い学者です。かれ は えらい がくしゃ です。He\'s a great scholar.',
id: 60
},
{
kan: '書き方',
tran: 'かきかた',
english: 'how to write, way of writing',
sentences: '彼はその漢字の書き方が分からない。かれ は その かんじ の かき かた が わからない。He doesn\'t know how to write that Chinese character.',
id: 61
},
{
kan: '炊く',
tran: 'たく',
english: 'cook (rice)',
sentences: '母は毎朝ご飯を炊く。はは は まいあさ ごはん を たく。My mother cooks rice every morning.',
id: 62
},
{
kan: 'チーズ',
tran: 'null',
english: 'cheese',
sentences: 'チーズを一切れ食べました。チーズ を ひと きれ たべました。I ate a slice of cheese.',
id: 63
},
{
kan: 'ドライブ',
tran: 'null',
english: 'drive (loan word)',
sentences: '今日は群馬までドライブしました。きょう は ぐんま まで ドライブ しました。I drove to Gunma today.',
id: 64
},
{
kan: '踏切',
tran: 'ふみきり',
english: 'railroad crossing',
sentences: 'その踏切は長い。その ふみきり は ながい。You have to expect a long wait at this railroad crossing.',
id: 65
},
{
kan: 'ラケット',
tran: 'null',
english: 'racket',
sentences: 'テニスのラケットを買いました。テニス の ラケット を かいました。I bought a tennis racket.',
id: 66
},
{
kan: '受付',
tran: 'うけつけ',
english: 'receptionist, information office',
sentences: '受付は9時からです。うけつけ は 9 じ から です。Registration starts from nine o\'clock.',
id: 67
},
{
kan: '怒る',
tran: 'おこる',
english: 'get angry, scold',
sentences: '彼女が嘘をついたので、彼は怒った。かのじょ が うそ を ついた ので、かれ は おこった。He was angry because she lied tohim.',
id: 68
},
{
kan: '化粧',
tran: 'けしょう',
english: 'makeup',
sentences: '彼女は化粧が上手い。かのじょ は けしょう が うまい。She\'s good at makeup.',
id: 69
},
{
kan: '都合',
tran: 'つごう',
english: 'convenience, availability',
sentences: '今日は都合が悪くて行けません。きょう は つごう が わるくて いけません。I\'m not available today.',
id: 70
},
{
kan: 'ひげ',
tran: 'null',
english: 'beard, moustache',
sentences: '父はひげをはやしています。ちち は ひげ を はやして います。My father\'s growing a beard.',
id: 71
},
{
kan: '本棚',
tran: 'ほんだな',
english: 'bookshelf, bookcase',
sentences: 'これはとても大きな本棚ですね。これ は とても おおきな ほんだな です ね。This is a very big bookshelf.',
id: 72
},
{
kan: '真っ暗',
tran: 'まっくら',
english: 'pitch-dark',
sentences: '外は真っ暗です。そと は まっくら です。It\'s pitch-dark outside.',
id: 73
},
{
kan: '沸く',
tran: 'わく',
english: 'boil, get loud (crowd applause etc.)',
sentences: 'お風呂が沸きました。おふろ が わきました。The bath is ready.',
id: 74
},
{
kan: '売り切れる',
tran: 'うりきれる',
english: 'sell out, go out of stock',
sentences: 'その本は直ぐ売り切れた。その ほん は すぐ うりきれた。The books sold out quickly.',
id: 75
},
{
kan: '押し入れ',
tran: 'おしいれ',
english: 'sliding-door closet',
sentences: '布団を押し入れにしまいました。ふとん を おしいれ に しまいました。I put my futon in the closet.',
id: 76
},
{
kan: '革',
tran: 'かわ',
english: 'leather',
sentences: '革のベルトを買いました。かわ の ベルト を かいました。I bought a leather belt.',
id: 77
},
{
kan: 'ぐっすり',
tran: 'null',
english: 'soundly',
sentences: '赤ちゃんがぐっすり寝ている。あかちゃん が ぐっすり ねて いる。The baby\'s sleeping soundly.',
id: 78
},
{
kan: '紅茶',
tran: 'こうちゃ',
english: 'black tea',
sentences: '温かい紅茶が飲みたい。あたたかい こうちゃ が のみたい。I want to drink hot tea.',
id: 79
},
{
kan: '邪魔',
tran: 'じゃま',
english: 'disturbance, blocking',
sentences: '邪魔です、どいてください。じゃま です、どいて ください。You\'re blocking my way, so please move.',
id: 80
},
{
kan: 'ソース',
tran: 'null',
english: 'sauce',
sentences: 'ソースはどれですか。ソース は どれ です か。Which is the sauce?',
id: 81
},
{
kan: '足す',
tran: 'たす',
english: 'add (things of the same kind)',
sentences: '母は味噌汁に水を足した。はは は みそしる に みず を たした。My mother added water to the miso soup.',
id: 82
},
{
kan: 'トマト',
tran: 'null',
english: 'tomato',
sentences: '私はトマトが大好きです。わたし は トマト が だいすき です。I love tomatoes.',
id: 83
},
{
kan: 'バター',
tran: 'null',
english: 'butter',
sentences: 'パンにバターをぬって食べました。パン に バター を ぬって たべました。I spread some butter on the bread and ate it.',
id: 84
},
{
kan: '発車',
tran: 'はっしゃ',
english: 'departure (of a vehicle)',
sentences: 'バスが発車します。バス が はっしゃ します。The bus is leaving.',
id: 85
},
{
kan: 'バレーボール',
tran: 'null',
english: 'volleyball',
sentences: '妹はバレーボールが得意です。いもうと は バレーボール が とくい です。My little sister is good at volleyball.',
id: 86
},
{
kan: 'おかず',
tran: 'null',
english: 'dishes to go with the rice',
sentences: '晩ご飯のおかずは何?ばん ごはん の おかず は なに?What are we having for dinner?',
id: 87
},
{
kan: 'カレンダー',
tran: 'null',
english: 'calendar',
sentences: 'カレンダーに予定を書いた。カレンダー に よてい を かいた。I wrote my schedule on the calendar.',
id: 88
},
{
kan: 'とにかく',
tran: 'null',
english: 'in any case, anyway',
sentences: 'とにかく現場へ行ってみましょう。とにかく げんば へ いって みましょう。Anyway, let\'s go to the site.',
id: 89
},
{
kan: '楽しみ',
tran: 'たのしみ',
english: 'enjoyment, something looked forward to',
sentences: '旅行は父の老後の楽しみです。りょこう は ちち の ろうご の たのしみ です。My father looks forward to traveling in his old age.',
id: 90
},
{
kan: 'おしゃべり',
tran: 'null',
english: 'chatter, idle talk',
sentences: '彼女たちはおしゃべりに夢中です。かのじょ たち は おしゃべり に むちゅう です 。Those women are absorbed in conversation.',
id: 91
},
{
kan: '読書',
tran: 'どくしょ',
english: 'reading books',
sentences: '私の趣味は読書です。わたし の しゅみ は どくしょ です。My hobby is reading.',
id: 92
},
{
kan: '寒気',
tran: 'さむけ',
english: 'chill',
sentences: '何だか寒気がします。なんだか さむけ が します。I feel chilly somehow.',
id: 93
},
{
kan: '夕焼け',
tran: 'ゆうやけ',
english: 'sunset glow',
sentences: '今日は夕焼けがきれいです。きょう は ゆうやけ が きれい です。The sunset glow is beautiful today.',
id: 94
},
{
kan: '痛める',
tran: 'いためる',
english: 'damage, hurt',
sentences: '彼は柔道で腰を痛めたんだ。かれ は じゅうどう で こし を いためた ん だ。He hurt his lower back doing judo.',
id: 95
},
{
kan: 'くすぐったい',
tran: 'null',
english: 'ticklish',
sentences: '犬に顔をなめられてくすぐったいよ。いぬ に かお を なめられて くすぐったい よ。It tickles to have my face licked by a dog.',
id: 96
},
{
kan: '泥棒',
tran: 'どろぼう',
english: 'thief, burglar',
sentences: '近所に泥棒が入った。きんじょ に どろぼう が はいった。There was a burglary in the neighborhood.',
id: 97
},
{
kan: '襖',
tran: 'ふすま',
english: 'paper sliding door',
sentences: '襖を閉めてください。ふすま を しめて ください。Please close the paper sliding door.',
id: 98
},
{
kan: '美人',
tran: 'びじん',
english: 'beautiful woman',
sentences: '彼のお母さんは美人です。かれ の おかあさん は びじん です。His mother\'s a beautiful woman.',
id: 99
},
{
kan: 'タイプライター',
tran: 'null',
english: 'typewriter',
sentences: '母は古いタイプライターを持っています。はは は ふるい タイプライター を もって います。My mother has an old typewriter.',
id: 100
}] | id: 56
},
{
kan: '真っ直ぐ', | random_line_split |
ha_tracker.go | package distributor
import (
"context"
"errors"
"flag"
"fmt"
"math/rand"
"net/http"
"strings"
"sync"
"time"
"github.com/weaveworks/common/httpgrpc"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/prometheus/pkg/timestamp"
"github.com/weaveworks/common/mtime"
"github.com/cortexproject/cortex/pkg/ingester/client"
"github.com/cortexproject/cortex/pkg/ring/kv"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/util"
)
var (
electedReplicaChanges = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "cortex",
Name: "ha_tracker_elected_replica_changes_total",
Help: "The total number of times the elected replica has changed for a user ID/cluster.",
}, []string{"user", "cluster"})
electedReplicaTimestamp = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "cortex",
Name: "ha_tracker_elected_replica_timestamp_seconds",
Help: "The timestamp stored for the currently elected replica, from the KVStore.",
}, []string{"user", "cluster"})
electedReplicaPropagationTime = promauto.NewHistogram(prometheus.HistogramOpts{
Namespace: "cortex",
Name: "ha_tracker_elected_replica_change_propagation_time_seconds",
Help: "The time it for the distributor to update the replica change.",
Buckets: prometheus.DefBuckets,
})
kvCASCalls = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "cortex",
Name: "ha_tracker_kv_store_cas_total",
Help: "The total number of CAS calls to the KV store for a user ID/cluster.",
}, []string{"user", "cluster"})
errNegativeUpdateTimeoutJitterMax = errors.New("HA tracker max update timeout jitter shouldn't be negative")
errInvalidFailoverTimeout = "HA Tracker failover timeout (%v) must be at least 1s greater than update timeout - max jitter (%v)"
)
// ProtoReplicaDescFactory makes new InstanceDescs
func ProtoReplicaDescFactory() proto.Message {
return NewReplicaDesc()
}
// NewReplicaDesc returns an empty *distributor.ReplicaDesc.
func NewReplicaDesc() *ReplicaDesc {
return &ReplicaDesc{}
}
// Track the replica we're accepting samples from
// for each HA cluster we know about.
type haTracker struct {
logger log.Logger
cfg HATrackerConfig
client kv.Client
updateTimeoutJitter time.Duration
// Replicas we are accepting samples from.
electedLock sync.RWMutex
elected map[string]ReplicaDesc
done chan struct{}
cancel context.CancelFunc
}
// HATrackerConfig contains the configuration require to
// create a HA Tracker.
type HATrackerConfig struct {
EnableHATracker bool `yaml:"enable_ha_tracker,omitempty"`
// We should only update the timestamp if the difference
// between the stored timestamp and the time we received a sample at
// is more than this duration.
UpdateTimeout time.Duration `yaml:"ha_tracker_update_timeout"`
UpdateTimeoutJitterMax time.Duration `yaml:"ha_tracker_update_timeout_jitter_max"`
// We should only failover to accepting samples from a replica
// other than the replica written in the KVStore if the difference
// between the stored timestamp and the time we received a sample is
// more than this duration
FailoverTimeout time.Duration `yaml:"ha_tracker_failover_timeout"`
KVStore kv.Config
}
// RegisterFlags adds the flags required to config this to the given FlagSet.
func (cfg *HATrackerConfig) RegisterFlags(f *flag.FlagSet) {
f.BoolVar(&cfg.EnableHATracker,
"distributor.ha-tracker.enable",
false,
"Enable the distributors HA tracker so that it can accept samples from Prometheus HA replicas gracefully (requires labels).")
f.DurationVar(&cfg.UpdateTimeout,
"distributor.ha-tracker.update-timeout",
15*time.Second,
"Update the timestamp in the KV store for a given cluster/replica only after this amount of time has passed since the current stored timestamp.")
f.DurationVar(&cfg.UpdateTimeoutJitterMax,
"distributor.ha-tracker.update-timeout-jitter-max",
5*time.Second,
"To spread the HA deduping heartbeats out over time.")
f.DurationVar(&cfg.FailoverTimeout,
"distributor.ha-tracker.failover-timeout",
30*time.Second,
"If we don't receive any samples from the accepted replica for a cluster in this amount of time we will failover to the next replica we receive a sample from. This value must be greater than the update timeout")
// We want the ability to use different Consul instances for the ring and for HA cluster tracking.
cfg.KVStore.RegisterFlagsWithPrefix("distributor.ha-tracker.", f)
}
// Validate config and returns error on failure
func (cfg *HATrackerConfig) Validate() error {
if cfg.UpdateTimeoutJitterMax < 0 {
return errNegativeUpdateTimeoutJitterMax
}
minFailureTimeout := cfg.UpdateTimeout + cfg.UpdateTimeoutJitterMax + time.Second
if cfg.FailoverTimeout < minFailureTimeout {
return fmt.Errorf(errInvalidFailoverTimeout, cfg.FailoverTimeout, minFailureTimeout)
}
return nil
}
// NewClusterTracker returns a new HA cluster tracker using either Consul
// or in-memory KV store.
func newClusterTracker(cfg HATrackerConfig) (*haTracker, error) {
codec := codec.Proto{Factory: ProtoReplicaDescFactory}
var jitter time.Duration
if cfg.UpdateTimeoutJitterMax > 0 {
jitter = time.Duration(rand.Int63n(int64(2*cfg.UpdateTimeoutJitterMax))) - cfg.UpdateTimeoutJitterMax
}
ctx, cancel := context.WithCancel(context.Background())
t := haTracker{
logger: util.Logger,
cfg: cfg,
updateTimeoutJitter: jitter,
done: make(chan struct{}),
elected: map[string]ReplicaDesc{},
cancel: cancel,
}
if cfg.EnableHATracker {
client, err := kv.NewClient(cfg.KVStore, codec)
if err != nil {
return nil, err
}
t.client = client
go t.loop(ctx)
}
return &t, nil
}
// Follows pattern used by ring for WatchKey.
func (c *haTracker) loop(ctx context.Context) {
defer close(c.done)
// The KVStore config we gave when creating c should have contained a prefix,
// which would have given us a prefixed KVStore client. So, we can pass empty string here.
c.client.WatchPrefix(ctx, "", func(key string, value interface{}) bool {
replica := value.(*ReplicaDesc)
c.electedLock.Lock()
defer c.electedLock.Unlock()
chunks := strings.SplitN(key, "/", 2)
// The prefix has already been stripped, so a valid key would look like cluster/replica,
// and a key without a / such as `ring` would be invalid.
if len(chunks) != 2 {
return true
}
if replica.Replica != c.elected[key].Replica {
electedReplicaChanges.WithLabelValues(chunks[0], chunks[1]).Inc()
}
c.elected[key] = *replica
electedReplicaTimestamp.WithLabelValues(chunks[0], chunks[1]).Set(float64(replica.ReceivedAt / 1000))
electedReplicaPropagationTime.Observe(time.Since(timestamp.Time(replica.ReceivedAt)).Seconds())
return true
})
}
// Stop ends calls the trackers cancel function, which will end the loop for WatchPrefix.
func (c *haTracker) stop() {
if c.cfg.EnableHATracker {
c.cancel()
<-c.done
}
}
// CheckReplica checks the cluster and replica against the backing KVStore and local cache in the
// tracker c to see if we should accept the incomming sample. It will return an error if the sample
// should not be accepted. Note that internally this function does checks against the stored values
// and may modify the stored data, for example to failover between replicas after a certain period of time.
// A 202 response code is returned (from checkKVstore) if we shouldn't store this sample but are
// accepting samples from another replica for the cluster, so that there isn't a bunch of error's returned
// to customers clients.
func (c *haTracker) checkReplica(ctx context.Context, userID, cluster, replica string) error {
// If HA tracking isn't enabled then accept the sample
if !c.cfg.EnableHATracker {
return nil
}
key := fmt.Sprintf("%s/%s", userID, cluster)
now := mtime.Now()
c.electedLock.RLock()
entry, ok := c.elected[key]
c.electedLock.RUnlock()
if ok && now.Sub(timestamp.Time(entry.ReceivedAt)) < c.cfg.UpdateTimeout+c.updateTimeoutJitter {
if entry.Replica != replica {
return replicasNotMatchError(replica, entry.Replica)
}
return nil
}
err := c.checkKVStore(ctx, key, replica, now)
kvCASCalls.WithLabelValues(userID, cluster).Inc()
if err != nil {
// The callback within checkKVStore will return a 202 if the sample is being deduped,
// otherwise there may have been an actual error CAS'ing that we should log.
if resp, ok := httpgrpc.HTTPResponseFromError(err); ok && resp.GetCode() != 202 {
level.Error(util.Logger).Log("msg", "rejecting sample", "error", err)
}
}
return err
}
func (c *haTracker) checkKVStore(ctx context.Context, key, replica string, now time.Time) error {
return c.client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) {
if desc, ok := in.(*ReplicaDesc); ok {
// We don't need to CAS and update the timestamp in the KV store if the timestamp we've received
// this sample at is less than updateTimeout amount of time since the timestamp in the KV store.
if desc.Replica == replica && now.Sub(timestamp.Time(desc.ReceivedAt)) < c.cfg.UpdateTimeout+c.updateTimeoutJitter {
return nil, false, nil
}
// We shouldn't failover to accepting a new replica if the timestamp we've received this sample at
// is less than failOver timeout amount of time since the timestamp in the KV store.
if desc.Replica != replica && now.Sub(timestamp.Time(desc.ReceivedAt)) < c.cfg.FailoverTimeout {
// Return a 202.
return nil, false, replicasNotMatchError(replica, desc.Replica)
} | // from this replica. Invalid could mean that the timestamp in the KV store was
// out of date based on the update and failover timeouts when compared to now.
return &ReplicaDesc{
Replica: replica, ReceivedAt: timestamp.FromTime(now),
}, true, nil
})
}
func replicasNotMatchError(replica, elected string) error {
return httpgrpc.Errorf(http.StatusAccepted, "replicas did not mach, rejecting sample: replica=%s, elected=%s", replica, elected)
}
// Modifies the labels parameter in place, removing labels that match
// the replica or cluster label and returning their values. Returns an error
// if we find one but not both of the labels.
func findHALabels(replicaLabel, clusterLabel string, labels []client.LabelAdapter) (string, string) {
var cluster, replica string
var pair client.LabelAdapter
for _, pair = range labels {
if pair.Name == replicaLabel {
replica = string(pair.Value)
}
if pair.Name == clusterLabel {
cluster = string(pair.Value)
}
}
return cluster, replica
} | }
// There was either invalid or no data for the key, so we now accept samples | random_line_split |
ha_tracker.go | package distributor
import (
"context"
"errors"
"flag"
"fmt"
"math/rand"
"net/http"
"strings"
"sync"
"time"
"github.com/weaveworks/common/httpgrpc"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/prometheus/pkg/timestamp"
"github.com/weaveworks/common/mtime"
"github.com/cortexproject/cortex/pkg/ingester/client"
"github.com/cortexproject/cortex/pkg/ring/kv"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/util"
)
var (
electedReplicaChanges = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "cortex",
Name: "ha_tracker_elected_replica_changes_total",
Help: "The total number of times the elected replica has changed for a user ID/cluster.",
}, []string{"user", "cluster"})
electedReplicaTimestamp = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "cortex",
Name: "ha_tracker_elected_replica_timestamp_seconds",
Help: "The timestamp stored for the currently elected replica, from the KVStore.",
}, []string{"user", "cluster"})
electedReplicaPropagationTime = promauto.NewHistogram(prometheus.HistogramOpts{
Namespace: "cortex",
Name: "ha_tracker_elected_replica_change_propagation_time_seconds",
Help: "The time it for the distributor to update the replica change.",
Buckets: prometheus.DefBuckets,
})
kvCASCalls = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "cortex",
Name: "ha_tracker_kv_store_cas_total",
Help: "The total number of CAS calls to the KV store for a user ID/cluster.",
}, []string{"user", "cluster"})
errNegativeUpdateTimeoutJitterMax = errors.New("HA tracker max update timeout jitter shouldn't be negative")
errInvalidFailoverTimeout = "HA Tracker failover timeout (%v) must be at least 1s greater than update timeout - max jitter (%v)"
)
// ProtoReplicaDescFactory makes new InstanceDescs
func ProtoReplicaDescFactory() proto.Message {
return NewReplicaDesc()
}
// NewReplicaDesc returns an empty *distributor.ReplicaDesc.
func NewReplicaDesc() *ReplicaDesc {
return &ReplicaDesc{}
}
// Track the replica we're accepting samples from
// for each HA cluster we know about.
type haTracker struct {
logger log.Logger
cfg HATrackerConfig
client kv.Client
updateTimeoutJitter time.Duration
// Replicas we are accepting samples from.
electedLock sync.RWMutex
elected map[string]ReplicaDesc
done chan struct{}
cancel context.CancelFunc
}
// HATrackerConfig contains the configuration require to
// create a HA Tracker.
type HATrackerConfig struct {
EnableHATracker bool `yaml:"enable_ha_tracker,omitempty"`
// We should only update the timestamp if the difference
// between the stored timestamp and the time we received a sample at
// is more than this duration.
UpdateTimeout time.Duration `yaml:"ha_tracker_update_timeout"`
UpdateTimeoutJitterMax time.Duration `yaml:"ha_tracker_update_timeout_jitter_max"`
// We should only failover to accepting samples from a replica
// other than the replica written in the KVStore if the difference
// between the stored timestamp and the time we received a sample is
// more than this duration
FailoverTimeout time.Duration `yaml:"ha_tracker_failover_timeout"`
KVStore kv.Config
}
// RegisterFlags adds the flags required to config this to the given FlagSet.
func (cfg *HATrackerConfig) RegisterFlags(f *flag.FlagSet) {
f.BoolVar(&cfg.EnableHATracker,
"distributor.ha-tracker.enable",
false,
"Enable the distributors HA tracker so that it can accept samples from Prometheus HA replicas gracefully (requires labels).")
f.DurationVar(&cfg.UpdateTimeout,
"distributor.ha-tracker.update-timeout",
15*time.Second,
"Update the timestamp in the KV store for a given cluster/replica only after this amount of time has passed since the current stored timestamp.")
f.DurationVar(&cfg.UpdateTimeoutJitterMax,
"distributor.ha-tracker.update-timeout-jitter-max",
5*time.Second,
"To spread the HA deduping heartbeats out over time.")
f.DurationVar(&cfg.FailoverTimeout,
"distributor.ha-tracker.failover-timeout",
30*time.Second,
"If we don't receive any samples from the accepted replica for a cluster in this amount of time we will failover to the next replica we receive a sample from. This value must be greater than the update timeout")
// We want the ability to use different Consul instances for the ring and for HA cluster tracking.
cfg.KVStore.RegisterFlagsWithPrefix("distributor.ha-tracker.", f)
}
// Validate config and returns error on failure
func (cfg *HATrackerConfig) Validate() error {
if cfg.UpdateTimeoutJitterMax < 0 {
return errNegativeUpdateTimeoutJitterMax
}
minFailureTimeout := cfg.UpdateTimeout + cfg.UpdateTimeoutJitterMax + time.Second
if cfg.FailoverTimeout < minFailureTimeout {
return fmt.Errorf(errInvalidFailoverTimeout, cfg.FailoverTimeout, minFailureTimeout)
}
return nil
}
// NewClusterTracker returns a new HA cluster tracker using either Consul
// or in-memory KV store.
func newClusterTracker(cfg HATrackerConfig) (*haTracker, error) {
codec := codec.Proto{Factory: ProtoReplicaDescFactory}
var jitter time.Duration
if cfg.UpdateTimeoutJitterMax > 0 {
jitter = time.Duration(rand.Int63n(int64(2*cfg.UpdateTimeoutJitterMax))) - cfg.UpdateTimeoutJitterMax
}
ctx, cancel := context.WithCancel(context.Background())
t := haTracker{
logger: util.Logger,
cfg: cfg,
updateTimeoutJitter: jitter,
done: make(chan struct{}),
elected: map[string]ReplicaDesc{},
cancel: cancel,
}
if cfg.EnableHATracker {
client, err := kv.NewClient(cfg.KVStore, codec)
if err != nil {
return nil, err
}
t.client = client
go t.loop(ctx)
}
return &t, nil
}
// Follows pattern used by ring for WatchKey.
func (c *haTracker) loop(ctx context.Context) {
defer close(c.done)
// The KVStore config we gave when creating c should have contained a prefix,
// which would have given us a prefixed KVStore client. So, we can pass empty string here.
c.client.WatchPrefix(ctx, "", func(key string, value interface{}) bool {
replica := value.(*ReplicaDesc)
c.electedLock.Lock()
defer c.electedLock.Unlock()
chunks := strings.SplitN(key, "/", 2)
// The prefix has already been stripped, so a valid key would look like cluster/replica,
// and a key without a / such as `ring` would be invalid.
if len(chunks) != 2 {
return true
}
if replica.Replica != c.elected[key].Replica {
electedReplicaChanges.WithLabelValues(chunks[0], chunks[1]).Inc()
}
c.elected[key] = *replica
electedReplicaTimestamp.WithLabelValues(chunks[0], chunks[1]).Set(float64(replica.ReceivedAt / 1000))
electedReplicaPropagationTime.Observe(time.Since(timestamp.Time(replica.ReceivedAt)).Seconds())
return true
})
}
// Stop ends calls the trackers cancel function, which will end the loop for WatchPrefix.
func (c *haTracker) stop() {
if c.cfg.EnableHATracker {
c.cancel()
<-c.done
}
}
// CheckReplica checks the cluster and replica against the backing KVStore and local cache in the
// tracker c to see if we should accept the incomming sample. It will return an error if the sample
// should not be accepted. Note that internally this function does checks against the stored values
// and may modify the stored data, for example to failover between replicas after a certain period of time.
// A 202 response code is returned (from checkKVstore) if we shouldn't store this sample but are
// accepting samples from another replica for the cluster, so that there isn't a bunch of error's returned
// to customers clients.
func (c *haTracker) checkReplica(ctx context.Context, userID, cluster, replica string) error {
// If HA tracking isn't enabled then accept the sample
if !c.cfg.EnableHATracker {
return nil
}
key := fmt.Sprintf("%s/%s", userID, cluster)
now := mtime.Now()
c.electedLock.RLock()
entry, ok := c.elected[key]
c.electedLock.RUnlock()
if ok && now.Sub(timestamp.Time(entry.ReceivedAt)) < c.cfg.UpdateTimeout+c.updateTimeoutJitter {
if entry.Replica != replica {
return replicasNotMatchError(replica, entry.Replica)
}
return nil
}
err := c.checkKVStore(ctx, key, replica, now)
kvCASCalls.WithLabelValues(userID, cluster).Inc()
if err != nil {
// The callback within checkKVStore will return a 202 if the sample is being deduped,
// otherwise there may have been an actual error CAS'ing that we should log.
if resp, ok := httpgrpc.HTTPResponseFromError(err); ok && resp.GetCode() != 202 {
level.Error(util.Logger).Log("msg", "rejecting sample", "error", err)
}
}
return err
}
func (c *haTracker) checkKVStore(ctx context.Context, key, replica string, now time.Time) error {
return c.client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) {
if desc, ok := in.(*ReplicaDesc); ok {
// We don't need to CAS and update the timestamp in the KV store if the timestamp we've received
// this sample at is less than updateTimeout amount of time since the timestamp in the KV store.
if desc.Replica == replica && now.Sub(timestamp.Time(desc.ReceivedAt)) < c.cfg.UpdateTimeout+c.updateTimeoutJitter {
return nil, false, nil
}
// We shouldn't failover to accepting a new replica if the timestamp we've received this sample at
// is less than failOver timeout amount of time since the timestamp in the KV store.
if desc.Replica != replica && now.Sub(timestamp.Time(desc.ReceivedAt)) < c.cfg.FailoverTimeout {
// Return a 202.
return nil, false, replicasNotMatchError(replica, desc.Replica)
}
}
// There was either invalid or no data for the key, so we now accept samples
// from this replica. Invalid could mean that the timestamp in the KV store was
// out of date based on the update and failover timeouts when compared to now.
return &ReplicaDesc{
Replica: replica, ReceivedAt: timestamp.FromTime(now),
}, true, nil
})
}
func | (replica, elected string) error {
return httpgrpc.Errorf(http.StatusAccepted, "replicas did not mach, rejecting sample: replica=%s, elected=%s", replica, elected)
}
// Modifies the labels parameter in place, removing labels that match
// the replica or cluster label and returning their values. Returns an error
// if we find one but not both of the labels.
func findHALabels(replicaLabel, clusterLabel string, labels []client.LabelAdapter) (string, string) {
var cluster, replica string
var pair client.LabelAdapter
for _, pair = range labels {
if pair.Name == replicaLabel {
replica = string(pair.Value)
}
if pair.Name == clusterLabel {
cluster = string(pair.Value)
}
}
return cluster, replica
}
| replicasNotMatchError | identifier_name |
ha_tracker.go | package distributor
import (
"context"
"errors"
"flag"
"fmt"
"math/rand"
"net/http"
"strings"
"sync"
"time"
"github.com/weaveworks/common/httpgrpc"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/prometheus/pkg/timestamp"
"github.com/weaveworks/common/mtime"
"github.com/cortexproject/cortex/pkg/ingester/client"
"github.com/cortexproject/cortex/pkg/ring/kv"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/util"
)
var (
electedReplicaChanges = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "cortex",
Name: "ha_tracker_elected_replica_changes_total",
Help: "The total number of times the elected replica has changed for a user ID/cluster.",
}, []string{"user", "cluster"})
electedReplicaTimestamp = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "cortex",
Name: "ha_tracker_elected_replica_timestamp_seconds",
Help: "The timestamp stored for the currently elected replica, from the KVStore.",
}, []string{"user", "cluster"})
electedReplicaPropagationTime = promauto.NewHistogram(prometheus.HistogramOpts{
Namespace: "cortex",
Name: "ha_tracker_elected_replica_change_propagation_time_seconds",
Help: "The time it for the distributor to update the replica change.",
Buckets: prometheus.DefBuckets,
})
kvCASCalls = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "cortex",
Name: "ha_tracker_kv_store_cas_total",
Help: "The total number of CAS calls to the KV store for a user ID/cluster.",
}, []string{"user", "cluster"})
errNegativeUpdateTimeoutJitterMax = errors.New("HA tracker max update timeout jitter shouldn't be negative")
errInvalidFailoverTimeout = "HA Tracker failover timeout (%v) must be at least 1s greater than update timeout - max jitter (%v)"
)
// ProtoReplicaDescFactory makes new InstanceDescs
func ProtoReplicaDescFactory() proto.Message {
return NewReplicaDesc()
}
// NewReplicaDesc returns an empty *distributor.ReplicaDesc.
func NewReplicaDesc() *ReplicaDesc {
return &ReplicaDesc{}
}
// Track the replica we're accepting samples from
// for each HA cluster we know about.
type haTracker struct {
logger log.Logger
cfg HATrackerConfig
client kv.Client
updateTimeoutJitter time.Duration
// Replicas we are accepting samples from.
electedLock sync.RWMutex
elected map[string]ReplicaDesc
done chan struct{}
cancel context.CancelFunc
}
// HATrackerConfig contains the configuration require to
// create a HA Tracker.
type HATrackerConfig struct {
EnableHATracker bool `yaml:"enable_ha_tracker,omitempty"`
// We should only update the timestamp if the difference
// between the stored timestamp and the time we received a sample at
// is more than this duration.
UpdateTimeout time.Duration `yaml:"ha_tracker_update_timeout"`
UpdateTimeoutJitterMax time.Duration `yaml:"ha_tracker_update_timeout_jitter_max"`
// We should only failover to accepting samples from a replica
// other than the replica written in the KVStore if the difference
// between the stored timestamp and the time we received a sample is
// more than this duration
FailoverTimeout time.Duration `yaml:"ha_tracker_failover_timeout"`
KVStore kv.Config
}
// RegisterFlags adds the flags required to config this to the given FlagSet.
func (cfg *HATrackerConfig) RegisterFlags(f *flag.FlagSet) {
f.BoolVar(&cfg.EnableHATracker,
"distributor.ha-tracker.enable",
false,
"Enable the distributors HA tracker so that it can accept samples from Prometheus HA replicas gracefully (requires labels).")
f.DurationVar(&cfg.UpdateTimeout,
"distributor.ha-tracker.update-timeout",
15*time.Second,
"Update the timestamp in the KV store for a given cluster/replica only after this amount of time has passed since the current stored timestamp.")
f.DurationVar(&cfg.UpdateTimeoutJitterMax,
"distributor.ha-tracker.update-timeout-jitter-max",
5*time.Second,
"To spread the HA deduping heartbeats out over time.")
f.DurationVar(&cfg.FailoverTimeout,
"distributor.ha-tracker.failover-timeout",
30*time.Second,
"If we don't receive any samples from the accepted replica for a cluster in this amount of time we will failover to the next replica we receive a sample from. This value must be greater than the update timeout")
// We want the ability to use different Consul instances for the ring and for HA cluster tracking.
cfg.KVStore.RegisterFlagsWithPrefix("distributor.ha-tracker.", f)
}
// Validate config and returns error on failure
func (cfg *HATrackerConfig) Validate() error {
if cfg.UpdateTimeoutJitterMax < 0 {
return errNegativeUpdateTimeoutJitterMax
}
minFailureTimeout := cfg.UpdateTimeout + cfg.UpdateTimeoutJitterMax + time.Second
if cfg.FailoverTimeout < minFailureTimeout {
return fmt.Errorf(errInvalidFailoverTimeout, cfg.FailoverTimeout, minFailureTimeout)
}
return nil
}
// NewClusterTracker returns a new HA cluster tracker using either Consul
// or in-memory KV store.
func newClusterTracker(cfg HATrackerConfig) (*haTracker, error) {
codec := codec.Proto{Factory: ProtoReplicaDescFactory}
var jitter time.Duration
if cfg.UpdateTimeoutJitterMax > 0 {
jitter = time.Duration(rand.Int63n(int64(2*cfg.UpdateTimeoutJitterMax))) - cfg.UpdateTimeoutJitterMax
}
ctx, cancel := context.WithCancel(context.Background())
t := haTracker{
logger: util.Logger,
cfg: cfg,
updateTimeoutJitter: jitter,
done: make(chan struct{}),
elected: map[string]ReplicaDesc{},
cancel: cancel,
}
if cfg.EnableHATracker {
client, err := kv.NewClient(cfg.KVStore, codec)
if err != nil {
return nil, err
}
t.client = client
go t.loop(ctx)
}
return &t, nil
}
// Follows pattern used by ring for WatchKey.
func (c *haTracker) loop(ctx context.Context) {
defer close(c.done)
// The KVStore config we gave when creating c should have contained a prefix,
// which would have given us a prefixed KVStore client. So, we can pass empty string here.
c.client.WatchPrefix(ctx, "", func(key string, value interface{}) bool {
replica := value.(*ReplicaDesc)
c.electedLock.Lock()
defer c.electedLock.Unlock()
chunks := strings.SplitN(key, "/", 2)
// The prefix has already been stripped, so a valid key would look like cluster/replica,
// and a key without a / such as `ring` would be invalid.
if len(chunks) != 2 {
return true
}
if replica.Replica != c.elected[key].Replica {
electedReplicaChanges.WithLabelValues(chunks[0], chunks[1]).Inc()
}
c.elected[key] = *replica
electedReplicaTimestamp.WithLabelValues(chunks[0], chunks[1]).Set(float64(replica.ReceivedAt / 1000))
electedReplicaPropagationTime.Observe(time.Since(timestamp.Time(replica.ReceivedAt)).Seconds())
return true
})
}
// Stop ends calls the trackers cancel function, which will end the loop for WatchPrefix.
func (c *haTracker) stop() {
if c.cfg.EnableHATracker {
c.cancel()
<-c.done
}
}
// CheckReplica checks the cluster and replica against the backing KVStore and local cache in the
// tracker c to see if we should accept the incomming sample. It will return an error if the sample
// should not be accepted. Note that internally this function does checks against the stored values
// and may modify the stored data, for example to failover between replicas after a certain period of time.
// A 202 response code is returned (from checkKVstore) if we shouldn't store this sample but are
// accepting samples from another replica for the cluster, so that there isn't a bunch of error's returned
// to customers clients.
func (c *haTracker) checkReplica(ctx context.Context, userID, cluster, replica string) error {
// If HA tracking isn't enabled then accept the sample
if !c.cfg.EnableHATracker {
return nil
}
key := fmt.Sprintf("%s/%s", userID, cluster)
now := mtime.Now()
c.electedLock.RLock()
entry, ok := c.elected[key]
c.electedLock.RUnlock()
if ok && now.Sub(timestamp.Time(entry.ReceivedAt)) < c.cfg.UpdateTimeout+c.updateTimeoutJitter {
if entry.Replica != replica {
return replicasNotMatchError(replica, entry.Replica)
}
return nil
}
err := c.checkKVStore(ctx, key, replica, now)
kvCASCalls.WithLabelValues(userID, cluster).Inc()
if err != nil {
// The callback within checkKVStore will return a 202 if the sample is being deduped,
// otherwise there may have been an actual error CAS'ing that we should log.
if resp, ok := httpgrpc.HTTPResponseFromError(err); ok && resp.GetCode() != 202 {
level.Error(util.Logger).Log("msg", "rejecting sample", "error", err)
}
}
return err
}
func (c *haTracker) checkKVStore(ctx context.Context, key, replica string, now time.Time) error {
return c.client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) {
if desc, ok := in.(*ReplicaDesc); ok {
// We don't need to CAS and update the timestamp in the KV store if the timestamp we've received
// this sample at is less than updateTimeout amount of time since the timestamp in the KV store.
if desc.Replica == replica && now.Sub(timestamp.Time(desc.ReceivedAt)) < c.cfg.UpdateTimeout+c.updateTimeoutJitter {
return nil, false, nil
}
// We shouldn't failover to accepting a new replica if the timestamp we've received this sample at
// is less than failOver timeout amount of time since the timestamp in the KV store.
if desc.Replica != replica && now.Sub(timestamp.Time(desc.ReceivedAt)) < c.cfg.FailoverTimeout {
// Return a 202.
return nil, false, replicasNotMatchError(replica, desc.Replica)
}
}
// There was either invalid or no data for the key, so we now accept samples
// from this replica. Invalid could mean that the timestamp in the KV store was
// out of date based on the update and failover timeouts when compared to now.
return &ReplicaDesc{
Replica: replica, ReceivedAt: timestamp.FromTime(now),
}, true, nil
})
}
func replicasNotMatchError(replica, elected string) error {
return httpgrpc.Errorf(http.StatusAccepted, "replicas did not mach, rejecting sample: replica=%s, elected=%s", replica, elected)
}
// Modifies the labels parameter in place, removing labels that match
// the replica or cluster label and returning their values. Returns an error
// if we find one but not both of the labels.
func findHALabels(replicaLabel, clusterLabel string, labels []client.LabelAdapter) (string, string) {
var cluster, replica string
var pair client.LabelAdapter
for _, pair = range labels |
return cluster, replica
}
| {
if pair.Name == replicaLabel {
replica = string(pair.Value)
}
if pair.Name == clusterLabel {
cluster = string(pair.Value)
}
} | conditional_block |
ha_tracker.go | package distributor
import (
"context"
"errors"
"flag"
"fmt"
"math/rand"
"net/http"
"strings"
"sync"
"time"
"github.com/weaveworks/common/httpgrpc"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/prometheus/pkg/timestamp"
"github.com/weaveworks/common/mtime"
"github.com/cortexproject/cortex/pkg/ingester/client"
"github.com/cortexproject/cortex/pkg/ring/kv"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/util"
)
var (
electedReplicaChanges = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "cortex",
Name: "ha_tracker_elected_replica_changes_total",
Help: "The total number of times the elected replica has changed for a user ID/cluster.",
}, []string{"user", "cluster"})
electedReplicaTimestamp = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "cortex",
Name: "ha_tracker_elected_replica_timestamp_seconds",
Help: "The timestamp stored for the currently elected replica, from the KVStore.",
}, []string{"user", "cluster"})
electedReplicaPropagationTime = promauto.NewHistogram(prometheus.HistogramOpts{
Namespace: "cortex",
Name: "ha_tracker_elected_replica_change_propagation_time_seconds",
Help: "The time it for the distributor to update the replica change.",
Buckets: prometheus.DefBuckets,
})
kvCASCalls = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "cortex",
Name: "ha_tracker_kv_store_cas_total",
Help: "The total number of CAS calls to the KV store for a user ID/cluster.",
}, []string{"user", "cluster"})
errNegativeUpdateTimeoutJitterMax = errors.New("HA tracker max update timeout jitter shouldn't be negative")
errInvalidFailoverTimeout = "HA Tracker failover timeout (%v) must be at least 1s greater than update timeout - max jitter (%v)"
)
// ProtoReplicaDescFactory makes new InstanceDescs
func ProtoReplicaDescFactory() proto.Message {
return NewReplicaDesc()
}
// NewReplicaDesc returns an empty *distributor.ReplicaDesc.
func NewReplicaDesc() *ReplicaDesc {
return &ReplicaDesc{}
}
// Track the replica we're accepting samples from
// for each HA cluster we know about.
type haTracker struct {
logger log.Logger
cfg HATrackerConfig
client kv.Client
updateTimeoutJitter time.Duration
// Replicas we are accepting samples from.
electedLock sync.RWMutex
elected map[string]ReplicaDesc
done chan struct{}
cancel context.CancelFunc
}
// HATrackerConfig contains the configuration require to
// create a HA Tracker.
type HATrackerConfig struct {
EnableHATracker bool `yaml:"enable_ha_tracker,omitempty"`
// We should only update the timestamp if the difference
// between the stored timestamp and the time we received a sample at
// is more than this duration.
UpdateTimeout time.Duration `yaml:"ha_tracker_update_timeout"`
UpdateTimeoutJitterMax time.Duration `yaml:"ha_tracker_update_timeout_jitter_max"`
// We should only failover to accepting samples from a replica
// other than the replica written in the KVStore if the difference
// between the stored timestamp and the time we received a sample is
// more than this duration
FailoverTimeout time.Duration `yaml:"ha_tracker_failover_timeout"`
KVStore kv.Config
}
// RegisterFlags adds the flags required to config this to the given FlagSet.
func (cfg *HATrackerConfig) RegisterFlags(f *flag.FlagSet) {
f.BoolVar(&cfg.EnableHATracker,
"distributor.ha-tracker.enable",
false,
"Enable the distributors HA tracker so that it can accept samples from Prometheus HA replicas gracefully (requires labels).")
f.DurationVar(&cfg.UpdateTimeout,
"distributor.ha-tracker.update-timeout",
15*time.Second,
"Update the timestamp in the KV store for a given cluster/replica only after this amount of time has passed since the current stored timestamp.")
f.DurationVar(&cfg.UpdateTimeoutJitterMax,
"distributor.ha-tracker.update-timeout-jitter-max",
5*time.Second,
"To spread the HA deduping heartbeats out over time.")
f.DurationVar(&cfg.FailoverTimeout,
"distributor.ha-tracker.failover-timeout",
30*time.Second,
"If we don't receive any samples from the accepted replica for a cluster in this amount of time we will failover to the next replica we receive a sample from. This value must be greater than the update timeout")
// We want the ability to use different Consul instances for the ring and for HA cluster tracking.
cfg.KVStore.RegisterFlagsWithPrefix("distributor.ha-tracker.", f)
}
// Validate config and returns error on failure
func (cfg *HATrackerConfig) Validate() error {
if cfg.UpdateTimeoutJitterMax < 0 {
return errNegativeUpdateTimeoutJitterMax
}
minFailureTimeout := cfg.UpdateTimeout + cfg.UpdateTimeoutJitterMax + time.Second
if cfg.FailoverTimeout < minFailureTimeout {
return fmt.Errorf(errInvalidFailoverTimeout, cfg.FailoverTimeout, minFailureTimeout)
}
return nil
}
// NewClusterTracker returns a new HA cluster tracker using either Consul
// or in-memory KV store.
func newClusterTracker(cfg HATrackerConfig) (*haTracker, error) {
codec := codec.Proto{Factory: ProtoReplicaDescFactory}
var jitter time.Duration
if cfg.UpdateTimeoutJitterMax > 0 {
jitter = time.Duration(rand.Int63n(int64(2*cfg.UpdateTimeoutJitterMax))) - cfg.UpdateTimeoutJitterMax
}
ctx, cancel := context.WithCancel(context.Background())
t := haTracker{
logger: util.Logger,
cfg: cfg,
updateTimeoutJitter: jitter,
done: make(chan struct{}),
elected: map[string]ReplicaDesc{},
cancel: cancel,
}
if cfg.EnableHATracker {
client, err := kv.NewClient(cfg.KVStore, codec)
if err != nil {
return nil, err
}
t.client = client
go t.loop(ctx)
}
return &t, nil
}
// Follows pattern used by ring for WatchKey.
func (c *haTracker) loop(ctx context.Context) {
defer close(c.done)
// The KVStore config we gave when creating c should have contained a prefix,
// which would have given us a prefixed KVStore client. So, we can pass empty string here.
c.client.WatchPrefix(ctx, "", func(key string, value interface{}) bool {
replica := value.(*ReplicaDesc)
c.electedLock.Lock()
defer c.electedLock.Unlock()
chunks := strings.SplitN(key, "/", 2)
// The prefix has already been stripped, so a valid key would look like cluster/replica,
// and a key without a / such as `ring` would be invalid.
if len(chunks) != 2 {
return true
}
if replica.Replica != c.elected[key].Replica {
electedReplicaChanges.WithLabelValues(chunks[0], chunks[1]).Inc()
}
c.elected[key] = *replica
electedReplicaTimestamp.WithLabelValues(chunks[0], chunks[1]).Set(float64(replica.ReceivedAt / 1000))
electedReplicaPropagationTime.Observe(time.Since(timestamp.Time(replica.ReceivedAt)).Seconds())
return true
})
}
// Stop ends calls the trackers cancel function, which will end the loop for WatchPrefix.
func (c *haTracker) stop() {
if c.cfg.EnableHATracker {
c.cancel()
<-c.done
}
}
// CheckReplica checks the cluster and replica against the backing KVStore and local cache in the
// tracker c to see if we should accept the incomming sample. It will return an error if the sample
// should not be accepted. Note that internally this function does checks against the stored values
// and may modify the stored data, for example to failover between replicas after a certain period of time.
// A 202 response code is returned (from checkKVstore) if we shouldn't store this sample but are
// accepting samples from another replica for the cluster, so that there isn't a bunch of error's returned
// to customers clients.
func (c *haTracker) checkReplica(ctx context.Context, userID, cluster, replica string) error {
// If HA tracking isn't enabled then accept the sample
if !c.cfg.EnableHATracker {
return nil
}
key := fmt.Sprintf("%s/%s", userID, cluster)
now := mtime.Now()
c.electedLock.RLock()
entry, ok := c.elected[key]
c.electedLock.RUnlock()
if ok && now.Sub(timestamp.Time(entry.ReceivedAt)) < c.cfg.UpdateTimeout+c.updateTimeoutJitter {
if entry.Replica != replica {
return replicasNotMatchError(replica, entry.Replica)
}
return nil
}
err := c.checkKVStore(ctx, key, replica, now)
kvCASCalls.WithLabelValues(userID, cluster).Inc()
if err != nil {
// The callback within checkKVStore will return a 202 if the sample is being deduped,
// otherwise there may have been an actual error CAS'ing that we should log.
if resp, ok := httpgrpc.HTTPResponseFromError(err); ok && resp.GetCode() != 202 {
level.Error(util.Logger).Log("msg", "rejecting sample", "error", err)
}
}
return err
}
func (c *haTracker) checkKVStore(ctx context.Context, key, replica string, now time.Time) error {
return c.client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) {
if desc, ok := in.(*ReplicaDesc); ok {
// We don't need to CAS and update the timestamp in the KV store if the timestamp we've received
// this sample at is less than updateTimeout amount of time since the timestamp in the KV store.
if desc.Replica == replica && now.Sub(timestamp.Time(desc.ReceivedAt)) < c.cfg.UpdateTimeout+c.updateTimeoutJitter {
return nil, false, nil
}
// We shouldn't failover to accepting a new replica if the timestamp we've received this sample at
// is less than failOver timeout amount of time since the timestamp in the KV store.
if desc.Replica != replica && now.Sub(timestamp.Time(desc.ReceivedAt)) < c.cfg.FailoverTimeout {
// Return a 202.
return nil, false, replicasNotMatchError(replica, desc.Replica)
}
}
// There was either invalid or no data for the key, so we now accept samples
// from this replica. Invalid could mean that the timestamp in the KV store was
// out of date based on the update and failover timeouts when compared to now.
return &ReplicaDesc{
Replica: replica, ReceivedAt: timestamp.FromTime(now),
}, true, nil
})
}
func replicasNotMatchError(replica, elected string) error |
// Modifies the labels parameter in place, removing labels that match
// the replica or cluster label and returning their values. Returns an error
// if we find one but not both of the labels.
func findHALabels(replicaLabel, clusterLabel string, labels []client.LabelAdapter) (string, string) {
var cluster, replica string
var pair client.LabelAdapter
for _, pair = range labels {
if pair.Name == replicaLabel {
replica = string(pair.Value)
}
if pair.Name == clusterLabel {
cluster = string(pair.Value)
}
}
return cluster, replica
}
| {
return httpgrpc.Errorf(http.StatusAccepted, "replicas did not mach, rejecting sample: replica=%s, elected=%s", replica, elected)
} | identifier_body |
file.go | // Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plan9
import (
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
)
type File struct {
// File identifier.
Qid
// The number of fid references.
refs int32
// Our real underlying read/write paths.
read_path string
read_exists bool
write_path string
write_exists bool
write_deleted bool
// Our underlying mode.
mode uint32
// Our access timestamp (for LRU).
// This is internal and used for LRU,
// it is not the atime or mtime on the
// underlying file -- which is directly
// from the underlying filesystem.
used time.Time
// Our index in the LRU.
index int
// The associated file fds.
read_fd int
write_fd int
// The write map --
//
// This is future work.
//
// This will track sparse holes in the write_fd,
// and ensures that it is populated as necessary.
// Each entry represents a sparse hole. If a read
// comes in and corresponds to a hole, we will send
// the read to the read_fd. If a read comes in and
// partially overlaps with a hole, then we need to
// copy data from the read_fd to the write_fd first,
// then return the write_fd. When a write comes in,
// we always send the write to the write_fd and
// update the write_map appropriately to remove any
// holes that might be there.
//
// NOTE: The write files are actually *sparse*
// copies on top of the read files. It's very
// important that tar -S is used to compress and
// uncompress bundles to have this maintained.
//
// type Hole struct {
// start uint64
// length uint64
// }
//
// write_map []Hole
// Our RWMutex (protects r=>w transition).
sync.RWMutex
}
var ModeToP9Type = map[uint32]uint16{
syscall.S_IFDIR: QTDIR,
syscall.S_IFLNK: QTSYMLINK,
}
var P9TypeToMode = map[uint8]uint32{
QTDIR: syscall.S_IFDIR,
QTSYMLINK: syscall.S_IFLNK,
}
var ModeToP9Mode = map[uint32]uint32{
syscall.S_IFDIR: DMDIR,
syscall.S_IFLNK: DMSYMLINK,
syscall.S_IFSOCK: DMSOCKET,
syscall.S_IFBLK: DMDEVICE,
syscall.S_IFCHR: DMDEVICE,
syscall.S_ISUID: DMSETUID,
syscall.S_ISGID: DMSETGID,
syscall.S_IRUSR: DMREAD << 6,
syscall.S_IWUSR: DMWRITE << 6,
syscall.S_IXUSR: DMEXEC << 6,
syscall.S_IRGRP: DMREAD << 3,
syscall.S_IWGRP: DMWRITE << 3,
syscall.S_IXGRP: DMEXEC << 3,
syscall.S_IROTH: DMREAD,
syscall.S_IWOTH: DMWRITE,
syscall.S_IXOTH: DMEXEC,
}
var P9ModeToMode = map[uint32]uint32{
DMDIR: syscall.S_IFDIR,
DMSYMLINK: syscall.S_IFLNK,
DMSOCKET: syscall.S_IFSOCK,
DMDEVICE: syscall.S_IFCHR,
DMSETUID: syscall.S_ISUID,
DMSETGID: syscall.S_ISGID,
(DMREAD << 6): syscall.S_IRUSR,
(DMWRITE << 6): syscall.S_IWUSR,
(DMEXEC << 6): syscall.S_IXUSR,
(DMREAD << 3): syscall.S_IRGRP,
(DMWRITE << 3): syscall.S_IWGRP,
(DMEXEC << 3): syscall.S_IXGRP,
DMREAD: syscall.S_IROTH,
DMWRITE: syscall.S_IWOTH,
DMEXEC: syscall.S_IXOTH,
}
func (file *File) findPaths(fs *Fs, filepath string) {
// Figure out our write path first.
write_prefix := ""
write_backing_path := "."
for prefix, backing_path := range fs.Write {
if strings.HasPrefix(filepath, prefix) &&
len(prefix) > len(write_prefix) {
write_prefix = prefix
write_backing_path = backing_path
}
}
file.write_path = path.Join(
write_backing_path,
filepath[len(write_prefix):])
var stat syscall.Stat_t
err := syscall.Lstat(file.write_path, &stat)
if err == nil {
file.write_exists = true
file.write_deleted, _ = readdelattr(file.write_path)
if !file.write_deleted {
file.mode = stat.Mode
}
} else {
file.write_exists = false
file.write_deleted = false
}
// Figure out our read path.
read_prefix := write_prefix
read_backing_path := write_backing_path
file.read_exists = false
for prefix, backing_paths := range fs.Read {
if strings.HasPrefix(filepath, prefix) &&
(!file.read_exists ||
len(prefix) > len(read_prefix)) {
for _, backing_path := range backing_paths {
// Does this file exist?
test_path := path.Join(backing_path, filepath[len(prefix):])
err := syscall.Lstat(test_path, &stat)
if err == nil {
// Check if it's deleted.
// NOTE: If we can't read the extended
// attributes on this file, we can assume
// that it is not deleted.
deleted, _ := readdelattr(test_path)
if !deleted {
read_prefix = prefix
read_backing_path = backing_path
file.read_exists = true
if !file.write_deleted && !file.write_exists {
file.mode = stat.Mode
}
}
}
}
}
}
file.read_path = path.Join(
read_backing_path,
filepath[len(read_prefix):])
}
func (fs *Fs) lookup(path string) (*File, error) {
// Normalize path.
if len(path) > 0 && path[len(path)-1] == '/' {
path = path[:len(path)-1]
}
fs.filesLock.RLock()
file, ok := fs.files[path]
if ok {
atomic.AddInt32(&file.refs, 1)
fs.filesLock.RUnlock()
return file, nil
}
fs.filesLock.RUnlock()
// Create our new file object.
// This isn't in the hotpath, so we
// aren't blocking anyone else.
newfile, err := fs.NewFile(path)
// Escalate and create if necessary.
fs.filesLock.Lock()
file, ok = fs.files[path]
if ok {
// Race caught.
newfile.DecRef(fs, path)
atomic.AddInt32(&file.refs, 1)
fs.filesLock.Unlock()
return file, nil
}
if err != nil {
fs.filesLock.Unlock()
return nil, err
}
// Add the file.
// NOTE: We add the file synchronously to the
// LRU currently because otherwise race conditions
// related to removing the file become very complex.
fs.files[path] = newfile
fs.filesLock.Unlock()
return newfile, nil
}
func (fs *Fs) swapLru(i1 int, i2 int) {
older_file := fs.lru[i1]
fs.lru[i1] = fs.lru[i2]
fs.lru[i1].index = i1
fs.lru[i2] = older_file
fs.lru[i2].index = i2
}
func (fs *Fs) removeLru(file *File, lock bool) {
// This function will be called as an
// independent goroutine in order to remove
// a specific file (for example, on close)
// or it will be called as a subroutine from
// updateLru -- which is itself an synchronous
// update function.
if lock {
fs.lruLock.Lock()
defer fs.lruLock.Unlock()
}
// Shutdown all descriptors.
file.flush()
// Remove from our LRU.
if file.index != -1 {
if file.index == len(fs.lru)-1 {
// Just truncate.
fs.lru = fs.lru[0 : len(fs.lru)-1]
} else {
// Swap and run a bubble.
// This may end up recursing.
other_file := fs.lru[len(fs.lru)-1]
fs.swapLru(file.index, len(fs.lru)-1)
fs.lru = fs.lru[0 : len(fs.lru)-1]
fs.updateLru(other_file, false)
}
// Clear our LRU index.
file.index = -1
}
}
func (fs *Fs) updateLru(file *File, lock bool) {
if lock {
fs.lruLock.Lock()
defer fs.lruLock.Unlock()
file.used = time.Now()
if file.index == -1 {
fs.lru = append(fs.lru, file)
file.index = len(fs.lru) - 1
}
}
// Not in the LRU?
// This may be a stale update goroutine.
if file.index == -1 {
return
}
// Bubble up.
index := file.index
for index != 0 {
if file.used.Before(fs.lru[index/2].used) {
fs.swapLru(index, index/2)
index = index / 2
continue
}
break
}
// Bubble down.
for index*2 < len(fs.lru) {
if file.used.After(fs.lru[index*2].used) {
fs.swapLru(index, index*2)
index = index * 2
continue
}
if index*2+1 < len(fs.lru) && file.used.After(fs.lru[index*2+1].used) {
fs.swapLru(index, index*2+1)
index = index*2 + 1
continue
}
break
}
fs.flushLru()
}
func (fs *Fs) touchLru(file *File) {
if file.index == -1 {
// This needs to be done synchronously,
// to ensure that this file is in the LRU
// because we may have a remove() event.
fs.updateLru(file, true)
} else {
// We can do this update asynchronously.
go fs.updateLru(file, true)
}
}
func (fs *Fs) flushLru() {
// Are we over our limit?
// Schedule a removal. Note that this will end
// up recursing through updateLru() again, and
// may end up calling flushLru() again. So we
// don't need to check bounds, only one call.
if len(fs.lru) > int(fs.Fdlimit) {
fs.removeLru(fs.lru[0], false)
}
}
func (file *File) unlink() error {
// Remove whatever was there.
// NOTE: We will generally require the
// write lock to be held for this routine.
if file.write_deleted {
err := cleardelattr(file.write_path)
if err != nil {
return err
}
}
var stat syscall.Stat_t
err := syscall.Lstat(file.write_path, &stat)
if err == nil {
if stat.Mode&syscall.S_IFDIR != 0 {
err = syscall.Rmdir(file.write_path)
if err != nil {
return err
}
} else {
err = syscall.Unlink(file.write_path)
if err != nil {
return err
}
}
}
file.write_exists = false
file.write_deleted = false
return nil
}
func (file *File) remove(
fs *Fs,
path string) error {
file.RWMutex.Lock()
defer file.RWMutex.Unlock()
// Unlink what's there.
err := file.unlink()
if err != nil {
return err
}
// Make sure the parent exists.
err = file.makeTree(fs, path)
if err != nil {
file.RWMutex.Unlock()
return err
}
// We need to have something we can record
// on. Even for files we record a directory,
// this later on packs may choose to make this
// into a tree and we need to be ready for that.
mode := (syscall.S_IFDIR | syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR)
err = syscall.Mkdir(file.write_path, uint32(mode))
if err != nil {
return err
}
// Mark this file as deleted.
err = setdelattr(file.write_path)
if err != nil {
return err
}
// We're deleted.
file.write_exists = true
file.write_deleted = true
return nil
}
func (file *File) exists() bool {
// Some file must exist.
return (!file.write_deleted &&
(file.read_exists || file.write_exists))
}
func (file *File) makeTree(
fs *Fs,
path string) error {
// Make all the super directories.
basedir, _ := filepath.Split(path)
if basedir != path {
parent, err := fs.lookup(basedir)
if err != nil {
return err
}
// The parent must have had
// a valid mode set at some point.
// We ignore this error, as this
// may actually return Eexist.
parent.create(fs, basedir, parent.mode)
parent.DecRef(fs, basedir)
}
return nil
}
func (file *File) create(
fs *Fs,
path string,
mode uint32) error {
file.RWMutex.Lock()
did_exist := file.exists()
if file.write_exists && !file.write_deleted {
file.RWMutex.Unlock()
return Eexist
}
// Save our mode.
file.mode = mode
// Is it a directory?
if file.mode&syscall.S_IFDIR != 0 {
if file.write_exists && file.write_deleted {
// Is it just marked deleted?
err := file.unlink()
if err != nil {
file.RWMutex.Unlock()
return err
}
}
// Make sure the parent exists.
err := file.makeTree(fs, path)
if err != nil {
file.RWMutex.Unlock()
return err
}
// Make this directory.
err = syscall.Mkdir(file.write_path, mode)
if err != nil {
file.RWMutex.Unlock()
return err
}
// Fill out type.
err = file.fillType(file.write_path)
if err != nil {
file.RWMutex.Unlock()
return err
}
// We now exist.
file.write_exists = true
file.RWMutex.Unlock()
} else {
// Make sure the parent exists.
err := file.makeTree(fs, path)
if err != nil {
file.RWMutex.Unlock()
return err
}
file.RWMutex.Unlock()
err = file.lockWrite(fs)
if err != nil {
return err
}
err = file.fillType(file.write_path)
if err != nil {
file.unlock()
return err
}
file.unlock()
}
if did_exist {
return Eexist
}
return nil
}
func (file *File) rename(
fs *Fs,
orig_path string,
new_path string) error {
fs.filesLock.Lock()
defer fs.filesLock.Unlock()
other_file, ok := fs.files[new_path]
if ok && other_file.exists() {
return Eexist
}
// Drop the original reference.
// (We've not replaced it atomically).
if other_file != nil {
defer other_file.DecRef(fs, "")
}
if file.write_exists && file.write_deleted {
// Is it just marked deleted?
err := file.unlink()
if err != nil {
return err
}
}
// Try the rename.
orig_read_path := file.read_path
orig_write_path := file.write_path
file.findPaths(fs, new_path)
err := syscall.Rename(orig_write_path, file.write_path)
if err != nil {
if err == syscall.EXDEV {
// TODO: The file cannot be renamed across file system.
// This is a simple matter of copying the file across when
// this happens. For now, we just return not implemented.
err = Enotimpl
}
file.read_path = orig_read_path
file.write_path = orig_write_path
return err
}
// We've moved this file.
// It didn't exist a moment ago, but it does now.
file.write_exists = true
file.write_deleted = false
// Update our fids.
// This is a bit messy, but since we are
// holding a writeLock on this file, this
// atomic should be reasonably atomic.
for _, fid := range fs.Pool {
if fid.file == file {
fid.Path = new_path
} else if other_file != nil && fid.file == other_file {
// Since we hold at least one reference
// to other_file, this should never trigger
// a full cleanup of other_file. It's safe
// to call DecRef here while locking the lock.
file.IncRef(fs)
fid.file = file
other_file.DecRef(fs, "")
}
}
// Perform the swaperoo.
fs.files[new_path] = file
delete(fs.files, orig_path)
// Ensure the original file is deleted.
// This is done at the very end, since there's
// really nothing we can do at this point. We
// even explicitly ignore the result. Ugh.
setdelattr(orig_write_path)
return nil
}
// lockWrite ensures file.write_fd is an open descriptor onto the
// writable layer, creating (or copying up) the backing file first
// when needed. On success it returns holding file.RWMutex in READ
// mode; callers must release it via unlock().
func (file *File) lockWrite(fs *Fs) error {
	file.RWMutex.RLock()
	if file.write_fd != -1 {
		// Fast path: descriptor already open.
		// Keep the RLock held and refresh our LRU slot.
		fs.touchLru(file)
		return nil
	}
	// Escalate.
	file.RWMutex.RUnlock()
	file.RWMutex.Lock()
	if file.write_fd != -1 {
		// Race caught.
		file.RWMutex.Unlock()
		return file.lockWrite(fs)
	}
	// NOTE: All files are opened CLOEXEC.
	mode := syscall.O_RDWR | syscall.O_CLOEXEC
	var perm uint32
	// Make sure the file exists.
	if !file.write_exists || file.write_deleted {
		// NOTE: It would be really great to handle
		// all these writes as simply overlays and keep
		// a map of all the sparse holes in the file.
		// See above with the write_map, for now I'll
		// leave this for future work.
		if file.write_deleted {
			// Remove the file (clears the whiteout marker).
			file.unlink()
			mode |= syscall.O_RDWR | syscall.O_CREAT
			perm |= syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR
			file.write_deleted = false
			file.write_exists = true
		} else if !file.read_exists {
			// This is a fresh file.
			// It doesn't exist in any read layer.
			mode |= syscall.O_CREAT | syscall.O_RDWR
			perm |= syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR
			file.write_exists = true
		} else {
			// Not deleted && read_exists.
			// We grab a memory map and write out
			// a copy of the new file. This could
			// be made much more efficient (per above).
			data, err := ioutil.ReadFile(file.read_path)
			if err != nil {
				file.RWMutex.Unlock()
				return err
			}
			perm |= syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR
			err = ioutil.WriteFile(file.write_path, data, os.FileMode(perm))
			if err != nil {
				file.RWMutex.Unlock()
				return err
			}
			file.write_exists = true
		}
	}
	new_fd, err := syscall.Open(file.write_path, mode, perm)
	if err != nil {
		file.RWMutex.Unlock()
		return err
	}
	// Open successful.
	file.write_fd = new_fd
	// Flush the current readFD; future reads share the write fd.
	if file.read_fd != -1 {
		syscall.Close(file.read_fd)
		file.read_fd = -1
	}
	// Retry (for the RLock).
	file.RWMutex.Unlock()
	return file.lockWrite(fs)
}
// lockRead ensures file.read_fd is an open descriptor. On success it
// returns holding file.RWMutex in READ mode; callers release it via
// unlock(). An already-open write descriptor is shared for reads
// (flush() takes care to close such a shared fd only once).
func (file *File) lockRead(fs *Fs) error {
	file.RWMutex.RLock()
	if file.read_fd != -1 {
		// Fast path: already open; refresh the LRU slot.
		fs.touchLru(file)
		return nil
	}
	// Escalate.
	file.RWMutex.RUnlock()
	file.RWMutex.Lock()
	if file.read_fd != -1 {
		// Race caught.
		file.RWMutex.Unlock()
		return file.lockRead(fs)
	}
	if file.write_fd != -1 {
		// Use the same Fd.
		// The close logic handles this.
		file.read_fd = file.write_fd
		file.RWMutex.Unlock()
		return file.lockRead(fs)
	}
	// Okay, no write available.
	// Let's open our read path.
	new_fd, err := syscall.Open(file.read_path, syscall.O_RDONLY, 0)
	if err != nil {
		file.RWMutex.Unlock()
		return err
	}
	// Open successful.
	file.read_fd = new_fd
	// Retry (for the RLock).
	file.RWMutex.Unlock()
	return file.lockRead(fs)
}
// flush closes any descriptors open on this file and resets both
// fd slots to -1. When the read slot aliases the write slot the
// shared descriptor is closed exactly once.
func (file *File) flush() {
	file.RWMutex.Lock()
	defer file.RWMutex.Unlock()
	rfd, wfd := file.read_fd, file.write_fd
	file.read_fd = -1
	file.write_fd = -1
	if rfd != -1 {
		syscall.Close(rfd)
	}
	if wfd != -1 && wfd != rfd {
		syscall.Close(wfd)
	}
}
// dir stats this file's topmost layer and converts the result into
// a 9P Dir entry named "name". When locked is true the read lock is
// taken around the stat; pass false only when the caller already
// holds the mutex exclusively.
func (file *File) dir(
	name string,
	locked bool) (*Dir, error) {
	if locked {
		file.RWMutex.RLock()
	}
	// Prefer the writable layer if it has an entry.
	var stat_path string
	if file.write_exists {
		stat_path = file.write_path
	} else {
		stat_path = file.read_path
	}
	var stat syscall.Stat_t
	err := syscall.Lstat(stat_path, &stat)
	if locked {
		file.RWMutex.RUnlock()
	}
	if err != nil {
		return nil, err
	}
	dir := new(Dir)
	dir.Type = 0 // Set below.
	dir.Mode = 0 // Set below.
	dir.Qid = file.Qid
	dir.Dev = uint32(stat.Dev)
	atim, _ := stat.Atim.Unix()
	dir.Atime = uint32(atim)
	mtim, _ := stat.Mtim.Unix()
	dir.Mtime = uint32(mtim)
	// Directories report zero length per 9P convention.
	if stat.Mode&syscall.S_IFDIR != 0 {
		dir.Length = 0
	} else {
		dir.Length = uint64(stat.Size)
	}
	dir.Name = name
	// Ownership is fixed to root; numeric ids come from the stat.
	dir.Uid = "root"
	dir.Gid = "root"
	dir.Muid = "root"
	dir.Ext = ""
	dir.Uidnum = stat.Uid
	dir.Gidnum = stat.Gid
	dir.Muidnum = stat.Uid
	// Translate Unix type/permission bits into 9P equivalents.
	for mask, type_bit := range ModeToP9Type {
		if stat.Mode&mask == mask {
			dir.Type = dir.Type | type_bit
		}
	}
	for mask, mode_bit := range ModeToP9Mode {
		if stat.Mode&mask == mask {
			dir.Mode = dir.Mode | mode_bit
		}
	}
	// Read our symlink if available.
	if dir.Type&QTSYMLINK != 0 || dir.Mode&DMSYMLINK != 0 {
		dir.Ext, err = os.Readlink(stat_path)
		if err != nil {
			return nil, err
		}
	}
	// Plan9 doesn't handle dir+symlink.
	// We return just a raw symlink.
	if dir.Type&QTDIR != 0 && dir.Type&QTSYMLINK != 0 {
		dir.Type &= ^uint16(QTDIR)
	}
	if dir.Mode&DMDIR != 0 && dir.Mode&DMSYMLINK != 0 {
		dir.Mode &= ^uint32(DMDIR)
	}
	return dir, nil
}
// children enumerates dirpath across every write and read layer,
// dedupes entry names, and returns a Dir stat for each child that
// still exists (whiteout-deleted children are skipped).
func (file *File) children(fs *Fs, dirpath string) ([]*Dir, error) {
	child_set := make(map[string]bool)
	// gather_dir records the basename of every entry in one backing
	// directory. Glob errors are deliberately ignored: that layer
	// simply contributes no names.
	gather_dir := func(realdir string) {
		files, err := filepath.Glob(path.Join(realdir, "*"))
		if err != nil {
			return
		}
		for _, file := range files {
			// This file exists somewhere.
			child_set[path.Base(file)] = true
		}
	}
	// We need to collect all possible matching paths.
	// This has the potential to be a very long list.
	for prefix, backing_path := range fs.Write {
		if strings.HasPrefix(dirpath, prefix) {
			gather_dir(path.Join(backing_path, dirpath[len(prefix):]))
		}
	}
	for prefix, backing_paths := range fs.Read {
		if strings.HasPrefix(dirpath, prefix) {
			for _, backing_path := range backing_paths {
				gather_dir(path.Join(backing_path, dirpath[len(prefix):]))
			}
		}
	}
	// We stat each of these files.
	results := make([]*Dir, 0, len(child_set))
	for name, _ := range child_set {
		// Find this child (takes a reference we must drop).
		child_path := path.Join(dirpath, name)
		child, err := fs.lookup(child_path)
		if err != nil {
			if child != nil {
				child.DecRef(fs, child_path)
			}
			return nil, err
		}
		// Deleted?
		if !child.exists() {
			child.DecRef(fs, child_path)
			continue
		}
		// Get the stat.
		child_dir, err := child.dir(name, true)
		child.DecRef(fs, child_path)
		if err != nil {
			return nil, err
		}
		results = append(results, child_dir)
	}
	// We're good.
	return results, nil
}
func (file *File) | () {
file.RWMutex.RUnlock()
}
// IncRef bumps the fid reference count. The files lock is held in
// read mode so a concurrent DecRef cleanup cannot interleave with
// the increment.
func (file *File) IncRef(fs *Fs) {
	fs.filesLock.RLock()
	defer fs.filesLock.RUnlock()
	atomic.AddInt32(&file.refs, 1)
}
// DecRef drops one fid reference. When the count reaches zero the
// file is removed from fs.files (under filesLock) and scheduled for
// asynchronous LRU removal, which closes its descriptors.
func (file *File) DecRef(fs *Fs, path string) {
	new_refs := atomic.AddInt32(&file.refs, -1)
	if new_refs == 0 {
		fs.filesLock.Lock()
		// NOTE(review): this recheck reads file.refs without an
		// atomic load; it relies on the exclusive filesLock (which
		// IncRef takes in read mode) to order accesses — confirm.
		if file.refs != 0 {
			// Race condition caught.
			fs.filesLock.Unlock()
			return
		}
		// Remove this file from the lookup map.
		if path != "" {
			delete(fs.files, path)
		}
		fs.filesLock.Unlock()
		// Ensure that file is removed from the LRU.
		// This will be done asynchronously, and as a
		// result all file descriptors will be closed.
		go fs.removeLru(file, true)
	}
}
// fillType derives the 9P Qid type from a fresh stat of the file.
// The stat is taken unlocked; callers guarantee exclusivity.
func (file *File) fillType(path string) error {
	dir, err := file.dir(path, false)
	if err == nil {
		// Record the file type bits on our Qid.
		file.Qid.Type = uint8(dir.Type)
	}
	return err
}
// NewFile builds a File for path with one reference, resolved
// layer paths, closed descriptors, and a freshly allocated Qid.
func (fs *Fs) NewFile(path string) (*File, error) {
	file := &File{
		refs:     1,
		index:    -1, // not in the LRU yet
		read_fd:  -1,
		write_fd: -1,
	}
	// Figure out the paths.
	file.findPaths(fs, path)
	file.Qid.Version = 0
	file.Qid.Path = atomic.AddUint64(&fs.Fileid, 1)
	if !file.exists() {
		return file, nil
	}
	// Existing file: also derive its Qid type.
	return file, file.fillType(path)
}
| unlock | identifier_name |
file.go | // Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plan9
import (
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
)
// File is one node of the layered (union) filesystem: a read-only
// lower layer plus a copy-up writable upper layer, with lazily
// opened descriptors managed by an LRU.
type File struct {
	// File identifier (embedded 9P Qid).
	Qid
	// The number of fid references.
	// Updated with atomic ops; see IncRef/DecRef.
	refs int32
	// Our real underlying read/write paths.
	read_path string
	read_exists bool
	write_path string
	write_exists bool
	// write_deleted marks a whiteout: the write layer records
	// the file as deleted even if a read layer still has it.
	write_deleted bool
	// Our underlying mode.
	mode uint32
	// Our access timestamp (for LRU).
	// This is internal and used for LRU,
	// it is not the atime or mtime on the
	// underlying file -- which is directly
	// from the underlying filesystem.
	used time.Time
	// Our index in the LRU (-1 when not resident).
	index int
	// The associated file fds (-1 when closed; read_fd may
	// alias write_fd, see lockRead/flush).
	read_fd int
	write_fd int
	// The write map --
	//
	// This is future work.
	//
	// This will track sparse holes in the write_fd,
	// and ensures that it is populated as necessary.
	// Each entry represents a sparse hole. If a read
	// comes in and corresponds to a hole, we will send
	// the read to the read_fd. If a read comes in and
	// partially overlaps with a hole, then we need to
	// copy data from the read_fd to the write_fd first,
	// then return the write_fd. When a write comes in,
	// we always send the write to the write_fd and
	// update the write_map appropriately to remove any
	// holes that might be there.
	//
	// NOTE: The write files are actually *sparse*
	// copies on top of the read files. It's very
	// important that tar -S is used to compress and
	// uncompress bundles to have this maintained.
	//
	// type Hole struct {
	//     start  uint64
	//     length uint64
	// }
	//
	// write_map []Hole
	// Our RWMutex (protects r=>w transition).
	sync.RWMutex
}
// ModeToP9Type maps Unix S_IF* type bits to 9P Qid type bits.
var ModeToP9Type = map[uint32]uint16{
	syscall.S_IFDIR: QTDIR,
	syscall.S_IFLNK: QTSYMLINK,
}

// P9TypeToMode is the inverse of ModeToP9Type.
var P9TypeToMode = map[uint8]uint32{
	QTDIR:     syscall.S_IFDIR,
	QTSYMLINK: syscall.S_IFLNK,
}

// ModeToP9Mode maps Unix mode and permission bits to 9P Dir.Mode bits.
var ModeToP9Mode = map[uint32]uint32{
	syscall.S_IFDIR:  DMDIR,
	syscall.S_IFLNK:  DMSYMLINK,
	syscall.S_IFSOCK: DMSOCKET,
	syscall.S_IFBLK:  DMDEVICE,
	syscall.S_IFCHR:  DMDEVICE,
	syscall.S_ISUID:  DMSETUID,
	syscall.S_ISGID:  DMSETGID,
	syscall.S_IRUSR:  DMREAD << 6,
	syscall.S_IWUSR:  DMWRITE << 6,
	syscall.S_IXUSR:  DMEXEC << 6,
	syscall.S_IRGRP:  DMREAD << 3,
	syscall.S_IWGRP:  DMWRITE << 3,
	syscall.S_IXGRP:  DMEXEC << 3,
	syscall.S_IROTH:  DMREAD,
	syscall.S_IWOTH:  DMWRITE,
	syscall.S_IXOTH:  DMEXEC,
}

// P9ModeToMode maps 9P Dir.Mode bits back to Unix mode bits.
// NOTE(review): both S_IFBLK and S_IFCHR map to DMDEVICE above, but
// DMDEVICE maps back only to S_IFCHR, so block devices do not
// round-trip — confirm this is intended.
var P9ModeToMode = map[uint32]uint32{
	DMDIR:          syscall.S_IFDIR,
	DMSYMLINK:      syscall.S_IFLNK,
	DMSOCKET:       syscall.S_IFSOCK,
	DMDEVICE:       syscall.S_IFCHR,
	DMSETUID:       syscall.S_ISUID,
	DMSETGID:       syscall.S_ISGID,
	(DMREAD << 6):  syscall.S_IRUSR,
	(DMWRITE << 6): syscall.S_IWUSR,
	(DMEXEC << 6):  syscall.S_IXUSR,
	(DMREAD << 3):  syscall.S_IRGRP,
	(DMWRITE << 3): syscall.S_IWGRP,
	(DMEXEC << 3):  syscall.S_IXGRP,
	DMREAD:         syscall.S_IROTH,
	DMWRITE:        syscall.S_IWOTH,
	DMEXEC:         syscall.S_IXOTH,
}
// findPaths resolves filepath against the layered filesystem,
// filling in read_path/write_path, the *_exists flags, the
// write_deleted whiteout flag, and mode. The write layer with the
// longest matching prefix wins; among read layers the first
// existing, non-deleted candidate wins and only a longer prefix may
// override it afterwards.
func (file *File) findPaths(fs *Fs, filepath string) {
	// Figure out our write path first.
	write_prefix := ""
	write_backing_path := "."
	for prefix, backing_path := range fs.Write {
		if strings.HasPrefix(filepath, prefix) &&
			len(prefix) > len(write_prefix) {
			write_prefix = prefix
			write_backing_path = backing_path
		}
	}
	file.write_path = path.Join(
		write_backing_path,
		filepath[len(write_prefix):])
	var stat syscall.Stat_t
	err := syscall.Lstat(file.write_path, &stat)
	if err == nil {
		file.write_exists = true
		file.write_deleted, _ = readdelattr(file.write_path)
		if !file.write_deleted {
			file.mode = stat.Mode
		}
	} else {
		file.write_exists = false
		file.write_deleted = false
	}
	// Figure out our read path.
	// Defaults to the write layer's location when no read layer hits.
	read_prefix := write_prefix
	read_backing_path := write_backing_path
	file.read_exists = false
	for prefix, backing_paths := range fs.Read {
		if strings.HasPrefix(filepath, prefix) &&
			(!file.read_exists ||
				len(prefix) > len(read_prefix)) {
			for _, backing_path := range backing_paths {
				// Does this file exist?
				test_path := path.Join(backing_path, filepath[len(prefix):])
				err := syscall.Lstat(test_path, &stat)
				if err == nil {
					// Check if it's deleted.
					// NOTE: If we can't read the extended
					// attributes on this file, we can assume
					// that it is not deleted.
					deleted, _ := readdelattr(test_path)
					if !deleted {
						read_prefix = prefix
						read_backing_path = backing_path
						file.read_exists = true
						// Only take the read layer's mode when the
						// write layer contributed nothing.
						if !file.write_deleted && !file.write_exists {
							file.mode = stat.Mode
						}
					}
				}
			}
		}
	}
	file.read_path = path.Join(
		read_backing_path,
		filepath[len(read_prefix):])
}
// lookup returns the File for path (trailing slash stripped),
// bumping its reference count. It uses the double-checked pattern:
// a fast map hit under RLock, then file construction outside the
// lock, then a re-check under the exclusive lock.
func (fs *Fs) lookup(path string) (*File, error) {
	// Normalize path.
	if len(path) > 0 && path[len(path)-1] == '/' {
		path = path[:len(path)-1]
	}
	fs.filesLock.RLock()
	file, ok := fs.files[path]
	if ok {
		atomic.AddInt32(&file.refs, 1)
		fs.filesLock.RUnlock()
		return file, nil
	}
	fs.filesLock.RUnlock()
	// Create our new file object.
	// This isn't in the hotpath, so we
	// aren't blocking anyone else.
	newfile, err := fs.NewFile(path)
	// Escalate and create if necessary.
	fs.filesLock.Lock()
	file, ok = fs.files[path]
	if ok {
		// Race caught: another goroutine inserted it first;
		// drop our speculative object and use theirs.
		newfile.DecRef(fs, path)
		atomic.AddInt32(&file.refs, 1)
		fs.filesLock.Unlock()
		return file, nil
	}
	if err != nil {
		fs.filesLock.Unlock()
		return nil, err
	}
	// Add the file.
	// NOTE: We add the file synchronously to the
	// LRU currently because otherwise race conditions
	// related to removing the file become very complex.
	fs.files[path] = newfile
	fs.filesLock.Unlock()
	return newfile, nil
}
// swapLru exchanges two LRU slots and keeps each file's cached
// index in sync with its new position.
func (fs *Fs) swapLru(i1 int, i2 int) {
	fs.lru[i1], fs.lru[i2] = fs.lru[i2], fs.lru[i1]
	fs.lru[i1].index = i1
	fs.lru[i2].index = i2
}
// removeLru closes file's descriptors and drops it from the LRU
// heap. lock selects whether lruLock is taken here (true from the
// async DecRef path) or is already held by the caller (false, from
// updateLru/flushLru).
func (fs *Fs) removeLru(file *File, lock bool) {
	// This function will be called as an
	// independent goroutine in order to remove
	// a specific file (for example, on close)
	// or it will be called as a subroutine from
	// updateLru -- which is itself an synchronous
	// update function.
	if lock {
		fs.lruLock.Lock()
		defer fs.lruLock.Unlock()
	}
	// Shutdown all descriptors.
	file.flush()
	// Remove from our LRU.
	if file.index != -1 {
		if file.index == len(fs.lru)-1 {
			// Just truncate.
			fs.lru = fs.lru[0 : len(fs.lru)-1]
		} else {
			// Swap and run a bubble.
			// This may end up recursing.
			other_file := fs.lru[len(fs.lru)-1]
			fs.swapLru(file.index, len(fs.lru)-1)
			fs.lru = fs.lru[0 : len(fs.lru)-1]
			fs.updateLru(other_file, false)
		}
		// Clear our LRU index.
		file.index = -1
	}
}
// updateLru re-positions file within the LRU min-heap. With lock
// set it also refreshes file.used and inserts the file if absent;
// the lock=false path only re-heapifies an existing entry (used by
// removeLru while already holding lruLock).
// NOTE(review): the parent/child arithmetic (index/2, index*2)
// matches a 1-based heap but fs.lru is 0-indexed, so ordering is
// only approximate for some positions — confirm this is acceptable
// for an eviction heuristic.
func (fs *Fs) updateLru(file *File, lock bool) {
	if lock {
		fs.lruLock.Lock()
		defer fs.lruLock.Unlock()
		file.used = time.Now()
		if file.index == -1 {
			fs.lru = append(fs.lru, file)
			file.index = len(fs.lru) - 1
		}
	}
	// Not in the LRU?
	// This may be a stale update goroutine.
	if file.index == -1 {
		return
	}
	// Bubble up.
	index := file.index
	for index != 0 {
		if file.used.Before(fs.lru[index/2].used) {
			fs.swapLru(index, index/2)
			index = index / 2
			continue
		}
		break
	}
	// Bubble down.
	for index*2 < len(fs.lru) {
		if file.used.After(fs.lru[index*2].used) {
			fs.swapLru(index, index*2)
			index = index * 2
			continue
		}
		if index*2+1 < len(fs.lru) && file.used.After(fs.lru[index*2+1].used) {
			fs.swapLru(index, index*2+1)
			index = index*2 + 1
			continue
		}
		break
	}
	// Evict if we are now over the descriptor limit.
	fs.flushLru()
}
// touchLru refreshes file's LRU position. Resident files are
// refreshed asynchronously; a file not yet in the LRU is inserted
// synchronously so a subsequent remove() event can find it.
func (fs *Fs) touchLru(file *File) {
	if file.index != -1 {
		// We can do this update asynchronously.
		go fs.updateLru(file, true)
		return
	}
	fs.updateLru(file, true)
}
// flushLru evicts the coldest LRU entry when the fd limit is
// exceeded. The eviction re-enters updateLru()/flushLru(), so a
// single conditional call is sufficient here — no loop needed.
func (fs *Fs) flushLru() {
	if len(fs.lru) <= int(fs.Fdlimit) {
		return
	}
	fs.removeLru(fs.lru[0], false)
}
func (file *File) unlink() error {
// Remove whatever was there.
// NOTE: We will generally require the
// write lock to be held for this routine.
if file.write_deleted |
var stat syscall.Stat_t
err := syscall.Lstat(file.write_path, &stat)
if err == nil {
if stat.Mode&syscall.S_IFDIR != 0 {
err = syscall.Rmdir(file.write_path)
if err != nil {
return err
}
} else {
err = syscall.Unlink(file.write_path)
if err != nil {
return err
}
}
}
file.write_exists = false
file.write_deleted = false
return nil
}
// remove deletes this file from the union view: the write-layer
// entry is unlinked and replaced by a directory carrying the
// deleted ("whiteout") attribute, masking any read-layer copy.
// A directory is recorded even for plain files because later packs
// may turn the node into a tree.
//
// BUG FIX: the makeTree error path previously called
// file.RWMutex.Unlock() explicitly in addition to the deferred
// Unlock, double-unlocking the mutex and panicking at runtime.
// The deferred Unlock alone now covers every return path.
func (file *File) remove(
	fs *Fs,
	path string) error {
	file.RWMutex.Lock()
	defer file.RWMutex.Unlock()
	// Unlink what's there.
	err := file.unlink()
	if err != nil {
		return err
	}
	// Make sure the parent exists.
	err = file.makeTree(fs, path)
	if err != nil {
		return err
	}
	// We need to have something we can record
	// on. Even for files we record a directory,
	// this later on packs may choose to make this
	// into a tree and we need to be ready for that.
	mode := (syscall.S_IFDIR | syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR)
	err = syscall.Mkdir(file.write_path, uint32(mode))
	if err != nil {
		return err
	}
	// Mark this file as deleted.
	err = setdelattr(file.write_path)
	if err != nil {
		return err
	}
	// We're deleted.
	file.write_exists = true
	file.write_deleted = true
	return nil
}
// exists reports whether this file is visible in the union view:
// not whiteout-deleted, and present in at least one layer.
func (file *File) exists() bool {
	if file.write_deleted {
		return false
	}
	return file.read_exists || file.write_exists
}
// makeTree ensures the parent directory chain of path exists in the
// write layer, recursing upward through lookup()/create(). The
// recursion terminates at the root, where Split returns the path
// itself as the directory component.
func (file *File) makeTree(
	fs *Fs,
	path string) error {
	// Make all the super directories.
	basedir, _ := filepath.Split(path)
	if basedir != path {
		parent, err := fs.lookup(basedir)
		if err != nil {
			return err
		}
		// The parent must have had
		// a valid mode set at some point.
		// We ignore this error, as this
		// may actually return Eexist.
		parent.create(fs, basedir, parent.mode)
		parent.DecRef(fs, basedir)
	}
	return nil
}
// create materializes this node in the write layer with the given
// Unix mode bits, building parent directories first. Returns Eexist
// when the node already exists in the write layer, or when it
// pre-existed in the union view (did_exist) — in the latter case
// the backing entry is still refreshed before Eexist is returned.
// Lock handling is manual (no defer) because the non-directory
// branch hands the mutex over to lockWrite()/unlock().
func (file *File) create(
	fs *Fs,
	path string,
	mode uint32) error {
	file.RWMutex.Lock()
	did_exist := file.exists()
	if file.write_exists && !file.write_deleted {
		file.RWMutex.Unlock()
		return Eexist
	}
	// Save our mode.
	file.mode = mode
	// Is it a directory?
	if file.mode&syscall.S_IFDIR != 0 {
		if file.write_exists && file.write_deleted {
			// Is it just marked deleted?
			// Clear the whiteout before recreating.
			err := file.unlink()
			if err != nil {
				file.RWMutex.Unlock()
				return err
			}
		}
		// Make sure the parent exists.
		err := file.makeTree(fs, path)
		if err != nil {
			file.RWMutex.Unlock()
			return err
		}
		// Make this directory.
		err = syscall.Mkdir(file.write_path, mode)
		if err != nil {
			file.RWMutex.Unlock()
			return err
		}
		// Fill out type.
		err = file.fillType(file.write_path)
		if err != nil {
			file.RWMutex.Unlock()
			return err
		}
		// We now exist.
		file.write_exists = true
		file.RWMutex.Unlock()
	} else {
		// Make sure the parent exists.
		err := file.makeTree(fs, path)
		if err != nil {
			file.RWMutex.Unlock()
			return err
		}
		// lockWrite creates the backing file and returns
		// holding the read lock; unlock() releases it.
		file.RWMutex.Unlock()
		err = file.lockWrite(fs)
		if err != nil {
			return err
		}
		err = file.fillType(file.write_path)
		if err != nil {
			file.unlock()
			return err
		}
		file.unlock()
	}
	if did_exist {
		return Eexist
	}
	return nil
}
// rename moves this file to new_path within the write layer,
// rewiring fs.files and every fid that referenced either the old
// file or a stale entry at new_path. Cross-filesystem renames
// (EXDEV) are not implemented. The old location is whiteout-marked
// best-effort at the end.
func (file *File) rename(
	fs *Fs,
	orig_path string,
	new_path string) error {
	fs.filesLock.Lock()
	defer fs.filesLock.Unlock()
	other_file, ok := fs.files[new_path]
	if ok && other_file.exists() {
		return Eexist
	}
	// Drop the original reference.
	// (We've not replaced it atomically).
	if other_file != nil {
		defer other_file.DecRef(fs, "")
	}
	if file.write_exists && file.write_deleted {
		// Is it just marked deleted?
		// Clear the whiteout so the rename target is clean.
		err := file.unlink()
		if err != nil {
			return err
		}
	}
	// Try the rename.
	orig_read_path := file.read_path
	orig_write_path := file.write_path
	file.findPaths(fs, new_path)
	err := syscall.Rename(orig_write_path, file.write_path)
	if err != nil {
		if err == syscall.EXDEV {
			// TODO: The file cannot be renamed across file system.
			// This is a simple matter of copying the file across when
			// this happens. For now, we just return not implemented.
			err = Enotimpl
		}
		// Roll back the speculative path update.
		file.read_path = orig_read_path
		file.write_path = orig_write_path
		return err
	}
	// We've moved this file.
	// It didn't exist a moment ago, but it does now.
	file.write_exists = true
	file.write_deleted = false
	// Update our fids.
	// This is a bit messy, but since we are
	// holding a writeLock on this file, this
	// atomic should be reasonably atomic.
	for _, fid := range fs.Pool {
		if fid.file == file {
			fid.Path = new_path
		} else if other_file != nil && fid.file == other_file {
			// Since we hold at least one reference
			// to other_file, this should never trigger
			// a full cleanup of other_file. It's safe
			// to call DecRef here while locking the lock.
			file.IncRef(fs)
			fid.file = file
			other_file.DecRef(fs, "")
		}
	}
	// Perform the swaperoo.
	fs.files[new_path] = file
	delete(fs.files, orig_path)
	// Ensure the original file is deleted.
	// This is done at the very end, since there's
	// really nothing we can do at this point. We
	// even explicitly ignore the result. Ugh.
	setdelattr(orig_write_path)
	return nil
}
// lockWrite ensures file.write_fd is an open descriptor onto the
// writable layer, creating (or copying up) the backing file first
// when needed. On success it returns holding file.RWMutex in READ
// mode; callers must release it via unlock().
func (file *File) lockWrite(fs *Fs) error {
	file.RWMutex.RLock()
	if file.write_fd != -1 {
		// Fast path: descriptor already open.
		// Keep the RLock held and refresh our LRU slot.
		fs.touchLru(file)
		return nil
	}
	// Escalate.
	file.RWMutex.RUnlock()
	file.RWMutex.Lock()
	if file.write_fd != -1 {
		// Race caught.
		file.RWMutex.Unlock()
		return file.lockWrite(fs)
	}
	// NOTE: All files are opened CLOEXEC.
	mode := syscall.O_RDWR | syscall.O_CLOEXEC
	var perm uint32
	// Make sure the file exists.
	if !file.write_exists || file.write_deleted {
		// NOTE: It would be really great to handle
		// all these writes as simply overlays and keep
		// a map of all the sparse holes in the file.
		// See above with the write_map, for now I'll
		// leave this for future work.
		if file.write_deleted {
			// Remove the file (clears the whiteout marker).
			file.unlink()
			mode |= syscall.O_RDWR | syscall.O_CREAT
			perm |= syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR
			file.write_deleted = false
			file.write_exists = true
		} else if !file.read_exists {
			// This is a fresh file.
			// It doesn't exist in any read layer.
			mode |= syscall.O_CREAT | syscall.O_RDWR
			perm |= syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR
			file.write_exists = true
		} else {
			// Not deleted && read_exists.
			// We grab a memory map and write out
			// a copy of the new file. This could
			// be made much more efficient (per above).
			data, err := ioutil.ReadFile(file.read_path)
			if err != nil {
				file.RWMutex.Unlock()
				return err
			}
			perm |= syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR
			err = ioutil.WriteFile(file.write_path, data, os.FileMode(perm))
			if err != nil {
				file.RWMutex.Unlock()
				return err
			}
			file.write_exists = true
		}
	}
	new_fd, err := syscall.Open(file.write_path, mode, perm)
	if err != nil {
		file.RWMutex.Unlock()
		return err
	}
	// Open successful.
	file.write_fd = new_fd
	// Flush the current readFD; future reads share the write fd.
	if file.read_fd != -1 {
		syscall.Close(file.read_fd)
		file.read_fd = -1
	}
	// Retry (for the RLock).
	file.RWMutex.Unlock()
	return file.lockWrite(fs)
}
// lockRead ensures file.read_fd is an open descriptor. On success it
// returns holding file.RWMutex in READ mode; callers release it via
// unlock(). An already-open write descriptor is shared for reads
// (flush() takes care to close such a shared fd only once).
func (file *File) lockRead(fs *Fs) error {
	file.RWMutex.RLock()
	if file.read_fd != -1 {
		// Fast path: already open; refresh the LRU slot.
		fs.touchLru(file)
		return nil
	}
	// Escalate.
	file.RWMutex.RUnlock()
	file.RWMutex.Lock()
	if file.read_fd != -1 {
		// Race caught.
		file.RWMutex.Unlock()
		return file.lockRead(fs)
	}
	if file.write_fd != -1 {
		// Use the same Fd.
		// The close logic handles this.
		file.read_fd = file.write_fd
		file.RWMutex.Unlock()
		return file.lockRead(fs)
	}
	// Okay, no write available.
	// Let's open our read path.
	new_fd, err := syscall.Open(file.read_path, syscall.O_RDONLY, 0)
	if err != nil {
		file.RWMutex.Unlock()
		return err
	}
	// Open successful.
	file.read_fd = new_fd
	// Retry (for the RLock).
	file.RWMutex.Unlock()
	return file.lockRead(fs)
}
// flush closes any descriptors open on this file and resets both
// fd slots to -1. When the read slot aliases the write slot the
// shared descriptor is closed exactly once.
func (file *File) flush() {
	file.RWMutex.Lock()
	defer file.RWMutex.Unlock()
	rfd, wfd := file.read_fd, file.write_fd
	file.read_fd = -1
	file.write_fd = -1
	if rfd != -1 {
		syscall.Close(rfd)
	}
	if wfd != -1 && wfd != rfd {
		syscall.Close(wfd)
	}
}
// dir stats this file's topmost layer and converts the result into
// a 9P Dir entry named "name". When locked is true the read lock is
// taken around the stat; pass false only when the caller already
// holds the mutex exclusively.
func (file *File) dir(
	name string,
	locked bool) (*Dir, error) {
	if locked {
		file.RWMutex.RLock()
	}
	// Prefer the writable layer if it has an entry.
	var stat_path string
	if file.write_exists {
		stat_path = file.write_path
	} else {
		stat_path = file.read_path
	}
	var stat syscall.Stat_t
	err := syscall.Lstat(stat_path, &stat)
	if locked {
		file.RWMutex.RUnlock()
	}
	if err != nil {
		return nil, err
	}
	dir := new(Dir)
	dir.Type = 0 // Set below.
	dir.Mode = 0 // Set below.
	dir.Qid = file.Qid
	dir.Dev = uint32(stat.Dev)
	atim, _ := stat.Atim.Unix()
	dir.Atime = uint32(atim)
	mtim, _ := stat.Mtim.Unix()
	dir.Mtime = uint32(mtim)
	// Directories report zero length per 9P convention.
	if stat.Mode&syscall.S_IFDIR != 0 {
		dir.Length = 0
	} else {
		dir.Length = uint64(stat.Size)
	}
	dir.Name = name
	// Ownership is fixed to root; numeric ids come from the stat.
	dir.Uid = "root"
	dir.Gid = "root"
	dir.Muid = "root"
	dir.Ext = ""
	dir.Uidnum = stat.Uid
	dir.Gidnum = stat.Gid
	dir.Muidnum = stat.Uid
	// Translate Unix type/permission bits into 9P equivalents.
	for mask, type_bit := range ModeToP9Type {
		if stat.Mode&mask == mask {
			dir.Type = dir.Type | type_bit
		}
	}
	for mask, mode_bit := range ModeToP9Mode {
		if stat.Mode&mask == mask {
			dir.Mode = dir.Mode | mode_bit
		}
	}
	// Read our symlink if available.
	if dir.Type&QTSYMLINK != 0 || dir.Mode&DMSYMLINK != 0 {
		dir.Ext, err = os.Readlink(stat_path)
		if err != nil {
			return nil, err
		}
	}
	// Plan9 doesn't handle dir+symlink.
	// We return just a raw symlink.
	if dir.Type&QTDIR != 0 && dir.Type&QTSYMLINK != 0 {
		dir.Type &= ^uint16(QTDIR)
	}
	if dir.Mode&DMDIR != 0 && dir.Mode&DMSYMLINK != 0 {
		dir.Mode &= ^uint32(DMDIR)
	}
	return dir, nil
}
// children enumerates dirpath across every write and read layer,
// dedupes entry names, and returns a Dir stat for each child that
// still exists (whiteout-deleted children are skipped).
func (file *File) children(fs *Fs, dirpath string) ([]*Dir, error) {
	child_set := make(map[string]bool)
	// gather_dir records the basename of every entry in one backing
	// directory. Glob errors are deliberately ignored: that layer
	// simply contributes no names.
	gather_dir := func(realdir string) {
		files, err := filepath.Glob(path.Join(realdir, "*"))
		if err != nil {
			return
		}
		for _, file := range files {
			// This file exists somewhere.
			child_set[path.Base(file)] = true
		}
	}
	// We need to collect all possible matching paths.
	// This has the potential to be a very long list.
	for prefix, backing_path := range fs.Write {
		if strings.HasPrefix(dirpath, prefix) {
			gather_dir(path.Join(backing_path, dirpath[len(prefix):]))
		}
	}
	for prefix, backing_paths := range fs.Read {
		if strings.HasPrefix(dirpath, prefix) {
			for _, backing_path := range backing_paths {
				gather_dir(path.Join(backing_path, dirpath[len(prefix):]))
			}
		}
	}
	// We stat each of these files.
	results := make([]*Dir, 0, len(child_set))
	for name, _ := range child_set {
		// Find this child (takes a reference we must drop).
		child_path := path.Join(dirpath, name)
		child, err := fs.lookup(child_path)
		if err != nil {
			if child != nil {
				child.DecRef(fs, child_path)
			}
			return nil, err
		}
		// Deleted?
		if !child.exists() {
			child.DecRef(fs, child_path)
			continue
		}
		// Get the stat.
		child_dir, err := child.dir(name, true)
		child.DecRef(fs, child_path)
		if err != nil {
			return nil, err
		}
		results = append(results, child_dir)
	}
	// We're good.
	return results, nil
}
// unlock drops the reader lock held after a successful
// lockRead()/lockWrite() call.
func (file *File) unlock() { file.RWMutex.RUnlock() }
// IncRef bumps the fid reference count. The files lock is held in
// read mode so a concurrent DecRef cleanup cannot interleave with
// the increment.
func (file *File) IncRef(fs *Fs) {
	fs.filesLock.RLock()
	defer fs.filesLock.RUnlock()
	atomic.AddInt32(&file.refs, 1)
}
// DecRef drops one fid reference. When the count reaches zero the
// file is removed from fs.files (under filesLock) and scheduled for
// asynchronous LRU removal, which closes its descriptors.
func (file *File) DecRef(fs *Fs, path string) {
	new_refs := atomic.AddInt32(&file.refs, -1)
	if new_refs == 0 {
		fs.filesLock.Lock()
		// NOTE(review): this recheck reads file.refs without an
		// atomic load; it relies on the exclusive filesLock (which
		// IncRef takes in read mode) to order accesses — confirm.
		if file.refs != 0 {
			// Race condition caught.
			fs.filesLock.Unlock()
			return
		}
		// Remove this file from the lookup map.
		if path != "" {
			delete(fs.files, path)
		}
		fs.filesLock.Unlock()
		// Ensure that file is removed from the LRU.
		// This will be done asynchronously, and as a
		// result all file descriptors will be closed.
		go fs.removeLru(file, true)
	}
}
// fillType derives the 9P Qid type from a fresh stat of the file.
// The stat is taken unlocked; callers guarantee exclusivity.
func (file *File) fillType(path string) error {
	dir, err := file.dir(path, false)
	if err == nil {
		// Record the file type bits on our Qid.
		file.Qid.Type = uint8(dir.Type)
	}
	return err
}
// NewFile builds a File for path with one reference, resolved
// layer paths, closed descriptors, and a freshly allocated Qid.
func (fs *Fs) NewFile(path string) (*File, error) {
	file := &File{
		refs:     1,
		index:    -1, // not in the LRU yet
		read_fd:  -1,
		write_fd: -1,
	}
	// Figure out the paths.
	file.findPaths(fs, path)
	file.Qid.Version = 0
	file.Qid.Path = atomic.AddUint64(&fs.Fileid, 1)
	if !file.exists() {
		return file, nil
	}
	// Existing file: also derive its Qid type.
	return file, file.fillType(path)
}
| {
err := cleardelattr(file.write_path)
if err != nil {
return err
}
} | conditional_block |
file.go | // Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plan9
import (
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
)
// File is one node of the layered (union) filesystem: a read-only
// lower layer plus a copy-up writable upper layer, with lazily
// opened descriptors managed by an LRU.
type File struct {
	// File identifier (embedded 9P Qid).
	Qid
	// The number of fid references.
	// Updated with atomic ops; see IncRef/DecRef.
	refs int32
	// Our real underlying read/write paths.
	read_path string
	read_exists bool
	write_path string
	write_exists bool
	// write_deleted marks a whiteout: the write layer records
	// the file as deleted even if a read layer still has it.
	write_deleted bool
	// Our underlying mode.
	mode uint32
	// Our access timestamp (for LRU).
	// This is internal and used for LRU,
	// it is not the atime or mtime on the
	// underlying file -- which is directly
	// from the underlying filesystem.
	used time.Time
	// Our index in the LRU (-1 when not resident).
	index int
	// The associated file fds (-1 when closed; read_fd may
	// alias write_fd, see lockRead/flush).
	read_fd int
	write_fd int
	// The write map --
	//
	// This is future work.
	//
	// This will track sparse holes in the write_fd,
	// and ensures that it is populated as necessary.
	// Each entry represents a sparse hole. If a read
	// comes in and corresponds to a hole, we will send
	// the read to the read_fd. If a read comes in and
	// partially overlaps with a hole, then we need to
	// copy data from the read_fd to the write_fd first,
	// then return the write_fd. When a write comes in,
	// we always send the write to the write_fd and
	// update the write_map appropriately to remove any
	// holes that might be there.
	//
	// NOTE: The write files are actually *sparse*
	// copies on top of the read files. It's very
	// important that tar -S is used to compress and
	// uncompress bundles to have this maintained.
	//
	// type Hole struct {
	//     start  uint64
	//     length uint64
	// }
	//
	// write_map []Hole
	// Our RWMutex (protects r=>w transition).
	sync.RWMutex
}
// ModeToP9Type maps Unix S_IF* type bits to 9P Qid type bits.
var ModeToP9Type = map[uint32]uint16{
	syscall.S_IFDIR: QTDIR,
	syscall.S_IFLNK: QTSYMLINK,
}

// P9TypeToMode is the inverse of ModeToP9Type.
var P9TypeToMode = map[uint8]uint32{
	QTDIR:     syscall.S_IFDIR,
	QTSYMLINK: syscall.S_IFLNK,
}

// ModeToP9Mode maps Unix mode and permission bits to 9P Dir.Mode bits.
var ModeToP9Mode = map[uint32]uint32{
	syscall.S_IFDIR:  DMDIR,
	syscall.S_IFLNK:  DMSYMLINK,
	syscall.S_IFSOCK: DMSOCKET,
	syscall.S_IFBLK:  DMDEVICE,
	syscall.S_IFCHR:  DMDEVICE,
	syscall.S_ISUID:  DMSETUID,
	syscall.S_ISGID:  DMSETGID,
	syscall.S_IRUSR:  DMREAD << 6,
	syscall.S_IWUSR:  DMWRITE << 6,
	syscall.S_IXUSR:  DMEXEC << 6,
	syscall.S_IRGRP:  DMREAD << 3,
	syscall.S_IWGRP:  DMWRITE << 3,
	syscall.S_IXGRP:  DMEXEC << 3,
	syscall.S_IROTH:  DMREAD,
	syscall.S_IWOTH:  DMWRITE,
	syscall.S_IXOTH:  DMEXEC,
}

// P9ModeToMode maps 9P Dir.Mode bits back to Unix mode bits.
// NOTE(review): both S_IFBLK and S_IFCHR map to DMDEVICE above, but
// DMDEVICE maps back only to S_IFCHR, so block devices do not
// round-trip — confirm this is intended.
var P9ModeToMode = map[uint32]uint32{
	DMDIR:          syscall.S_IFDIR,
	DMSYMLINK:      syscall.S_IFLNK,
	DMSOCKET:       syscall.S_IFSOCK,
	DMDEVICE:       syscall.S_IFCHR,
	DMSETUID:       syscall.S_ISUID,
	DMSETGID:       syscall.S_ISGID,
	(DMREAD << 6):  syscall.S_IRUSR,
	(DMWRITE << 6): syscall.S_IWUSR,
	(DMEXEC << 6):  syscall.S_IXUSR,
	(DMREAD << 3):  syscall.S_IRGRP,
	(DMWRITE << 3): syscall.S_IWGRP,
	(DMEXEC << 3):  syscall.S_IXGRP,
	DMREAD:         syscall.S_IROTH,
	DMWRITE:        syscall.S_IWOTH,
	DMEXEC:         syscall.S_IXOTH,
}
func (file *File) findPaths(fs *Fs, filepath string) |
// lookup returns the File for path (trailing slash stripped),
// bumping its reference count. It uses the double-checked pattern:
// a fast map hit under RLock, then file construction outside the
// lock, then a re-check under the exclusive lock.
func (fs *Fs) lookup(path string) (*File, error) {
	// Normalize path.
	if len(path) > 0 && path[len(path)-1] == '/' {
		path = path[:len(path)-1]
	}
	fs.filesLock.RLock()
	file, ok := fs.files[path]
	if ok {
		atomic.AddInt32(&file.refs, 1)
		fs.filesLock.RUnlock()
		return file, nil
	}
	fs.filesLock.RUnlock()
	// Create our new file object.
	// This isn't in the hotpath, so we
	// aren't blocking anyone else.
	newfile, err := fs.NewFile(path)
	// Escalate and create if necessary.
	fs.filesLock.Lock()
	file, ok = fs.files[path]
	if ok {
		// Race caught: another goroutine inserted it first;
		// drop our speculative object and use theirs.
		newfile.DecRef(fs, path)
		atomic.AddInt32(&file.refs, 1)
		fs.filesLock.Unlock()
		return file, nil
	}
	if err != nil {
		fs.filesLock.Unlock()
		return nil, err
	}
	// Add the file.
	// NOTE: We add the file synchronously to the
	// LRU currently because otherwise race conditions
	// related to removing the file become very complex.
	fs.files[path] = newfile
	fs.filesLock.Unlock()
	return newfile, nil
}
// swapLru exchanges two LRU slots and keeps each file's cached
// index in sync with its new position.
func (fs *Fs) swapLru(i1 int, i2 int) {
	fs.lru[i1], fs.lru[i2] = fs.lru[i2], fs.lru[i1]
	fs.lru[i1].index = i1
	fs.lru[i2].index = i2
}
// removeLru closes file's descriptors and drops it from the LRU
// heap. lock selects whether lruLock is taken here (true from the
// async DecRef path) or is already held by the caller (false, from
// updateLru/flushLru).
func (fs *Fs) removeLru(file *File, lock bool) {
	// This function will be called as an
	// independent goroutine in order to remove
	// a specific file (for example, on close)
	// or it will be called as a subroutine from
	// updateLru -- which is itself an synchronous
	// update function.
	if lock {
		fs.lruLock.Lock()
		defer fs.lruLock.Unlock()
	}
	// Shutdown all descriptors.
	file.flush()
	// Remove from our LRU.
	if file.index != -1 {
		if file.index == len(fs.lru)-1 {
			// Just truncate.
			fs.lru = fs.lru[0 : len(fs.lru)-1]
		} else {
			// Swap and run a bubble.
			// This may end up recursing.
			other_file := fs.lru[len(fs.lru)-1]
			fs.swapLru(file.index, len(fs.lru)-1)
			fs.lru = fs.lru[0 : len(fs.lru)-1]
			fs.updateLru(other_file, false)
		}
		// Clear our LRU index.
		file.index = -1
	}
}
// updateLru re-positions file within the LRU min-heap. With lock
// set it also refreshes file.used and inserts the file if absent;
// the lock=false path only re-heapifies an existing entry (used by
// removeLru while already holding lruLock).
// NOTE(review): the parent/child arithmetic (index/2, index*2)
// matches a 1-based heap but fs.lru is 0-indexed, so ordering is
// only approximate for some positions — confirm this is acceptable
// for an eviction heuristic.
func (fs *Fs) updateLru(file *File, lock bool) {
	if lock {
		fs.lruLock.Lock()
		defer fs.lruLock.Unlock()
		file.used = time.Now()
		if file.index == -1 {
			fs.lru = append(fs.lru, file)
			file.index = len(fs.lru) - 1
		}
	}
	// Not in the LRU?
	// This may be a stale update goroutine.
	if file.index == -1 {
		return
	}
	// Bubble up.
	index := file.index
	for index != 0 {
		if file.used.Before(fs.lru[index/2].used) {
			fs.swapLru(index, index/2)
			index = index / 2
			continue
		}
		break
	}
	// Bubble down.
	for index*2 < len(fs.lru) {
		if file.used.After(fs.lru[index*2].used) {
			fs.swapLru(index, index*2)
			index = index * 2
			continue
		}
		if index*2+1 < len(fs.lru) && file.used.After(fs.lru[index*2+1].used) {
			fs.swapLru(index, index*2+1)
			index = index*2 + 1
			continue
		}
		break
	}
	// Evict if we are now over the descriptor limit.
	fs.flushLru()
}
// touchLru refreshes the file's position in the LRU after a
// successful descriptor access.
func (fs *Fs) touchLru(file *File) {
	if file.index == -1 {
		// This needs to be done synchronously,
		// to ensure that this file is in the LRU
		// because we may have a remove() event.
		fs.updateLru(file, true)
	} else {
		// Already present: the timestamp refresh is best-effort,
		// so we can do this update asynchronously.
		go fs.updateLru(file, true)
	}
}
// flushLru evicts the least-recently-used file (the heap root at
// fs.lru[0]) when the cache exceeds fs.Fdlimit descriptors.
// Caller must hold lruLock.
func (fs *Fs) flushLru() {
	// Are we over our limit?
	// Schedule a removal. Note that this will end
	// up recursing through updateLru() again, and
	// may end up calling flushLru() again. So we
	// don't need to check bounds, only one call.
	if len(fs.lru) > int(fs.Fdlimit) {
		fs.removeLru(fs.lru[0], false)
	}
}
// unlink removes whatever currently backs the write path: first the
// deletion marker (if set), then the directory or regular file
// itself, and finally resets the write-layer flags.
// NOTE: We will generally require the write lock to be held for
// this routine.
func (file *File) unlink() error {
	// Clear the deletion marker first.
	if file.write_deleted {
		if err := cleardelattr(file.write_path); err != nil {
			return err
		}
	}
	// If something exists on disk, remove it with the call
	// appropriate to its type; a failed Lstat means there is
	// nothing to remove.
	var stat syscall.Stat_t
	if err := syscall.Lstat(file.write_path, &stat); err == nil {
		removeFn := syscall.Unlink
		if stat.Mode&syscall.S_IFDIR != 0 {
			removeFn = syscall.Rmdir
		}
		if err := removeFn(file.write_path); err != nil {
			return err
		}
	}
	// Nothing is left in the write layer.
	file.write_exists = false
	file.write_deleted = false
	return nil
}
// remove marks the file as deleted in the write layer: it unlinks
// any current backing object, recreates the write path as a
// directory, and stamps it with the delete attribute so lower read
// layers are masked.
func (file *File) remove(
	fs *Fs,
	path string) error {
	file.RWMutex.Lock()
	defer file.RWMutex.Unlock()
	// Unlink what's there.
	err := file.unlink()
	if err != nil {
		return err
	}
	// Make sure the parent exists.
	// NOTE: The deferred Unlock above covers every return path.
	// The original code also unlocked explicitly here, which
	// double-unlocked the mutex and panicked on this error path.
	err = file.makeTree(fs, path)
	if err != nil {
		return err
	}
	// We need to have something we can record
	// on. Even for files we record a directory,
	// this later on packs may choose to make this
	// into a tree and we need to be ready for that.
	mode := (syscall.S_IFDIR | syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR)
	err = syscall.Mkdir(file.write_path, uint32(mode))
	if err != nil {
		return err
	}
	// Mark this file as deleted.
	err = setdelattr(file.write_path)
	if err != nil {
		return err
	}
	// We're deleted.
	file.write_exists = true
	file.write_deleted = true
	return nil
}
// exists reports whether the file is currently visible: it must not
// carry the delete marker, and must be present in at least one of
// the read or write layers.
func (file *File) exists() bool {
	if file.write_deleted {
		return false
	}
	return file.read_exists || file.write_exists
}
// makeTree ensures the parent directory of path exists in the write
// layer, creating ancestors recursively (create calls back into
// makeTree for its own parent).
func (file *File) makeTree(
	fs *Fs,
	path string) error {
	// Make all the super directories.
	basedir, _ := filepath.Split(path)
	if basedir != path {
		parent, err := fs.lookup(basedir)
		if err != nil {
			return err
		}
		// The parent must have had
		// a valid mode set at some point.
		// We ignore this error, as this
		// may actually return Eexist.
		parent.create(fs, basedir, parent.mode)
		parent.DecRef(fs, basedir)
	}
	return nil
}
// create materializes the file in the write layer with the given
// mode, returning Eexist when the file already existed (makeTree
// deliberately ignores that error). Directories are created with
// Mkdir; regular files are created by escalating to a write
// descriptor via lockWrite.
// NOTE(review): locking is manual (no defer) because the regular-
// file branch must drop the mutex before lockWrite, which takes
// its own locks and returns holding the read lock.
func (file *File) create(
	fs *Fs,
	path string,
	mode uint32) error {
	file.RWMutex.Lock()
	did_exist := file.exists()
	if file.write_exists && !file.write_deleted {
		file.RWMutex.Unlock()
		return Eexist
	}
	// Save our mode.
	file.mode = mode
	// Is it a directory?
	if file.mode&syscall.S_IFDIR != 0 {
		if file.write_exists && file.write_deleted {
			// Is it just marked deleted?
			err := file.unlink()
			if err != nil {
				file.RWMutex.Unlock()
				return err
			}
		}
		// Make sure the parent exists.
		err := file.makeTree(fs, path)
		if err != nil {
			file.RWMutex.Unlock()
			return err
		}
		// Make this directory.
		err = syscall.Mkdir(file.write_path, mode)
		if err != nil {
			file.RWMutex.Unlock()
			return err
		}
		// Fill out type.
		err = file.fillType(file.write_path)
		if err != nil {
			file.RWMutex.Unlock()
			return err
		}
		// We now exist.
		file.write_exists = true
		file.RWMutex.Unlock()
	} else {
		// Make sure the parent exists.
		err := file.makeTree(fs, path)
		if err != nil {
			file.RWMutex.Unlock()
			return err
		}
		// lockWrite creates the backing file; on success it
		// returns with the read lock held, released by unlock().
		file.RWMutex.Unlock()
		err = file.lockWrite(fs)
		if err != nil {
			return err
		}
		err = file.fillType(file.write_path)
		if err != nil {
			file.unlock()
			return err
		}
		file.unlock()
	}
	if did_exist {
		return Eexist
	}
	return nil
}
// rename moves the file from orig_path to new_path, updating the
// backing write layer, the fs.files map, and any fids that pointed
// at either path. Returns Eexist when a live file already occupies
// new_path, and Enotimpl for cross-device renames (EXDEV).
func (file *File) rename(
	fs *Fs,
	orig_path string,
	new_path string) error {
	fs.filesLock.Lock()
	defer fs.filesLock.Unlock()
	other_file, ok := fs.files[new_path]
	if ok && other_file.exists() {
		return Eexist
	}
	// Drop the original reference.
	// (We've not replaced it atomically).
	if other_file != nil {
		defer other_file.DecRef(fs, "")
	}
	if file.write_exists && file.write_deleted {
		// Is it just marked deleted?
		err := file.unlink()
		if err != nil {
			return err
		}
	}
	// Try the rename.
	orig_read_path := file.read_path
	orig_write_path := file.write_path
	file.findPaths(fs, new_path)
	err := syscall.Rename(orig_write_path, file.write_path)
	if err != nil {
		if err == syscall.EXDEV {
			// TODO: The file cannot be renamed across file system.
			// This is a simple matter of copying the file across when
			// this happens. For now, we just return not implemented.
			err = Enotimpl
		}
		// Restore the original paths on failure.
		file.read_path = orig_read_path
		file.write_path = orig_write_path
		return err
	}
	// We've moved this file.
	// It didn't exist a moment ago, but it does now.
	file.write_exists = true
	file.write_deleted = false
	// Update our fids.
	// NOTE(review): the original comment claimed a write lock is
	// held on this file here, but only fs.filesLock is visibly
	// held -- confirm whether callers serialize file access.
	for _, fid := range fs.Pool {
		if fid.file == file {
			fid.Path = new_path
		} else if other_file != nil && fid.file == other_file {
			// Since we hold at least one reference
			// to other_file, this should never trigger
			// a full cleanup of other_file. It's safe
			// to call DecRef here while locking the lock.
			file.IncRef(fs)
			fid.file = file
			other_file.DecRef(fs, "")
		}
	}
	// Perform the swaperoo.
	fs.files[new_path] = file
	delete(fs.files, orig_path)
	// Ensure the original file is deleted.
	// This is done at the very end, since there's
	// really nothing we can do at this point. We
	// even explicitly ignore the result. Ugh.
	setdelattr(orig_write_path)
	return nil
}
// lockWrite ensures file.write_fd is an open descriptor on the
// write path, creating the backing file or copying it up from the
// read layer first as needed. On success it returns with the
// file's read lock held (release via unlock()); the tail recursion
// re-enters the fast path to acquire that RLock.
func (file *File) lockWrite(fs *Fs) error {
	file.RWMutex.RLock()
	if file.write_fd != -1 {
		// Fast path: already open. Note we return holding RLock.
		fs.touchLru(file)
		return nil
	}
	// Escalate.
	file.RWMutex.RUnlock()
	file.RWMutex.Lock()
	if file.write_fd != -1 {
		// Race caught.
		file.RWMutex.Unlock()
		return file.lockWrite(fs)
	}
	// NOTE: All files are opened CLOEXEC.
	mode := syscall.O_RDWR | syscall.O_CLOEXEC
	var perm uint32
	// Make sure the file exists.
	if !file.write_exists || file.write_deleted {
		// NOTE: It would be really great to handle
		// all these writes as simply overlays and keep
		// a map of all the sparse holes in the file.
		// See above with the write_map, for now I'll
		// leave this for future work.
		if file.write_deleted {
			// Remove the file (and delete marker), then recreate.
			file.unlink()
			mode |= syscall.O_RDWR | syscall.O_CREAT
			perm |= syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR
			file.write_deleted = false
			file.write_exists = true
		} else if !file.read_exists {
			// This is a fresh file.
			// It doesn't exist in any read layer.
			mode |= syscall.O_CREAT | syscall.O_RDWR
			perm |= syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR
			file.write_exists = true
		} else {
			// Not deleted && read_exists: copy-up.
			// We grab a memory map and write out
			// a copy of the new file. This could
			// be made much more efficient (per above).
			data, err := ioutil.ReadFile(file.read_path)
			if err != nil {
				file.RWMutex.Unlock()
				return err
			}
			perm |= syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR
			err = ioutil.WriteFile(file.write_path, data, os.FileMode(perm))
			if err != nil {
				file.RWMutex.Unlock()
				return err
			}
			file.write_exists = true
		}
	}
	new_fd, err := syscall.Open(file.write_path, mode, perm)
	if err != nil {
		file.RWMutex.Unlock()
		return err
	}
	// Open successful.
	file.write_fd = new_fd
	// Flush the current readFD; future readers fall through to
	// the write descriptor via lockRead.
	if file.read_fd != -1 {
		syscall.Close(file.read_fd)
		file.read_fd = -1
	}
	// Retry (for the RLock).
	file.RWMutex.Unlock()
	return file.lockWrite(fs)
}
// lockRead ensures file.read_fd is open, reusing an existing write
// descriptor when one is available, else opening the read path
// read-only. On success it returns with the file's read lock held
// (release via unlock()); the tail recursion re-enters the fast
// path to acquire that RLock.
func (file *File) lockRead(fs *Fs) error {
	file.RWMutex.RLock()
	if file.read_fd != -1 {
		// Fast path: already open. Note we return holding RLock.
		fs.touchLru(file)
		return nil
	}
	// Escalate.
	file.RWMutex.RUnlock()
	file.RWMutex.Lock()
	if file.read_fd != -1 {
		// Race caught.
		file.RWMutex.Unlock()
		return file.lockRead(fs)
	}
	if file.write_fd != -1 {
		// Use the same Fd.
		// The close logic handles this.
		file.read_fd = file.write_fd
		file.RWMutex.Unlock()
		return file.lockRead(fs)
	}
	// Okay, no write available.
	// Let's open our read path.
	new_fd, err := syscall.Open(file.read_path, syscall.O_RDONLY, 0)
	if err != nil {
		file.RWMutex.Unlock()
		return err
	}
	// Open successful.
	file.read_fd = new_fd
	// Retry (for the RLock).
	file.RWMutex.Unlock()
	return file.lockRead(fs)
}
// flush closes any open descriptors for this file. The read and
// write descriptors may alias each other (see lockRead), so the
// shared case is closed exactly once.
func (file *File) flush() {
	file.RWMutex.Lock()
	defer file.RWMutex.Unlock()
	read_fd, write_fd := file.read_fd, file.write_fd
	file.read_fd = -1
	file.write_fd = -1
	// Close the file if still opened.
	if read_fd != -1 {
		syscall.Close(read_fd)
	}
	// Close the write descriptor only when it is distinct.
	if write_fd != -1 && write_fd != read_fd {
		syscall.Close(write_fd)
	}
}
// dir builds a plan9 Dir (stat) structure for the file, preferring
// the write-layer path when it exists. When locked is true the
// file's read lock is taken around the Lstat. Symlink targets are
// resolved into Ext, and dir+symlink combinations are collapsed to
// a plain symlink since plan9 cannot express both.
func (file *File) dir(
	name string,
	locked bool) (*Dir, error) {
	if locked {
		file.RWMutex.RLock()
	}
	var stat_path string
	if file.write_exists {
		stat_path = file.write_path
	} else {
		stat_path = file.read_path
	}
	var stat syscall.Stat_t
	err := syscall.Lstat(stat_path, &stat)
	if locked {
		file.RWMutex.RUnlock()
	}
	if err != nil {
		return nil, err
	}
	dir := new(Dir)
	dir.Type = 0 // Set below.
	dir.Mode = 0 // Set below.
	dir.Qid = file.Qid
	dir.Dev = uint32(stat.Dev)
	atim, _ := stat.Atim.Unix()
	dir.Atime = uint32(atim)
	mtim, _ := stat.Mtim.Unix()
	dir.Mtime = uint32(mtim)
	if stat.Mode&syscall.S_IFDIR != 0 {
		// Directories report zero length.
		dir.Length = 0
	} else {
		dir.Length = uint64(stat.Size)
	}
	dir.Name = name
	dir.Uid = "root"
	dir.Gid = "root"
	dir.Muid = "root"
	dir.Ext = ""
	dir.Uidnum = stat.Uid
	dir.Gidnum = stat.Gid
	dir.Muidnum = stat.Uid
	// Translate Unix type/mode bits into plan9 bits.
	for mask, type_bit := range ModeToP9Type {
		if stat.Mode&mask == mask {
			dir.Type = dir.Type | type_bit
		}
	}
	for mask, mode_bit := range ModeToP9Mode {
		if stat.Mode&mask == mask {
			dir.Mode = dir.Mode | mode_bit
		}
	}
	// Read our symlink if available.
	if dir.Type&QTSYMLINK != 0 || dir.Mode&DMSYMLINK != 0 {
		dir.Ext, err = os.Readlink(stat_path)
		if err != nil {
			return nil, err
		}
	}
	// Plan9 doesn't handle dir+symlink.
	// We return just a raw symlink.
	if dir.Type&QTDIR != 0 && dir.Type&QTSYMLINK != 0 {
		dir.Type &= ^uint16(QTDIR)
	}
	if dir.Mode&DMDIR != 0 && dir.Mode&DMSYMLINK != 0 {
		dir.Mode &= ^uint32(DMDIR)
	}
	return dir, nil
}
// children enumerates the directory entries visible at dirpath by
// merging all matching write- and read-layer backing directories,
// deduplicating by name, then stat-ing each child (entries marked
// deleted are skipped).
func (file *File) children(fs *Fs, dirpath string) ([]*Dir, error) {
	child_set := make(map[string]bool)
	// gather_dir records the base name of every entry in one
	// backing directory; glob errors are deliberately ignored
	// (a missing layer simply contributes nothing).
	gather_dir := func(realdir string) {
		files, err := filepath.Glob(path.Join(realdir, "*"))
		if err != nil {
			return
		}
		for _, file := range files {
			// This file exists somewhere.
			child_set[path.Base(file)] = true
		}
	}
	// We need to collect all possible matching paths.
	// This has the potential to be a very long list.
	for prefix, backing_path := range fs.Write {
		if strings.HasPrefix(dirpath, prefix) {
			gather_dir(path.Join(backing_path, dirpath[len(prefix):]))
		}
	}
	for prefix, backing_paths := range fs.Read {
		if strings.HasPrefix(dirpath, prefix) {
			for _, backing_path := range backing_paths {
				gather_dir(path.Join(backing_path, dirpath[len(prefix):]))
			}
		}
	}
	// We stat each of these files.
	results := make([]*Dir, 0, len(child_set))
	for name, _ := range child_set {
		// Find this child.
		child_path := path.Join(dirpath, name)
		child, err := fs.lookup(child_path)
		if err != nil {
			if child != nil {
				child.DecRef(fs, child_path)
			}
			return nil, err
		}
		// Deleted?
		if !child.exists() {
			child.DecRef(fs, child_path)
			continue
		}
		// Get the stat.
		child_dir, err := child.dir(name, true)
		child.DecRef(fs, child_path)
		if err != nil {
			return nil, err
		}
		results = append(results, child_dir)
	}
	// We're good.
	return results, nil
}
// unlock releases the read lock held after a successful lockRead
// or lockWrite.
func (file *File) unlock() {
	file.RWMutex.RUnlock()
}
// IncRef takes an additional reference on the file. Holding the
// filesLock read lock excludes the zero-refs cleanup in DecRef,
// which takes the write lock.
func (file *File) IncRef(fs *Fs) {
	fs.filesLock.RLock()
	atomic.AddInt32(&file.refs, 1)
	fs.filesLock.RUnlock()
}
// DecRef drops one reference. When the count reaches zero the file
// is removed from fs.files (only if path is non-empty) and
// scheduled for asynchronous LRU removal, which closes its
// descriptors. Must not be called while holding fs.filesLock.
func (file *File) DecRef(fs *Fs, path string) {
	new_refs := atomic.AddInt32(&file.refs, -1)
	if new_refs == 0 {
		fs.filesLock.Lock()
		// Recheck under the lock: a concurrent IncRef may have
		// revived the file between the atomic add and here.
		if file.refs != 0 {
			// Race condition caught.
			fs.filesLock.Unlock()
			return
		}
		// Remove this file.
		if path != "" {
			delete(fs.files, path)
		}
		fs.filesLock.Unlock()
		// Ensure that file is removed from the LRU.
		// This will be done asynchronously, and as a
		// result all file descriptors will be closed.
		go fs.removeLru(file, true)
	}
}
// fillType stats path and records the resulting plan9 type bits in
// the file's Qid.
func (file *File) fillType(path string) error {
	// Figure out the type.
	dir, err := file.dir(path, false)
	if err != nil {
		return err
	}
	// Get file type.
	file.Qid.Type = uint8(dir.Type)
	return nil
}
// NewFile constructs the in-memory File for path: it resolves the
// read/write backing paths, assigns a fresh unique Qid, and fills
// in the type bits when the file already exists on disk. The
// caller owns the initial reference (refs == 1). Note the file is
// NOT registered in fs.files here; lookup does that.
func (fs *Fs) NewFile(path string) (*File, error) {
	file := new(File)
	file.refs = 1
	// Figure out the paths.
	file.findPaths(fs, path)
	// Clear our LRU index (-1 means not in the LRU).
	file.index = -1
	// Reset our FDs (-1 means closed).
	file.read_fd = -1
	file.write_fd = -1
	file.Qid.Version = 0
	file.Qid.Path = atomic.AddUint64(&fs.Fileid, 1)
	if file.exists() {
		return file, file.fillType(path)
	}
	return file, nil
}
| {
// Figure out our write path first.
write_prefix := ""
write_backing_path := "."
for prefix, backing_path := range fs.Write {
if strings.HasPrefix(filepath, prefix) &&
len(prefix) > len(write_prefix) {
write_prefix = prefix
write_backing_path = backing_path
}
}
file.write_path = path.Join(
write_backing_path,
filepath[len(write_prefix):])
var stat syscall.Stat_t
err := syscall.Lstat(file.write_path, &stat)
if err == nil {
file.write_exists = true
file.write_deleted, _ = readdelattr(file.write_path)
if !file.write_deleted {
file.mode = stat.Mode
}
} else {
file.write_exists = false
file.write_deleted = false
}
// Figure out our read path.
read_prefix := write_prefix
read_backing_path := write_backing_path
file.read_exists = false
for prefix, backing_paths := range fs.Read {
if strings.HasPrefix(filepath, prefix) &&
(!file.read_exists ||
len(prefix) > len(read_prefix)) {
for _, backing_path := range backing_paths {
// Does this file exist?
test_path := path.Join(backing_path, filepath[len(prefix):])
err := syscall.Lstat(test_path, &stat)
if err == nil {
// Check if it's deleted.
// NOTE: If we can't read the extended
// attributes on this file, we can assume
// that it is not deleted.
deleted, _ := readdelattr(test_path)
if !deleted {
read_prefix = prefix
read_backing_path = backing_path
file.read_exists = true
if !file.write_deleted && !file.write_exists {
file.mode = stat.Mode
}
}
}
}
}
}
file.read_path = path.Join(
read_backing_path,
filepath[len(read_prefix):])
} | identifier_body |
file.go | // Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plan9
import (
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
)
// File is one overlay-filesystem node: it pairs a read-layer path
// with a copy-on-write write-layer path, caches descriptors for
// both, and participates in the Fs reference count and LRU.
type File struct {
	// File identifier.
	Qid
	// The number of fid references.
	// Manipulated atomically; see IncRef/DecRef.
	refs int32
	// Our real underlying read/write paths.
	read_path string
	read_exists bool
	write_path string
	write_exists bool
	write_deleted bool
	// Our underlying mode.
	mode uint32
	// Our access timestamp (for LRU).
	// This is internal and used for LRU,
	// it is not the atime or mtime on the
	// underlying file -- which is directly
	// from the underlying filesystem.
	used time.Time
	// Our index in the LRU (-1 when not present).
	index int
	// The associated file fds (-1 when closed; they may alias
	// each other, see lockRead/flush).
	read_fd int
	write_fd int
	// The write map --
	//
	// This is future work.
	//
	// This will track sparse holes in the write_fd,
	// and ensures that it is populated as necessary.
	// Each entry represents a sparse hole. If a read
	// comes in and corresponds to a hole, we will send
	// the read to the read_fd. If a read comes in and
	// partially overlaps with a hole, then we need to
	// copy data from the read_fd to the write_fd first,
	// then return the write_fd. When a write comes in,
	// we always send the write to the write_fd and
	// update the write_map appropriately to remove any
	// holes that might be there.
	//
	// NOTE: The write files are actually *sparse*
	// copies on top of the read files. It's very
	// important that tar -S is used to compress and
	// uncompress bundles to have this maintained.
	//
	// type Hole struct {
	//     start  uint64
	//     length uint64
	// }
	//
	// write_map []Hole
	// Our RWMutex (protects r=>w transition).
	sync.RWMutex
}
// ModeToP9Type maps Unix file-type bits to plan9 Qid type bits.
var ModeToP9Type = map[uint32]uint16{
	syscall.S_IFDIR: QTDIR,
	syscall.S_IFLNK: QTSYMLINK,
}

// P9TypeToMode is the inverse of ModeToP9Type.
var P9TypeToMode = map[uint8]uint32{
	QTDIR:     syscall.S_IFDIR,
	QTSYMLINK: syscall.S_IFLNK,
}

// ModeToP9Mode maps Unix mode bits (type + permission) to plan9
// Dir.Mode bits; the <<6 and <<3 shifts place the owner and group
// permission triads.
var ModeToP9Mode = map[uint32]uint32{
	syscall.S_IFDIR:  DMDIR,
	syscall.S_IFLNK:  DMSYMLINK,
	syscall.S_IFSOCK: DMSOCKET,
	syscall.S_IFBLK:  DMDEVICE,
	syscall.S_IFCHR:  DMDEVICE,
	syscall.S_ISUID:  DMSETUID,
	syscall.S_ISGID:  DMSETGID,
	syscall.S_IRUSR:  DMREAD << 6,
	syscall.S_IWUSR:  DMWRITE << 6,
	syscall.S_IXUSR:  DMEXEC << 6,
	syscall.S_IRGRP:  DMREAD << 3,
	syscall.S_IWGRP:  DMWRITE << 3,
	syscall.S_IXGRP:  DMEXEC << 3,
	syscall.S_IROTH:  DMREAD,
	syscall.S_IWOTH:  DMWRITE,
	syscall.S_IXOTH:  DMEXEC,
}

// P9ModeToMode is the inverse mapping, used when applying plan9
// modes back to the underlying filesystem.
var P9ModeToMode = map[uint32]uint32{
	DMDIR:          syscall.S_IFDIR,
	DMSYMLINK:      syscall.S_IFLNK,
	DMSOCKET:       syscall.S_IFSOCK,
	DMDEVICE:       syscall.S_IFCHR,
	DMSETUID:       syscall.S_ISUID,
	DMSETGID:       syscall.S_ISGID,
	(DMREAD << 6):  syscall.S_IRUSR,
	(DMWRITE << 6): syscall.S_IWUSR,
	(DMEXEC << 6):  syscall.S_IXUSR,
	(DMREAD << 3):  syscall.S_IRGRP,
	(DMWRITE << 3): syscall.S_IWGRP,
	(DMEXEC << 3):  syscall.S_IXGRP,
	DMREAD:         syscall.S_IROTH,
	DMWRITE:        syscall.S_IWOTH,
	DMEXEC:         syscall.S_IXOTH,
}
// findPaths resolves the read and write backing paths for filepath
// using longest-prefix match against the fs.Write and fs.Read
// layer maps, and fills in write_exists/write_deleted/read_exists
// and mode from Lstat.
// NOTE(review): the parameter name shadows the imported
// path/filepath package; only path.Join is used below so this is
// benign, but renaming it would be clearer.
func (file *File) findPaths(fs *Fs, filepath string) {
	// Figure out our write path first.
	write_prefix := ""
	write_backing_path := "."
	for prefix, backing_path := range fs.Write {
		if strings.HasPrefix(filepath, prefix) &&
			len(prefix) > len(write_prefix) {
			write_prefix = prefix
			write_backing_path = backing_path
		}
	}
	file.write_path = path.Join(
		write_backing_path,
		filepath[len(write_prefix):])
	var stat syscall.Stat_t
	err := syscall.Lstat(file.write_path, &stat)
	if err == nil {
		file.write_exists = true
		file.write_deleted, _ = readdelattr(file.write_path)
		if !file.write_deleted {
			file.mode = stat.Mode
		}
	} else {
		file.write_exists = false
		file.write_deleted = false
	}
	// Figure out our read path.
	read_prefix := write_prefix
	read_backing_path := write_backing_path
	file.read_exists = false
	for prefix, backing_paths := range fs.Read {
		if strings.HasPrefix(filepath, prefix) &&
			(!file.read_exists ||
				len(prefix) > len(read_prefix)) {
			for _, backing_path := range backing_paths {
				// Does this file exist?
				test_path := path.Join(backing_path, filepath[len(prefix):])
				err := syscall.Lstat(test_path, &stat)
				if err == nil {
					// Check if it's deleted.
					// NOTE: If we can't read the extended
					// attributes on this file, we can assume
					// that it is not deleted.
					deleted, _ := readdelattr(test_path)
					if !deleted {
						read_prefix = prefix
						read_backing_path = backing_path
						file.read_exists = true
						// Only adopt the read layer's mode when the
						// write layer did not already provide one.
						if !file.write_deleted && !file.write_exists {
							file.mode = stat.Mode
						}
					}
				}
			}
		}
	}
	file.read_path = path.Join(
		read_backing_path,
		filepath[len(read_prefix):])
}
// lookup returns the File for path, creating the in-memory object
// on first use. The returned file has had its reference count
// bumped; callers must balance with DecRef.
func (fs *Fs) lookup(path string) (*File, error) {
	// Normalize path.
	if len(path) > 0 && path[len(path)-1] == '/' {
		path = path[:len(path)-1]
	}
	// Fast path: the file is already known.
	fs.filesLock.RLock()
	file, ok := fs.files[path]
	if ok {
		atomic.AddInt32(&file.refs, 1)
		fs.filesLock.RUnlock()
		return file, nil
	}
	fs.filesLock.RUnlock()
	// Create our new file object.
	// This isn't in the hotpath, so we
	// aren't blocking anyone else.
	newfile, err := fs.NewFile(path)
	// Escalate and create if necessary.
	fs.filesLock.Lock()
	file, ok = fs.files[path]
	if ok {
		// Race caught: someone inserted the file while we were
		// building ours. Take a reference on the winner, then
		// drop our speculative object OUTSIDE the lock --
		// DecRef re-acquires filesLock when the count hits zero
		// (newfile.refs is 1, so it will), and Go mutexes are
		// not reentrant: calling it here deadlocked. We also
		// pass "" so DecRef cannot delete the winner's map
		// entry at this path.
		atomic.AddInt32(&file.refs, 1)
		fs.filesLock.Unlock()
		if newfile != nil {
			newfile.DecRef(fs, "")
		}
		return file, nil
	}
	if err != nil {
		fs.filesLock.Unlock()
		return nil, err
	}
	// Add the file.
	// NOTE: We add the file synchronously to the
	// LRU currently because otherwise race conditions
	// related to removing the file become very complex.
	fs.files[path] = newfile
	fs.filesLock.Unlock()
	return newfile, nil
}
// swapLru exchanges two LRU heap slots, keeping each file's index
// back-pointer in sync. Caller must hold lruLock.
func (fs *Fs) swapLru(i1 int, i2 int) {
	older_file := fs.lru[i1]
	fs.lru[i1] = fs.lru[i2]
	fs.lru[i1].index = i1
	fs.lru[i2] = older_file
	fs.lru[i2].index = i2
}
// removeLru closes the file's descriptors and deletes it from the
// LRU heap (lock=false when the caller already holds lruLock).
func (fs *Fs) removeLru(file *File, lock bool) {
	// This function will be called as an
	// independent goroutine in order to remove
	// a specific file (for example, on close)
	// or it will be called as a subroutine from
	// updateLru -- which is itself a synchronous
	// update function.
	if lock {
		fs.lruLock.Lock()
		defer fs.lruLock.Unlock()
	}
	// Shutdown all descriptors.
	file.flush()
	// Remove from our LRU.
	if file.index != -1 {
		if file.index == len(fs.lru)-1 {
			// Just truncate.
			fs.lru = fs.lru[0 : len(fs.lru)-1]
		} else {
			// Swap and run a bubble.
			// This may end up recursing.
			other_file := fs.lru[len(fs.lru)-1]
			fs.swapLru(file.index, len(fs.lru)-1)
			fs.lru = fs.lru[0 : len(fs.lru)-1]
			fs.updateLru(other_file, false)
		}
		// Clear our LRU index.
		file.index = -1
	}
}
// updateLru records that file was just used and restores the heap
// invariant of fs.lru, a binary min-heap keyed on the "used"
// timestamp (the least-recently-used file sits at index 0, where
// flushLru evicts from).
//
// When lock is true the routine also refreshes file.used and
// inserts the file into the heap if absent; lock=false is the
// internal re-bubble path from removeLru, which already holds
// lruLock and must not refresh the timestamp.
func (fs *Fs) updateLru(file *File, lock bool) {
	if lock {
		fs.lruLock.Lock()
		defer fs.lruLock.Unlock()
		file.used = time.Now()
		if file.index == -1 {
			fs.lru = append(fs.lru, file)
			file.index = len(fs.lru) - 1
		}
	}
	// Not in the LRU?
	// This may be a stale update goroutine.
	if file.index == -1 {
		return
	}
	// Bubble up. For a 0-indexed heap the parent of i is (i-1)/2;
	// the previous i/2 arithmetic is only valid for 1-indexed
	// heaps and silently broke the heap invariant.
	index := file.index
	for index != 0 {
		parent := (index - 1) / 2
		if !file.used.Before(fs.lru[parent].used) {
			break
		}
		fs.swapLru(index, parent)
		index = parent
	}
	// Bubble down. Children of i are 2i+1 and 2i+2; always swap
	// with the older (smaller-timestamp) child so the root stays
	// the oldest entry.
	for {
		child := 2*index + 1
		if child >= len(fs.lru) {
			break
		}
		if child+1 < len(fs.lru) && fs.lru[child+1].used.Before(fs.lru[child].used) {
			child = child + 1
		}
		if !file.used.After(fs.lru[child].used) {
			break
		}
		fs.swapLru(index, child)
		index = child
	}
	fs.flushLru()
}
// touchLru refreshes the file's position in the LRU after a
// successful descriptor access.
func (fs *Fs) touchLru(file *File) {
	if file.index == -1 {
		// This needs to be done synchronously,
		// to ensure that this file is in the LRU
		// because we may have a remove() event.
		fs.updateLru(file, true)
	} else {
		// We can do this update asynchronously.
		go fs.updateLru(file, true)
	}
}
// flushLru evicts the heap root (fs.lru[0], the oldest entry) when
// the cache exceeds fs.Fdlimit descriptors. Caller holds lruLock.
func (fs *Fs) flushLru() {
	// Are we over our limit?
	// Schedule a removal. Note that this will end
	// up recursing through updateLru() again, and
	// may end up calling flushLru() again. So we
	// don't need to check bounds, only one call.
	if len(fs.lru) > int(fs.Fdlimit) {
		fs.removeLru(fs.lru[0], false)
	}
}
// unlink removes whatever currently backs the write path (delete
// marker, then directory or file) and resets the write-layer flags.
func (file *File) unlink() error {
	// Remove whatever was there.
	// NOTE: We will generally require the
	// write lock to be held for this routine.
	if file.write_deleted {
		err := cleardelattr(file.write_path)
		if err != nil {
			return err
		}
	}
	var stat syscall.Stat_t
	err := syscall.Lstat(file.write_path, &stat)
	if err == nil {
		if stat.Mode&syscall.S_IFDIR != 0 {
			err = syscall.Rmdir(file.write_path)
			if err != nil {
				return err
			}
		} else {
			err = syscall.Unlink(file.write_path)
			if err != nil {
				return err
			}
		}
	}
	file.write_exists = false
	file.write_deleted = false
	return nil
}
// remove marks the file as deleted in the write layer: it unlinks
// any current backing object, recreates the write path as a
// directory, and stamps it with the delete attribute so lower read
// layers are masked.
func (file *File) remove(
	fs *Fs,
	path string) error {
	file.RWMutex.Lock()
	defer file.RWMutex.Unlock()
	// Unlink what's there.
	err := file.unlink()
	if err != nil {
		return err
	}
	// Make sure the parent exists.
	// NOTE: The deferred Unlock above covers every return path.
	// The original code also unlocked explicitly here, which
	// double-unlocked the mutex and panicked on this error path.
	err = file.makeTree(fs, path)
	if err != nil {
		return err
	}
	// We need to have something we can record
	// on. Even for files we record a directory,
	// this later on packs may choose to make this
	// into a tree and we need to be ready for that.
	mode := (syscall.S_IFDIR | syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR)
	err = syscall.Mkdir(file.write_path, uint32(mode))
	if err != nil {
		return err
	}
	// Mark this file as deleted.
	err = setdelattr(file.write_path)
	if err != nil {
		return err
	}
	// We're deleted.
	file.write_exists = true
	file.write_deleted = true
	return nil
}
// exists reports whether the file is visible: not marked deleted,
// and present in at least one of the read or write layers.
func (file *File) exists() bool {
	// Some file must exist.
	return (!file.write_deleted &&
		(file.read_exists || file.write_exists))
}
// makeTree ensures the parent directory of path exists in the
// write layer, creating ancestors recursively via create.
func (file *File) makeTree(
	fs *Fs,
	path string) error {
	// Make all the super directories.
	basedir, _ := filepath.Split(path)
	if basedir != path {
		parent, err := fs.lookup(basedir)
		if err != nil {
			return err
		}
		// The parent must have had
		// a valid mode set at some point.
		// We ignore this error, as this
		// may actually return Eexist.
		parent.create(fs, basedir, parent.mode)
		parent.DecRef(fs, basedir)
	}
	return nil
}
func (file *File) create(
fs *Fs,
path string,
mode uint32) error {
file.RWMutex.Lock()
did_exist := file.exists()
if file.write_exists && !file.write_deleted {
file.RWMutex.Unlock()
return Eexist
}
// Save our mode.
file.mode = mode
// Is it a directory?
if file.mode&syscall.S_IFDIR != 0 {
if file.write_exists && file.write_deleted {
// Is it just marked deleted?
err := file.unlink()
if err != nil {
file.RWMutex.Unlock() | // Make sure the parent exists.
err := file.makeTree(fs, path)
if err != nil {
file.RWMutex.Unlock()
return err
}
// Make this directory.
err = syscall.Mkdir(file.write_path, mode)
if err != nil {
file.RWMutex.Unlock()
return err
}
// Fill out type.
err = file.fillType(file.write_path)
if err != nil {
file.RWMutex.Unlock()
return err
}
// We now exist.
file.write_exists = true
file.RWMutex.Unlock()
} else {
// Make sure the parent exists.
err := file.makeTree(fs, path)
if err != nil {
file.RWMutex.Unlock()
return err
}
file.RWMutex.Unlock()
err = file.lockWrite(fs)
if err != nil {
return err
}
err = file.fillType(file.write_path)
if err != nil {
file.unlock()
return err
}
file.unlock()
}
if did_exist {
return Eexist
}
return nil
}
// rename moves the file from orig_path to new_path, updating the
// write layer, fs.files, and any fids pointing at either path.
// Returns Eexist for a live target, Enotimpl for EXDEV.
func (file *File) rename(
	fs *Fs,
	orig_path string,
	new_path string) error {
	fs.filesLock.Lock()
	defer fs.filesLock.Unlock()
	other_file, ok := fs.files[new_path]
	if ok && other_file.exists() {
		return Eexist
	}
	// Drop the original reference.
	// (We've not replaced it atomically).
	if other_file != nil {
		defer other_file.DecRef(fs, "")
	}
	if file.write_exists && file.write_deleted {
		// Is it just marked deleted?
		err := file.unlink()
		if err != nil {
			return err
		}
	}
	// Try the rename.
	orig_read_path := file.read_path
	orig_write_path := file.write_path
	file.findPaths(fs, new_path)
	err := syscall.Rename(orig_write_path, file.write_path)
	if err != nil {
		if err == syscall.EXDEV {
			// TODO: The file cannot be renamed across file system.
			// This is a simple matter of copying the file across when
			// this happens. For now, we just return not implemented.
			err = Enotimpl
		}
		// Restore the original paths on failure.
		file.read_path = orig_read_path
		file.write_path = orig_write_path
		return err
	}
	// We've moved this file.
	// It didn't exist a moment ago, but it does now.
	file.write_exists = true
	file.write_deleted = false
	// Update our fids.
	// NOTE(review): only fs.filesLock is visibly held here, not a
	// per-file write lock -- confirm callers serialize access.
	for _, fid := range fs.Pool {
		if fid.file == file {
			fid.Path = new_path
		} else if other_file != nil && fid.file == other_file {
			// Since we hold at least one reference
			// to other_file, this should never trigger
			// a full cleanup of other_file. It's safe
			// to call DecRef here while locking the lock.
			file.IncRef(fs)
			fid.file = file
			other_file.DecRef(fs, "")
		}
	}
	// Perform the swaperoo.
	fs.files[new_path] = file
	delete(fs.files, orig_path)
	// Ensure the original file is deleted.
	// This is done at the very end, since there's
	// really nothing we can do at this point. We
	// even explicitly ignore the result. Ugh.
	setdelattr(orig_write_path)
	return nil
}
// lockWrite ensures file.write_fd is open, creating or copying-up
// the backing file as needed. On success it returns with the
// file's read lock held (release via unlock()); the tail recursion
// re-enters the fast path to acquire that RLock.
func (file *File) lockWrite(fs *Fs) error {
	file.RWMutex.RLock()
	if file.write_fd != -1 {
		// Fast path: already open. Note we return holding RLock.
		fs.touchLru(file)
		return nil
	}
	// Escalate.
	file.RWMutex.RUnlock()
	file.RWMutex.Lock()
	if file.write_fd != -1 {
		// Race caught.
		file.RWMutex.Unlock()
		return file.lockWrite(fs)
	}
	// NOTE: All files are opened CLOEXEC.
	mode := syscall.O_RDWR | syscall.O_CLOEXEC
	var perm uint32
	// Make sure the file exists.
	if !file.write_exists || file.write_deleted {
		// NOTE: It would be really great to handle
		// all these writes as simply overlays and keep
		// a map of all the sparse holes in the file.
		// See above with the write_map, for now I'll
		// leave this for future work.
		if file.write_deleted {
			// Remove the file.
			file.unlink()
			mode |= syscall.O_RDWR | syscall.O_CREAT
			perm |= syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR
			file.write_deleted = false
			file.write_exists = true
		} else if !file.read_exists {
			// This is a fresh file.
			// It doesn't exist in any read layer.
			mode |= syscall.O_CREAT | syscall.O_RDWR
			perm |= syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR
			file.write_exists = true
		} else {
			// Not deleted && read_exists.
			// We grab a memory map and write out
			// a copy of the new file. This could
			// be made much more efficient (per above).
			data, err := ioutil.ReadFile(file.read_path)
			if err != nil {
				file.RWMutex.Unlock()
				return err
			}
			perm |= syscall.S_IRUSR | syscall.S_IWUSR | syscall.S_IXUSR
			err = ioutil.WriteFile(file.write_path, data, os.FileMode(perm))
			if err != nil {
				file.RWMutex.Unlock()
				return err
			}
			file.write_exists = true
		}
	}
	new_fd, err := syscall.Open(file.write_path, mode, perm)
	if err != nil {
		file.RWMutex.Unlock()
		return err
	}
	// Open successful.
	file.write_fd = new_fd
	// Flush the current readFD.
	if file.read_fd != -1 {
		syscall.Close(file.read_fd)
		file.read_fd = -1
	}
	// Retry (for the RLock).
	file.RWMutex.Unlock()
	return file.lockWrite(fs)
}
// lockRead ensures file.read_fd is open, reusing the write
// descriptor when available. On success it returns with the file's
// read lock held (release via unlock()).
func (file *File) lockRead(fs *Fs) error {
	file.RWMutex.RLock()
	if file.read_fd != -1 {
		// Fast path: already open. Note we return holding RLock.
		fs.touchLru(file)
		return nil
	}
	// Escalate.
	file.RWMutex.RUnlock()
	file.RWMutex.Lock()
	if file.read_fd != -1 {
		// Race caught.
		file.RWMutex.Unlock()
		return file.lockRead(fs)
	}
	if file.write_fd != -1 {
		// Use the same Fd.
		// The close logic handles this.
		file.read_fd = file.write_fd
		file.RWMutex.Unlock()
		return file.lockRead(fs)
	}
	// Okay, no write available.
	// Let's open our read path.
	new_fd, err := syscall.Open(file.read_path, syscall.O_RDONLY, 0)
	if err != nil {
		file.RWMutex.Unlock()
		return err
	}
	// Open successful.
	file.read_fd = new_fd
	// Retry (for the RLock).
	file.RWMutex.Unlock()
	return file.lockRead(fs)
}
// flush closes any open descriptors; the read and write fds may
// alias each other, so the shared case is closed once.
func (file *File) flush() {
	file.RWMutex.Lock()
	defer file.RWMutex.Unlock()
	// Close the file if still opened.
	if file.read_fd != -1 {
		syscall.Close(file.read_fd)
	}
	// Close the write_fd if it's open
	// (and it's unique).
	if file.write_fd != -1 &&
		file.write_fd != file.read_fd {
		syscall.Close(file.write_fd)
	}
	file.read_fd = -1
	file.write_fd = -1
}
// dir builds a plan9 Dir (stat) structure for the file, preferring
// the write layer; dir+symlink combinations collapse to a plain
// symlink since plan9 cannot express both.
func (file *File) dir(
	name string,
	locked bool) (*Dir, error) {
	if locked {
		file.RWMutex.RLock()
	}
	var stat_path string
	if file.write_exists {
		stat_path = file.write_path
	} else {
		stat_path = file.read_path
	}
	var stat syscall.Stat_t
	err := syscall.Lstat(stat_path, &stat)
	if locked {
		file.RWMutex.RUnlock()
	}
	if err != nil {
		return nil, err
	}
	dir := new(Dir)
	dir.Type = 0 // Set below.
	dir.Mode = 0 // Set below.
	dir.Qid = file.Qid
	dir.Dev = uint32(stat.Dev)
	atim, _ := stat.Atim.Unix()
	dir.Atime = uint32(atim)
	mtim, _ := stat.Mtim.Unix()
	dir.Mtime = uint32(mtim)
	if stat.Mode&syscall.S_IFDIR != 0 {
		dir.Length = 0
	} else {
		dir.Length = uint64(stat.Size)
	}
	dir.Name = name
	dir.Uid = "root"
	dir.Gid = "root"
	dir.Muid = "root"
	dir.Ext = ""
	dir.Uidnum = stat.Uid
	dir.Gidnum = stat.Gid
	dir.Muidnum = stat.Uid
	// Translate Unix type/mode bits into plan9 bits.
	for mask, type_bit := range ModeToP9Type {
		if stat.Mode&mask == mask {
			dir.Type = dir.Type | type_bit
		}
	}
	for mask, mode_bit := range ModeToP9Mode {
		if stat.Mode&mask == mask {
			dir.Mode = dir.Mode | mode_bit
		}
	}
	// Read our symlink if available.
	if dir.Type&QTSYMLINK != 0 || dir.Mode&DMSYMLINK != 0 {
		dir.Ext, err = os.Readlink(stat_path)
		if err != nil {
			return nil, err
		}
	}
	// Plan9 doesn't handle dir+symlink.
	// We return just a raw symlink.
	if dir.Type&QTDIR != 0 && dir.Type&QTSYMLINK != 0 {
		dir.Type &= ^uint16(QTDIR)
	}
	if dir.Mode&DMDIR != 0 && dir.Mode&DMSYMLINK != 0 {
		dir.Mode &= ^uint32(DMDIR)
	}
	return dir, nil
}
// children merges directory listings from all matching write and
// read layers, deduplicates by name, and stats each visible child.
func (file *File) children(fs *Fs, dirpath string) ([]*Dir, error) {
	child_set := make(map[string]bool)
	// Collect base names from one backing directory; glob errors
	// are deliberately ignored (a missing layer adds nothing).
	gather_dir := func(realdir string) {
		files, err := filepath.Glob(path.Join(realdir, "*"))
		if err != nil {
			return
		}
		for _, file := range files {
			// This file exists somewhere.
			child_set[path.Base(file)] = true
		}
	}
	// We need to collect all possible matching paths.
	// This has the potential to be a very long list.
	for prefix, backing_path := range fs.Write {
		if strings.HasPrefix(dirpath, prefix) {
			gather_dir(path.Join(backing_path, dirpath[len(prefix):]))
		}
	}
	for prefix, backing_paths := range fs.Read {
		if strings.HasPrefix(dirpath, prefix) {
			for _, backing_path := range backing_paths {
				gather_dir(path.Join(backing_path, dirpath[len(prefix):]))
			}
		}
	}
	// We stat each of these files.
	results := make([]*Dir, 0, len(child_set))
	for name, _ := range child_set {
		// Find this child.
		child_path := path.Join(dirpath, name)
		child, err := fs.lookup(child_path)
		if err != nil {
			if child != nil {
				child.DecRef(fs, child_path)
			}
			return nil, err
		}
		// Deleted?
		if !child.exists() {
			child.DecRef(fs, child_path)
			continue
		}
		// Get the stat.
		child_dir, err := child.dir(name, true)
		child.DecRef(fs, child_path)
		if err != nil {
			return nil, err
		}
		results = append(results, child_dir)
	}
	// We're good.
	return results, nil
}
// unlock releases the file's read lock (counterpart to an earlier RLock).
func (file *File) unlock() {
	file.RWMutex.RUnlock()
}
// IncRef adds one reference to this file. The fs.filesLock read lock
// is held across the increment so that it cannot interleave with the
// exclusive-locked removal check in DecRef.
func (file *File) IncRef(fs *Fs) {
	fs.filesLock.RLock()
	atomic.AddInt32(&file.refs, 1)
	fs.filesLock.RUnlock()
}
// DecRef atomically drops one reference to this file. When the count
// reaches zero the file is re-checked under the exclusive filesLock
// (to catch a concurrent IncRef), removed from the fs.files map, and
// scheduled for asynchronous LRU removal, which closes any open file
// descriptors.
func (file *File) DecRef(fs *Fs, path string) {
	new_refs := atomic.AddInt32(&file.refs, -1)
	if new_refs == 0 {
		fs.filesLock.Lock()
		// Re-check with an atomic load: refs is updated via
		// atomic.AddInt32 (possibly without this lock held by other
		// DecRef callers), so a plain read here would be a data race.
		if atomic.LoadInt32(&file.refs) != 0 {
			// Race condition caught: the file was revived between our
			// decrement and taking the lock.
			fs.filesLock.Unlock()
			return
		}
		// Remove this file.
		if path != "" {
			delete(fs.files, path)
		}
		fs.filesLock.Unlock()
		// Ensure that file is removed from the LRU.
		// This will be done asynchronously, and as a
		// result all file descriptors will be closed.
		go fs.removeLru(file, true)
	}
}
// fillType stats the file (via dir with locked=false, i.e. without
// taking the file's RWMutex) and caches the resulting plan9 type bits
// in file.Qid.Type.
func (file *File) fillType(path string) error {
	// Figure out the type.
	dir, err := file.dir(path, false)
	if err != nil {
		return err
	}
	// Get file type.
	file.Qid.Type = uint8(dir.Type)
	return nil
}
func (fs *Fs) NewFile(path string) (*File, error) {
file := new(File)
file.refs = 1
// Figure out the paths.
file.findPaths(fs, path)
// Clear our LRU index.
file.index = -1
// Reset our FDs.
file.read_fd = -1
file.write_fd = -1
file.Qid.Version = 0
file.Qid.Path = atomic.AddUint64(&fs.Fileid, 1)
if file.exists() {
return file, file.fillType(path)
}
return file, nil
} | return err
}
}
| random_line_split |
controller.rs | mod cli;
mod collect;
mod detector;
mod init;
mod reconcile_queue;
mod reconciler;
mod supervisor;
mod validate_api_server;
pub use self::{
collect::Collect,
reconciler::{ReconcileContext, ReconcileStatus},
};
use self::collect::ControllerDescriptionCollector;
use crate::controller::reconcile_queue::QueueConfig;
use anyhow::Context as _;
use async_trait::async_trait;
use futures::future::FutureExt;
use k8s_openapi::{
apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
apimachinery::pkg::apis::meta::v1::OwnerReference,
};
use kube::api::{Api, ApiResource, DynamicObject, Resource, ResourceExt};
use serde::de::DeserializeOwned;
use std::{sync::Arc, time::Duration};
/// Type, wrapping several controllers and providing all
/// required infrastructure for them to work.
pub struct ControllerManager {
    /// Controllers registered via `add`; `run` filters this list down
    /// to the names enabled on the command line.
    controllers: Vec<DynController>,
}
impl ControllerManager {
    /// Creates a manager with no controllers registered.
    pub fn new() -> Self {
        ControllerManager {
            controllers: Vec::new(),
        }
    }
    /// Adds a controller
    /// # Panics
    /// Panics if `<C as Controller>::describe` is incorrect
    pub fn add<C: Controller>(&mut self) {
        // Let the controller report its metadata (name, CRD, watches)
        // into a collector, then pair it with a type-erased vtable.
        let collector = ControllerDescriptionCollector::new();
        C::describe(&collector);
        let meta = collector.finalize();
        let vtable = ControllerVtable::of::<C>();
        let dc = DynController { meta, vtable };
        // Fail fast if the declared CRD disagrees with the ApiResource.
        dc.validate();
        self.controllers.push(dc);
    }
    /// Controller manager entry point.
    ///
    /// This function parses command line arguments,
    /// launches web server and serves to completion
    #[tracing::instrument(skip(self))]
    pub async fn main(self) -> anyhow::Result<()> {
        let args: cli::Args = clap::Clap::parse();
        tracing::info!(args = ?args, "parsed command-line arguments");
        match args {
            cli::Args::List => self.print_list(),
            cli::Args::Run(args) => self.run(args).await?,
            cli::Args::PrintCustomResources => self.crd_print(),
            cli::Args::ApplyCustomResources => self.crd_apply().await?,
        }
        Ok(())
    }
    /// Prints every registered controller, with its custom resource
    /// name/version when one is declared.
    fn print_list(self) {
        println!("Supported controllers:");
        for c in &self.controllers {
            let crd_info = if let Some(crd) = c.meta.crd.as_ref() {
                format!(
                    "(Custom Resource: {}/{})",
                    crd.name(),
                    (c.vtable.api_resource)().version
                )
            } else {
                "".to_string()
            };
            println!("\t{}{}", c.meta.name, crd_info);
        }
    }
    /// Dumps every declared CRD to stdout as YAML.
    fn crd_print(self) {
        for c in &self.controllers {
            if let Some(crd) = c.meta.crd.as_ref() {
                let crd = serde_yaml::to_string(&crd).expect("failed to serialize CRD");
                print!("{}", crd);
            }
        }
    }
    /// Creates or updates each declared CRD in the cluster. An existing
    /// CRD is replaced only when the new definition is a safe superset.
    async fn crd_apply(self) -> anyhow::Result<()> {
        let k = kube::Client::try_default()
            .await
            .context("failed to connect to cluster")?;
        let crd_api = Api::<CustomResourceDefinition>::all(k);
        for c in &self.controllers {
            if let Some(crd) = c.meta.crd.as_ref() {
                println!("Reconciling crd {}", crd.name());
                match crd_api.get(&crd.name()).await {
                    Ok(existing) => {
                        // Refuse the update when replacing could break
                        // existing objects.
                        let report = crate::crds::is_subset_of(&existing, &crd, false);
                        report
                            .into_result()
                            .context("Error: can not safely replace existing crd")?;
                        println!("Updating crd {}", crd.name());
                        let mut crd = crd.clone();
                        crd.meta_mut().resource_version = existing.resource_version();
                        crd_api
                            .replace(&crd.name(), &Default::default(), &crd)
                            .await?;
                    }
                    Err(err)
                        if crate::errors::classify_kube(&err)
                            == crate::errors::ErrorClass::NotFound =>
                    {
                        println!("Creating crd {}", crd.name());
                        crd_api.create(&Default::default(), &crd).await?;
                    }
                    Err(err) => {
                        return Err(err).context("failed to get existing CRD");
                    }
                }
            }
        }
        Ok(())
    }
    /// Runs the selected controllers to completion: connects to the
    /// cluster, starts the API-server version skew checker, supervises
    /// one task per controller, and shuts everything down on Ctrl-C.
    #[tracing::instrument(skip(self, args))]
    async fn run(self, args: cli::Run) -> anyhow::Result<()> {
        let enabled_controllers = {
            let controllers = self
                .controllers
                .iter()
                .map(|c| c.meta.name.clone())
                .collect::<Vec<_>>();
            init::process_controller_filters(&controllers, &args.controllers)?
        };
        tracing::info!(enabled_controllers = ?enabled_controllers, "Selected controllers to run");
        tracing::info!("Connecting to Kubernetes");
        let client = kube::Client::try_default()
            .await
            .context("Failed to connect to kubernetes API")?;
        tracing::info!("Starting version skew checker");
        let version_skew_check_fut = {
            let client = client.clone();
            async move {
                loop {
                    // Re-validate hourly on success; retry quickly on failure.
                    // (Restores the match arm lost to dataset extraction at
                    // the original L30543; content matches the intact copies
                    // of this file elsewhere in SOURCE.)
                    let sleep_timeout =
                        match validate_api_server::check_api_server_version(&client).await {
                            Ok(_) => 3600,
                            Err(e) => {
                                tracing::warn!("Failed to validate api server version: {:#}", e);
                                10
                            }
                        };
                    tokio::time::sleep(Duration::from_secs(sleep_timeout)).await;
                }
            }
        };
        tokio::task::spawn(version_skew_check_fut);
        //tracing::info!("Discovering cluster APIs");
        //let discovery = Arc::new(Discovery::new(&client).await?);
        let watcher_set = crate::multiwatch::WatcherSet::new(client.clone());
        let watcher_set = Arc::new(watcher_set);
        let mut supervised = Vec::new();
        for controller in enabled_controllers {
            let dc = self
                .controllers
                .iter()
                .find(|c| c.meta.name == controller)
                .unwrap()
                .clone();
            let cfg = QueueConfig {
                throttle: Duration::from_secs(3),
            };
            let ctl = supervisor::supervise(dc, watcher_set.clone(), client.clone(), cfg);
            supervised.push(ctl);
        }
        {
            // Fan the Ctrl-C signal out to every supervisor via its
            // cancellation token.
            let mut cancel = Vec::new();
            for ctl in &supervised {
                cancel.push(ctl.get_cancellation_token());
            }
            tokio::task::spawn(async move {
                tracing::info!("Waiting for termination signal");
                match tokio::signal::ctrl_c().await {
                    Ok(_) => {
                        tracing::info!("Got termination signal");
                        for c in cancel {
                            c.cancel();
                        }
                    }
                    Err(e) => {
                        tracing::warn!("Failed to wait for termination signal: {:#}", e);
                    }
                }
            });
        }
        tracing::info!("Waiting for supervisors exit");
        for ctl in supervised {
            ctl.wait().await;
        }
        Ok(())
    }
}
/// Description of a controller
#[derive(Clone)]
struct ControllerDescription {
    /// Controller name, used for CLI filtering and lookup.
    name: String,
    /// CRD managed by this controller, if it declares one.
    crd: Option<CustomResourceDefinition>,
    /// Resources this controller watches.
    watches: Vec<ApiResource>,
}
/// A registered controller: static description plus type-erased entry
/// points, so heterogeneous controllers can share one `Vec`.
#[derive(Clone)]
pub(crate) struct DynController {
    /// Static metadata (name, CRD, watched resources).
    meta: ControllerDescription,
    /// Type-erased function table for the concrete controller.
    vtable: ControllerVtable,
}
impl DynController {
    /// Sanity-checks that the declared CRD (when present) matches the
    /// ApiResource reported by the vtable: plural, kind, group, and at
    /// least one matching version must agree. Panics on mismatch.
    fn validate(&self) {
        if let Some(crd) = self.meta.crd.as_ref() {
            let res = (self.vtable.api_resource)();
            assert_eq!(crd.spec.names.plural, res.plural);
            assert_eq!(crd.spec.names.kind, res.kind);
            assert_eq!(crd.spec.group, res.group);
            let has_version = crd.spec.versions.iter().any(|ver| ver.name == res.version);
            assert!(has_version);
        }
    }
}
/// Type-erased function table bridging the generic `Controller` trait
/// to runtime dispatch.
#[derive(Clone)]
struct ControllerVtable {
    /// Returns the ApiResource describing the controller's resource type.
    api_resource: fn() -> ApiResource,
    /// Downcasts a DynamicObject to the concrete resource and reconciles it.
    reconcile: fn(
        DynamicObject,
        cx: &mut ReconcileContext,
    ) -> futures::future::BoxFuture<'_, anyhow::Result<ReconcileStatus>>,
}
impl ControllerVtable {
    /// Builds the type-erased vtable for controller `C`.
    fn of<C: Controller>() -> Self {
        ControllerVtable {
            api_resource: || ApiResource::erase::<C::Resource>(&C::resource_dynamic_type()),
            reconcile: |obj, cx| {
                async move {
                    // Downcast the DynamicObject into the controller's
                    // concrete resource type by round-tripping through a
                    // serde_json::Value (same approach as
                    // `downcast_dynamic_object`), propagating errors
                    // instead of panicking via unwrap.
                    let obj = serde_json::to_value(&obj)
                        .context("failed to serialize DynamicObject")?;
                    let obj =
                        serde_json::from_value(obj).context("failed to parse DynamicObject")?;
                    C::reconcile(cx, obj).await
                }
                .boxed()
            },
        }
    }
}
/// Builds an OwnerReference pointing at `owner`, marked as the managing
/// controller (`controller: Some(true)`).
///
/// # Panics
/// Panics if `owner` has no uid, i.e. was never persisted to the cluster.
pub fn make_owner_reference<Owner: Resource>(
    owner: &Owner,
    dt: &Owner::DynamicType,
) -> OwnerReference {
    OwnerReference {
        api_version: Owner::api_version(dt).to_string(),
        block_owner_deletion: None,
        controller: Some(true),
        kind: Owner::kind(dt).to_string(),
        name: owner.name(),
        uid: owner.uid().expect("missing uid on persisted object"),
    }
}
/// Converts a type-erased `DynamicObject` into the concrete resource
/// type `K` by round-tripping through a `serde_json::Value`.
pub fn downcast_dynamic_object<K: Resource<DynamicType = ()> + DeserializeOwned>(
    obj: &DynamicObject,
) -> anyhow::Result<K> {
    let value = serde_json::to_value(obj)?;
    Ok(serde_json::from_value(value)?)
}
/// Trait, implemented by a controller
#[async_trait]
pub trait Controller {
    /// Resource which manages the controller behavior
    /// (e.g. Deployment for deployment controller)
    type Resource: Resource + DeserializeOwned + Send;
    /// Additional data for dynamic types
    fn resource_dynamic_type() -> <Self::Resource as Resource>::DynamicType;
    /// Reports some information to given collector
    /// (controller name, CRD, watched resources).
    fn describe<C: Collect>(collector: &C);
    /// Reconciles single object, returning the resulting status.
    async fn reconcile(
        cx: &mut ReconcileContext,
        resource: Self::Resource,
    ) -> anyhow::Result<ReconcileStatus>;
}
| {
tracing::warn!("Failed to validate api server version: {:#}", e);
10
} | conditional_block |
controller.rs | mod cli;
mod collect;
mod detector;
mod init;
mod reconcile_queue;
mod reconciler;
mod supervisor;
mod validate_api_server;
pub use self::{
collect::Collect,
reconciler::{ReconcileContext, ReconcileStatus},
};
use self::collect::ControllerDescriptionCollector;
use crate::controller::reconcile_queue::QueueConfig;
use anyhow::Context as _;
use async_trait::async_trait;
use futures::future::FutureExt;
use k8s_openapi::{
apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
apimachinery::pkg::apis::meta::v1::OwnerReference,
};
use kube::api::{Api, ApiResource, DynamicObject, Resource, ResourceExt};
use serde::de::DeserializeOwned;
use std::{sync::Arc, time::Duration};
/// Type, wrapping several controllers and providing all
/// required infrastructure for them to work.
pub struct ControllerManager {
    /// Controllers registered via `add`; `run` filters this list down
    /// to the names enabled on the command line.
    controllers: Vec<DynController>,
}
impl ControllerManager {
    /// Creates a manager with no controllers registered.
    pub fn new() -> Self {
        ControllerManager {
            controllers: Vec::new(),
        }
    }
    /// Adds a controller
    /// # Panics
    /// Panics if `<C as Controller>::describe` is incorrect
    pub fn add<C: Controller>(&mut self) {
        // Let the controller report its metadata (name, CRD, watches)
        // into a collector, then pair it with a type-erased vtable.
        let collector = ControllerDescriptionCollector::new();
        C::describe(&collector);
        let meta = collector.finalize();
        let vtable = ControllerVtable::of::<C>();
        let dc = DynController { meta, vtable };
        // Fail fast if the declared CRD disagrees with the ApiResource.
        dc.validate();
        self.controllers.push(dc);
    }
    /// Controller manager entry point.
    ///
    /// This function parses command line arguments,
    /// launches web server and serves to completion
    #[tracing::instrument(skip(self))]
    pub async fn main(self) -> anyhow::Result<()> {
        let args: cli::Args = clap::Clap::parse();
        tracing::info!(args = ?args, "parsed command-line arguments");
        match args {
            cli::Args::List => self.print_list(),
            cli::Args::Run(args) => self.run(args).await?,
            cli::Args::PrintCustomResources => self.crd_print(),
            cli::Args::ApplyCustomResources => self.crd_apply().await?,
        }
        Ok(())
    }
    /// Prints every registered controller, with its custom resource
    /// name/version when one is declared.
    fn print_list(self) {
        println!("Supported controllers:");
        for c in &self.controllers {
            let crd_info = if let Some(crd) = c.meta.crd.as_ref() {
                format!(
                    "(Custom Resource: {}/{})",
                    crd.name(),
                    (c.vtable.api_resource)().version
                )
            } else {
                "".to_string()
            };
            println!("\t{}{}", c.meta.name, crd_info);
        }
    }
    /// Dumps every declared CRD to stdout as YAML.
    fn crd_print(self) {
        for c in &self.controllers {
            if let Some(crd) = c.meta.crd.as_ref() {
                let crd = serde_yaml::to_string(&crd).expect("failed to serialize CRD");
                print!("{}", crd);
            }
        }
    }
    /// Creates or updates each declared CRD in the cluster. An existing
    /// CRD is replaced only when the new definition is a safe superset.
    async fn crd_apply(self) -> anyhow::Result<()> {
        let k = kube::Client::try_default()
            .await
            .context("failed to connect to cluster")?;
        let crd_api = Api::<CustomResourceDefinition>::all(k);
        for c in &self.controllers {
            if let Some(crd) = c.meta.crd.as_ref() {
                println!("Reconciling crd {}", crd.name());
                match crd_api.get(&crd.name()).await {
                    Ok(existing) => {
                        // Refuse the update when replacing could break
                        // existing objects.
                        let report = crate::crds::is_subset_of(&existing, &crd, false);
                        report
                            .into_result()
                            .context("Error: can not safely replace existing crd")?;
                        println!("Updating crd {}", crd.name());
                        let mut crd = crd.clone();
                        crd.meta_mut().resource_version = existing.resource_version();
                        crd_api
                            .replace(&crd.name(), &Default::default(), &crd)
                            .await?;
                    }
                    Err(err)
                        if crate::errors::classify_kube(&err)
                            == crate::errors::ErrorClass::NotFound =>
                    {
                        println!("Creating crd {}", crd.name());
                        crd_api.create(&Default::default(), &crd).await?;
                    }
                    Err(err) => {
                        return Err(err).context("failed to get existing CRD");
                    }
                }
            }
        }
        Ok(())
    }
    /// Runs the selected controllers to completion: connects to the
    /// cluster, starts the API-server version skew checker, supervises
    /// one task per controller, and shuts everything down on Ctrl-C.
    #[tracing::instrument(skip(self, args))]
    async fn run(self, args: cli::Run) -> anyhow::Result<()> {
        let enabled_controllers = {
            let controllers = self
                .controllers
                .iter()
                .map(|c| c.meta.name.clone())
                .collect::<Vec<_>>();
            init::process_controller_filters(&controllers, &args.controllers)?
        };
        tracing::info!(enabled_controllers = ?enabled_controllers, "Selected controllers to run");
        tracing::info!("Connecting to Kubernetes");
        let client = kube::Client::try_default()
            .await
            .context("Failed to connect to kubernetes API")?;
        tracing::info!("Starting version skew checker");
        let version_skew_check_fut = {
            let client = client.clone();
            async move {
                loop {
                    // Re-validate hourly on success; retry quickly on failure.
                    let sleep_timeout =
                        match validate_api_server::check_api_server_version(&client).await {
                            Ok(_) => 3600,
                            Err(e) => {
                                tracing::warn!("Failed to validate api server version: {:#}", e);
                                10
                            }
                        };
                    tokio::time::sleep(Duration::from_secs(sleep_timeout)).await;
                }
            }
        };
        tokio::task::spawn(version_skew_check_fut);
        //tracing::info!("Discovering cluster APIs");
        //let discovery = Arc::new(Discovery::new(&client).await?);
        let watcher_set = crate::multiwatch::WatcherSet::new(client.clone());
        let watcher_set = Arc::new(watcher_set);
        let mut supervised = Vec::new();
        for controller in enabled_controllers {
            let dc = self
                .controllers
                .iter()
                .find(|c| c.meta.name == controller)
                .unwrap()
                .clone();
            let cfg = QueueConfig {
                throttle: Duration::from_secs(3),
            };
            let ctl = supervisor::supervise(dc, watcher_set.clone(), client.clone(), cfg);
            supervised.push(ctl);
        }
        {
            // Fan the Ctrl-C signal out to every supervisor via its
            // cancellation token.
            let mut cancel = Vec::new();
            for ctl in &supervised {
                cancel.push(ctl.get_cancellation_token());
            }
            tokio::task::spawn(async move {
                tracing::info!("Waiting for termination signal");
                match tokio::signal::ctrl_c().await {
                    Ok(_) => {
                        tracing::info!("Got termination signal");
                        for c in cancel {
                            c.cancel();
                        }
                    }
                    Err(e) => {
                        tracing::warn!("Failed to wait for termination signal: {:#}", e);
                    }
                }
            });
        }
        tracing::info!("Waiting for supervisors exit");
        for ctl in supervised {
            ctl.wait().await;
        }
        Ok(())
    }
}
/// Description of a controller
#[derive(Clone)]
struct ControllerDescription {
    /// Controller name, used for CLI filtering and lookup.
    name: String,
    /// CRD managed by this controller, if it declares one.
    crd: Option<CustomResourceDefinition>,
    /// Resources this controller watches.
    watches: Vec<ApiResource>,
}
/// A registered controller: static description plus type-erased entry
/// points, so heterogeneous controllers can share one `Vec`.
#[derive(Clone)]
pub(crate) struct DynController {
    /// Static metadata (name, CRD, watched resources).
    meta: ControllerDescription,
    /// Type-erased function table for the concrete controller.
    vtable: ControllerVtable,
}
impl DynController {
fn | (&self) {
if let Some(crd) = self.meta.crd.as_ref() {
let res = (self.vtable.api_resource)();
assert_eq!(crd.spec.names.plural, res.plural);
assert_eq!(crd.spec.names.kind, res.kind);
assert_eq!(crd.spec.group, res.group);
let has_version = crd.spec.versions.iter().any(|ver| ver.name == res.version);
assert!(has_version);
}
}
}
/// Type-erased function table bridging the generic `Controller` trait
/// to runtime dispatch.
#[derive(Clone)]
struct ControllerVtable {
    /// Returns the ApiResource describing the controller's resource type.
    api_resource: fn() -> ApiResource,
    /// Downcasts a DynamicObject to the concrete resource and reconciles it.
    reconcile: fn(
        DynamicObject,
        cx: &mut ReconcileContext,
    ) -> futures::future::BoxFuture<'_, anyhow::Result<ReconcileStatus>>,
}
impl ControllerVtable {
    /// Builds the type-erased vtable for controller `C`.
    fn of<C: Controller>() -> Self {
        ControllerVtable {
            api_resource: || ApiResource::erase::<C::Resource>(&C::resource_dynamic_type()),
            reconcile: |obj, cx| {
                async move {
                    // Downcast the DynamicObject into the controller's
                    // concrete resource type by round-tripping through a
                    // serde_json::Value (same approach as
                    // `downcast_dynamic_object`), propagating errors
                    // instead of panicking via unwrap.
                    let obj = serde_json::to_value(&obj)
                        .context("failed to serialize DynamicObject")?;
                    let obj =
                        serde_json::from_value(obj).context("failed to parse DynamicObject")?;
                    C::reconcile(cx, obj).await
                }
                .boxed()
            },
        }
    }
}
/// Builds an OwnerReference pointing at `owner`, marked as the managing
/// controller (`controller: Some(true)`).
///
/// # Panics
/// Panics if `owner` has no uid, i.e. was never persisted to the cluster.
pub fn make_owner_reference<Owner: Resource>(
    owner: &Owner,
    dt: &Owner::DynamicType,
) -> OwnerReference {
    OwnerReference {
        api_version: Owner::api_version(dt).to_string(),
        block_owner_deletion: None,
        controller: Some(true),
        kind: Owner::kind(dt).to_string(),
        name: owner.name(),
        uid: owner.uid().expect("missing uid on persisted object"),
    }
}
/// Converts a type-erased `DynamicObject` into the concrete resource
/// type `K` by round-tripping through a `serde_json::Value`.
pub fn downcast_dynamic_object<K: Resource<DynamicType = ()> + DeserializeOwned>(
    obj: &DynamicObject,
) -> anyhow::Result<K> {
    let value = serde_json::to_value(obj)?;
    Ok(serde_json::from_value(value)?)
}
/// Trait, implemented by a controller
#[async_trait]
pub trait Controller {
    /// Resource which manages the controller behavior
    /// (e.g. Deployment for deployment controller)
    type Resource: Resource + DeserializeOwned + Send;
    /// Additional data for dynamic types
    fn resource_dynamic_type() -> <Self::Resource as Resource>::DynamicType;
    /// Reports some information to given collector
    /// (controller name, CRD, watched resources).
    fn describe<C: Collect>(collector: &C);
    /// Reconciles single object, returning the resulting status.
    async fn reconcile(
        cx: &mut ReconcileContext,
        resource: Self::Resource,
    ) -> anyhow::Result<ReconcileStatus>;
}
| validate | identifier_name |
controller.rs | mod cli;
mod collect;
mod detector;
mod init;
mod reconcile_queue;
mod reconciler;
mod supervisor;
mod validate_api_server;
pub use self::{
collect::Collect,
reconciler::{ReconcileContext, ReconcileStatus},
};
use self::collect::ControllerDescriptionCollector;
use crate::controller::reconcile_queue::QueueConfig;
use anyhow::Context as _;
use async_trait::async_trait;
use futures::future::FutureExt;
use k8s_openapi::{
apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
apimachinery::pkg::apis::meta::v1::OwnerReference,
};
use kube::api::{Api, ApiResource, DynamicObject, Resource, ResourceExt};
use serde::de::DeserializeOwned;
use std::{sync::Arc, time::Duration};
/// Type, wrapping several controllers and providing all
/// required infrastructure for them to work.
pub struct ControllerManager {
    /// Controllers registered via `add`; `run` filters this list down
    /// to the names enabled on the command line.
    controllers: Vec<DynController>,
}
impl ControllerManager {
    /// Creates a manager with no controllers registered.
    pub fn new() -> Self {
        ControllerManager {
            controllers: Vec::new(),
        }
    }
    /// Adds a controller
    /// # Panics
    /// Panics if `<C as Controller>::describe` is incorrect
    pub fn add<C: Controller>(&mut self) {
        // Let the controller report its metadata (name, CRD, watches)
        // into a collector, then pair it with a type-erased vtable.
        let collector = ControllerDescriptionCollector::new();
        C::describe(&collector);
        let meta = collector.finalize();
        let vtable = ControllerVtable::of::<C>();
        let dc = DynController { meta, vtable };
        // Fail fast if the declared CRD disagrees with the ApiResource.
        dc.validate();
        self.controllers.push(dc);
    }
    /// Controller manager entry point.
    ///
    /// This function parses command line arguments,
    /// launches web server and serves to completion
    #[tracing::instrument(skip(self))]
    pub async fn main(self) -> anyhow::Result<()> {
        let args: cli::Args = clap::Clap::parse();
        tracing::info!(args = ?args, "parsed command-line arguments");
        match args {
            cli::Args::List => self.print_list(),
            cli::Args::Run(args) => self.run(args).await?,
            cli::Args::PrintCustomResources => self.crd_print(),
            cli::Args::ApplyCustomResources => self.crd_apply().await?,
        }
        Ok(())
    }
    /// Prints every registered controller, with its custom resource
    /// name/version when one is declared.
    fn print_list(self) {
        println!("Supported controllers:");
        for c in &self.controllers {
            let crd_info = if let Some(crd) = c.meta.crd.as_ref() {
                format!(
                    "(Custom Resource: {}/{})",
                    crd.name(),
                    (c.vtable.api_resource)().version
                )
            } else {
                "".to_string()
            };
            println!("\t{}{}", c.meta.name, crd_info);
        }
    }
    /// Dumps every declared CRD to stdout as YAML.
    fn crd_print(self) {
        for c in &self.controllers {
            if let Some(crd) = c.meta.crd.as_ref() {
                let crd = serde_yaml::to_string(&crd).expect("failed to serialize CRD");
                print!("{}", crd);
            }
        }
    }
    /// Creates or updates each declared CRD in the cluster. An existing
    /// CRD is replaced only when the new definition is a safe superset.
    async fn crd_apply(self) -> anyhow::Result<()> {
        let k = kube::Client::try_default()
            .await
            .context("failed to connect to cluster")?;
        let crd_api = Api::<CustomResourceDefinition>::all(k);
        for c in &self.controllers {
            if let Some(crd) = c.meta.crd.as_ref() {
                println!("Reconciling crd {}", crd.name());
                match crd_api.get(&crd.name()).await {
                    Ok(existing) => {
                        // Refuse the update when replacing could break
                        // existing objects.
                        let report = crate::crds::is_subset_of(&existing, &crd, false);
                        report
                            .into_result()
                            .context("Error: can not safely replace existing crd")?;
                        println!("Updating crd {}", crd.name());
                        let mut crd = crd.clone();
                        crd.meta_mut().resource_version = existing.resource_version();
                        crd_api
                            .replace(&crd.name(), &Default::default(), &crd)
                            .await?;
                    }
                    Err(err)
                        if crate::errors::classify_kube(&err)
                            == crate::errors::ErrorClass::NotFound =>
                    {
                        println!("Creating crd {}", crd.name());
                        crd_api.create(&Default::default(), &crd).await?;
                    }
                    Err(err) => {
                        return Err(err).context("failed to get existing CRD");
                    }
                }
            }
        }
        Ok(())
    }
    /// Runs the selected controllers to completion: connects to the
    /// cluster, starts the API-server version skew checker, supervises
    /// one task per controller, and shuts everything down on Ctrl-C.
    ///
    /// (The supervisor wait loop near the end was corrupted by a
    /// dataset row split; restored to match the intact copies of this
    /// file.)
    #[tracing::instrument(skip(self, args))]
    async fn run(self, args: cli::Run) -> anyhow::Result<()> {
        let enabled_controllers = {
            let controllers = self
                .controllers
                .iter()
                .map(|c| c.meta.name.clone())
                .collect::<Vec<_>>();
            init::process_controller_filters(&controllers, &args.controllers)?
        };
        tracing::info!(enabled_controllers = ?enabled_controllers, "Selected controllers to run");
        tracing::info!("Connecting to Kubernetes");
        let client = kube::Client::try_default()
            .await
            .context("Failed to connect to kubernetes API")?;
        tracing::info!("Starting version skew checker");
        let version_skew_check_fut = {
            let client = client.clone();
            async move {
                loop {
                    // Re-validate hourly on success; retry quickly on failure.
                    let sleep_timeout =
                        match validate_api_server::check_api_server_version(&client).await {
                            Ok(_) => 3600,
                            Err(e) => {
                                tracing::warn!("Failed to validate api server version: {:#}", e);
                                10
                            }
                        };
                    tokio::time::sleep(Duration::from_secs(sleep_timeout)).await;
                }
            }
        };
        tokio::task::spawn(version_skew_check_fut);
        //tracing::info!("Discovering cluster APIs");
        //let discovery = Arc::new(Discovery::new(&client).await?);
        let watcher_set = crate::multiwatch::WatcherSet::new(client.clone());
        let watcher_set = Arc::new(watcher_set);
        let mut supervised = Vec::new();
        for controller in enabled_controllers {
            let dc = self
                .controllers
                .iter()
                .find(|c| c.meta.name == controller)
                .unwrap()
                .clone();
            let cfg = QueueConfig {
                throttle: Duration::from_secs(3),
            };
            let ctl = supervisor::supervise(dc, watcher_set.clone(), client.clone(), cfg);
            supervised.push(ctl);
        }
        {
            // Fan the Ctrl-C signal out to every supervisor via its
            // cancellation token.
            let mut cancel = Vec::new();
            for ctl in &supervised {
                cancel.push(ctl.get_cancellation_token());
            }
            tokio::task::spawn(async move {
                tracing::info!("Waiting for termination signal");
                match tokio::signal::ctrl_c().await {
                    Ok(_) => {
                        tracing::info!("Got termination signal");
                        for c in cancel {
                            c.cancel();
                        }
                    }
                    Err(e) => {
                        tracing::warn!("Failed to wait for termination signal: {:#}", e);
                    }
                }
            });
        }
        tracing::info!("Waiting for supervisors exit");
        for ctl in supervised {
            ctl.wait().await;
        }
        Ok(())
    }
}
/// Description of a controller
#[derive(Clone)]
struct ControllerDescription {
    /// Controller name, used for CLI filtering and lookup.
    name: String,
    /// CRD managed by this controller, if it declares one.
    crd: Option<CustomResourceDefinition>,
    /// Resources this controller watches.
    watches: Vec<ApiResource>,
}
/// A registered controller: static description plus type-erased entry
/// points, so heterogeneous controllers can share one `Vec`.
#[derive(Clone)]
pub(crate) struct DynController {
    /// Static metadata (name, CRD, watched resources).
    meta: ControllerDescription,
    /// Type-erased function table for the concrete controller.
    vtable: ControllerVtable,
}
impl DynController {
    /// Sanity-checks that the declared CRD (when present) matches the
    /// ApiResource reported by the vtable: plural, kind, group, and at
    /// least one matching version must agree. Panics on mismatch.
    fn validate(&self) {
        if let Some(crd) = self.meta.crd.as_ref() {
            let res = (self.vtable.api_resource)();
            assert_eq!(crd.spec.names.plural, res.plural);
            assert_eq!(crd.spec.names.kind, res.kind);
            assert_eq!(crd.spec.group, res.group);
            let has_version = crd.spec.versions.iter().any(|ver| ver.name == res.version);
            assert!(has_version);
        }
    }
}
/// Type-erased function table bridging the generic `Controller` trait
/// to runtime dispatch.
#[derive(Clone)]
struct ControllerVtable {
    /// Returns the ApiResource describing the controller's resource type.
    api_resource: fn() -> ApiResource,
    /// Downcasts a DynamicObject to the concrete resource and reconciles it.
    reconcile: fn(
        DynamicObject,
        cx: &mut ReconcileContext,
    ) -> futures::future::BoxFuture<'_, anyhow::Result<ReconcileStatus>>,
}
impl ControllerVtable {
    /// Builds the type-erased vtable for controller `C`.
    fn of<C: Controller>() -> Self {
        ControllerVtable {
            api_resource: || ApiResource::erase::<C::Resource>(&C::resource_dynamic_type()),
            reconcile: |obj, cx| {
                async move {
                    // Downcast the DynamicObject into the controller's
                    // concrete resource type by round-tripping through a
                    // serde_json::Value (same approach as
                    // `downcast_dynamic_object`), propagating errors
                    // instead of panicking via unwrap.
                    let obj = serde_json::to_value(&obj)
                        .context("failed to serialize DynamicObject")?;
                    let obj =
                        serde_json::from_value(obj).context("failed to parse DynamicObject")?;
                    C::reconcile(cx, obj).await
                }
                .boxed()
            },
        }
    }
}
/// Builds an OwnerReference pointing at `owner`, marked as the managing
/// controller (`controller: Some(true)`).
///
/// # Panics
/// Panics if `owner` has no uid, i.e. was never persisted to the cluster.
pub fn make_owner_reference<Owner: Resource>(
    owner: &Owner,
    dt: &Owner::DynamicType,
) -> OwnerReference {
    OwnerReference {
        api_version: Owner::api_version(dt).to_string(),
        block_owner_deletion: None,
        controller: Some(true),
        kind: Owner::kind(dt).to_string(),
        name: owner.name(),
        uid: owner.uid().expect("missing uid on persisted object"),
    }
}
/// Converts a type-erased `DynamicObject` into the concrete resource
/// type `K` by round-tripping through a `serde_json::Value`.
pub fn downcast_dynamic_object<K: Resource<DynamicType = ()> + DeserializeOwned>(
    obj: &DynamicObject,
) -> anyhow::Result<K> {
    let value = serde_json::to_value(obj)?;
    Ok(serde_json::from_value(value)?)
}
/// Trait, implemented by a controller
#[async_trait]
pub trait Controller {
    /// Resource which manages the controller behavior
    /// (e.g. Deployment for deployment controller)
    type Resource: Resource + DeserializeOwned + Send;
    /// Additional data for dynamic types
    fn resource_dynamic_type() -> <Self::Resource as Resource>::DynamicType;
    /// Reports some information to given collector
    /// (controller name, CRD, watched resources).
    fn describe<C: Collect>(collector: &C);
    /// Reconciles single object, returning the resulting status.
    async fn reconcile(
        cx: &mut ReconcileContext,
        resource: Self::Resource,
    ) -> anyhow::Result<ReconcileStatus>;
}
fileopenmodeenum.go | package pathfileops
import (
"fmt"
"os"
"reflect"
"strings"
)
// mFileOpenModeIntToString - This map is used to map enumeration values
// to enumeration names stored as strings for Type FileOpenMode.
// Populated by checkInitializeMaps before first use.
var mFileOpenModeIntToString = map[int]string{}

// mFileOpenModeStringToInt - This map is used to map enumeration names
// stored as strings to enumeration values for Type FileOpenMode.
// Populated by checkInitializeMaps before first use.
var mFileOpenModeStringToInt = map[string]int{}

// mFileOpenModeLwrCaseStringToInt - This map is used to map enumeration names
// stored as lower case strings to enumeration values for Type FileOpenMode.
// This map is used for case insensitive look ups.
// Populated by checkInitializeMaps before first use.
var mFileOpenModeLwrCaseStringToInt = map[string]int{}
// FileOpenMode - To further control the file open operation, one
// or more FileOpenMode values may be or'd with a FileOpenType
// code in order to control behavior.
//
// In addition, one of the three codes may be or'd with
// zero or more of the following File Open Modes (Type: 'FileOpenMode')
// to better control file open behavior.
//
// FileOpenMode has been adapted to function as an enumeration of valid
// File Open Mode values. Since Go does not directly support enumerations,
// the 'FileOpenMode' has been configured to function in a manner similar
// to classic enumerations found in other languages like C#. For additional
// information, reference:
//
// Jeffrey Richter Using Reflection to implement enumerated types
// https://www.youtube.com/watch?v=DyXJy_0v0_U
//
//
// These FileOpenMode methods used as enumerators for os mode constants:
//
// FileOpenMode(0).ModeNone()
// FileOpenMode(0).ModeAppend()
// FileOpenMode(0).ModeTypeCreate()
// FileOpenMode(0).ModeExclusive()
// FileOpenMode(0).ModeSync()
// FileOpenMode(0).ModeTruncate()
//
// Reference CONSTANTS: https://golang.org/pkg/os/
//
// The FileOpenType type is used in conjunction with FileOpenMode to specify
// file permissions. Reference the 'FileOpenType' in this 'pathfileops' package.
// The methods used to specify File Open Types are listed as follows:
//
// FileOpenType(0).TypeReadOnly()
// FileOpenType(0).TypeWriteOnly()
// FileOpenType(0).TypeReadWrite()
// ModeNone - No File Open Mode is active. Sentinel value (-1)
// indicating that no O_* flag is or'd into the file open operation.
func (fOpenMode FileOpenMode) ModeNone() FileOpenMode { return FileOpenMode(-1) }
// ModeAppend - append data to the file when writing (os.O_APPEND).
func (fOpenMode FileOpenMode) ModeAppend() FileOpenMode { return FileOpenMode(os.O_APPEND) }
// ModeCreate - create a new file if none exists (os.O_CREATE).
func (fOpenMode FileOpenMode) ModeCreate() FileOpenMode { return FileOpenMode(os.O_CREATE) }
// ModeExclusive - used with FileOpenMode(0).ModeCreate(): the file
// must not already exist (os.O_EXCL).
func (fOpenMode FileOpenMode) ModeExclusive() FileOpenMode { return FileOpenMode(os.O_EXCL) }
// ModeSync - open for synchronous I/O (os.O_SYNC).
func (fOpenMode FileOpenMode) ModeSync() FileOpenMode { return FileOpenMode(os.O_SYNC) }
// ModeTruncate - if possible, truncate the file when opened (os.O_TRUNC).
func (fOpenMode FileOpenMode) ModeTruncate() FileOpenMode { return FileOpenMode(os.O_TRUNC) }
// IsValid - If the value of the current FileOpenMode is 'invalid',
// this method will return an error. If the FileOpenMode is 'valid',
// this method will return a value of 'nil'.
//
// This is a standard utility method and is not part of the valid enumerations
// for this type.
//
// IsValid returns nil when the current FileOpenMode maps to a known
// enumeration name, and a descriptive error otherwise.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
func (fOpenMode FileOpenMode) IsValid() error {
	fOpenMode.checkInitializeMaps(false)
	// Valid values are exactly the keys of the int-to-name map.
	_, ok := mFileOpenModeIntToString[int(fOpenMode)]
	if !ok {
		ePrefix := "FileOpenMode.IsValid() "
		// Fixed typo in the user-facing message: "Ivalid" -> "Invalid".
		return fmt.Errorf(ePrefix+
			"Error: Invalid FileOpenMode! Current FileOpenMode='%v'",
			fOpenMode)
	}
	return nil
}
// ParseString - Receives a string and attempts to match it with
// the string value of a supported enumeration. If successful, a
// new instance of FileOpenMode is returned set to the value of the
// associated enumeration.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters
//
// valueString string - A string which will be matched against the
// enumeration string values. If 'valueString'
// is equal to one of the enumeration names, this
// method will proceed to successful completion
//
// caseSensitive bool - If 'true' the search for enumeration names
// will be case sensitive and will require an
// exact match. Therefore, 'append' will NOT
// match the enumeration name, 'Append'.
//
// If 'false' a case insensitive search is conducted
// for the enumeration name. In this case, 'append'
// will match match enumeration name 'Append'.
//
// ------------------------------------------------------------------------
//
// Return Values
//
// FileOpenMode - Upon successful completion, this method will return a new
// instance of FileOpenMode set to the value of the enumeration
// matched by the string search performed on input parameter,
// 'valueString'.
//
// error - If this method completes successfully, the returned error
// Type is set equal to 'nil'. If an error condition is encountered,
// this method will return an error Type which encapsulates an
// appropriate error message.
//
// ------------------------------------------------------------------------
//
// Usage
//
// t, err := FileOpenMode(0).ParseString("Append", true)
// OR
// t, err := FileOpenMode(0).ParseString("ModeAppend", true)
// OR
// t, err := FileOpenMode(0).ParseString("ModeAppend()", true)
// OR
// t, err := FileOpenMode(0).ParseString("Append()", true)
// OR
// t, err := FileOpenMode(0).ParseString("append", false)
//
// In any case shown above, t is now equal to FileOpenMode(0).Append()
//
func (fOpenMode FileOpenMode) ParseString(
	valueString string,
	caseSensitive bool) (FileOpenMode, error) {

	ePrefix := "FileOpenMode.ParseString() "

	fOpenMode.checkInitializeMaps(false)

	result := FileOpenMode(0)

	lenValueStr := len(valueString)

	// Strip a trailing "()" so both "ModeAppend" and "ModeAppend()" match.
	if strings.HasSuffix(valueString, "()") {
		valueString = valueString[0 : lenValueStr-2]
		lenValueStr -= 2
	}

	// Reject strings too short to be a valid enumeration name.
	if lenValueStr < 3 {
		return result,
			fmt.Errorf(ePrefix+
				"Input parameter 'valueString' is INVALID! valueString='%v' ", valueString)
	}

	var ok bool
	var idx int

	if caseSensitive {
		// Exact-match lookup; the "Mode" prefix is optional on input.
		if !strings.HasPrefix(valueString, "Mode") {
			valueString = "Mode" + valueString
		}
		idx, ok = mFileOpenModeStringToInt[valueString]
		if !ok {
			return FileOpenMode(0),
				fmt.Errorf(ePrefix+
					"'valueString' did NOT MATCH a FileOpenMode. valueString='%v' ", valueString)
		}
		result = FileOpenMode(idx)
	} else {
		// Case-insensitive lookup against the lower-case map; the
		// "mode" prefix is likewise optional.
		valueString = strings.ToLower(valueString)
		if !strings.HasPrefix(valueString, "mode") {
			valueString = "mode" + valueString
		}
		idx, ok = mFileOpenModeLwrCaseStringToInt[valueString]
		if !ok {
			return FileOpenMode(0),
				fmt.Errorf(ePrefix+
					"'valueString' did NOT MATCH a FileOpenMode. valueString='%v' ", valueString)
		}
		result =
			FileOpenMode(idx)
	}

	return result, nil
}
// String - Returns a string with the name of the enumeration associated
// with this instance of 'FileOpenMode'.
//
// This is a standard utility method and is not part of the valid enumerations
// for this type.
//
// ------------------------------------------------------------------------
//
// Return Value:
//
// string - The string label or description for the current enumeration
// value. If, the FileOpenMode value is invalid, this method will
// return an empty string.
//
// ------------------------------------------------------------------------
//
// Usage
//
// t:= FileOpenMode(0).ModeAppend()
// str := t.String()
// str is now equal to 'ModeAppend'
//
func (fOpenMode FileOpenMode) String() string {
fOpenMode.checkInitializeMaps(false)
str, ok := mFileOpenModeIntToString[int(fOpenMode)]
if !ok {
return ""
}
return str
}
// Value - This is a utility method which is not part of the
// enumerations supported by this type. It returns the numeric
// value of the enumeration associated with the current FileOpenMode
// instance.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
func (fOpenMode FileOpenMode) Value() int {
return int(fOpenMode)
}
// checkInitializeMaps - String and value comparisons performed on enumerations
// supported by this Type, utilizes a series of 3-map types. These maps are used
// internally to perform 'string to value' or 'value to string' look ups on
// enumerations supported by this type. Each time FileOpenMode.String() or
// FileOpenMode.ParseString() a call is made to this method to determine if
// these maps have been initialized. If the maps and look up data have been
// properly initialized and indexed, this method returns without taking action.
//
// On the other hand, if the maps have not yet been initialized, this method will
// initialize all associated map slices.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters
//
// reInitialize bool - If 'true', this will force initialization of
// all associated maps.
//
func (fOpenMode FileOpenMode) checkInitializeMaps(reInitialize bool) {
if !reInitialize &&
mFileOpenModeIntToString != nil &&
len(mFileOpenModeIntToString) > 5 &&
mFileOpenModeStringToInt != nil &&
len(mFileOpenModeStringToInt) > 5 &&
mFileOpenModeLwrCaseStringToInt != nil &&
len(mFileOpenModeLwrCaseStringToInt) > 5 {
return
}
var t = FOpenMode.ModeAppend()
mFileOpenModeIntToString = make(map[int]string, 0)
mFileOpenModeStringToInt = make(map[string]int, 0)
mFileOpenModeLwrCaseStringToInt = make(map[string]int, 0)
s := reflect.TypeOf(t)
r := reflect.TypeOf(0) // int
args := [1]reflect.Value{reflect.Zero(s)}
for i := 0; i < s.NumMethod(); i++ {
f := s.Method(i).Name
if f == "String" ||
f == "ParseString" ||
f == "Value" ||
f == "IsValid" ||
f == "checkInitializeMaps" {
continue
}
value := s.Method(i).Func.Call(args[:])[0].Convert(r).Int()
x := int(value)
mFileOpenModeIntToString[x] = f
mFileOpenModeStringToInt[f] = x
mFileOpenModeLwrCaseStringToInt[strings.ToLower(f)] = x
}
}
// FOpenMode - This public global variable allows
// easy access to the enumerations of the FileOpenMode
// using the dot operator.
//
// Example:
//
// FileOpenMode(0).ModeAppend()
// FileOpenMode(0).ModeCreate()
// FileOpenMode(0).ModeExclusive()
//
var FOpenMode = FileOpenMode(0) | // Reference CONSTANTS: https://golang.org/pkg/os/
//
type FileOpenMode int
| random_line_split |
fileopenmodeenum.go | package pathfileops
import (
"fmt"
"os"
"reflect"
"strings"
)
// mFileOpenModeIntToString - This map is used to map enumeration values
// to enumeration names stored as strings for Type FileOpenMode.
var mFileOpenModeIntToString = map[int]string{}
// mFileOpenModeStringToInt - This map is used to map enumeration names
// stored as strings to enumeration values for Type FileOpenMode.
var mFileOpenModeStringToInt = map[string]int{}
// mFileOpenModeLwrCaseStringToInt - This map is used to map enumeration names
// stored as lower case strings to enumeration values for Type FileOpenMode.
// This map is used for case insensitive look ups.
var mFileOpenModeLwrCaseStringToInt = map[string]int{}
// FileOpenMode - To further control the file open operation, one
// or more FileOpenMode values may be or'd with a FileOpenType
// code in order to control behavior.
//
// In addition, one of the three codes may be or'd with
// zero or more of the following File Open Modes (Type: 'FileOpenMode')
// to better control file open behavior.
//
// FileOpenMode has been adapted to function as an enumeration of valid
// File Open Mode values. Since Go does not directly support enumerations,
// the 'FileOpenMode' has been configured to function in a manner similar
// to classic enumerations found in other languages like C#. For additional
// information, reference:
//
// Jeffrey Richter Using Reflection to implement enumerated types
// https://www.youtube.com/watch?v=DyXJy_0v0_U
//
//
// These FileOpenMode methods used as enumerators for os mode constants:
//
// FileOpenMode(0).ModeNone()
// FileOpenMode(0).ModeAppend()
// FileOpenMode(0).ModeTypeCreate()
// FileOpenMode(0).ModeExclusive()
// FileOpenMode(0).ModeSync()
// FileOpenMode(0).ModeTruncate()
//
// Reference CONSTANTS: https://golang.org/pkg/os/
//
// The FileOpenType type is used in conjunction with FileOpenMode to specify
// file permissions. Reference the 'FileOpenType' in this 'pathfileops' package.
// The methods used to specify File Open Types are listed as follows:
//
// FileOpenType(0).TypeReadOnly()
// FileOpenType(0).TypeWriteOnly()
// FileOpenType(0).TypeReadWrite()
//
// Reference CONSTANTS: https://golang.org/pkg/os/
//
type FileOpenMode int
// None - No File Open Mode is active
func (fOpenMode FileOpenMode) ModeNone() FileOpenMode { return FileOpenMode(-1) }
// Append - append data to the file when writing.
func (fOpenMode FileOpenMode) ModeAppend() FileOpenMode { return FileOpenMode(os.O_APPEND) }
// Create - create a new file if none exists.
func (fOpenMode FileOpenMode) ModeCreate() FileOpenMode |
// Exclusive - used with FileOpenControlMode(0).Create(), file must not exist.
func (fOpenMode FileOpenMode) ModeExclusive() FileOpenMode { return FileOpenMode(os.O_EXCL) }
// Sync - open for synchronous I/O.
func (fOpenMode FileOpenMode) ModeSync() FileOpenMode { return FileOpenMode(os.O_SYNC) }
// Truncate - if possible, truncate file when opened.
func (fOpenMode FileOpenMode) ModeTruncate() FileOpenMode { return FileOpenMode(os.O_TRUNC) }
// IsValid - If the value of the current FileOpenMode is 'invalid',
// this method will return an error. If the FileOpenMode is 'valid',
// this method will return a value of 'nil'.
//
// This is a standard utility method and is not part of the valid enumerations
// for this type.
//
func (fOpenMode FileOpenMode) IsValid() error {
fOpenMode.checkInitializeMaps(false)
_, ok := mFileOpenModeIntToString[int(fOpenMode)]
if !ok {
ePrefix := "FileOpenMode.IsValid() "
return fmt.Errorf(ePrefix+
"Error: Ivalid FileOpenMode! Current FileOpenMode='%v'",
fOpenMode)
}
return nil
}
// ParseString - Receives a string and attempts to match it with
// the string value of a supported enumeration. If successful, a
// new instance of FileOpenMode is returned set to the value of the
// associated enumeration.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters
//
// valueString string - A string which will be matched against the
// enumeration string values. If 'valueString'
// is equal to one of the enumeration names, this
// method will proceed to successful completion
//
// caseSensitive bool - If 'true' the search for enumeration names
// will be case sensitive and will require an
// exact match. Therefore, 'append' will NOT
// match the enumeration name, 'Append'.
//
// If 'false' a case insensitive search is conducted
// for the enumeration name. In this case, 'append'
// will match match enumeration name 'Append'.
//
// ------------------------------------------------------------------------
//
// Return Values
//
// FileOpenMode - Upon successful completion, this method will return a new
// instance of FileOpenMode set to the value of the enumeration
// matched by the string search performed on input parameter,
// 'valueString'.
//
// error - If this method completes successfully, the returned error
// Type is set equal to 'nil'. If an error condition is encountered,
// this method will return an error Type which encapsulates an
// appropriate error message.
//
// ------------------------------------------------------------------------
//
// Usage
//
// t, err := FileOpenMode(0).ParseString("Append", true)
// OR
// t, err := FileOpenMode(0).ParseString("ModeAppend", true)
// OR
// t, err := FileOpenMode(0).ParseString("ModeAppend()", true)
// OR
// t, err := FileOpenMode(0).ParseString("Append()", true)
// OR
// t, err := FileOpenMode(0).ParseString("append", false)
//
// In any case shown above, t is now equal to FileOpenMode(0).Append()
//
func (fOpenMode FileOpenMode) ParseString(
valueString string,
caseSensitive bool) (FileOpenMode, error) {
ePrefix := "FileOpenMode.ParseString() "
fOpenMode.checkInitializeMaps(false)
result := FileOpenMode(0)
lenValueStr := len(valueString)
if strings.HasSuffix(valueString, "()") {
valueString = valueString[0 : lenValueStr-2]
lenValueStr -= 2
}
if lenValueStr < 3 {
return result,
fmt.Errorf(ePrefix+
"Input parameter 'valueString' is INVALID! valueString='%v' ", valueString)
}
var ok bool
var idx int
if caseSensitive {
if !strings.HasPrefix(valueString, "Mode") {
valueString = "Mode" + valueString
}
idx, ok = mFileOpenModeStringToInt[valueString]
if !ok {
return FileOpenMode(0),
fmt.Errorf(ePrefix+
"'valueString' did NOT MATCH a FileOpenMode. valueString='%v' ", valueString)
}
result = FileOpenMode(idx)
} else {
valueString = strings.ToLower(valueString)
if !strings.HasPrefix(valueString, "mode") {
valueString = "mode" + valueString
}
idx, ok = mFileOpenModeLwrCaseStringToInt[valueString]
if !ok {
return FileOpenMode(0),
fmt.Errorf(ePrefix+
"'valueString' did NOT MATCH a FileOpenMode. valueString='%v' ", valueString)
}
result =
FileOpenMode(idx)
}
return result, nil
}
// String - Returns a string with the name of the enumeration associated
// with this instance of 'FileOpenMode'.
//
// This is a standard utility method and is not part of the valid enumerations
// for this type.
//
// ------------------------------------------------------------------------
//
// Return Value:
//
// string - The string label or description for the current enumeration
// value. If, the FileOpenMode value is invalid, this method will
// return an empty string.
//
// ------------------------------------------------------------------------
//
// Usage
//
// t:= FileOpenMode(0).ModeAppend()
// str := t.String()
// str is now equal to 'ModeAppend'
//
func (fOpenMode FileOpenMode) String() string {
fOpenMode.checkInitializeMaps(false)
str, ok := mFileOpenModeIntToString[int(fOpenMode)]
if !ok {
return ""
}
return str
}
// Value - This is a utility method which is not part of the
// enumerations supported by this type. It returns the numeric
// value of the enumeration associated with the current FileOpenMode
// instance.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
func (fOpenMode FileOpenMode) Value() int {
return int(fOpenMode)
}
// checkInitializeMaps - String and value comparisons performed on enumerations
// supported by this Type, utilizes a series of 3-map types. These maps are used
// internally to perform 'string to value' or 'value to string' look ups on
// enumerations supported by this type. Each time FileOpenMode.String() or
// FileOpenMode.ParseString() a call is made to this method to determine if
// these maps have been initialized. If the maps and look up data have been
// properly initialized and indexed, this method returns without taking action.
//
// On the other hand, if the maps have not yet been initialized, this method will
// initialize all associated map slices.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters
//
// reInitialize bool - If 'true', this will force initialization of
// all associated maps.
//
func (fOpenMode FileOpenMode) checkInitializeMaps(reInitialize bool) {
if !reInitialize &&
mFileOpenModeIntToString != nil &&
len(mFileOpenModeIntToString) > 5 &&
mFileOpenModeStringToInt != nil &&
len(mFileOpenModeStringToInt) > 5 &&
mFileOpenModeLwrCaseStringToInt != nil &&
len(mFileOpenModeLwrCaseStringToInt) > 5 {
return
}
var t = FOpenMode.ModeAppend()
mFileOpenModeIntToString = make(map[int]string, 0)
mFileOpenModeStringToInt = make(map[string]int, 0)
mFileOpenModeLwrCaseStringToInt = make(map[string]int, 0)
s := reflect.TypeOf(t)
r := reflect.TypeOf(0) // int
args := [1]reflect.Value{reflect.Zero(s)}
for i := 0; i < s.NumMethod(); i++ {
f := s.Method(i).Name
if f == "String" ||
f == "ParseString" ||
f == "Value" ||
f == "IsValid" ||
f == "checkInitializeMaps" {
continue
}
value := s.Method(i).Func.Call(args[:])[0].Convert(r).Int()
x := int(value)
mFileOpenModeIntToString[x] = f
mFileOpenModeStringToInt[f] = x
mFileOpenModeLwrCaseStringToInt[strings.ToLower(f)] = x
}
}
// FOpenMode - This public global variable allows
// easy access to the enumerations of the FileOpenMode
// using the dot operator.
//
// Example:
//
// FileOpenMode(0).ModeAppend()
// FileOpenMode(0).ModeCreate()
// FileOpenMode(0).ModeExclusive()
//
var FOpenMode = FileOpenMode(0)
| { return FileOpenMode(os.O_CREATE) } | identifier_body |
fileopenmodeenum.go | package pathfileops
import (
"fmt"
"os"
"reflect"
"strings"
)
// mFileOpenModeIntToString - This map is used to map enumeration values
// to enumeration names stored as strings for Type FileOpenMode.
var mFileOpenModeIntToString = map[int]string{}
// mFileOpenModeStringToInt - This map is used to map enumeration names
// stored as strings to enumeration values for Type FileOpenMode.
var mFileOpenModeStringToInt = map[string]int{}
// mFileOpenModeLwrCaseStringToInt - This map is used to map enumeration names
// stored as lower case strings to enumeration values for Type FileOpenMode.
// This map is used for case insensitive look ups.
var mFileOpenModeLwrCaseStringToInt = map[string]int{}
// FileOpenMode - To further control the file open operation, one
// or more FileOpenMode values may be or'd with a FileOpenType
// code in order to control behavior.
//
// In addition, one of the three codes may be or'd with
// zero or more of the following File Open Modes (Type: 'FileOpenMode')
// to better control file open behavior.
//
// FileOpenMode has been adapted to function as an enumeration of valid
// File Open Mode values. Since Go does not directly support enumerations,
// the 'FileOpenMode' has been configured to function in a manner similar
// to classic enumerations found in other languages like C#. For additional
// information, reference:
//
// Jeffrey Richter Using Reflection to implement enumerated types
// https://www.youtube.com/watch?v=DyXJy_0v0_U
//
//
// These FileOpenMode methods used as enumerators for os mode constants:
//
// FileOpenMode(0).ModeNone()
// FileOpenMode(0).ModeAppend()
// FileOpenMode(0).ModeTypeCreate()
// FileOpenMode(0).ModeExclusive()
// FileOpenMode(0).ModeSync()
// FileOpenMode(0).ModeTruncate()
//
// Reference CONSTANTS: https://golang.org/pkg/os/
//
// The FileOpenType type is used in conjunction with FileOpenMode to specify
// file permissions. Reference the 'FileOpenType' in this 'pathfileops' package.
// The methods used to specify File Open Types are listed as follows:
//
// FileOpenType(0).TypeReadOnly()
// FileOpenType(0).TypeWriteOnly()
// FileOpenType(0).TypeReadWrite()
//
// Reference CONSTANTS: https://golang.org/pkg/os/
//
type FileOpenMode int
// None - No File Open Mode is active
func (fOpenMode FileOpenMode) ModeNone() FileOpenMode { return FileOpenMode(-1) }
// Append - append data to the file when writing.
func (fOpenMode FileOpenMode) ModeAppend() FileOpenMode { return FileOpenMode(os.O_APPEND) }
// Create - create a new file if none exists.
func (fOpenMode FileOpenMode) ModeCreate() FileOpenMode { return FileOpenMode(os.O_CREATE) }
// Exclusive - used with FileOpenControlMode(0).Create(), file must not exist.
func (fOpenMode FileOpenMode) ModeExclusive() FileOpenMode { return FileOpenMode(os.O_EXCL) }
// Sync - open for synchronous I/O.
func (fOpenMode FileOpenMode) | () FileOpenMode { return FileOpenMode(os.O_SYNC) }
// Truncate - if possible, truncate file when opened.
func (fOpenMode FileOpenMode) ModeTruncate() FileOpenMode { return FileOpenMode(os.O_TRUNC) }
// IsValid - If the value of the current FileOpenMode is 'invalid',
// this method will return an error. If the FileOpenMode is 'valid',
// this method will return a value of 'nil'.
//
// This is a standard utility method and is not part of the valid enumerations
// for this type.
//
func (fOpenMode FileOpenMode) IsValid() error {
fOpenMode.checkInitializeMaps(false)
_, ok := mFileOpenModeIntToString[int(fOpenMode)]
if !ok {
ePrefix := "FileOpenMode.IsValid() "
return fmt.Errorf(ePrefix+
"Error: Ivalid FileOpenMode! Current FileOpenMode='%v'",
fOpenMode)
}
return nil
}
// ParseString - Receives a string and attempts to match it with
// the string value of a supported enumeration. If successful, a
// new instance of FileOpenMode is returned set to the value of the
// associated enumeration.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters
//
// valueString string - A string which will be matched against the
// enumeration string values. If 'valueString'
// is equal to one of the enumeration names, this
// method will proceed to successful completion
//
// caseSensitive bool - If 'true' the search for enumeration names
// will be case sensitive and will require an
// exact match. Therefore, 'append' will NOT
// match the enumeration name, 'Append'.
//
// If 'false' a case insensitive search is conducted
// for the enumeration name. In this case, 'append'
// will match match enumeration name 'Append'.
//
// ------------------------------------------------------------------------
//
// Return Values
//
// FileOpenMode - Upon successful completion, this method will return a new
// instance of FileOpenMode set to the value of the enumeration
// matched by the string search performed on input parameter,
// 'valueString'.
//
// error - If this method completes successfully, the returned error
// Type is set equal to 'nil'. If an error condition is encountered,
// this method will return an error Type which encapsulates an
// appropriate error message.
//
// ------------------------------------------------------------------------
//
// Usage
//
// t, err := FileOpenMode(0).ParseString("Append", true)
// OR
// t, err := FileOpenMode(0).ParseString("ModeAppend", true)
// OR
// t, err := FileOpenMode(0).ParseString("ModeAppend()", true)
// OR
// t, err := FileOpenMode(0).ParseString("Append()", true)
// OR
// t, err := FileOpenMode(0).ParseString("append", false)
//
// In any case shown above, t is now equal to FileOpenMode(0).Append()
//
func (fOpenMode FileOpenMode) ParseString(
valueString string,
caseSensitive bool) (FileOpenMode, error) {
ePrefix := "FileOpenMode.ParseString() "
fOpenMode.checkInitializeMaps(false)
result := FileOpenMode(0)
lenValueStr := len(valueString)
if strings.HasSuffix(valueString, "()") {
valueString = valueString[0 : lenValueStr-2]
lenValueStr -= 2
}
if lenValueStr < 3 {
return result,
fmt.Errorf(ePrefix+
"Input parameter 'valueString' is INVALID! valueString='%v' ", valueString)
}
var ok bool
var idx int
if caseSensitive {
if !strings.HasPrefix(valueString, "Mode") {
valueString = "Mode" + valueString
}
idx, ok = mFileOpenModeStringToInt[valueString]
if !ok {
return FileOpenMode(0),
fmt.Errorf(ePrefix+
"'valueString' did NOT MATCH a FileOpenMode. valueString='%v' ", valueString)
}
result = FileOpenMode(idx)
} else {
valueString = strings.ToLower(valueString)
if !strings.HasPrefix(valueString, "mode") {
valueString = "mode" + valueString
}
idx, ok = mFileOpenModeLwrCaseStringToInt[valueString]
if !ok {
return FileOpenMode(0),
fmt.Errorf(ePrefix+
"'valueString' did NOT MATCH a FileOpenMode. valueString='%v' ", valueString)
}
result =
FileOpenMode(idx)
}
return result, nil
}
// String - Returns a string with the name of the enumeration associated
// with this instance of 'FileOpenMode'.
//
// This is a standard utility method and is not part of the valid enumerations
// for this type.
//
// ------------------------------------------------------------------------
//
// Return Value:
//
// string - The string label or description for the current enumeration
// value. If, the FileOpenMode value is invalid, this method will
// return an empty string.
//
// ------------------------------------------------------------------------
//
// Usage
//
// t:= FileOpenMode(0).ModeAppend()
// str := t.String()
// str is now equal to 'ModeAppend'
//
func (fOpenMode FileOpenMode) String() string {
fOpenMode.checkInitializeMaps(false)
str, ok := mFileOpenModeIntToString[int(fOpenMode)]
if !ok {
return ""
}
return str
}
// Value - This is a utility method which is not part of the
// enumerations supported by this type. It returns the numeric
// value of the enumeration associated with the current FileOpenMode
// instance.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
func (fOpenMode FileOpenMode) Value() int {
return int(fOpenMode)
}
// checkInitializeMaps - String and value comparisons performed on enumerations
// supported by this Type, utilizes a series of 3-map types. These maps are used
// internally to perform 'string to value' or 'value to string' look ups on
// enumerations supported by this type. Each time FileOpenMode.String() or
// FileOpenMode.ParseString() a call is made to this method to determine if
// these maps have been initialized. If the maps and look up data have been
// properly initialized and indexed, this method returns without taking action.
//
// On the other hand, if the maps have not yet been initialized, this method will
// initialize all associated map slices.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters
//
// reInitialize bool - If 'true', this will force initialization of
// all associated maps.
//
func (fOpenMode FileOpenMode) checkInitializeMaps(reInitialize bool) {
if !reInitialize &&
mFileOpenModeIntToString != nil &&
len(mFileOpenModeIntToString) > 5 &&
mFileOpenModeStringToInt != nil &&
len(mFileOpenModeStringToInt) > 5 &&
mFileOpenModeLwrCaseStringToInt != nil &&
len(mFileOpenModeLwrCaseStringToInt) > 5 {
return
}
var t = FOpenMode.ModeAppend()
mFileOpenModeIntToString = make(map[int]string, 0)
mFileOpenModeStringToInt = make(map[string]int, 0)
mFileOpenModeLwrCaseStringToInt = make(map[string]int, 0)
s := reflect.TypeOf(t)
r := reflect.TypeOf(0) // int
args := [1]reflect.Value{reflect.Zero(s)}
for i := 0; i < s.NumMethod(); i++ {
f := s.Method(i).Name
if f == "String" ||
f == "ParseString" ||
f == "Value" ||
f == "IsValid" ||
f == "checkInitializeMaps" {
continue
}
value := s.Method(i).Func.Call(args[:])[0].Convert(r).Int()
x := int(value)
mFileOpenModeIntToString[x] = f
mFileOpenModeStringToInt[f] = x
mFileOpenModeLwrCaseStringToInt[strings.ToLower(f)] = x
}
}
// FOpenMode - This public global variable allows
// easy access to the enumerations of the FileOpenMode
// using the dot operator.
//
// Example:
//
// FileOpenMode(0).ModeAppend()
// FileOpenMode(0).ModeCreate()
// FileOpenMode(0).ModeExclusive()
//
var FOpenMode = FileOpenMode(0)
| ModeSync | identifier_name |
fileopenmodeenum.go | package pathfileops
import (
"fmt"
"os"
"reflect"
"strings"
)
// mFileOpenModeIntToString - This map is used to map enumeration values
// to enumeration names stored as strings for Type FileOpenMode.
var mFileOpenModeIntToString = map[int]string{}
// mFileOpenModeStringToInt - This map is used to map enumeration names
// stored as strings to enumeration values for Type FileOpenMode.
var mFileOpenModeStringToInt = map[string]int{}
// mFileOpenModeLwrCaseStringToInt - This map is used to map enumeration names
// stored as lower case strings to enumeration values for Type FileOpenMode.
// This map is used for case insensitive look ups.
var mFileOpenModeLwrCaseStringToInt = map[string]int{}
// FileOpenMode - To further control the file open operation, one
// or more FileOpenMode values may be or'd with a FileOpenType
// code in order to control behavior.
//
// In addition, one of the three codes may be or'd with
// zero or more of the following File Open Modes (Type: 'FileOpenMode')
// to better control file open behavior.
//
// FileOpenMode has been adapted to function as an enumeration of valid
// File Open Mode values. Since Go does not directly support enumerations,
// the 'FileOpenMode' has been configured to function in a manner similar
// to classic enumerations found in other languages like C#. For additional
// information, reference:
//
// Jeffrey Richter Using Reflection to implement enumerated types
// https://www.youtube.com/watch?v=DyXJy_0v0_U
//
//
// These FileOpenMode methods used as enumerators for os mode constants:
//
// FileOpenMode(0).ModeNone()
// FileOpenMode(0).ModeAppend()
// FileOpenMode(0).ModeTypeCreate()
// FileOpenMode(0).ModeExclusive()
// FileOpenMode(0).ModeSync()
// FileOpenMode(0).ModeTruncate()
//
// Reference CONSTANTS: https://golang.org/pkg/os/
//
// The FileOpenType type is used in conjunction with FileOpenMode to specify
// file permissions. Reference the 'FileOpenType' in this 'pathfileops' package.
// The methods used to specify File Open Types are listed as follows:
//
// FileOpenType(0).TypeReadOnly()
// FileOpenType(0).TypeWriteOnly()
// FileOpenType(0).TypeReadWrite()
//
// Reference CONSTANTS: https://golang.org/pkg/os/
//
type FileOpenMode int
// None - No File Open Mode is active
func (fOpenMode FileOpenMode) ModeNone() FileOpenMode { return FileOpenMode(-1) }
// Append - append data to the file when writing.
func (fOpenMode FileOpenMode) ModeAppend() FileOpenMode { return FileOpenMode(os.O_APPEND) }
// Create - create a new file if none exists.
func (fOpenMode FileOpenMode) ModeCreate() FileOpenMode { return FileOpenMode(os.O_CREATE) }
// Exclusive - used with FileOpenControlMode(0).Create(), file must not exist.
func (fOpenMode FileOpenMode) ModeExclusive() FileOpenMode { return FileOpenMode(os.O_EXCL) }
// Sync - open for synchronous I/O.
func (fOpenMode FileOpenMode) ModeSync() FileOpenMode { return FileOpenMode(os.O_SYNC) }
// Truncate - if possible, truncate file when opened.
func (fOpenMode FileOpenMode) ModeTruncate() FileOpenMode { return FileOpenMode(os.O_TRUNC) }
// IsValid - If the value of the current FileOpenMode is 'invalid',
// this method will return an error. If the FileOpenMode is 'valid',
// this method will return a value of 'nil'.
//
// This is a standard utility method and is not part of the valid enumerations
// for this type.
//
func (fOpenMode FileOpenMode) IsValid() error {
fOpenMode.checkInitializeMaps(false)
_, ok := mFileOpenModeIntToString[int(fOpenMode)]
if !ok {
ePrefix := "FileOpenMode.IsValid() "
return fmt.Errorf(ePrefix+
"Error: Ivalid FileOpenMode! Current FileOpenMode='%v'",
fOpenMode)
}
return nil
}
// ParseString - Receives a string and attempts to match it with
// the string value of a supported enumeration. If successful, a
// new instance of FileOpenMode is returned set to the value of the
// associated enumeration.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters
//
// valueString string - A string which will be matched against the
// enumeration string values. If 'valueString'
// is equal to one of the enumeration names, this
// method will proceed to successful completion
//
// caseSensitive bool - If 'true' the search for enumeration names
// will be case sensitive and will require an
// exact match. Therefore, 'append' will NOT
// match the enumeration name, 'Append'.
//
// If 'false' a case insensitive search is conducted
// for the enumeration name. In this case, 'append'
// will match match enumeration name 'Append'.
//
// ------------------------------------------------------------------------
//
// Return Values
//
// FileOpenMode - Upon successful completion, this method will return a new
// instance of FileOpenMode set to the value of the enumeration
// matched by the string search performed on input parameter,
// 'valueString'.
//
// error - If this method completes successfully, the returned error
// Type is set equal to 'nil'. If an error condition is encountered,
// this method will return an error Type which encapsulates an
// appropriate error message.
//
// ------------------------------------------------------------------------
//
// Usage
//
// t, err := FileOpenMode(0).ParseString("Append", true)
// OR
// t, err := FileOpenMode(0).ParseString("ModeAppend", true)
// OR
// t, err := FileOpenMode(0).ParseString("ModeAppend()", true)
// OR
// t, err := FileOpenMode(0).ParseString("Append()", true)
// OR
// t, err := FileOpenMode(0).ParseString("append", false)
//
// In any case shown above, t is now equal to FileOpenMode(0).Append()
//
func (fOpenMode FileOpenMode) ParseString(
valueString string,
caseSensitive bool) (FileOpenMode, error) {
ePrefix := "FileOpenMode.ParseString() "
fOpenMode.checkInitializeMaps(false)
result := FileOpenMode(0)
lenValueStr := len(valueString)
if strings.HasSuffix(valueString, "()") {
valueString = valueString[0 : lenValueStr-2]
lenValueStr -= 2
}
if lenValueStr < 3 {
return result,
fmt.Errorf(ePrefix+
"Input parameter 'valueString' is INVALID! valueString='%v' ", valueString)
}
var ok bool
var idx int
if caseSensitive {
if !strings.HasPrefix(valueString, "Mode") {
valueString = "Mode" + valueString
}
idx, ok = mFileOpenModeStringToInt[valueString]
if !ok |
result = FileOpenMode(idx)
} else {
valueString = strings.ToLower(valueString)
if !strings.HasPrefix(valueString, "mode") {
valueString = "mode" + valueString
}
idx, ok = mFileOpenModeLwrCaseStringToInt[valueString]
if !ok {
return FileOpenMode(0),
fmt.Errorf(ePrefix+
"'valueString' did NOT MATCH a FileOpenMode. valueString='%v' ", valueString)
}
result =
FileOpenMode(idx)
}
return result, nil
}
// String - Returns a string with the name of the enumeration associated
// with this instance of 'FileOpenMode'.
//
// This is a standard utility method and is not part of the valid enumerations
// for this type.
//
// ------------------------------------------------------------------------
//
// Return Value:
//
// string - The string label or description for the current enumeration
// value. If, the FileOpenMode value is invalid, this method will
// return an empty string.
//
// ------------------------------------------------------------------------
//
// Usage
//
// t:= FileOpenMode(0).ModeAppend()
// str := t.String()
// str is now equal to 'ModeAppend'
//
func (fOpenMode FileOpenMode) String() string {
fOpenMode.checkInitializeMaps(false)
str, ok := mFileOpenModeIntToString[int(fOpenMode)]
if !ok {
return ""
}
return str
}
// Value - This is a utility method which is not part of the
// enumerations supported by this type. It returns the numeric
// value of the enumeration associated with the current FileOpenMode
// instance.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
func (fOpenMode FileOpenMode) Value() int {
return int(fOpenMode)
}
// checkInitializeMaps - String and value comparisons performed on enumerations
// supported by this Type, utilizes a series of 3-map types. These maps are used
// internally to perform 'string to value' or 'value to string' look ups on
// enumerations supported by this type. Each time FileOpenMode.String() or
// FileOpenMode.ParseString() a call is made to this method to determine if
// these maps have been initialized. If the maps and look up data have been
// properly initialized and indexed, this method returns without taking action.
//
// On the other hand, if the maps have not yet been initialized, this method will
// initialize all associated map slices.
//
// This is a standard utility method and is not part of the valid
// enumerations for this type.
//
// ------------------------------------------------------------------------
//
// Input Parameters
//
// reInitialize bool - If 'true', this will force initialization of
// all associated maps.
//
func (fOpenMode FileOpenMode) checkInitializeMaps(reInitialize bool) {
if !reInitialize &&
mFileOpenModeIntToString != nil &&
len(mFileOpenModeIntToString) > 5 &&
mFileOpenModeStringToInt != nil &&
len(mFileOpenModeStringToInt) > 5 &&
mFileOpenModeLwrCaseStringToInt != nil &&
len(mFileOpenModeLwrCaseStringToInt) > 5 {
return
}
var t = FOpenMode.ModeAppend()
mFileOpenModeIntToString = make(map[int]string, 0)
mFileOpenModeStringToInt = make(map[string]int, 0)
mFileOpenModeLwrCaseStringToInt = make(map[string]int, 0)
s := reflect.TypeOf(t)
r := reflect.TypeOf(0) // int
args := [1]reflect.Value{reflect.Zero(s)}
for i := 0; i < s.NumMethod(); i++ {
f := s.Method(i).Name
if f == "String" ||
f == "ParseString" ||
f == "Value" ||
f == "IsValid" ||
f == "checkInitializeMaps" {
continue
}
value := s.Method(i).Func.Call(args[:])[0].Convert(r).Int()
x := int(value)
mFileOpenModeIntToString[x] = f
mFileOpenModeStringToInt[f] = x
mFileOpenModeLwrCaseStringToInt[strings.ToLower(f)] = x
}
}
// FOpenMode - This public global variable allows
// easy access to the enumerations of the FileOpenMode
// using the dot operator.
//
// Example:
//
// FileOpenMode(0).ModeAppend()
// FileOpenMode(0).ModeCreate()
// FileOpenMode(0).ModeExclusive()
//
var FOpenMode = FileOpenMode(0)
| {
return FileOpenMode(0),
fmt.Errorf(ePrefix+
"'valueString' did NOT MATCH a FileOpenMode. valueString='%v' ", valueString)
} | conditional_block |
partner.py | # -*- coding: utf-8 -*-
##############################################################################
#
#
# Programmed by: Alexander Olivares <olivaresa@gmail.com>
#
# This the script to connect with Seniat website
# for consult the rif asociated with a partner was taken from:
#
# http://siv.cenditel.gob.ve/svn/sigesic/ramas/sigesic-1.1.x/sigesic/apps/comun/seniat.py
#
# This script was modify by:
# Javier Duran <javier@vauxoo.com>
# Miguel Delgado <miguel@openerp.com.ve>
# Israel Fermín Montilla <israel@openerp.com.ve>
# Juan Márquez <jmarquez@tecvemar.com.ve>
# Humberto Arocha <hbto@vauxoo.com>
# Yanina Aular <yanina.aular@vauxoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import except_orm
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.addons import decimal_precision as dp
import re
class res_partner(osv.osv):
_inherit = 'res.partner'
def _get_country_code(self, cr, uid, context=None):
""" Return the country code of the user company. If not exists, return XX.
"""
context = context or {}
user_company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
return user_company.partner_id and user_company.partner_id.country_id \
and user_company.partner_id.country_id.code or 'XX'
def default_get(self, cr, uid, fields, context=None):
""" Load the country code of the user company to form to be created.
"""
context = context or {}
res = super(res_partner, self).default_get(cr, uid, fields, context=context)
res.update({'uid_country': self._get_country_code(cr,uid,context=context)})
return res
def _get_uid_country(self, cr, uid, ids, field_name, args, context=None):
""" Return a dictionary of key ids as invoices, and value the country code
of the user company.
"""
context = context or {}
res= {}.fromkeys(ids,self._get_country_code(cr,uid,context=context))
return res
_columns = {
'seniat_updated': fields.boolean('Seniat Updated', help="This field indicates if partner was updated using SENIAT button"),
'uid_country': fields.function(_get_uid_country, type='char', string="uid_country", size=20, help="country code of the current company"),
'wh_iva_rate': fields.float(
string='Rate',
digits_compute=dp.get_precision('Withhold'),
help="Vat Withholding rate"),
'wh_iva_agent': fields.boolean('Wh. Agent',
help="Indicate if the partner is a withholding vat agent"),
}
_default = {
'seniat_updated': False,
}
def name_search(self,cr,uid,name='',args=[],operator='ilike',context=None,limit=80):
""" Gets el id of the partner with the vat or the name and return the name
"""
if context is None:
context={}
ids= []
if len(name) >= 2:
ids = self.search(cr, uid, [('vat',operator,name)] + args, limit=limit, context=context)
if not ids:
ids = self.search(cr,uid,[('name',operator,name)] + args, limit=limit, context=context)
return self.name_get(cr,uid,ids,context=context)
'''
Required Invoice Address
'''
def _check_partner_invoice_addr(self,cr,uid,ids,context={}):
""" Return true if the partner is a company of Venezuela and if the
address is for billing.
"""
partner_obj = self.browse(cr,uid,ids[0])
if partner_obj.vat and partner_obj.vat[:2].upper() == 'VE' and not partner_obj.parent_id:
res = partner_obj.type == 'invoice'
if res:
return True
else:
return False
else:
return True
return True
def _check_vat_uniqueness(self, cr, uid, ids, context=None):
""" Check that the vat is unique in the level where the partner in the tree
"""
if context is None: context = {}
user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id
acc_part_brw = self._find_accounting_partner(user_company.partner_id)
#User must be of VE
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
return True
for rp_brw in self.browse(cr, uid,ids):
acc_part_brw = self._find_accounting_partner(rp_brw)
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
continue
elif not acc_part_brw.country_id:
continue
if rp_brw.id == acc_part_brw.id and not acc_part_brw.vat:
return False
elif rp_brw.id == acc_part_brw.id and acc_part_brw.vat:
duplicates = self.search(cr, uid, [ ('vat', '=', rp_brw.vat), ('parent_id','=',False), ('id','!=',rp_brw.id) ])
if duplicates: return False
continue
return True
def _check_vat_mandatory(self, cr, uid, ids, context=None):
""" This method will check the vat mandatoriness in partners
for those user logged on with a Venezuelan Company
The method will return True when:
*) The user's company is not from Venezuela
*) The partner being created is the one for the a company being created [TODO]
The method will return False when:
*) The user's company is from Venezuela AND the vat field is empty AND:
+) partner is_company=True AND parent_id is not NULL
+) partner with parent_id is NULL
+) partner with parent_id is NOT NULL AND type of address is invoice
"""
if context is None: context = {}
# Avoiding Egg-Chicken Syndrome
# TODO: Refine this approach this is big exception
# One that can be handle by end user, I hope so!!!
if context.get('create_company',False):
return True
user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id
acc_part_brw = self._find_accounting_partner(user_company.partner_id)
#Check if the user is not from a VE Company
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
return True
for rp_brw in self.browse(cr, uid,ids):
acc_part_brw = self._find_accounting_partner(rp_brw)
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
continue
elif not acc_part_brw.country_id:
continue
if rp_brw.id == acc_part_brw.id and not acc_part_brw.vat:
return False
return True
def _validate(self, cr, uid, ids, context=None):
""" Validates the fields
"""
#In the original orm.py openerp does not allow using
#context within the constraint because we have to yield
# the same result always,
# we have overridden this behaviour
# TO ALLOW PASSING CONTEXT TO THE RESTRICTION IN RES.PARTNER
context = context or {}
lng = context.get('lang')
trans = self.pool.get('ir.translation')
error_msgs = []
for constraint in self._constraints:
fun, msg, fields = constraint
# We don't pass around the context here: validation code
# must always yield the same results.
if not fun(self, cr, uid, ids, context=context):
# Check presence of __call__ directly instead of using
# callable() because it will be deprecated as of Python 3.0
if hasattr(msg, '__call__'):
tmp_msg = msg(self, cr, uid, ids, context=context)
if isinstance(tmp_msg, tuple):
tmp_msg, params = tmp_msg
translated_msg = tmp_msg % params
else:
translated_msg = tmp_msg
else:
translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, msg)
error_msgs.append(
_("Error occurred while validating the field(s) %s: %s") % (','.join(fields), translated_msg)
)
self._invalids.update(fields)
if error_msgs:
raise except_orm('ValidateError', '\n'.join(error_msgs))
else:
self._invalids.clear()
_constraints = [
(_check_vat_mandatory, _("Error ! VAT is mandatory in the Accounting Partner"), []),
(_check_vat_uniqueness, _("Error ! Partner's VAT must be a unique value or empty"), []),
#~ (_check_partner_invoice_addr, _('Error ! The partner does not have an invoice address.'), []),
]
def vat_change_fiscal_requirements(self, cr, uid, ids, value, context=None):
""" Checks the syntax of the vat
"""
if context is None:
context={}
if not value:
return super(res_partner,self).vat_change(cr, uid, ids, value, context=context)
res = self.search(cr, uid, [('vat', 'ilike', value)])
if res:
rp = self.browse(cr, uid, res[0],context=context)
return {'warning': {
'title':_('Vat Error !'),
'message':_('The VAT [%s] looks like '%value +
'[%s] which is'%rp.vat.upper()+
' already being used by: %s'%rp.name.upper())
}
}
else:
return super(res_partner,self).vat_change(cr, uid, ids, value, context=context)
def check_vat_ve(self, vat, context = None):
""" Check Venezuelan VAT number, locally called RIF.
RIF: JXXXXXXXXX RIF VENEZOLAN IDENTIFICATION CARD: VXXXXXXXXX FOREIGN IDENTIFICATION CARD: EXXXXXXXXX
"""
if context is None:
context={}
if re.search(r'^[VJEGP][0-9]{9}$', vat):
return True
if re.search(r'^([VE][0-9]{1,8}|[D][0-9]{9})$', vat):
return True
return False
def vi | elf, cr, uid, country_code, vat_number, context=None):
"""
Validate against VAT Information Exchange System (VIES)
"""
if country_code.upper() != "VE":
return super(res_partner, self).vies_vat_check(cr, uid, country_code, vat_number,context=context)
else:
return super(res_partner, self).simple_vat_check(cr, uid, country_code, vat_number, context=context)
def update_rif(self, cr, uid, ids, context=None):
""" Load the rif and name of the partner from the database seniat
"""
if context is None:
context = {}
su_obj = self.pool.get('seniat.url')
return su_obj.update_rif(cr, uid, ids, context=context)
def button_check_vat(self, cr, uid, ids, context=None):
""" Is called by the button that load information of the partner from database
SENIAT
"""
if context is None: context = {}
context.update({'update_fiscal_information':True})
super(res_partner, self).check_vat(cr, uid, ids, context=context)
user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id
if user_company.vat_check_vies:
# force full VIES online check
self.update_rif(cr, uid, ids, context=context)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| es_vat_check(s | identifier_name |
partner.py | # -*- coding: utf-8 -*-
##############################################################################
#
#
# Programmed by: Alexander Olivares <olivaresa@gmail.com>
#
# This the script to connect with Seniat website
# for consult the rif asociated with a partner was taken from:
#
# http://siv.cenditel.gob.ve/svn/sigesic/ramas/sigesic-1.1.x/sigesic/apps/comun/seniat.py
#
# This script was modify by:
# Javier Duran <javier@vauxoo.com>
# Miguel Delgado <miguel@openerp.com.ve>
# Israel Fermín Montilla <israel@openerp.com.ve>
# Juan Márquez <jmarquez@tecvemar.com.ve>
# Humberto Arocha <hbto@vauxoo.com>
# Yanina Aular <yanina.aular@vauxoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import except_orm
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.addons import decimal_precision as dp
import re
class res_partner(osv.osv):
_inherit = 'res.partner'
def _get_country_code(self, cr, uid, context=None):
""" Return the country code of the user company. If not exists, return XX.
"""
context = context or {}
user_company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
return user_company.partner_id and user_company.partner_id.country_id \
and user_company.partner_id.country_id.code or 'XX'
def default_get(self, cr, uid, fields, context=None):
""" Load the country code of the user company to form to be created.
"""
context = context or {}
res = super(res_partner, self).default_get(cr, uid, fields, context=context)
res.update({'uid_country': self._get_country_code(cr,uid,context=context)})
return res
def _get_uid_country(self, cr, uid, ids, field_name, args, context=None):
""" Return a dictionary of key ids as invoices, and value the country code
of the user company.
"""
context = context or {}
res= {}.fromkeys(ids,self._get_country_code(cr,uid,context=context))
return res
_columns = {
'seniat_updated': fields.boolean('Seniat Updated', help="This field indicates if partner was updated using SENIAT button"),
'uid_country': fields.function(_get_uid_country, type='char', string="uid_country", size=20, help="country code of the current company"),
'wh_iva_rate': fields.float(
string='Rate',
digits_compute=dp.get_precision('Withhold'),
help="Vat Withholding rate"),
'wh_iva_agent': fields.boolean('Wh. Agent',
help="Indicate if the partner is a withholding vat agent"),
}
_default = {
'seniat_updated': False,
}
def name_search(self,cr,uid,name='',args=[],operator='ilike',context=None,limit=80):
""" Gets el id of the partner with the vat or the name and return the name
"""
if context is None:
context={}
ids= []
if len(name) >= 2:
ids = self.search(cr, uid, [('vat',operator,name)] + args, limit=limit, context=context)
if not ids:
ids = self.search(cr,uid,[('name',operator,name)] + args, limit=limit, context=context)
return self.name_get(cr,uid,ids,context=context)
'''
Required Invoice Address
'''
def _check_partner_invoice_addr(self,cr,uid,ids,context={}):
""" Return true if the partner is a company of Venezuela and if the
address is for billing.
"""
partner_obj = self.browse(cr,uid,ids[0])
if partner_obj.vat and partner_obj.vat[:2].upper() == 'VE' and not partner_obj.parent_id:
res = partner_obj.type == 'invoice'
if res:
return True
else:
return False
else:
return True
return True
def _check_vat_uniqueness(self, cr, uid, ids, context=None):
""" Check that the vat is unique in the level where the partner in the tree
"""
if context is None: context = {}
user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id
acc_part_brw = self._find_accounting_partner(user_company.partner_id)
#User must be of VE
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
return True
for rp_brw in self.browse(cr, uid,ids):
acc_part_brw = self._find_accounting_partner(rp_brw)
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
continue
elif not acc_part_brw.country_id:
continue
if rp_brw.id == acc_part_brw.id and not acc_part_brw.vat:
return False
elif rp_brw.id == acc_part_brw.id and acc_part_brw.vat:
duplicates = self.search(cr, uid, [ ('vat', '=', rp_brw.vat), ('parent_id','=',False), ('id','!=',rp_brw.id) ])
if duplicates: re | continue
return True
def _check_vat_mandatory(self, cr, uid, ids, context=None):
""" This method will check the vat mandatoriness in partners
for those user logged on with a Venezuelan Company
The method will return True when:
*) The user's company is not from Venezuela
*) The partner being created is the one for the a company being created [TODO]
The method will return False when:
*) The user's company is from Venezuela AND the vat field is empty AND:
+) partner is_company=True AND parent_id is not NULL
+) partner with parent_id is NULL
+) partner with parent_id is NOT NULL AND type of address is invoice
"""
if context is None: context = {}
# Avoiding Egg-Chicken Syndrome
# TODO: Refine this approach this is big exception
# One that can be handle by end user, I hope so!!!
if context.get('create_company',False):
return True
user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id
acc_part_brw = self._find_accounting_partner(user_company.partner_id)
#Check if the user is not from a VE Company
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
return True
for rp_brw in self.browse(cr, uid,ids):
acc_part_brw = self._find_accounting_partner(rp_brw)
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
continue
elif not acc_part_brw.country_id:
continue
if rp_brw.id == acc_part_brw.id and not acc_part_brw.vat:
return False
return True
def _validate(self, cr, uid, ids, context=None):
""" Validates the fields
"""
#In the original orm.py openerp does not allow using
#context within the constraint because we have to yield
# the same result always,
# we have overridden this behaviour
# TO ALLOW PASSING CONTEXT TO THE RESTRICTION IN RES.PARTNER
context = context or {}
lng = context.get('lang')
trans = self.pool.get('ir.translation')
error_msgs = []
for constraint in self._constraints:
fun, msg, fields = constraint
# We don't pass around the context here: validation code
# must always yield the same results.
if not fun(self, cr, uid, ids, context=context):
# Check presence of __call__ directly instead of using
# callable() because it will be deprecated as of Python 3.0
if hasattr(msg, '__call__'):
tmp_msg = msg(self, cr, uid, ids, context=context)
if isinstance(tmp_msg, tuple):
tmp_msg, params = tmp_msg
translated_msg = tmp_msg % params
else:
translated_msg = tmp_msg
else:
translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, msg)
error_msgs.append(
_("Error occurred while validating the field(s) %s: %s") % (','.join(fields), translated_msg)
)
self._invalids.update(fields)
if error_msgs:
raise except_orm('ValidateError', '\n'.join(error_msgs))
else:
self._invalids.clear()
_constraints = [
(_check_vat_mandatory, _("Error ! VAT is mandatory in the Accounting Partner"), []),
(_check_vat_uniqueness, _("Error ! Partner's VAT must be a unique value or empty"), []),
#~ (_check_partner_invoice_addr, _('Error ! The partner does not have an invoice address.'), []),
]
def vat_change_fiscal_requirements(self, cr, uid, ids, value, context=None):
""" Checks the syntax of the vat
"""
if context is None:
context={}
if not value:
return super(res_partner,self).vat_change(cr, uid, ids, value, context=context)
res = self.search(cr, uid, [('vat', 'ilike', value)])
if res:
rp = self.browse(cr, uid, res[0],context=context)
return {'warning': {
'title':_('Vat Error !'),
'message':_('The VAT [%s] looks like '%value +
'[%s] which is'%rp.vat.upper()+
' already being used by: %s'%rp.name.upper())
}
}
else:
return super(res_partner,self).vat_change(cr, uid, ids, value, context=context)
def check_vat_ve(self, vat, context = None):
""" Check Venezuelan VAT number, locally called RIF.
RIF: JXXXXXXXXX RIF VENEZOLAN IDENTIFICATION CARD: VXXXXXXXXX FOREIGN IDENTIFICATION CARD: EXXXXXXXXX
"""
if context is None:
context={}
if re.search(r'^[VJEGP][0-9]{9}$', vat):
return True
if re.search(r'^([VE][0-9]{1,8}|[D][0-9]{9})$', vat):
return True
return False
def vies_vat_check(self, cr, uid, country_code, vat_number, context=None):
"""
Validate against VAT Information Exchange System (VIES)
"""
if country_code.upper() != "VE":
return super(res_partner, self).vies_vat_check(cr, uid, country_code, vat_number,context=context)
else:
return super(res_partner, self).simple_vat_check(cr, uid, country_code, vat_number, context=context)
def update_rif(self, cr, uid, ids, context=None):
""" Load the rif and name of the partner from the database seniat
"""
if context is None:
context = {}
su_obj = self.pool.get('seniat.url')
return su_obj.update_rif(cr, uid, ids, context=context)
def button_check_vat(self, cr, uid, ids, context=None):
""" Is called by the button that load information of the partner from database
SENIAT
"""
if context is None: context = {}
context.update({'update_fiscal_information':True})
super(res_partner, self).check_vat(cr, uid, ids, context=context)
user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id
if user_company.vat_check_vies:
# force full VIES online check
self.update_rif(cr, uid, ids, context=context)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| turn False
| conditional_block |
partner.py | # -*- coding: utf-8 -*-
##############################################################################
#
#
# Programmed by: Alexander Olivares <olivaresa@gmail.com>
#
# This the script to connect with Seniat website
# for consult the rif asociated with a partner was taken from:
#
# http://siv.cenditel.gob.ve/svn/sigesic/ramas/sigesic-1.1.x/sigesic/apps/comun/seniat.py
#
# This script was modify by:
# Javier Duran <javier@vauxoo.com>
# Miguel Delgado <miguel@openerp.com.ve>
# Israel Fermín Montilla <israel@openerp.com.ve>
# Juan Márquez <jmarquez@tecvemar.com.ve>
# Humberto Arocha <hbto@vauxoo.com>
# Yanina Aular <yanina.aular@vauxoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import except_orm
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.addons import decimal_precision as dp
import re
class res_partner(osv.osv):
_inherit = 'res.partner'
def _get_country_code(self, cr, uid, context=None):
""" Return the country code of the user company. If not exists, return XX.
"""
context = context or {}
user_company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
return user_company.partner_id and user_company.partner_id.country_id \
and user_company.partner_id.country_id.code or 'XX'
def default_get(self, cr, uid, fields, context=None):
""" Load the country code of the user company to form to be created.
"""
context = context or {}
res = super(res_partner, self).default_get(cr, uid, fields, context=context)
res.update({'uid_country': self._get_country_code(cr,uid,context=context)})
return res
def _get_uid_country(self, cr, uid, ids, field_name, args, context=None):
""" Return a dictionary of key ids as invoices, and value the country code
of the user company.
"""
context = context or {}
res= {}.fromkeys(ids,self._get_country_code(cr,uid,context=context))
return res
_columns = {
'seniat_updated': fields.boolean('Seniat Updated', help="This field indicates if partner was updated using SENIAT button"),
'uid_country': fields.function(_get_uid_country, type='char', string="uid_country", size=20, help="country code of the current company"),
'wh_iva_rate': fields.float(
string='Rate',
digits_compute=dp.get_precision('Withhold'),
help="Vat Withholding rate"),
'wh_iva_agent': fields.boolean('Wh. Agent',
help="Indicate if the partner is a withholding vat agent"),
}
_default = {
'seniat_updated': False,
}
def name_search(self,cr,uid,name='',args=[],operator='ilike',context=None,limit=80):
""" Gets el id of the partner with the vat or the name and return the name
"""
if context is None:
context={}
ids= []
if len(name) >= 2:
ids = self.search(cr, uid, [('vat',operator,name)] + args, limit=limit, context=context)
if not ids:
ids = self.search(cr,uid,[('name',operator,name)] + args, limit=limit, context=context)
return self.name_get(cr,uid,ids,context=context)
'''
Required Invoice Address
'''
def _check_partner_invoice_addr(self,cr,uid,ids,context={}):
""" Return true if the partner is a company of Venezuela and if the
address is for billing.
"""
partner_obj = self.browse(cr,uid,ids[0])
if partner_obj.vat and partner_obj.vat[:2].upper() == 'VE' and not partner_obj.parent_id:
res = partner_obj.type == 'invoice'
if res:
return True
else:
return False
else:
return True
return True
def _check_vat_uniqueness(self, cr, uid, ids, context=None):
""" Check that the vat is unique in the level where the partner in the tree
"""
if context is None: context = {}
user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id
acc_part_brw = self._find_accounting_partner(user_company.partner_id)
#User must be of VE
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
return True
for rp_brw in self.browse(cr, uid,ids):
acc_part_brw = self._find_accounting_partner(rp_brw)
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
continue
elif not acc_part_brw.country_id:
continue
if rp_brw.id == acc_part_brw.id and not acc_part_brw.vat:
return False
elif rp_brw.id == acc_part_brw.id and acc_part_brw.vat:
duplicates = self.search(cr, uid, [ ('vat', '=', rp_brw.vat), ('parent_id','=',False), ('id','!=',rp_brw.id) ])
if duplicates: return False
continue
return True
def _check_vat_mandatory(self, cr, uid, ids, context=None):
""" This method will check the vat mandatoriness in partners
for those user logged on with a Venezuelan Company
The method will return True when:
*) The user's company is not from Venezuela
*) The partner being created is the one for the a company being created [TODO]
The method will return False when:
*) The user's company is from Venezuela AND the vat field is empty AND:
+) partner is_company=True AND parent_id is not NULL
+) partner with parent_id is NULL
+) partner with parent_id is NOT NULL AND type of address is invoice
"""
if context is None: context = {}
# Avoiding Egg-Chicken Syndrome
# TODO: Refine this approach this is big exception
# One that can be handle by end user, I hope so!!!
if context.get('create_company',False):
return True
user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id
acc_part_brw = self._find_accounting_partner(user_company.partner_id)
#Check if the user is not from a VE Company
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
return True
for rp_brw in self.browse(cr, uid,ids):
acc_part_brw = self._find_accounting_partner(rp_brw)
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
continue
elif not acc_part_brw.country_id:
continue
if rp_brw.id == acc_part_brw.id and not acc_part_brw.vat:
return False
return True
def _validate(self, cr, uid, ids, context=None):
""" Validates the fields
"""
#In the original orm.py openerp does not allow using
#context within the constraint because we have to yield
# the same result always,
# we have overridden this behaviour
# TO ALLOW PASSING CONTEXT TO THE RESTRICTION IN RES.PARTNER
context = context or {}
lng = context.get('lang')
trans = self.pool.get('ir.translation')
error_msgs = []
for constraint in self._constraints:
fun, msg, fields = constraint
# We don't pass around the context here: validation code
# must always yield the same results.
if not fun(self, cr, uid, ids, context=context):
# Check presence of __call__ directly instead of using
# callable() because it will be deprecated as of Python 3.0
if hasattr(msg, '__call__'):
tmp_msg = msg(self, cr, uid, ids, context=context)
if isinstance(tmp_msg, tuple):
tmp_msg, params = tmp_msg
translated_msg = tmp_msg % params
else:
translated_msg = tmp_msg
else:
translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, msg)
error_msgs.append(
_("Error occurred while validating the field(s) %s: %s") % (','.join(fields), translated_msg)
)
self._invalids.update(fields)
if error_msgs:
raise except_orm('ValidateError', '\n'.join(error_msgs))
else:
self._invalids.clear()
_constraints = [
(_check_vat_mandatory, _("Error ! VAT is mandatory in the Accounting Partner"), []),
(_check_vat_uniqueness, _("Error ! Partner's VAT must be a unique value or empty"), []),
#~ (_check_partner_invoice_addr, _('Error ! The partner does not have an invoice address.'), []),
]
def vat_change_fiscal_requirements(self, cr, uid, ids, value, context=None):
""" Checks the syntax of the vat
"""
if context is None:
context={}
if not value:
return super(res_partner,self).vat_change(cr, uid, ids, value, context=context)
res = self.search(cr, uid, [('vat', 'ilike', value)])
if res:
rp = self.browse(cr, uid, res[0],context=context)
return {'warning': {
'title':_('Vat Error !'),
'message':_('The VAT [%s] looks like '%value +
'[%s] which is'%rp.vat.upper()+
' already being used by: %s'%rp.name.upper())
}
}
else:
return super(res_partner,self).vat_change(cr, uid, ids, value, context=context)
def check_vat_ve(self, vat, context = None):
""" Check Venezuelan VAT number, locally called RIF.
RIF: JXXXXXXXXX RIF VENEZOLAN IDENTIFICATION CARD: VXXXXXXXXX FOREIGN IDENTIFICATION CARD: EXXXXXXXXX
"""
if context is None:
context={}
if re.search(r'^[VJEGP][0-9]{9}$', vat):
return True
if re.search(r'^([VE][0-9]{1,8}|[D][0-9]{9})$', vat):
return True
return False
def vies_vat_check(self, cr, uid, country_code, vat_number, context=None):
"""
Validate against VAT Information Exchange System (VIES)
"""
if country_code.upper() != "VE":
return super(res_partner, self).vies_vat_check(cr, uid, country_code, vat_number,context=context)
else:
return super(res_partner, self).simple_vat_check(cr, uid, country_code, vat_number, context=context)
def update_rif(self, cr, uid, ids, context=None):
"" | def button_check_vat(self, cr, uid, ids, context=None):
""" Is called by the button that load information of the partner from database
SENIAT
"""
if context is None: context = {}
context.update({'update_fiscal_information':True})
super(res_partner, self).check_vat(cr, uid, ids, context=context)
user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id
if user_company.vat_check_vies:
# force full VIES online check
self.update_rif(cr, uid, ids, context=context)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| " Load the rif and name of the partner from the database seniat
"""
if context is None:
context = {}
su_obj = self.pool.get('seniat.url')
return su_obj.update_rif(cr, uid, ids, context=context)
| identifier_body |
partner.py | # -*- coding: utf-8 -*-
##############################################################################
#
#
# Programmed by: Alexander Olivares <olivaresa@gmail.com>
#
# This the script to connect with Seniat website
# for consult the rif asociated with a partner was taken from:
#
# http://siv.cenditel.gob.ve/svn/sigesic/ramas/sigesic-1.1.x/sigesic/apps/comun/seniat.py
#
# This script was modify by:
# Javier Duran <javier@vauxoo.com>
# Miguel Delgado <miguel@openerp.com.ve>
# Israel Fermín Montilla <israel@openerp.com.ve>
# Juan Márquez <jmarquez@tecvemar.com.ve>
# Humberto Arocha <hbto@vauxoo.com>
# Yanina Aular <yanina.aular@vauxoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import except_orm
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.addons import decimal_precision as dp
import re
class res_partner(osv.osv):
_inherit = 'res.partner'
def _get_country_code(self, cr, uid, context=None): | """
context = context or {}
user_company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
return user_company.partner_id and user_company.partner_id.country_id \
and user_company.partner_id.country_id.code or 'XX'
def default_get(self, cr, uid, fields, context=None):
""" Load the country code of the user company to form to be created.
"""
context = context or {}
res = super(res_partner, self).default_get(cr, uid, fields, context=context)
res.update({'uid_country': self._get_country_code(cr,uid,context=context)})
return res
def _get_uid_country(self, cr, uid, ids, field_name, args, context=None):
""" Return a dictionary of key ids as invoices, and value the country code
of the user company.
"""
context = context or {}
res= {}.fromkeys(ids,self._get_country_code(cr,uid,context=context))
return res
_columns = {
'seniat_updated': fields.boolean('Seniat Updated', help="This field indicates if partner was updated using SENIAT button"),
'uid_country': fields.function(_get_uid_country, type='char', string="uid_country", size=20, help="country code of the current company"),
'wh_iva_rate': fields.float(
string='Rate',
digits_compute=dp.get_precision('Withhold'),
help="Vat Withholding rate"),
'wh_iva_agent': fields.boolean('Wh. Agent',
help="Indicate if the partner is a withholding vat agent"),
}
_default = {
'seniat_updated': False,
}
def name_search(self,cr,uid,name='',args=[],operator='ilike',context=None,limit=80):
""" Gets el id of the partner with the vat or the name and return the name
"""
if context is None:
context={}
ids= []
if len(name) >= 2:
ids = self.search(cr, uid, [('vat',operator,name)] + args, limit=limit, context=context)
if not ids:
ids = self.search(cr,uid,[('name',operator,name)] + args, limit=limit, context=context)
return self.name_get(cr,uid,ids,context=context)
'''
Required Invoice Address
'''
def _check_partner_invoice_addr(self,cr,uid,ids,context={}):
""" Return true if the partner is a company of Venezuela and if the
address is for billing.
"""
partner_obj = self.browse(cr,uid,ids[0])
if partner_obj.vat and partner_obj.vat[:2].upper() == 'VE' and not partner_obj.parent_id:
res = partner_obj.type == 'invoice'
if res:
return True
else:
return False
else:
return True
return True
def _check_vat_uniqueness(self, cr, uid, ids, context=None):
""" Check that the vat is unique in the level where the partner in the tree
"""
if context is None: context = {}
user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id
acc_part_brw = self._find_accounting_partner(user_company.partner_id)
#User must be of VE
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
return True
for rp_brw in self.browse(cr, uid,ids):
acc_part_brw = self._find_accounting_partner(rp_brw)
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
continue
elif not acc_part_brw.country_id:
continue
if rp_brw.id == acc_part_brw.id and not acc_part_brw.vat:
return False
elif rp_brw.id == acc_part_brw.id and acc_part_brw.vat:
duplicates = self.search(cr, uid, [ ('vat', '=', rp_brw.vat), ('parent_id','=',False), ('id','!=',rp_brw.id) ])
if duplicates: return False
continue
return True
def _check_vat_mandatory(self, cr, uid, ids, context=None):
""" This method will check the vat mandatoriness in partners
for those user logged on with a Venezuelan Company
The method will return True when:
*) The user's company is not from Venezuela
*) The partner being created is the one for the a company being created [TODO]
The method will return False when:
*) The user's company is from Venezuela AND the vat field is empty AND:
+) partner is_company=True AND parent_id is not NULL
+) partner with parent_id is NULL
+) partner with parent_id is NOT NULL AND type of address is invoice
"""
if context is None: context = {}
# Avoiding Egg-Chicken Syndrome
# TODO: Refine this approach this is big exception
# One that can be handle by end user, I hope so!!!
if context.get('create_company',False):
return True
user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id
acc_part_brw = self._find_accounting_partner(user_company.partner_id)
#Check if the user is not from a VE Company
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
return True
for rp_brw in self.browse(cr, uid,ids):
acc_part_brw = self._find_accounting_partner(rp_brw)
if acc_part_brw.country_id and acc_part_brw.country_id.code != 'VE':
continue
elif not acc_part_brw.country_id:
continue
if rp_brw.id == acc_part_brw.id and not acc_part_brw.vat:
return False
return True
def _validate(self, cr, uid, ids, context=None):
""" Validates the fields
"""
#In the original orm.py openerp does not allow using
#context within the constraint because we have to yield
# the same result always,
# we have overridden this behaviour
# TO ALLOW PASSING CONTEXT TO THE RESTRICTION IN RES.PARTNER
context = context or {}
lng = context.get('lang')
trans = self.pool.get('ir.translation')
error_msgs = []
for constraint in self._constraints:
fun, msg, fields = constraint
# We don't pass around the context here: validation code
# must always yield the same results.
if not fun(self, cr, uid, ids, context=context):
# Check presence of __call__ directly instead of using
# callable() because it will be deprecated as of Python 3.0
if hasattr(msg, '__call__'):
tmp_msg = msg(self, cr, uid, ids, context=context)
if isinstance(tmp_msg, tuple):
tmp_msg, params = tmp_msg
translated_msg = tmp_msg % params
else:
translated_msg = tmp_msg
else:
translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, msg)
error_msgs.append(
_("Error occurred while validating the field(s) %s: %s") % (','.join(fields), translated_msg)
)
self._invalids.update(fields)
if error_msgs:
raise except_orm('ValidateError', '\n'.join(error_msgs))
else:
self._invalids.clear()
_constraints = [
(_check_vat_mandatory, _("Error ! VAT is mandatory in the Accounting Partner"), []),
(_check_vat_uniqueness, _("Error ! Partner's VAT must be a unique value or empty"), []),
#~ (_check_partner_invoice_addr, _('Error ! The partner does not have an invoice address.'), []),
]
def vat_change_fiscal_requirements(self, cr, uid, ids, value, context=None):
""" Checks the syntax of the vat
"""
if context is None:
context={}
if not value:
return super(res_partner,self).vat_change(cr, uid, ids, value, context=context)
res = self.search(cr, uid, [('vat', 'ilike', value)])
if res:
rp = self.browse(cr, uid, res[0],context=context)
return {'warning': {
'title':_('Vat Error !'),
'message':_('The VAT [%s] looks like '%value +
'[%s] which is'%rp.vat.upper()+
' already being used by: %s'%rp.name.upper())
}
}
else:
return super(res_partner,self).vat_change(cr, uid, ids, value, context=context)
def check_vat_ve(self, vat, context = None):
""" Check Venezuelan VAT number, locally called RIF.
RIF: JXXXXXXXXX RIF VENEZOLAN IDENTIFICATION CARD: VXXXXXXXXX FOREIGN IDENTIFICATION CARD: EXXXXXXXXX
"""
if context is None:
context={}
if re.search(r'^[VJEGP][0-9]{9}$', vat):
return True
if re.search(r'^([VE][0-9]{1,8}|[D][0-9]{9})$', vat):
return True
return False
def vies_vat_check(self, cr, uid, country_code, vat_number, context=None):
"""
Validate against VAT Information Exchange System (VIES)
"""
if country_code.upper() != "VE":
return super(res_partner, self).vies_vat_check(cr, uid, country_code, vat_number,context=context)
else:
return super(res_partner, self).simple_vat_check(cr, uid, country_code, vat_number, context=context)
def update_rif(self, cr, uid, ids, context=None):
""" Load the rif and name of the partner from the database seniat
"""
if context is None:
context = {}
su_obj = self.pool.get('seniat.url')
return su_obj.update_rif(cr, uid, ids, context=context)
def button_check_vat(self, cr, uid, ids, context=None):
""" Is called by the button that load information of the partner from database
SENIAT
"""
if context is None: context = {}
context.update({'update_fiscal_information':True})
super(res_partner, self).check_vat(cr, uid, ids, context=context)
user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id
if user_company.vat_check_vies:
# force full VIES online check
self.update_rif(cr, uid, ids, context=context)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | """ Return the country code of the user company. If not exists, return XX. | random_line_split |
flex.rs | // Copyright 2020 The Druid Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Demonstrates alignment of children in the flex container.
//! This example showcases the full set of functionality of flex, giving you
//! knobs to change all the parameters. 99% of the time you will want to
//! hard-code these parameters, which will simplify your code considerably.
use druid::text::format::ParseFormatter;
use druid::widget::prelude::*;
use druid::widget::{
Button, Checkbox, CrossAxisAlignment, Flex, Label, MainAxisAlignment, ProgressBar, RadioGroup,
SizedBox, Slider, Stepper, Switch, TextBox, WidgetExt,
};
use druid::{AppLauncher, Color, Data, Lens, WidgetId, WindowDesc};
const DEFAULT_SPACER_SIZE: f64 = 8.;
const SPACER_OPTIONS: [(&str, Spacers); 4] = [
("None", Spacers::None),
("Default", Spacers::Default),
("Flex", Spacers::Flex),
("Fixed:", Spacers::Fixed),
];
const MAIN_AXIS_ALIGNMENT_OPTIONS: [(&str, MainAxisAlignment); 6] = [
("Start", MainAxisAlignment::Start),
("Center", MainAxisAlignment::Center),
("End", MainAxisAlignment::End),
("Between", MainAxisAlignment::SpaceBetween),
("Evenly", MainAxisAlignment::SpaceEvenly),
("Around", MainAxisAlignment::SpaceAround),
];
const CROSS_AXIS_ALIGNMENT_OPTIONS: [(&str, CrossAxisAlignment); 4] = [
("Start", CrossAxisAlignment::Start),
("Center", CrossAxisAlignment::Center),
("End", CrossAxisAlignment::End),
("Baseline", CrossAxisAlignment::Baseline),
];
const FLEX_TYPE_OPTIONS: [(&str, FlexType); 2] =
[("Row", FlexType::Row), ("Column", FlexType::Column)];
#[derive(Clone, Data, Lens)]
struct AppState {
demo_state: DemoState,
params: Params,
}
#[derive(Clone, Data, Lens)]
struct DemoState {
pub input_text: String,
pub enabled: bool,
volume: f64,
}
#[derive(Clone, Data, Lens)]
struct Params {
axis: FlexType,
cross_alignment: CrossAxisAlignment,
main_alignment: MainAxisAlignment,
fill_major_axis: bool,
debug_layout: bool,
fix_minor_axis: bool,
fix_major_axis: bool,
spacers: Spacers,
spacer_size: f64,
}
#[derive(Clone, Copy, PartialEq, Data)]
enum Spacers {
None,
Default,
Flex,
Fixed,
}
#[derive(Clone, Copy, PartialEq, Data)]
enum FlexType {
Row,
Column,
}
/// builds a child Flex widget from some paramaters.
struct Rebuilder {
inner: Box<dyn Widget<AppState>>,
}
impl Rebuilder {
fn new() -> Rebuilder {
Rebuilder {
inner: SizedBox::empty().boxed(),
}
}
fn rebuild_inner(&mut self, data: &AppState) {
self.inner = build_widget(&data.params);
}
}
impl Widget<AppState> for Rebuilder {
fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut AppState, env: &Env) {
self.inner.event(ctx, event, data, env)
}
fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &AppState, env: &Env) {
if let LifeCycle::WidgetAdded = event {
self.rebuild_inner(data);
}
self.inner.lifecycle(ctx, event, data, env)
}
fn update(&mut self, ctx: &mut UpdateCtx, old_data: &AppState, data: &AppState, env: &Env) {
if !old_data.params.same(&data.params) {
self.rebuild_inner(data);
ctx.children_changed();
} else |
}
fn layout(
&mut self,
ctx: &mut LayoutCtx,
bc: &BoxConstraints,
data: &AppState,
env: &Env,
) -> Size {
self.inner.layout(ctx, bc, data, env)
}
fn paint(&mut self, ctx: &mut PaintCtx, data: &AppState, env: &Env) {
self.inner.paint(ctx, data, env)
}
fn id(&self) -> Option<WidgetId> {
self.inner.id()
}
}
fn make_control_row() -> impl Widget<AppState> {
Flex::row()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(
Flex::column()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(Label::new("Type:"))
.with_default_spacer()
.with_child(RadioGroup::new(FLEX_TYPE_OPTIONS.to_vec()).lens(Params::axis)),
)
.with_default_spacer()
.with_child(
Flex::column()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(Label::new("CrossAxis:"))
.with_default_spacer()
.with_child(
RadioGroup::new(CROSS_AXIS_ALIGNMENT_OPTIONS.to_vec())
.lens(Params::cross_alignment),
),
)
.with_default_spacer()
.with_child(
Flex::column()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(Label::new("MainAxis:"))
.with_default_spacer()
.with_child(
RadioGroup::new(MAIN_AXIS_ALIGNMENT_OPTIONS.to_vec())
.lens(Params::main_alignment),
),
)
.with_default_spacer()
.with_child(make_spacer_select())
.with_default_spacer()
.with_child(
Flex::column()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(Label::new("Misc:"))
.with_default_spacer()
.with_child(Checkbox::new("Debug layout").lens(Params::debug_layout))
.with_default_spacer()
.with_child(Checkbox::new("Fill main axis").lens(Params::fill_major_axis))
.with_default_spacer()
.with_child(Checkbox::new("Fix minor axis size").lens(Params::fix_minor_axis))
.with_default_spacer()
.with_child(Checkbox::new("Fix major axis size").lens(Params::fix_major_axis)),
)
.padding(10.0)
.border(Color::grey(0.6), 2.0)
.rounded(5.0)
.lens(AppState::params)
}
fn make_spacer_select() -> impl Widget<Params> {
Flex::column()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(Label::new("Insert Spacers:"))
.with_default_spacer()
.with_child(RadioGroup::new(SPACER_OPTIONS.to_vec()).lens(Params::spacers))
.with_default_spacer()
.with_child(
Flex::row()
.with_child(
TextBox::new()
.with_formatter(ParseFormatter::new())
.lens(Params::spacer_size)
.fix_width(60.0),
)
.with_spacer(druid::theme::WIDGET_CONTROL_COMPONENT_PADDING)
.with_child(
Stepper::new()
.with_range(2.0, 50.0)
.with_step(2.0)
.lens(Params::spacer_size),
),
)
}
fn space_if_needed<T: Data>(flex: &mut Flex<T>, params: &Params) {
match params.spacers {
Spacers::None => (),
Spacers::Default => flex.add_default_spacer(),
Spacers::Fixed => flex.add_spacer(params.spacer_size),
Spacers::Flex => flex.add_flex_spacer(1.0),
}
}
fn build_widget(state: &Params) -> Box<dyn Widget<AppState>> {
let mut flex = match state.axis {
FlexType::Column => Flex::column(),
FlexType::Row => Flex::row(),
}
.cross_axis_alignment(state.cross_alignment)
.main_axis_alignment(state.main_alignment)
.must_fill_main_axis(state.fill_major_axis);
flex.add_child(
TextBox::new()
.with_placeholder("Sample text")
.lens(DemoState::input_text),
);
space_if_needed(&mut flex, state);
flex.add_child(
Button::new("Clear").on_click(|_ctx, data: &mut DemoState, _env| {
data.input_text.clear();
data.enabled = false;
data.volume = 0.0;
}),
);
space_if_needed(&mut flex, state);
flex.add_child(
Label::new(|data: &DemoState, _: &Env| data.input_text.clone()).with_text_size(32.0),
);
space_if_needed(&mut flex, state);
flex.add_child(Checkbox::new("Demo").lens(DemoState::enabled));
space_if_needed(&mut flex, state);
flex.add_child(Switch::new().lens(DemoState::enabled));
space_if_needed(&mut flex, state);
flex.add_child(Slider::new().lens(DemoState::volume));
space_if_needed(&mut flex, state);
flex.add_child(ProgressBar::new().lens(DemoState::volume));
space_if_needed(&mut flex, state);
flex.add_child(
Stepper::new()
.with_range(0.0, 1.0)
.with_step(0.1)
.with_wraparound(true)
.lens(DemoState::volume),
);
let mut flex = SizedBox::new(flex);
if state.fix_minor_axis {
match state.axis {
FlexType::Row => flex = flex.height(200.),
FlexType::Column => flex = flex.width(200.),
}
}
if state.fix_major_axis {
match state.axis {
FlexType::Row => flex = flex.width(600.),
FlexType::Column => flex = flex.height(300.),
}
}
let flex = flex
.padding(8.0)
.border(Color::grey(0.6), 2.0)
.rounded(5.0)
.lens(AppState::demo_state);
if state.debug_layout {
flex.debug_paint_layout().boxed()
} else {
flex.boxed()
}
}
fn make_ui() -> impl Widget<AppState> {
Flex::column()
.must_fill_main_axis(true)
.with_child(make_control_row())
.with_default_spacer()
.with_flex_child(Rebuilder::new().center(), 1.0)
.padding(10.0)
}
pub fn main() {
let main_window = WindowDesc::new(make_ui)
.window_size((720., 600.))
.with_min_size((620., 300.))
.title("Flex Container Options");
let demo_state = DemoState {
input_text: "hello".into(),
enabled: false,
volume: 0.0,
};
let params = Params {
axis: FlexType::Row,
cross_alignment: CrossAxisAlignment::Center,
main_alignment: MainAxisAlignment::Start,
debug_layout: false,
fix_minor_axis: false,
fix_major_axis: false,
spacers: Spacers::None,
spacer_size: DEFAULT_SPACER_SIZE,
fill_major_axis: false,
};
AppLauncher::with_window(main_window)
.use_simple_logger()
.launch(AppState { demo_state, params })
.expect("Failed to launch application");
}
| {
self.inner.update(ctx, old_data, data, env);
} | conditional_block |
flex.rs | // Copyright 2020 The Druid Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Demonstrates alignment of children in the flex container.
//! This example showcases the full set of functionality of flex, giving you
//! knobs to change all the parameters. 99% of the time you will want to
//! hard-code these parameters, which will simplify your code considerably.
use druid::text::format::ParseFormatter;
use druid::widget::prelude::*;
use druid::widget::{
Button, Checkbox, CrossAxisAlignment, Flex, Label, MainAxisAlignment, ProgressBar, RadioGroup,
SizedBox, Slider, Stepper, Switch, TextBox, WidgetExt,
};
use druid::{AppLauncher, Color, Data, Lens, WidgetId, WindowDesc};
const DEFAULT_SPACER_SIZE: f64 = 8.;
const SPACER_OPTIONS: [(&str, Spacers); 4] = [
("None", Spacers::None),
("Default", Spacers::Default),
("Flex", Spacers::Flex),
("Fixed:", Spacers::Fixed),
];
const MAIN_AXIS_ALIGNMENT_OPTIONS: [(&str, MainAxisAlignment); 6] = [
("Start", MainAxisAlignment::Start),
("Center", MainAxisAlignment::Center),
("End", MainAxisAlignment::End),
("Between", MainAxisAlignment::SpaceBetween),
("Evenly", MainAxisAlignment::SpaceEvenly),
("Around", MainAxisAlignment::SpaceAround),
];
const CROSS_AXIS_ALIGNMENT_OPTIONS: [(&str, CrossAxisAlignment); 4] = [
("Start", CrossAxisAlignment::Start),
("Center", CrossAxisAlignment::Center),
("End", CrossAxisAlignment::End),
("Baseline", CrossAxisAlignment::Baseline),
];
const FLEX_TYPE_OPTIONS: [(&str, FlexType); 2] =
[("Row", FlexType::Row), ("Column", FlexType::Column)];
#[derive(Clone, Data, Lens)]
struct AppState {
demo_state: DemoState,
params: Params,
}
#[derive(Clone, Data, Lens)]
struct DemoState {
pub input_text: String,
pub enabled: bool,
volume: f64,
}
#[derive(Clone, Data, Lens)]
struct Params {
axis: FlexType,
cross_alignment: CrossAxisAlignment,
main_alignment: MainAxisAlignment,
fill_major_axis: bool,
debug_layout: bool,
fix_minor_axis: bool,
fix_major_axis: bool,
spacers: Spacers,
spacer_size: f64,
}
#[derive(Clone, Copy, PartialEq, Data)]
enum Spacers {
None,
Default,
Flex,
Fixed,
}
#[derive(Clone, Copy, PartialEq, Data)]
enum FlexType {
Row,
Column,
}
/// builds a child Flex widget from some paramaters.
struct Rebuilder {
inner: Box<dyn Widget<AppState>>,
}
impl Rebuilder {
fn new() -> Rebuilder {
Rebuilder {
inner: SizedBox::empty().boxed(),
}
}
fn rebuild_inner(&mut self, data: &AppState) {
self.inner = build_widget(&data.params);
}
}
impl Widget<AppState> for Rebuilder {
fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut AppState, env: &Env) {
self.inner.event(ctx, event, data, env)
}
fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &AppState, env: &Env) {
if let LifeCycle::WidgetAdded = event {
self.rebuild_inner(data);
}
self.inner.lifecycle(ctx, event, data, env)
}
fn update(&mut self, ctx: &mut UpdateCtx, old_data: &AppState, data: &AppState, env: &Env) {
if !old_data.params.same(&data.params) {
self.rebuild_inner(data);
ctx.children_changed();
} else {
self.inner.update(ctx, old_data, data, env);
}
}
fn | (
&mut self,
ctx: &mut LayoutCtx,
bc: &BoxConstraints,
data: &AppState,
env: &Env,
) -> Size {
self.inner.layout(ctx, bc, data, env)
}
fn paint(&mut self, ctx: &mut PaintCtx, data: &AppState, env: &Env) {
self.inner.paint(ctx, data, env)
}
fn id(&self) -> Option<WidgetId> {
self.inner.id()
}
}
fn make_control_row() -> impl Widget<AppState> {
Flex::row()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(
Flex::column()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(Label::new("Type:"))
.with_default_spacer()
.with_child(RadioGroup::new(FLEX_TYPE_OPTIONS.to_vec()).lens(Params::axis)),
)
.with_default_spacer()
.with_child(
Flex::column()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(Label::new("CrossAxis:"))
.with_default_spacer()
.with_child(
RadioGroup::new(CROSS_AXIS_ALIGNMENT_OPTIONS.to_vec())
.lens(Params::cross_alignment),
),
)
.with_default_spacer()
.with_child(
Flex::column()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(Label::new("MainAxis:"))
.with_default_spacer()
.with_child(
RadioGroup::new(MAIN_AXIS_ALIGNMENT_OPTIONS.to_vec())
.lens(Params::main_alignment),
),
)
.with_default_spacer()
.with_child(make_spacer_select())
.with_default_spacer()
.with_child(
Flex::column()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(Label::new("Misc:"))
.with_default_spacer()
.with_child(Checkbox::new("Debug layout").lens(Params::debug_layout))
.with_default_spacer()
.with_child(Checkbox::new("Fill main axis").lens(Params::fill_major_axis))
.with_default_spacer()
.with_child(Checkbox::new("Fix minor axis size").lens(Params::fix_minor_axis))
.with_default_spacer()
.with_child(Checkbox::new("Fix major axis size").lens(Params::fix_major_axis)),
)
.padding(10.0)
.border(Color::grey(0.6), 2.0)
.rounded(5.0)
.lens(AppState::params)
}
fn make_spacer_select() -> impl Widget<Params> {
Flex::column()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(Label::new("Insert Spacers:"))
.with_default_spacer()
.with_child(RadioGroup::new(SPACER_OPTIONS.to_vec()).lens(Params::spacers))
.with_default_spacer()
.with_child(
Flex::row()
.with_child(
TextBox::new()
.with_formatter(ParseFormatter::new())
.lens(Params::spacer_size)
.fix_width(60.0),
)
.with_spacer(druid::theme::WIDGET_CONTROL_COMPONENT_PADDING)
.with_child(
Stepper::new()
.with_range(2.0, 50.0)
.with_step(2.0)
.lens(Params::spacer_size),
),
)
}
fn space_if_needed<T: Data>(flex: &mut Flex<T>, params: &Params) {
match params.spacers {
Spacers::None => (),
Spacers::Default => flex.add_default_spacer(),
Spacers::Fixed => flex.add_spacer(params.spacer_size),
Spacers::Flex => flex.add_flex_spacer(1.0),
}
}
fn build_widget(state: &Params) -> Box<dyn Widget<AppState>> {
let mut flex = match state.axis {
FlexType::Column => Flex::column(),
FlexType::Row => Flex::row(),
}
.cross_axis_alignment(state.cross_alignment)
.main_axis_alignment(state.main_alignment)
.must_fill_main_axis(state.fill_major_axis);
flex.add_child(
TextBox::new()
.with_placeholder("Sample text")
.lens(DemoState::input_text),
);
space_if_needed(&mut flex, state);
flex.add_child(
Button::new("Clear").on_click(|_ctx, data: &mut DemoState, _env| {
data.input_text.clear();
data.enabled = false;
data.volume = 0.0;
}),
);
space_if_needed(&mut flex, state);
flex.add_child(
Label::new(|data: &DemoState, _: &Env| data.input_text.clone()).with_text_size(32.0),
);
space_if_needed(&mut flex, state);
flex.add_child(Checkbox::new("Demo").lens(DemoState::enabled));
space_if_needed(&mut flex, state);
flex.add_child(Switch::new().lens(DemoState::enabled));
space_if_needed(&mut flex, state);
flex.add_child(Slider::new().lens(DemoState::volume));
space_if_needed(&mut flex, state);
flex.add_child(ProgressBar::new().lens(DemoState::volume));
space_if_needed(&mut flex, state);
flex.add_child(
Stepper::new()
.with_range(0.0, 1.0)
.with_step(0.1)
.with_wraparound(true)
.lens(DemoState::volume),
);
let mut flex = SizedBox::new(flex);
if state.fix_minor_axis {
match state.axis {
FlexType::Row => flex = flex.height(200.),
FlexType::Column => flex = flex.width(200.),
}
}
if state.fix_major_axis {
match state.axis {
FlexType::Row => flex = flex.width(600.),
FlexType::Column => flex = flex.height(300.),
}
}
let flex = flex
.padding(8.0)
.border(Color::grey(0.6), 2.0)
.rounded(5.0)
.lens(AppState::demo_state);
if state.debug_layout {
flex.debug_paint_layout().boxed()
} else {
flex.boxed()
}
}
fn make_ui() -> impl Widget<AppState> {
Flex::column()
.must_fill_main_axis(true)
.with_child(make_control_row())
.with_default_spacer()
.with_flex_child(Rebuilder::new().center(), 1.0)
.padding(10.0)
}
pub fn main() {
let main_window = WindowDesc::new(make_ui)
.window_size((720., 600.))
.with_min_size((620., 300.))
.title("Flex Container Options");
let demo_state = DemoState {
input_text: "hello".into(),
enabled: false,
volume: 0.0,
};
let params = Params {
axis: FlexType::Row,
cross_alignment: CrossAxisAlignment::Center,
main_alignment: MainAxisAlignment::Start,
debug_layout: false,
fix_minor_axis: false,
fix_major_axis: false,
spacers: Spacers::None,
spacer_size: DEFAULT_SPACER_SIZE,
fill_major_axis: false,
};
AppLauncher::with_window(main_window)
.use_simple_logger()
.launch(AppState { demo_state, params })
.expect("Failed to launch application");
}
| layout | identifier_name |
flex.rs | // Copyright 2020 The Druid Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Demonstrates alignment of children in the flex container.
//! This example showcases the full set of functionality of flex, giving you
//! knobs to change all the parameters. 99% of the time you will want to
//! hard-code these parameters, which will simplify your code considerably.
use druid::text::format::ParseFormatter;
use druid::widget::prelude::*;
use druid::widget::{
Button, Checkbox, CrossAxisAlignment, Flex, Label, MainAxisAlignment, ProgressBar, RadioGroup,
SizedBox, Slider, Stepper, Switch, TextBox, WidgetExt,
};
use druid::{AppLauncher, Color, Data, Lens, WidgetId, WindowDesc};
const DEFAULT_SPACER_SIZE: f64 = 8.;
const SPACER_OPTIONS: [(&str, Spacers); 4] = [
("None", Spacers::None),
("Default", Spacers::Default),
("Flex", Spacers::Flex), | const MAIN_AXIS_ALIGNMENT_OPTIONS: [(&str, MainAxisAlignment); 6] = [
("Start", MainAxisAlignment::Start),
("Center", MainAxisAlignment::Center),
("End", MainAxisAlignment::End),
("Between", MainAxisAlignment::SpaceBetween),
("Evenly", MainAxisAlignment::SpaceEvenly),
("Around", MainAxisAlignment::SpaceAround),
];
const CROSS_AXIS_ALIGNMENT_OPTIONS: [(&str, CrossAxisAlignment); 4] = [
("Start", CrossAxisAlignment::Start),
("Center", CrossAxisAlignment::Center),
("End", CrossAxisAlignment::End),
("Baseline", CrossAxisAlignment::Baseline),
];
const FLEX_TYPE_OPTIONS: [(&str, FlexType); 2] =
[("Row", FlexType::Row), ("Column", FlexType::Column)];
#[derive(Clone, Data, Lens)]
struct AppState {
demo_state: DemoState,
params: Params,
}
#[derive(Clone, Data, Lens)]
struct DemoState {
pub input_text: String,
pub enabled: bool,
volume: f64,
}
#[derive(Clone, Data, Lens)]
struct Params {
axis: FlexType,
cross_alignment: CrossAxisAlignment,
main_alignment: MainAxisAlignment,
fill_major_axis: bool,
debug_layout: bool,
fix_minor_axis: bool,
fix_major_axis: bool,
spacers: Spacers,
spacer_size: f64,
}
#[derive(Clone, Copy, PartialEq, Data)]
enum Spacers {
None,
Default,
Flex,
Fixed,
}
#[derive(Clone, Copy, PartialEq, Data)]
enum FlexType {
Row,
Column,
}
/// builds a child Flex widget from some paramaters.
struct Rebuilder {
inner: Box<dyn Widget<AppState>>,
}
impl Rebuilder {
fn new() -> Rebuilder {
Rebuilder {
inner: SizedBox::empty().boxed(),
}
}
fn rebuild_inner(&mut self, data: &AppState) {
self.inner = build_widget(&data.params);
}
}
impl Widget<AppState> for Rebuilder {
fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut AppState, env: &Env) {
self.inner.event(ctx, event, data, env)
}
fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &AppState, env: &Env) {
if let LifeCycle::WidgetAdded = event {
self.rebuild_inner(data);
}
self.inner.lifecycle(ctx, event, data, env)
}
fn update(&mut self, ctx: &mut UpdateCtx, old_data: &AppState, data: &AppState, env: &Env) {
if !old_data.params.same(&data.params) {
self.rebuild_inner(data);
ctx.children_changed();
} else {
self.inner.update(ctx, old_data, data, env);
}
}
fn layout(
&mut self,
ctx: &mut LayoutCtx,
bc: &BoxConstraints,
data: &AppState,
env: &Env,
) -> Size {
self.inner.layout(ctx, bc, data, env)
}
fn paint(&mut self, ctx: &mut PaintCtx, data: &AppState, env: &Env) {
self.inner.paint(ctx, data, env)
}
fn id(&self) -> Option<WidgetId> {
self.inner.id()
}
}
fn make_control_row() -> impl Widget<AppState> {
Flex::row()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(
Flex::column()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(Label::new("Type:"))
.with_default_spacer()
.with_child(RadioGroup::new(FLEX_TYPE_OPTIONS.to_vec()).lens(Params::axis)),
)
.with_default_spacer()
.with_child(
Flex::column()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(Label::new("CrossAxis:"))
.with_default_spacer()
.with_child(
RadioGroup::new(CROSS_AXIS_ALIGNMENT_OPTIONS.to_vec())
.lens(Params::cross_alignment),
),
)
.with_default_spacer()
.with_child(
Flex::column()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(Label::new("MainAxis:"))
.with_default_spacer()
.with_child(
RadioGroup::new(MAIN_AXIS_ALIGNMENT_OPTIONS.to_vec())
.lens(Params::main_alignment),
),
)
.with_default_spacer()
.with_child(make_spacer_select())
.with_default_spacer()
.with_child(
Flex::column()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(Label::new("Misc:"))
.with_default_spacer()
.with_child(Checkbox::new("Debug layout").lens(Params::debug_layout))
.with_default_spacer()
.with_child(Checkbox::new("Fill main axis").lens(Params::fill_major_axis))
.with_default_spacer()
.with_child(Checkbox::new("Fix minor axis size").lens(Params::fix_minor_axis))
.with_default_spacer()
.with_child(Checkbox::new("Fix major axis size").lens(Params::fix_major_axis)),
)
.padding(10.0)
.border(Color::grey(0.6), 2.0)
.rounded(5.0)
.lens(AppState::params)
}
fn make_spacer_select() -> impl Widget<Params> {
Flex::column()
.cross_axis_alignment(CrossAxisAlignment::Start)
.with_child(Label::new("Insert Spacers:"))
.with_default_spacer()
.with_child(RadioGroup::new(SPACER_OPTIONS.to_vec()).lens(Params::spacers))
.with_default_spacer()
.with_child(
Flex::row()
.with_child(
TextBox::new()
.with_formatter(ParseFormatter::new())
.lens(Params::spacer_size)
.fix_width(60.0),
)
.with_spacer(druid::theme::WIDGET_CONTROL_COMPONENT_PADDING)
.with_child(
Stepper::new()
.with_range(2.0, 50.0)
.with_step(2.0)
.lens(Params::spacer_size),
),
)
}
fn space_if_needed<T: Data>(flex: &mut Flex<T>, params: &Params) {
match params.spacers {
Spacers::None => (),
Spacers::Default => flex.add_default_spacer(),
Spacers::Fixed => flex.add_spacer(params.spacer_size),
Spacers::Flex => flex.add_flex_spacer(1.0),
}
}
fn build_widget(state: &Params) -> Box<dyn Widget<AppState>> {
let mut flex = match state.axis {
FlexType::Column => Flex::column(),
FlexType::Row => Flex::row(),
}
.cross_axis_alignment(state.cross_alignment)
.main_axis_alignment(state.main_alignment)
.must_fill_main_axis(state.fill_major_axis);
flex.add_child(
TextBox::new()
.with_placeholder("Sample text")
.lens(DemoState::input_text),
);
space_if_needed(&mut flex, state);
flex.add_child(
Button::new("Clear").on_click(|_ctx, data: &mut DemoState, _env| {
data.input_text.clear();
data.enabled = false;
data.volume = 0.0;
}),
);
space_if_needed(&mut flex, state);
flex.add_child(
Label::new(|data: &DemoState, _: &Env| data.input_text.clone()).with_text_size(32.0),
);
space_if_needed(&mut flex, state);
flex.add_child(Checkbox::new("Demo").lens(DemoState::enabled));
space_if_needed(&mut flex, state);
flex.add_child(Switch::new().lens(DemoState::enabled));
space_if_needed(&mut flex, state);
flex.add_child(Slider::new().lens(DemoState::volume));
space_if_needed(&mut flex, state);
flex.add_child(ProgressBar::new().lens(DemoState::volume));
space_if_needed(&mut flex, state);
flex.add_child(
Stepper::new()
.with_range(0.0, 1.0)
.with_step(0.1)
.with_wraparound(true)
.lens(DemoState::volume),
);
let mut flex = SizedBox::new(flex);
if state.fix_minor_axis {
match state.axis {
FlexType::Row => flex = flex.height(200.),
FlexType::Column => flex = flex.width(200.),
}
}
if state.fix_major_axis {
match state.axis {
FlexType::Row => flex = flex.width(600.),
FlexType::Column => flex = flex.height(300.),
}
}
let flex = flex
.padding(8.0)
.border(Color::grey(0.6), 2.0)
.rounded(5.0)
.lens(AppState::demo_state);
if state.debug_layout {
flex.debug_paint_layout().boxed()
} else {
flex.boxed()
}
}
fn make_ui() -> impl Widget<AppState> {
Flex::column()
.must_fill_main_axis(true)
.with_child(make_control_row())
.with_default_spacer()
.with_flex_child(Rebuilder::new().center(), 1.0)
.padding(10.0)
}
pub fn main() {
let main_window = WindowDesc::new(make_ui)
.window_size((720., 600.))
.with_min_size((620., 300.))
.title("Flex Container Options");
let demo_state = DemoState {
input_text: "hello".into(),
enabled: false,
volume: 0.0,
};
let params = Params {
axis: FlexType::Row,
cross_alignment: CrossAxisAlignment::Center,
main_alignment: MainAxisAlignment::Start,
debug_layout: false,
fix_minor_axis: false,
fix_major_axis: false,
spacers: Spacers::None,
spacer_size: DEFAULT_SPACER_SIZE,
fill_major_axis: false,
};
AppLauncher::with_window(main_window)
.use_simple_logger()
.launch(AppState { demo_state, params })
.expect("Failed to launch application");
} | ("Fixed:", Spacers::Fixed),
]; | random_line_split |
fona_3g.py | # SPDX-FileCopyrightText: Limor Fried/Ladyada for Adafruit Industries
# SPDX-FileCopyrightText: 2020 Brent Rubell for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`fona_3g`
================================================================================
FONA3G cellular module instance.
* Author(s): ladyada, Brent Rubell
Implementation Notes
--------------------
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
from micropython import const
from .adafruit_fona import FONA, REPLY_OK
try:
from typing import Optional, Tuple, Union
from busio import UART
from digitalio import DigitalInOut
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
except ImportError:
pass
__version__ = "0.0.0+auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_FONA.git"
FONA_MAX_SOCKETS = const(10)
class FONA3G(FONA):
"""FONA 3G module interface.
:param ~busio.UART uart: FONA UART connection.
:param ~digitalio.DigitalInOut rst: FONA RST pin.
:param ~digitalio.DigitalInOut ri: Optional FONA Ring Interrupt (RI) pin.
:param bool debug: Enable debugging output.
"""
def __init__(
self,
uart: UART,
rst: DigitalInOut,
ri: Optional[DigitalInOut] = None,
debug: bool = False,
) -> None:
uart.baudrate = 4800
super().__init__(uart, rst, ri, debug)
def set_baudrate(self, baudrate: int) -> bool:
"""Sets the FONA's UART baudrate."""
if not self._send_check_reply(
b"AT+IPREX=" + str(baudrate).encode(), reply=REPLY_OK
):
return False
return True
@property
def gps(self) -> bool:
"""Module's GPS status."""
if not self._send_check_reply(b"AT+CGPS?", reply=b"+CGPS: 1,1"):
return False
return True
@gps.setter
def gps(self, gps_on: bool = False) -> bool:
# check if GPS is already enabled
if not self._send_parse_reply(b"AT+CGPS?", b"+CGPS: "):
return False
state = self._buf
if gps_on and not state:
self._read_line()
if not self._send_check_reply(b"AT+CGPS=1", reply=REPLY_OK):
return False
else:
if not self._send_check_reply(b"AT+CGPS=0", reply=REPLY_OK):
return False
self._read_line(2000) # eat '+CGPS: 0'
return True
@property
def ue_system_info(self) -> bool:
"""UE System status."""
self._send_parse_reply(b"AT+CPSI?\r\n", b"+CPSI: ")
if not self._buf == "GSM" or self._buf == "WCDMA": # 5.15
return False
return True
@property
def local_ip(self) -> Optional[str]:
"""Module's local IP address, None if not set."""
if not self._send_parse_reply(b"AT+IPADDR", b"+IPADDR:"):
return None
return self._buf
# pylint: disable=too-many-return-statements
def set_gprs(
self,
apn: Optional[Tuple[str, Optional[str], Optional[str]]] = None,
enable: bool = True,
) -> bool:
"""Configures and brings up GPRS.
:param tuple apn: APN configuration settings
:param bool enable: Enables or disables GPRS.
"""
if enable:
if not self._send_check_reply(b"AT+CGATT=1", reply=REPLY_OK, timeout=10000):
return False
if apn is not None: # Configure APN
apn_name, apn_user, apn_pass = apn
if not self._send_check_reply_quoted(
b'AT+CGSOCKCONT=1,"IP",', apn_name.encode(), REPLY_OK, 10000
):
return False
if apn_user is not None:
self._uart_write(b"AT+CGAUTH=1,1,")
self._uart_write(b'"' + apn_pass.encode() + b'"')
self._uart_write(b',"' + apn_user.encode() + b'"\r\n')
if not self._get_reply(REPLY_OK, timeout=10000):
return False
# Enable PDP Context
if not self._send_check_reply(
b"AT+CIPMODE=1", reply=REPLY_OK, timeout=10000
): # Transparent mode
return False
# Open network
if not self._send_check_reply(
b"AT+NETOPEN=,,1", reply=b"Network opened", timeout=120000
):
return False
self._read_line()
if not self.local_ip:
return True
else:
# reset PDP state
if not self._send_check_reply(
b"AT+NETCLOSE", reply=b"Network closed", timeout=20000
):
return False
return True
### Socket API (TCP, UDP) ###
@property
def tx_timeout(self) -> bool:
"""CIPSEND timeout, in milliseconds."""
self._read_line()
if not self._send_parse_reply(b"AT+CIPTIMEOUT?", b"+CIPTIMEOUT:", idx=2):
return False
return True
@tx_timeout.setter
def tx_timeout(self, timeout: int) -> bool:
self._read_line()
if not self._send_check_reply(
b"AT+CIPTIMEOUT=" + str(timeout).encode(), reply=REPLY_OK
):
return False
return True
def get_host_by_name(self, hostname: str) -> Union[str, Literal[False]]:
"""Converts a hostname to a 4-byte IP address.
:param str hostname: Domain name.
"""
self._read_line()
if self._debug:
print("*** Get host by name")
if isinstance(hostname, str):
hostname = bytes(hostname, "utf-8")
self._uart_write(b'AT+CDNSGIP="' + hostname + b'"\r\n')
self._read_line(10000) # Read the +CDNSGIP, takes a while
if not self._parse_reply(b"+CDNSGIP: ", idx=2):
return False
return self._buf
def get_socket(self) -> int:
"""Returns an unused socket."""
if self._debug:
print("*** Get socket")
self._read_line()
self._uart_write(b"AT+CIPOPEN?\r\n") # Query which sockets are busy
socket = 0
for socket in range(0, FONA_MAX_SOCKETS):
self._read_line(120000)
try: # SIMCOM5320 lacks a socket connection status, this is a workaround
self._parse_reply(b"+CIPOPEN: ", idx=1)
except IndexError:
break
for _ in range(socket, FONA_MAX_SOCKETS):
self._read_line() # eat the rest of '+CIPOPEN' responses
if self._debug:
print("Allocated socket #%d" % socket)
return socket
def socket_connect(
self, sock_num: int, dest: str, port: int, conn_mode: int = 0
) -> bool:
"""Connects to a destination IP address or hostname.
By default, we use conn_mode TCP_MODE but we may also use UDP_MODE.
:param int sock_num: Desired socket number
:param str dest: Destination dest address.
:param int port: Destination dest port.
:param int conn_mode: Connection mode (TCP/UDP)
"""
if self._debug:
print(
"*** Socket connect, protocol={}, port={}, ip={}".format(
conn_mode, port, dest
)
)
self._uart.reset_input_buffer()
assert (
sock_num < FONA_MAX_SOCKETS
), "Provided socket exceeds the maximum number of \
sockets for the FONA module."
self._send_check_reply(b"AT+CIPHEAD=0", reply=REPLY_OK) # do not show ip header
self._send_check_reply(
b"AT+CIPSRIP=0", reply=REPLY_OK
) # do not show remote ip/port
self._send_check_reply(b"AT+CIPRXGET=1", reply=REPLY_OK) # manually get data
self._uart_write(b"AT+CIPOPEN=" + str(sock_num).encode())
if conn_mode == 0:
self._uart_write(b',"TCP","')
else:
self._uart_write(b',"UDP","')
self._uart_write(dest.encode() + b'",' + str(port).encode() + b"\r\n")
if not self._expect_reply(b"Connect ok"):
return False
return True
def | (self, sock_num: int) -> str:
"""Returns the IP address of the remote connection.
:param int sock_num: Desired socket number
"""
self._read_line()
assert (
sock_num < FONA_MAX_SOCKETS
), "Provided socket exceeds the maximum number of \
sockets for the FONA module."
self._uart_write(b"AT+CIPOPEN?\r\n")
for _ in range(0, sock_num + 1):
self._read_line()
self._parse_reply(b"+CIPOPEN:", idx=2)
ip_addr = self._buf
for _ in range(sock_num, FONA_MAX_SOCKETS):
self._read_line() # eat the rest of '+CIPOPEN' responses
return ip_addr
def socket_write(self, sock_num: int, buffer: bytes, timeout: int = 120000) -> bool:
"""Writes len(buffer) bytes to the socket.
:param int sock_num: Desired socket number to write to.
:param bytes buffer: Bytes to write to socket.
:param int timeout: Socket write timeout, in milliseconds. Defaults to 120000ms.
"""
self._read_line()
assert (
sock_num < FONA_MAX_SOCKETS
), "Provided socket exceeds the maximum number of \
sockets for the FONA module."
self._uart.reset_input_buffer()
self._uart_write(
b"AT+CIPSEND="
+ str(sock_num).encode()
+ b","
+ str(len(buffer)).encode()
+ b"\r\n"
)
self._read_line()
if self._buf[0] != 62:
# promoting mark ('>') not found
return False
self._uart_write(buffer + b"\r\n")
self._read_line() # eat 'OK'
self._read_line(3000) # expect +CIPSEND: rx,tx
if not self._parse_reply(b"+CIPSEND:", idx=1):
return False
if not self._buf == len(buffer): # assert data sent == buffer size
return False
self._read_line(timeout)
if "Send ok" not in self._buf.decode():
return False
return True
def socket_status(self, sock_num: int) -> bool:
"""Returns socket status, True if connected. False otherwise.
:param int sock_num: Desired socket number.
"""
if not self._send_parse_reply(b"AT+CIPCLOSE?", b"+CIPCLOSE:", idx=sock_num):
return False
if not self._buf == 1:
return False
return True
| remote_ip | identifier_name |
fona_3g.py | # SPDX-FileCopyrightText: Limor Fried/Ladyada for Adafruit Industries
# SPDX-FileCopyrightText: 2020 Brent Rubell for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`fona_3g`
================================================================================
FONA3G cellular module instance.
* Author(s): ladyada, Brent Rubell
Implementation Notes
--------------------
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
from micropython import const
from .adafruit_fona import FONA, REPLY_OK
try:
from typing import Optional, Tuple, Union
from busio import UART
from digitalio import DigitalInOut
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
except ImportError:
pass
__version__ = "0.0.0+auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_FONA.git"
FONA_MAX_SOCKETS = const(10)
class FONA3G(FONA):
"""FONA 3G module interface.
:param ~busio.UART uart: FONA UART connection.
:param ~digitalio.DigitalInOut rst: FONA RST pin.
:param ~digitalio.DigitalInOut ri: Optional FONA Ring Interrupt (RI) pin.
:param bool debug: Enable debugging output.
"""
def __init__(
self,
uart: UART,
rst: DigitalInOut,
ri: Optional[DigitalInOut] = None,
debug: bool = False,
) -> None:
uart.baudrate = 4800
super().__init__(uart, rst, ri, debug)
def set_baudrate(self, baudrate: int) -> bool:
"""Sets the FONA's UART baudrate."""
if not self._send_check_reply(
b"AT+IPREX=" + str(baudrate).encode(), reply=REPLY_OK
):
return False
return True
@property
def gps(self) -> bool:
"""Module's GPS status."""
if not self._send_check_reply(b"AT+CGPS?", reply=b"+CGPS: 1,1"):
return False
return True
@gps.setter
def gps(self, gps_on: bool = False) -> bool:
# check if GPS is already enabled
if not self._send_parse_reply(b"AT+CGPS?", b"+CGPS: "):
|
state = self._buf
if gps_on and not state:
self._read_line()
if not self._send_check_reply(b"AT+CGPS=1", reply=REPLY_OK):
return False
else:
if not self._send_check_reply(b"AT+CGPS=0", reply=REPLY_OK):
return False
self._read_line(2000) # eat '+CGPS: 0'
return True
@property
def ue_system_info(self) -> bool:
"""UE System status."""
self._send_parse_reply(b"AT+CPSI?\r\n", b"+CPSI: ")
if not self._buf == "GSM" or self._buf == "WCDMA": # 5.15
return False
return True
@property
def local_ip(self) -> Optional[str]:
"""Module's local IP address, None if not set."""
if not self._send_parse_reply(b"AT+IPADDR", b"+IPADDR:"):
return None
return self._buf
# pylint: disable=too-many-return-statements
def set_gprs(
self,
apn: Optional[Tuple[str, Optional[str], Optional[str]]] = None,
enable: bool = True,
) -> bool:
"""Configures and brings up GPRS.
:param tuple apn: APN configuration settings
:param bool enable: Enables or disables GPRS.
"""
if enable:
if not self._send_check_reply(b"AT+CGATT=1", reply=REPLY_OK, timeout=10000):
return False
if apn is not None: # Configure APN
apn_name, apn_user, apn_pass = apn
if not self._send_check_reply_quoted(
b'AT+CGSOCKCONT=1,"IP",', apn_name.encode(), REPLY_OK, 10000
):
return False
if apn_user is not None:
self._uart_write(b"AT+CGAUTH=1,1,")
self._uart_write(b'"' + apn_pass.encode() + b'"')
self._uart_write(b',"' + apn_user.encode() + b'"\r\n')
if not self._get_reply(REPLY_OK, timeout=10000):
return False
# Enable PDP Context
if not self._send_check_reply(
b"AT+CIPMODE=1", reply=REPLY_OK, timeout=10000
): # Transparent mode
return False
# Open network
if not self._send_check_reply(
b"AT+NETOPEN=,,1", reply=b"Network opened", timeout=120000
):
return False
self._read_line()
if not self.local_ip:
return True
else:
# reset PDP state
if not self._send_check_reply(
b"AT+NETCLOSE", reply=b"Network closed", timeout=20000
):
return False
return True
### Socket API (TCP, UDP) ###
@property
def tx_timeout(self) -> bool:
"""CIPSEND timeout, in milliseconds."""
self._read_line()
if not self._send_parse_reply(b"AT+CIPTIMEOUT?", b"+CIPTIMEOUT:", idx=2):
return False
return True
@tx_timeout.setter
def tx_timeout(self, timeout: int) -> bool:
self._read_line()
if not self._send_check_reply(
b"AT+CIPTIMEOUT=" + str(timeout).encode(), reply=REPLY_OK
):
return False
return True
def get_host_by_name(self, hostname: str) -> Union[str, Literal[False]]:
"""Converts a hostname to a 4-byte IP address.
:param str hostname: Domain name.
"""
self._read_line()
if self._debug:
print("*** Get host by name")
if isinstance(hostname, str):
hostname = bytes(hostname, "utf-8")
self._uart_write(b'AT+CDNSGIP="' + hostname + b'"\r\n')
self._read_line(10000) # Read the +CDNSGIP, takes a while
if not self._parse_reply(b"+CDNSGIP: ", idx=2):
return False
return self._buf
def get_socket(self) -> int:
"""Returns an unused socket."""
if self._debug:
print("*** Get socket")
self._read_line()
self._uart_write(b"AT+CIPOPEN?\r\n") # Query which sockets are busy
socket = 0
for socket in range(0, FONA_MAX_SOCKETS):
self._read_line(120000)
try: # SIMCOM5320 lacks a socket connection status, this is a workaround
self._parse_reply(b"+CIPOPEN: ", idx=1)
except IndexError:
break
for _ in range(socket, FONA_MAX_SOCKETS):
self._read_line() # eat the rest of '+CIPOPEN' responses
if self._debug:
print("Allocated socket #%d" % socket)
return socket
def socket_connect(
self, sock_num: int, dest: str, port: int, conn_mode: int = 0
) -> bool:
"""Connects to a destination IP address or hostname.
By default, we use conn_mode TCP_MODE but we may also use UDP_MODE.
:param int sock_num: Desired socket number
:param str dest: Destination dest address.
:param int port: Destination dest port.
:param int conn_mode: Connection mode (TCP/UDP)
"""
if self._debug:
print(
"*** Socket connect, protocol={}, port={}, ip={}".format(
conn_mode, port, dest
)
)
self._uart.reset_input_buffer()
assert (
sock_num < FONA_MAX_SOCKETS
), "Provided socket exceeds the maximum number of \
sockets for the FONA module."
self._send_check_reply(b"AT+CIPHEAD=0", reply=REPLY_OK) # do not show ip header
self._send_check_reply(
b"AT+CIPSRIP=0", reply=REPLY_OK
) # do not show remote ip/port
self._send_check_reply(b"AT+CIPRXGET=1", reply=REPLY_OK) # manually get data
self._uart_write(b"AT+CIPOPEN=" + str(sock_num).encode())
if conn_mode == 0:
self._uart_write(b',"TCP","')
else:
self._uart_write(b',"UDP","')
self._uart_write(dest.encode() + b'",' + str(port).encode() + b"\r\n")
if not self._expect_reply(b"Connect ok"):
return False
return True
def remote_ip(self, sock_num: int) -> str:
"""Returns the IP address of the remote connection.
:param int sock_num: Desired socket number
"""
self._read_line()
assert (
sock_num < FONA_MAX_SOCKETS
), "Provided socket exceeds the maximum number of \
sockets for the FONA module."
self._uart_write(b"AT+CIPOPEN?\r\n")
for _ in range(0, sock_num + 1):
self._read_line()
self._parse_reply(b"+CIPOPEN:", idx=2)
ip_addr = self._buf
for _ in range(sock_num, FONA_MAX_SOCKETS):
self._read_line() # eat the rest of '+CIPOPEN' responses
return ip_addr
def socket_write(self, sock_num: int, buffer: bytes, timeout: int = 120000) -> bool:
"""Writes len(buffer) bytes to the socket.
:param int sock_num: Desired socket number to write to.
:param bytes buffer: Bytes to write to socket.
:param int timeout: Socket write timeout, in milliseconds. Defaults to 120000ms.
"""
self._read_line()
assert (
sock_num < FONA_MAX_SOCKETS
), "Provided socket exceeds the maximum number of \
sockets for the FONA module."
self._uart.reset_input_buffer()
self._uart_write(
b"AT+CIPSEND="
+ str(sock_num).encode()
+ b","
+ str(len(buffer)).encode()
+ b"\r\n"
)
self._read_line()
if self._buf[0] != 62:
# promoting mark ('>') not found
return False
self._uart_write(buffer + b"\r\n")
self._read_line() # eat 'OK'
self._read_line(3000) # expect +CIPSEND: rx,tx
if not self._parse_reply(b"+CIPSEND:", idx=1):
return False
if not self._buf == len(buffer): # assert data sent == buffer size
return False
self._read_line(timeout)
if "Send ok" not in self._buf.decode():
return False
return True
def socket_status(self, sock_num: int) -> bool:
"""Returns socket status, True if connected. False otherwise.
:param int sock_num: Desired socket number.
"""
if not self._send_parse_reply(b"AT+CIPCLOSE?", b"+CIPCLOSE:", idx=sock_num):
return False
if not self._buf == 1:
return False
return True
| return False | conditional_block |
fona_3g.py | # SPDX-FileCopyrightText: Limor Fried/Ladyada for Adafruit Industries
# SPDX-FileCopyrightText: 2020 Brent Rubell for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`fona_3g`
================================================================================
FONA3G cellular module instance.
* Author(s): ladyada, Brent Rubell
Implementation Notes
--------------------
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
from micropython import const
from .adafruit_fona import FONA, REPLY_OK
try:
from typing import Optional, Tuple, Union
from busio import UART
from digitalio import DigitalInOut
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
except ImportError:
pass
__version__ = "0.0.0+auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_FONA.git"
FONA_MAX_SOCKETS = const(10)
class FONA3G(FONA):
| """FONA 3G module interface.
:param ~busio.UART uart: FONA UART connection.
:param ~digitalio.DigitalInOut rst: FONA RST pin.
:param ~digitalio.DigitalInOut ri: Optional FONA Ring Interrupt (RI) pin.
:param bool debug: Enable debugging output.
"""
def __init__(
self,
uart: UART,
rst: DigitalInOut,
ri: Optional[DigitalInOut] = None,
debug: bool = False,
) -> None:
uart.baudrate = 4800
super().__init__(uart, rst, ri, debug)
def set_baudrate(self, baudrate: int) -> bool:
"""Sets the FONA's UART baudrate."""
if not self._send_check_reply(
b"AT+IPREX=" + str(baudrate).encode(), reply=REPLY_OK
):
return False
return True
@property
def gps(self) -> bool:
"""Module's GPS status."""
if not self._send_check_reply(b"AT+CGPS?", reply=b"+CGPS: 1,1"):
return False
return True
@gps.setter
def gps(self, gps_on: bool = False) -> bool:
# check if GPS is already enabled
if not self._send_parse_reply(b"AT+CGPS?", b"+CGPS: "):
return False
state = self._buf
if gps_on and not state:
self._read_line()
if not self._send_check_reply(b"AT+CGPS=1", reply=REPLY_OK):
return False
else:
if not self._send_check_reply(b"AT+CGPS=0", reply=REPLY_OK):
return False
self._read_line(2000) # eat '+CGPS: 0'
return True
@property
def ue_system_info(self) -> bool:
"""UE System status."""
self._send_parse_reply(b"AT+CPSI?\r\n", b"+CPSI: ")
if not self._buf == "GSM" or self._buf == "WCDMA": # 5.15
return False
return True
@property
def local_ip(self) -> Optional[str]:
"""Module's local IP address, None if not set."""
if not self._send_parse_reply(b"AT+IPADDR", b"+IPADDR:"):
return None
return self._buf
# pylint: disable=too-many-return-statements
def set_gprs(
self,
apn: Optional[Tuple[str, Optional[str], Optional[str]]] = None,
enable: bool = True,
) -> bool:
"""Configures and brings up GPRS.
:param tuple apn: APN configuration settings
:param bool enable: Enables or disables GPRS.
"""
if enable:
if not self._send_check_reply(b"AT+CGATT=1", reply=REPLY_OK, timeout=10000):
return False
if apn is not None: # Configure APN
apn_name, apn_user, apn_pass = apn
if not self._send_check_reply_quoted(
b'AT+CGSOCKCONT=1,"IP",', apn_name.encode(), REPLY_OK, 10000
):
return False
if apn_user is not None:
self._uart_write(b"AT+CGAUTH=1,1,")
self._uart_write(b'"' + apn_pass.encode() + b'"')
self._uart_write(b',"' + apn_user.encode() + b'"\r\n')
if not self._get_reply(REPLY_OK, timeout=10000):
return False
# Enable PDP Context
if not self._send_check_reply(
b"AT+CIPMODE=1", reply=REPLY_OK, timeout=10000
): # Transparent mode
return False
# Open network
if not self._send_check_reply(
b"AT+NETOPEN=,,1", reply=b"Network opened", timeout=120000
):
return False
self._read_line()
if not self.local_ip:
return True
else:
# reset PDP state
if not self._send_check_reply(
b"AT+NETCLOSE", reply=b"Network closed", timeout=20000
):
return False
return True
### Socket API (TCP, UDP) ###
@property
def tx_timeout(self) -> bool:
"""CIPSEND timeout, in milliseconds."""
self._read_line()
if not self._send_parse_reply(b"AT+CIPTIMEOUT?", b"+CIPTIMEOUT:", idx=2):
return False
return True
@tx_timeout.setter
def tx_timeout(self, timeout: int) -> bool:
self._read_line()
if not self._send_check_reply(
b"AT+CIPTIMEOUT=" + str(timeout).encode(), reply=REPLY_OK
):
return False
return True
def get_host_by_name(self, hostname: str) -> Union[str, Literal[False]]:
"""Converts a hostname to a 4-byte IP address.
:param str hostname: Domain name.
"""
self._read_line()
if self._debug:
print("*** Get host by name")
if isinstance(hostname, str):
hostname = bytes(hostname, "utf-8")
self._uart_write(b'AT+CDNSGIP="' + hostname + b'"\r\n')
self._read_line(10000) # Read the +CDNSGIP, takes a while
if not self._parse_reply(b"+CDNSGIP: ", idx=2):
return False
return self._buf
def get_socket(self) -> int:
"""Returns an unused socket."""
if self._debug:
print("*** Get socket")
self._read_line()
self._uart_write(b"AT+CIPOPEN?\r\n") # Query which sockets are busy
socket = 0
for socket in range(0, FONA_MAX_SOCKETS):
self._read_line(120000)
try: # SIMCOM5320 lacks a socket connection status, this is a workaround
self._parse_reply(b"+CIPOPEN: ", idx=1)
except IndexError:
break
for _ in range(socket, FONA_MAX_SOCKETS):
self._read_line() # eat the rest of '+CIPOPEN' responses
if self._debug:
print("Allocated socket #%d" % socket)
return socket
def socket_connect(
self, sock_num: int, dest: str, port: int, conn_mode: int = 0
) -> bool:
"""Connects to a destination IP address or hostname.
By default, we use conn_mode TCP_MODE but we may also use UDP_MODE.
:param int sock_num: Desired socket number
:param str dest: Destination dest address.
:param int port: Destination dest port.
:param int conn_mode: Connection mode (TCP/UDP)
"""
if self._debug:
print(
"*** Socket connect, protocol={}, port={}, ip={}".format(
conn_mode, port, dest
)
)
self._uart.reset_input_buffer()
assert (
sock_num < FONA_MAX_SOCKETS
), "Provided socket exceeds the maximum number of \
sockets for the FONA module."
self._send_check_reply(b"AT+CIPHEAD=0", reply=REPLY_OK) # do not show ip header
self._send_check_reply(
b"AT+CIPSRIP=0", reply=REPLY_OK
) # do not show remote ip/port
self._send_check_reply(b"AT+CIPRXGET=1", reply=REPLY_OK) # manually get data
self._uart_write(b"AT+CIPOPEN=" + str(sock_num).encode())
if conn_mode == 0:
self._uart_write(b',"TCP","')
else:
self._uart_write(b',"UDP","')
self._uart_write(dest.encode() + b'",' + str(port).encode() + b"\r\n")
if not self._expect_reply(b"Connect ok"):
return False
return True
def remote_ip(self, sock_num: int) -> str:
"""Returns the IP address of the remote connection.
:param int sock_num: Desired socket number
"""
self._read_line()
assert (
sock_num < FONA_MAX_SOCKETS
), "Provided socket exceeds the maximum number of \
sockets for the FONA module."
self._uart_write(b"AT+CIPOPEN?\r\n")
for _ in range(0, sock_num + 1):
self._read_line()
self._parse_reply(b"+CIPOPEN:", idx=2)
ip_addr = self._buf
for _ in range(sock_num, FONA_MAX_SOCKETS):
self._read_line() # eat the rest of '+CIPOPEN' responses
return ip_addr
def socket_write(self, sock_num: int, buffer: bytes, timeout: int = 120000) -> bool:
"""Writes len(buffer) bytes to the socket.
:param int sock_num: Desired socket number to write to.
:param bytes buffer: Bytes to write to socket.
:param int timeout: Socket write timeout, in milliseconds. Defaults to 120000ms.
"""
self._read_line()
assert (
sock_num < FONA_MAX_SOCKETS
), "Provided socket exceeds the maximum number of \
sockets for the FONA module."
self._uart.reset_input_buffer()
self._uart_write(
b"AT+CIPSEND="
+ str(sock_num).encode()
+ b","
+ str(len(buffer)).encode()
+ b"\r\n"
)
self._read_line()
if self._buf[0] != 62:
# promoting mark ('>') not found
return False
self._uart_write(buffer + b"\r\n")
self._read_line() # eat 'OK'
self._read_line(3000) # expect +CIPSEND: rx,tx
if not self._parse_reply(b"+CIPSEND:", idx=1):
return False
if not self._buf == len(buffer): # assert data sent == buffer size
return False
self._read_line(timeout)
if "Send ok" not in self._buf.decode():
return False
return True
def socket_status(self, sock_num: int) -> bool:
"""Returns socket status, True if connected. False otherwise.
:param int sock_num: Desired socket number.
"""
if not self._send_parse_reply(b"AT+CIPCLOSE?", b"+CIPCLOSE:", idx=sock_num):
return False
if not self._buf == 1:
return False
return True | identifier_body | |
fona_3g.py | # SPDX-FileCopyrightText: Limor Fried/Ladyada for Adafruit Industries
# SPDX-FileCopyrightText: 2020 Brent Rubell for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`fona_3g`
================================================================================
FONA3G cellular module instance.
* Author(s): ladyada, Brent Rubell
Implementation Notes
--------------------
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
from micropython import const
from .adafruit_fona import FONA, REPLY_OK
try:
from typing import Optional, Tuple, Union
from busio import UART
from digitalio import DigitalInOut
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
except ImportError:
pass
__version__ = "0.0.0+auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_FONA.git"
FONA_MAX_SOCKETS = const(10)
class FONA3G(FONA):
"""FONA 3G module interface.
:param ~busio.UART uart: FONA UART connection.
:param ~digitalio.DigitalInOut rst: FONA RST pin.
:param ~digitalio.DigitalInOut ri: Optional FONA Ring Interrupt (RI) pin.
:param bool debug: Enable debugging output.
"""
def __init__(
self,
uart: UART,
rst: DigitalInOut,
ri: Optional[DigitalInOut] = None,
debug: bool = False,
) -> None:
uart.baudrate = 4800
super().__init__(uart, rst, ri, debug)
def set_baudrate(self, baudrate: int) -> bool:
"""Sets the FONA's UART baudrate."""
if not self._send_check_reply(
b"AT+IPREX=" + str(baudrate).encode(), reply=REPLY_OK
):
return False
return True
@property
def gps(self) -> bool:
"""Module's GPS status."""
if not self._send_check_reply(b"AT+CGPS?", reply=b"+CGPS: 1,1"):
return False
return True
@gps.setter
def gps(self, gps_on: bool = False) -> bool:
# check if GPS is already enabled
if not self._send_parse_reply(b"AT+CGPS?", b"+CGPS: "):
return False
state = self._buf
if gps_on and not state:
self._read_line()
if not self._send_check_reply(b"AT+CGPS=1", reply=REPLY_OK):
return False
else:
if not self._send_check_reply(b"AT+CGPS=0", reply=REPLY_OK):
return False
self._read_line(2000) # eat '+CGPS: 0'
return True
@property
def ue_system_info(self) -> bool:
"""UE System status."""
self._send_parse_reply(b"AT+CPSI?\r\n", b"+CPSI: ")
if not self._buf == "GSM" or self._buf == "WCDMA": # 5.15
return False
return True
@property
def local_ip(self) -> Optional[str]:
"""Module's local IP address, None if not set."""
if not self._send_parse_reply(b"AT+IPADDR", b"+IPADDR:"):
return None
return self._buf
# pylint: disable=too-many-return-statements
def set_gprs(
self,
apn: Optional[Tuple[str, Optional[str], Optional[str]]] = None,
enable: bool = True,
) -> bool:
"""Configures and brings up GPRS.
:param tuple apn: APN configuration settings
:param bool enable: Enables or disables GPRS.
"""
if enable:
if not self._send_check_reply(b"AT+CGATT=1", reply=REPLY_OK, timeout=10000):
return False
if apn is not None: # Configure APN
apn_name, apn_user, apn_pass = apn
if not self._send_check_reply_quoted(
b'AT+CGSOCKCONT=1,"IP",', apn_name.encode(), REPLY_OK, 10000
):
return False
if apn_user is not None:
self._uart_write(b"AT+CGAUTH=1,1,")
self._uart_write(b'"' + apn_pass.encode() + b'"')
self._uart_write(b',"' + apn_user.encode() + b'"\r\n')
if not self._get_reply(REPLY_OK, timeout=10000):
return False
# Enable PDP Context
if not self._send_check_reply(
b"AT+CIPMODE=1", reply=REPLY_OK, timeout=10000
): # Transparent mode
return False
# Open network
if not self._send_check_reply(
b"AT+NETOPEN=,,1", reply=b"Network opened", timeout=120000
):
return False
self._read_line()
if not self.local_ip:
return True
else:
# reset PDP state
if not self._send_check_reply(
b"AT+NETCLOSE", reply=b"Network closed", timeout=20000
):
return False
return True
### Socket API (TCP, UDP) ###
@property
def tx_timeout(self) -> bool:
"""CIPSEND timeout, in milliseconds."""
self._read_line()
if not self._send_parse_reply(b"AT+CIPTIMEOUT?", b"+CIPTIMEOUT:", idx=2):
return False
return True
@tx_timeout.setter
def tx_timeout(self, timeout: int) -> bool:
self._read_line()
if not self._send_check_reply(
b"AT+CIPTIMEOUT=" + str(timeout).encode(), reply=REPLY_OK
):
return False
return True
def get_host_by_name(self, hostname: str) -> Union[str, Literal[False]]:
"""Converts a hostname to a 4-byte IP address.
:param str hostname: Domain name.
"""
self._read_line()
if self._debug:
print("*** Get host by name")
if isinstance(hostname, str):
hostname = bytes(hostname, "utf-8")
self._uart_write(b'AT+CDNSGIP="' + hostname + b'"\r\n')
self._read_line(10000) # Read the +CDNSGIP, takes a while
if not self._parse_reply(b"+CDNSGIP: ", idx=2):
return False
return self._buf
def get_socket(self) -> int:
"""Returns an unused socket."""
if self._debug:
print("*** Get socket")
self._read_line()
self._uart_write(b"AT+CIPOPEN?\r\n") # Query which sockets are busy
socket = 0
for socket in range(0, FONA_MAX_SOCKETS):
self._read_line(120000)
try: # SIMCOM5320 lacks a socket connection status, this is a workaround
self._parse_reply(b"+CIPOPEN: ", idx=1)
except IndexError:
break
for _ in range(socket, FONA_MAX_SOCKETS):
self._read_line() # eat the rest of '+CIPOPEN' responses
if self._debug:
print("Allocated socket #%d" % socket)
return socket
def socket_connect(
self, sock_num: int, dest: str, port: int, conn_mode: int = 0
) -> bool:
"""Connects to a destination IP address or hostname.
By default, we use conn_mode TCP_MODE but we may also use UDP_MODE.
:param int sock_num: Desired socket number
:param str dest: Destination dest address.
:param int port: Destination dest port.
:param int conn_mode: Connection mode (TCP/UDP)
"""
if self._debug:
print(
"*** Socket connect, protocol={}, port={}, ip={}".format(
conn_mode, port, dest
)
)
self._uart.reset_input_buffer()
assert (
sock_num < FONA_MAX_SOCKETS
), "Provided socket exceeds the maximum number of \
sockets for the FONA module."
self._send_check_reply(b"AT+CIPHEAD=0", reply=REPLY_OK) # do not show ip header
self._send_check_reply(
b"AT+CIPSRIP=0", reply=REPLY_OK
) # do not show remote ip/port
self._send_check_reply(b"AT+CIPRXGET=1", reply=REPLY_OK) # manually get data
self._uart_write(b"AT+CIPOPEN=" + str(sock_num).encode())
if conn_mode == 0:
self._uart_write(b',"TCP","')
else:
self._uart_write(b',"UDP","')
self._uart_write(dest.encode() + b'",' + str(port).encode() + b"\r\n")
if not self._expect_reply(b"Connect ok"):
return False
return True
def remote_ip(self, sock_num: int) -> str: | """Returns the IP address of the remote connection.
:param int sock_num: Desired socket number
"""
self._read_line()
assert (
sock_num < FONA_MAX_SOCKETS
), "Provided socket exceeds the maximum number of \
sockets for the FONA module."
self._uart_write(b"AT+CIPOPEN?\r\n")
for _ in range(0, sock_num + 1):
self._read_line()
self._parse_reply(b"+CIPOPEN:", idx=2)
ip_addr = self._buf
for _ in range(sock_num, FONA_MAX_SOCKETS):
self._read_line() # eat the rest of '+CIPOPEN' responses
return ip_addr
def socket_write(self, sock_num: int, buffer: bytes, timeout: int = 120000) -> bool:
"""Writes len(buffer) bytes to the socket.
:param int sock_num: Desired socket number to write to.
:param bytes buffer: Bytes to write to socket.
:param int timeout: Socket write timeout, in milliseconds. Defaults to 120000ms.
"""
self._read_line()
assert (
sock_num < FONA_MAX_SOCKETS
), "Provided socket exceeds the maximum number of \
sockets for the FONA module."
self._uart.reset_input_buffer()
self._uart_write(
b"AT+CIPSEND="
+ str(sock_num).encode()
+ b","
+ str(len(buffer)).encode()
+ b"\r\n"
)
self._read_line()
if self._buf[0] != 62:
# promoting mark ('>') not found
return False
self._uart_write(buffer + b"\r\n")
self._read_line() # eat 'OK'
self._read_line(3000) # expect +CIPSEND: rx,tx
if not self._parse_reply(b"+CIPSEND:", idx=1):
return False
if not self._buf == len(buffer): # assert data sent == buffer size
return False
self._read_line(timeout)
if "Send ok" not in self._buf.decode():
return False
return True
def socket_status(self, sock_num: int) -> bool:
"""Returns socket status, True if connected. False otherwise.
:param int sock_num: Desired socket number.
"""
if not self._send_parse_reply(b"AT+CIPCLOSE?", b"+CIPCLOSE:", idx=sock_num):
return False
if not self._buf == 1:
return False
return True | random_line_split | |
circuit_drawer.py | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the CircuitDrawer class which is used to draw CircuitGraph instances.
"""
from collections import OrderedDict
import pennylane as qml
from pennylane.wires import Wires
from .charsets import UnicodeCharSet
from .representation_resolver import RepresentationResolver
from .grid import Grid
# pylint: disable=too-many-branches,too-many-arguments,too-many-return-statements,too-many-statements,consider-using-enumerate,too-many-instance-attributes
def _remove_duplicates(input_list):
"""Remove duplicate entries from a list.
This operation preserves the order of the list's elements.
Args:
input_list (list[Hashable]): The list whose duplicate entries shall be removed
Returns:
list[Hashable]: The input list without duplicate entries
"""
return list(OrderedDict.fromkeys(input_list))
class CircuitDrawer:
"""Creates a circuit diagram from the operators of a CircuitGraph in grid form.
Args:
raw_operation_grid (list[list[~.Operation]]): The CircuitGraph's operations
raw_observable_grid (list[list[qml.operation.Observable]]): The CircuitGraph's observables
wires (Wires): all wires on the device for which the circuit is drawn
charset (pennylane.circuit_drawer.CharSet, optional): The CharSet that shall be used for drawing.
show_all_wires (bool): If True, all wires, including empty wires, are printed.
"""
def __init__(
self,
raw_operation_grid,
raw_observable_grid,
wires,
charset=UnicodeCharSet,
show_all_wires=False,
):
self.operation_grid = Grid(raw_operation_grid)
self.observable_grid = Grid(raw_observable_grid)
self.wires = wires
self.active_wires = self.extract_active_wires(raw_operation_grid, raw_observable_grid)
self.charset = charset
if show_all_wires:
# if the provided wires include empty wires, make sure they are included
# as active wires
self.active_wires = wires.all_wires([wires, self.active_wires])
self.representation_resolver = RepresentationResolver(charset)
self.operation_representation_grid = Grid()
self.observable_representation_grid = Grid()
self.operation_decoration_indices = []
self.observable_decoration_indices = []
self.move_multi_wire_gates(self.operation_grid)
# Resolve operator names
self.resolve_representation(self.operation_grid, self.operation_representation_grid)
self.resolve_representation(self.observable_grid, self.observable_representation_grid)
# Add multi-wire gate lines
self.operation_decoration_indices = self.resolve_decorations(
self.operation_grid, self.operation_representation_grid
)
self.observable_decoration_indices = self.resolve_decorations(
self.observable_grid, self.observable_representation_grid
)
CircuitDrawer.pad_representation(
self.operation_representation_grid,
charset.WIRE,
"",
2 * charset.WIRE,
self.operation_decoration_indices,
)
CircuitDrawer.pad_representation(
self.operation_representation_grid,
charset.WIRE,
"",
"",
set(range(self.operation_grid.num_layers)) - set(self.operation_decoration_indices),
)
CircuitDrawer.pad_representation(
self.observable_representation_grid,
" ",
charset.MEASUREMENT + " ",
" ",
self.observable_decoration_indices,
)
CircuitDrawer.pad_representation(
self.observable_representation_grid,
charset.WIRE,
"",
"",
set(range(self.observable_grid.num_layers)) - set(self.observable_decoration_indices),
)
self.full_representation_grid = self.operation_representation_grid.copy()
self.full_representation_grid.append_grid_by_layers(self.observable_representation_grid)
def extract_active_wires(self, raw_operation_grid, raw_observable_grid):
"""Get the subset of wires on the device that are used in the circuit.
Args:
raw_operation_grid (Iterable[~.Operator]): The raw grid of operations
raw_observable_grid (Iterable[~.Operator]): The raw grid of observables
Return:
Wires: active wires on the device
"""
# pylint: disable=protected-access
all_operators = list(qml.utils._flatten(raw_operation_grid)) + list(
qml.utils._flatten(raw_observable_grid)
)
all_wires_with_duplicates = [op.wires for op in all_operators if op is not None]
# make Wires object containing all used wires
all_wires = Wires.all_wires(all_wires_with_duplicates)
# shared wires will observe the ordering of the device's wires
shared_wires = Wires.shared_wires([self.wires, all_wires])
return shared_wires
def resolve_representation(self, grid, representation_grid):
"""Resolve the string representation of the given Grid.
Args:
grid (pennylane.circuit_drawer.Grid): Grid that holds the circuit information
representation_grid (pennylane.circuit_drawer.Grid): Grid that is used to store the string representations
"""
for i in range(grid.num_layers):
representation_layer = [""] * grid.num_wires
for wire_indices, operator in enumerate(grid.layer(i)):
wire = self.active_wires[wire_indices]
representation_layer[
wire_indices
] = self.representation_resolver.element_representation(operator, wire)
representation_grid.append_layer(representation_layer)
def add_multi_wire_connectors_to_layer(self, wire_indices, decoration_layer):
"""Add multi wire connectors for the given wires to a layer.
Args:
wire_indices (list[int]): The indices of wires that are to be connected
decoration_layer (list[str]): The decoration layer to which the wires will be added
"""
min_wire = min(wire_indices)
max_wire = max(wire_indices)
decoration_layer[min_wire] = self.charset.TOP_MULTI_LINE_GATE_CONNECTOR
for k in range(min_wire + 1, max_wire):
if k in wire_indices:
decoration_layer[k] = self.charset.MIDDLE_MULTI_LINE_GATE_CONNECTOR
else:
decoration_layer[k] = self.charset.EMPTY_MULTI_LINE_GATE_CONNECTOR
decoration_layer[max_wire] = self.charset.BOTTOM_MULTI_LINE_GATE_CONNECTOR
def resolve_decorations(self, grid, representation_grid):
"""Resolve the decorations of the given Grid.
If decorations are in conflict, they are automatically spread over multiple layers.
Args:
grid (pennylane.circuit_drawer.Grid): Grid that holds the circuit information
representation_grid (pennylane.circuit_drawer.Grid): Grid that holds the string representations and into
which the decorations will be inserted
Returns:
list[int]: List with indices of inserted decoration layers
"""
j = 0
inserted_indices = []
for i in range(grid.num_layers):
layer_operators = _remove_duplicates(grid.layer(i))
decoration_layer = [""] * grid.num_wires
for op in layer_operators:
if op is None:
continue
wires = op.wires
wire_indices = self.active_wires.indices(wires)
if len(wire_indices) > 1:
min_wire = min(wire_indices)
max_wire = max(wire_indices)
# If there is a conflict between decorations, we start a new decoration_layer
if any(
[decoration_layer[wire] != "" for wire in range(min_wire, max_wire + 1)]
):
representation_grid.insert_layer(i + j, decoration_layer)
inserted_indices.append(i + j)
j += 1
decoration_layer = [""] * grid.num_wires
self.add_multi_wire_connectors_to_layer(wire_indices, decoration_layer)
representation_grid.insert_layer(i + j, decoration_layer)
inserted_indices.append(i + j)
j += 1
return inserted_indices
@staticmethod
def pad_representation(representation_grid, pad_str, prepend_str, suffix_str, skip_indices):
"""Pads the given representation so that width inside layers is constant.
Args:
representation_grid (pennylane.circuit_drawer.Grid): Grid that holds the string representations that will be padded
pad_str (str): String that shall be used for padding
prepend_str (str): String that is prepended to all representations that are not skipped
suffix_str (str): String that is appended to all representations
skip_indices (list[int]): Indices of layers that should be skipped
"""
for i in range(representation_grid.num_layers):
layer = representation_grid.layer(i)
max_width = max(map(len, layer))
if i in skip_indices:
continue
# Take the current layer and pad it with the pad_str
# and also prepend with prepend_str and append the suffix_str
# pylint: disable=cell-var-from-loop
representation_grid.replace_layer(
i,
list(
map(
lambda x: prepend_str + str.ljust(x, max_width, pad_str) + suffix_str, layer
)
),
)
def move_multi_wire_gates(self, operator_grid):
|
def draw(self):
"""Draw the circuit diagram.
Returns:
str: The circuit diagram
"""
rendered_string = ""
# extract the wire labels as strings and get their maximum length
wire_names = []
padding = 0
for i in range(self.full_representation_grid.num_wires):
wire_name = str(self.active_wires.labels[i])
padding = max(padding, len(wire_name))
wire_names.append(wire_name)
for i in range(self.full_representation_grid.num_wires):
# format wire name nicely
wire = self.full_representation_grid.wire(i)
s = " {:>" + str(padding) + "}: {}"
rendered_string += s.format(wire_names[i], 2 * self.charset.WIRE)
for s in wire:
rendered_string += s
rendered_string += "\n"
for symbol, cache in [
("U", self.representation_resolver.unitary_matrix_cache),
("H", self.representation_resolver.hermitian_matrix_cache),
("M", self.representation_resolver.matrix_cache),
]:
for idx, matrix in enumerate(cache):
rendered_string += "{}{} =\n{}\n".format(symbol, idx, matrix)
return rendered_string
| """Move multi-wire gates so that there are no interlocking multi-wire gates in the same layer.
Args:
operator_grid (pennylane.circuit_drawer.Grid): Grid that holds the circuit information and that will be edited.
"""
n = operator_grid.num_layers
i = -1
while i < n - 1:
i += 1
this_layer = operator_grid.layer(i)
layer_ops = _remove_duplicates(this_layer)
other_layer = [None] * operator_grid.num_wires
for j in range(len(layer_ops)):
op = layer_ops[j]
if op is None:
continue
# translate wires to their indices on the device
wire_indices = self.active_wires.indices(op.wires)
if len(op.wires) > 1:
sorted_wires = wire_indices.copy()
sorted_wires.sort()
blocked_wires = list(range(sorted_wires[0], sorted_wires[-1] + 1))
for k in range(j + 1, len(layer_ops)):
other_op = layer_ops[k]
if other_op is None:
continue
# translate wires to their indices on the device
other_wire_indices = self.active_wires.indices(other_op.wires)
other_sorted_wire_indices = other_wire_indices.copy()
other_sorted_wire_indices.sort()
other_blocked_wires = list(
range(other_sorted_wire_indices[0], other_sorted_wire_indices[-1] + 1)
)
if not set(other_blocked_wires).isdisjoint(set(blocked_wires)):
op_indices = [
idx for idx, layer_op in enumerate(this_layer) if layer_op == op
]
for l in op_indices:
other_layer[l] = op
this_layer[l] = None
break
if not all([item is None for item in other_layer]):
operator_grid.insert_layer(i + 1, other_layer)
n += 1 | identifier_body |
circuit_drawer.py | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the CircuitDrawer class which is used to draw CircuitGraph instances.
"""
from collections import OrderedDict
import pennylane as qml
from pennylane.wires import Wires
from .charsets import UnicodeCharSet
from .representation_resolver import RepresentationResolver
from .grid import Grid
# pylint: disable=too-many-branches,too-many-arguments,too-many-return-statements,too-many-statements,consider-using-enumerate,too-many-instance-attributes
def _remove_duplicates(input_list):
"""Remove duplicate entries from a list.
This operation preserves the order of the list's elements.
Args:
input_list (list[Hashable]): The list whose duplicate entries shall be removed
Returns:
list[Hashable]: The input list without duplicate entries
"""
return list(OrderedDict.fromkeys(input_list))
class CircuitDrawer:
"""Creates a circuit diagram from the operators of a CircuitGraph in grid form.
Args:
raw_operation_grid (list[list[~.Operation]]): The CircuitGraph's operations
raw_observable_grid (list[list[qml.operation.Observable]]): The CircuitGraph's observables
wires (Wires): all wires on the device for which the circuit is drawn
charset (pennylane.circuit_drawer.CharSet, optional): The CharSet that shall be used for drawing.
show_all_wires (bool): If True, all wires, including empty wires, are printed.
"""
def __init__(
self,
raw_operation_grid,
raw_observable_grid,
wires,
charset=UnicodeCharSet,
show_all_wires=False,
):
self.operation_grid = Grid(raw_operation_grid)
self.observable_grid = Grid(raw_observable_grid)
self.wires = wires
self.active_wires = self.extract_active_wires(raw_operation_grid, raw_observable_grid)
self.charset = charset
if show_all_wires:
# if the provided wires include empty wires, make sure they are included
# as active wires
self.active_wires = wires.all_wires([wires, self.active_wires])
self.representation_resolver = RepresentationResolver(charset)
self.operation_representation_grid = Grid()
self.observable_representation_grid = Grid()
self.operation_decoration_indices = []
self.observable_decoration_indices = []
self.move_multi_wire_gates(self.operation_grid)
# Resolve operator names
self.resolve_representation(self.operation_grid, self.operation_representation_grid)
self.resolve_representation(self.observable_grid, self.observable_representation_grid)
# Add multi-wire gate lines
self.operation_decoration_indices = self.resolve_decorations(
self.operation_grid, self.operation_representation_grid
)
self.observable_decoration_indices = self.resolve_decorations(
self.observable_grid, self.observable_representation_grid
)
CircuitDrawer.pad_representation(
self.operation_representation_grid,
charset.WIRE,
"",
2 * charset.WIRE,
self.operation_decoration_indices,
)
CircuitDrawer.pad_representation(
self.operation_representation_grid,
charset.WIRE,
"",
"",
set(range(self.operation_grid.num_layers)) - set(self.operation_decoration_indices),
)
CircuitDrawer.pad_representation(
self.observable_representation_grid,
" ",
charset.MEASUREMENT + " ",
" ",
self.observable_decoration_indices,
)
CircuitDrawer.pad_representation(
self.observable_representation_grid,
charset.WIRE,
"",
"",
set(range(self.observable_grid.num_layers)) - set(self.observable_decoration_indices),
)
self.full_representation_grid = self.operation_representation_grid.copy()
self.full_representation_grid.append_grid_by_layers(self.observable_representation_grid)
def extract_active_wires(self, raw_operation_grid, raw_observable_grid):
"""Get the subset of wires on the device that are used in the circuit.
Args:
raw_operation_grid (Iterable[~.Operator]): The raw grid of operations
raw_observable_grid (Iterable[~.Operator]): The raw grid of observables
Return:
Wires: active wires on the device
"""
# pylint: disable=protected-access
all_operators = list(qml.utils._flatten(raw_operation_grid)) + list(
qml.utils._flatten(raw_observable_grid)
)
all_wires_with_duplicates = [op.wires for op in all_operators if op is not None]
# make Wires object containing all used wires
all_wires = Wires.all_wires(all_wires_with_duplicates)
# shared wires will observe the ordering of the device's wires
shared_wires = Wires.shared_wires([self.wires, all_wires])
return shared_wires
def resolve_representation(self, grid, representation_grid):
"""Resolve the string representation of the given Grid.
Args:
grid (pennylane.circuit_drawer.Grid): Grid that holds the circuit information
representation_grid (pennylane.circuit_drawer.Grid): Grid that is used to store the string representations
"""
for i in range(grid.num_layers):
representation_layer = [""] * grid.num_wires
for wire_indices, operator in enumerate(grid.layer(i)):
wire = self.active_wires[wire_indices]
representation_layer[
wire_indices
] = self.representation_resolver.element_representation(operator, wire)
representation_grid.append_layer(representation_layer)
def add_multi_wire_connectors_to_layer(self, wire_indices, decoration_layer):
"""Add multi wire connectors for the given wires to a layer.
Args:
wire_indices (list[int]): The indices of wires that are to be connected
decoration_layer (list[str]): The decoration layer to which the wires will be added
"""
min_wire = min(wire_indices)
max_wire = max(wire_indices)
decoration_layer[min_wire] = self.charset.TOP_MULTI_LINE_GATE_CONNECTOR
for k in range(min_wire + 1, max_wire):
if k in wire_indices:
decoration_layer[k] = self.charset.MIDDLE_MULTI_LINE_GATE_CONNECTOR
else:
decoration_layer[k] = self.charset.EMPTY_MULTI_LINE_GATE_CONNECTOR
decoration_layer[max_wire] = self.charset.BOTTOM_MULTI_LINE_GATE_CONNECTOR
def resolve_decorations(self, grid, representation_grid):
"""Resolve the decorations of the given Grid.
If decorations are in conflict, they are automatically spread over multiple layers.
Args:
grid (pennylane.circuit_drawer.Grid): Grid that holds the circuit information
representation_grid (pennylane.circuit_drawer.Grid): Grid that holds the string representations and into
which the decorations will be inserted
Returns:
list[int]: List with indices of inserted decoration layers
"""
j = 0
inserted_indices = []
for i in range(grid.num_layers):
layer_operators = _remove_duplicates(grid.layer(i))
decoration_layer = [""] * grid.num_wires
for op in layer_operators:
if op is None:
continue
wires = op.wires
wire_indices = self.active_wires.indices(wires)
if len(wire_indices) > 1:
min_wire = min(wire_indices)
max_wire = max(wire_indices)
# If there is a conflict between decorations, we start a new decoration_layer
if any(
[decoration_layer[wire] != "" for wire in range(min_wire, max_wire + 1)]
):
representation_grid.insert_layer(i + j, decoration_layer)
inserted_indices.append(i + j)
j += 1
decoration_layer = [""] * grid.num_wires
self.add_multi_wire_connectors_to_layer(wire_indices, decoration_layer)
representation_grid.insert_layer(i + j, decoration_layer)
inserted_indices.append(i + j)
j += 1
return inserted_indices
@staticmethod
def pad_representation(representation_grid, pad_str, prepend_str, suffix_str, skip_indices):
"""Pads the given representation so that width inside layers is constant.
Args:
representation_grid (pennylane.circuit_drawer.Grid): Grid that holds the string representations that will be padded
pad_str (str): String that shall be used for padding
prepend_str (str): String that is prepended to all representations that are not skipped
suffix_str (str): String that is appended to all representations
skip_indices (list[int]): Indices of layers that should be skipped
"""
for i in range(representation_grid.num_layers):
|
def move_multi_wire_gates(self, operator_grid):
"""Move multi-wire gates so that there are no interlocking multi-wire gates in the same layer.
Args:
operator_grid (pennylane.circuit_drawer.Grid): Grid that holds the circuit information and that will be edited.
"""
n = operator_grid.num_layers
i = -1
while i < n - 1:
i += 1
this_layer = operator_grid.layer(i)
layer_ops = _remove_duplicates(this_layer)
other_layer = [None] * operator_grid.num_wires
for j in range(len(layer_ops)):
op = layer_ops[j]
if op is None:
continue
# translate wires to their indices on the device
wire_indices = self.active_wires.indices(op.wires)
if len(op.wires) > 1:
sorted_wires = wire_indices.copy()
sorted_wires.sort()
blocked_wires = list(range(sorted_wires[0], sorted_wires[-1] + 1))
for k in range(j + 1, len(layer_ops)):
other_op = layer_ops[k]
if other_op is None:
continue
# translate wires to their indices on the device
other_wire_indices = self.active_wires.indices(other_op.wires)
other_sorted_wire_indices = other_wire_indices.copy()
other_sorted_wire_indices.sort()
other_blocked_wires = list(
range(other_sorted_wire_indices[0], other_sorted_wire_indices[-1] + 1)
)
if not set(other_blocked_wires).isdisjoint(set(blocked_wires)):
op_indices = [
idx for idx, layer_op in enumerate(this_layer) if layer_op == op
]
for l in op_indices:
other_layer[l] = op
this_layer[l] = None
break
if not all([item is None for item in other_layer]):
operator_grid.insert_layer(i + 1, other_layer)
n += 1
def draw(self):
"""Draw the circuit diagram.
Returns:
str: The circuit diagram
"""
rendered_string = ""
# extract the wire labels as strings and get their maximum length
wire_names = []
padding = 0
for i in range(self.full_representation_grid.num_wires):
wire_name = str(self.active_wires.labels[i])
padding = max(padding, len(wire_name))
wire_names.append(wire_name)
for i in range(self.full_representation_grid.num_wires):
# format wire name nicely
wire = self.full_representation_grid.wire(i)
s = " {:>" + str(padding) + "}: {}"
rendered_string += s.format(wire_names[i], 2 * self.charset.WIRE)
for s in wire:
rendered_string += s
rendered_string += "\n"
for symbol, cache in [
("U", self.representation_resolver.unitary_matrix_cache),
("H", self.representation_resolver.hermitian_matrix_cache),
("M", self.representation_resolver.matrix_cache),
]:
for idx, matrix in enumerate(cache):
rendered_string += "{}{} =\n{}\n".format(symbol, idx, matrix)
return rendered_string
| layer = representation_grid.layer(i)
max_width = max(map(len, layer))
if i in skip_indices:
continue
# Take the current layer and pad it with the pad_str
# and also prepend with prepend_str and append the suffix_str
# pylint: disable=cell-var-from-loop
representation_grid.replace_layer(
i,
list(
map(
lambda x: prepend_str + str.ljust(x, max_width, pad_str) + suffix_str, layer
)
),
) | conditional_block |
circuit_drawer.py | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the CircuitDrawer class which is used to draw CircuitGraph instances.
"""
from collections import OrderedDict
import pennylane as qml
from pennylane.wires import Wires
from .charsets import UnicodeCharSet
from .representation_resolver import RepresentationResolver
from .grid import Grid
# pylint: disable=too-many-branches,too-many-arguments,too-many-return-statements,too-many-statements,consider-using-enumerate,too-many-instance-attributes
def | (input_list):
"""Remove duplicate entries from a list.
This operation preserves the order of the list's elements.
Args:
input_list (list[Hashable]): The list whose duplicate entries shall be removed
Returns:
list[Hashable]: The input list without duplicate entries
"""
return list(OrderedDict.fromkeys(input_list))
class CircuitDrawer:
"""Creates a circuit diagram from the operators of a CircuitGraph in grid form.
Args:
raw_operation_grid (list[list[~.Operation]]): The CircuitGraph's operations
raw_observable_grid (list[list[qml.operation.Observable]]): The CircuitGraph's observables
wires (Wires): all wires on the device for which the circuit is drawn
charset (pennylane.circuit_drawer.CharSet, optional): The CharSet that shall be used for drawing.
show_all_wires (bool): If True, all wires, including empty wires, are printed.
"""
def __init__(
self,
raw_operation_grid,
raw_observable_grid,
wires,
charset=UnicodeCharSet,
show_all_wires=False,
):
self.operation_grid = Grid(raw_operation_grid)
self.observable_grid = Grid(raw_observable_grid)
self.wires = wires
self.active_wires = self.extract_active_wires(raw_operation_grid, raw_observable_grid)
self.charset = charset
if show_all_wires:
# if the provided wires include empty wires, make sure they are included
# as active wires
self.active_wires = wires.all_wires([wires, self.active_wires])
self.representation_resolver = RepresentationResolver(charset)
self.operation_representation_grid = Grid()
self.observable_representation_grid = Grid()
self.operation_decoration_indices = []
self.observable_decoration_indices = []
self.move_multi_wire_gates(self.operation_grid)
# Resolve operator names
self.resolve_representation(self.operation_grid, self.operation_representation_grid)
self.resolve_representation(self.observable_grid, self.observable_representation_grid)
# Add multi-wire gate lines
self.operation_decoration_indices = self.resolve_decorations(
self.operation_grid, self.operation_representation_grid
)
self.observable_decoration_indices = self.resolve_decorations(
self.observable_grid, self.observable_representation_grid
)
CircuitDrawer.pad_representation(
self.operation_representation_grid,
charset.WIRE,
"",
2 * charset.WIRE,
self.operation_decoration_indices,
)
CircuitDrawer.pad_representation(
self.operation_representation_grid,
charset.WIRE,
"",
"",
set(range(self.operation_grid.num_layers)) - set(self.operation_decoration_indices),
)
CircuitDrawer.pad_representation(
self.observable_representation_grid,
" ",
charset.MEASUREMENT + " ",
" ",
self.observable_decoration_indices,
)
CircuitDrawer.pad_representation(
self.observable_representation_grid,
charset.WIRE,
"",
"",
set(range(self.observable_grid.num_layers)) - set(self.observable_decoration_indices),
)
self.full_representation_grid = self.operation_representation_grid.copy()
self.full_representation_grid.append_grid_by_layers(self.observable_representation_grid)
def extract_active_wires(self, raw_operation_grid, raw_observable_grid):
"""Get the subset of wires on the device that are used in the circuit.
Args:
raw_operation_grid (Iterable[~.Operator]): The raw grid of operations
raw_observable_grid (Iterable[~.Operator]): The raw grid of observables
Return:
Wires: active wires on the device
"""
# pylint: disable=protected-access
all_operators = list(qml.utils._flatten(raw_operation_grid)) + list(
qml.utils._flatten(raw_observable_grid)
)
all_wires_with_duplicates = [op.wires for op in all_operators if op is not None]
# make Wires object containing all used wires
all_wires = Wires.all_wires(all_wires_with_duplicates)
# shared wires will observe the ordering of the device's wires
shared_wires = Wires.shared_wires([self.wires, all_wires])
return shared_wires
def resolve_representation(self, grid, representation_grid):
"""Resolve the string representation of the given Grid.
Args:
grid (pennylane.circuit_drawer.Grid): Grid that holds the circuit information
representation_grid (pennylane.circuit_drawer.Grid): Grid that is used to store the string representations
"""
for i in range(grid.num_layers):
representation_layer = [""] * grid.num_wires
for wire_indices, operator in enumerate(grid.layer(i)):
wire = self.active_wires[wire_indices]
representation_layer[
wire_indices
] = self.representation_resolver.element_representation(operator, wire)
representation_grid.append_layer(representation_layer)
def add_multi_wire_connectors_to_layer(self, wire_indices, decoration_layer):
"""Add multi wire connectors for the given wires to a layer.
Args:
wire_indices (list[int]): The indices of wires that are to be connected
decoration_layer (list[str]): The decoration layer to which the wires will be added
"""
min_wire = min(wire_indices)
max_wire = max(wire_indices)
decoration_layer[min_wire] = self.charset.TOP_MULTI_LINE_GATE_CONNECTOR
for k in range(min_wire + 1, max_wire):
if k in wire_indices:
decoration_layer[k] = self.charset.MIDDLE_MULTI_LINE_GATE_CONNECTOR
else:
decoration_layer[k] = self.charset.EMPTY_MULTI_LINE_GATE_CONNECTOR
decoration_layer[max_wire] = self.charset.BOTTOM_MULTI_LINE_GATE_CONNECTOR
def resolve_decorations(self, grid, representation_grid):
"""Resolve the decorations of the given Grid.
If decorations are in conflict, they are automatically spread over multiple layers.
Args:
grid (pennylane.circuit_drawer.Grid): Grid that holds the circuit information
representation_grid (pennylane.circuit_drawer.Grid): Grid that holds the string representations and into
which the decorations will be inserted
Returns:
list[int]: List with indices of inserted decoration layers
"""
j = 0
inserted_indices = []
for i in range(grid.num_layers):
layer_operators = _remove_duplicates(grid.layer(i))
decoration_layer = [""] * grid.num_wires
for op in layer_operators:
if op is None:
continue
wires = op.wires
wire_indices = self.active_wires.indices(wires)
if len(wire_indices) > 1:
min_wire = min(wire_indices)
max_wire = max(wire_indices)
# If there is a conflict between decorations, we start a new decoration_layer
if any(
[decoration_layer[wire] != "" for wire in range(min_wire, max_wire + 1)]
):
representation_grid.insert_layer(i + j, decoration_layer)
inserted_indices.append(i + j)
j += 1
decoration_layer = [""] * grid.num_wires
self.add_multi_wire_connectors_to_layer(wire_indices, decoration_layer)
representation_grid.insert_layer(i + j, decoration_layer)
inserted_indices.append(i + j)
j += 1
return inserted_indices
@staticmethod
def pad_representation(representation_grid, pad_str, prepend_str, suffix_str, skip_indices):
"""Pads the given representation so that width inside layers is constant.
Args:
representation_grid (pennylane.circuit_drawer.Grid): Grid that holds the string representations that will be padded
pad_str (str): String that shall be used for padding
prepend_str (str): String that is prepended to all representations that are not skipped
suffix_str (str): String that is appended to all representations
skip_indices (list[int]): Indices of layers that should be skipped
"""
for i in range(representation_grid.num_layers):
layer = representation_grid.layer(i)
max_width = max(map(len, layer))
if i in skip_indices:
continue
# Take the current layer and pad it with the pad_str
# and also prepend with prepend_str and append the suffix_str
# pylint: disable=cell-var-from-loop
representation_grid.replace_layer(
i,
list(
map(
lambda x: prepend_str + str.ljust(x, max_width, pad_str) + suffix_str, layer
)
),
)
def move_multi_wire_gates(self, operator_grid):
"""Move multi-wire gates so that there are no interlocking multi-wire gates in the same layer.
Args:
operator_grid (pennylane.circuit_drawer.Grid): Grid that holds the circuit information and that will be edited.
"""
n = operator_grid.num_layers
i = -1
while i < n - 1:
i += 1
this_layer = operator_grid.layer(i)
layer_ops = _remove_duplicates(this_layer)
other_layer = [None] * operator_grid.num_wires
for j in range(len(layer_ops)):
op = layer_ops[j]
if op is None:
continue
# translate wires to their indices on the device
wire_indices = self.active_wires.indices(op.wires)
if len(op.wires) > 1:
sorted_wires = wire_indices.copy()
sorted_wires.sort()
blocked_wires = list(range(sorted_wires[0], sorted_wires[-1] + 1))
for k in range(j + 1, len(layer_ops)):
other_op = layer_ops[k]
if other_op is None:
continue
# translate wires to their indices on the device
other_wire_indices = self.active_wires.indices(other_op.wires)
other_sorted_wire_indices = other_wire_indices.copy()
other_sorted_wire_indices.sort()
other_blocked_wires = list(
range(other_sorted_wire_indices[0], other_sorted_wire_indices[-1] + 1)
)
if not set(other_blocked_wires).isdisjoint(set(blocked_wires)):
op_indices = [
idx for idx, layer_op in enumerate(this_layer) if layer_op == op
]
for l in op_indices:
other_layer[l] = op
this_layer[l] = None
break
if not all([item is None for item in other_layer]):
operator_grid.insert_layer(i + 1, other_layer)
n += 1
def draw(self):
"""Draw the circuit diagram.
Returns:
str: The circuit diagram
"""
rendered_string = ""
# extract the wire labels as strings and get their maximum length
wire_names = []
padding = 0
for i in range(self.full_representation_grid.num_wires):
wire_name = str(self.active_wires.labels[i])
padding = max(padding, len(wire_name))
wire_names.append(wire_name)
for i in range(self.full_representation_grid.num_wires):
# format wire name nicely
wire = self.full_representation_grid.wire(i)
s = " {:>" + str(padding) + "}: {}"
rendered_string += s.format(wire_names[i], 2 * self.charset.WIRE)
for s in wire:
rendered_string += s
rendered_string += "\n"
for symbol, cache in [
("U", self.representation_resolver.unitary_matrix_cache),
("H", self.representation_resolver.hermitian_matrix_cache),
("M", self.representation_resolver.matrix_cache),
]:
for idx, matrix in enumerate(cache):
rendered_string += "{}{} =\n{}\n".format(symbol, idx, matrix)
return rendered_string
| _remove_duplicates | identifier_name |
circuit_drawer.py | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the CircuitDrawer class which is used to draw CircuitGraph instances.
"""
from collections import OrderedDict
import pennylane as qml
from pennylane.wires import Wires
from .charsets import UnicodeCharSet
from .representation_resolver import RepresentationResolver
from .grid import Grid
# pylint: disable=too-many-branches,too-many-arguments,too-many-return-statements,too-many-statements,consider-using-enumerate,too-many-instance-attributes
def _remove_duplicates(input_list):
"""Remove duplicate entries from a list.
This operation preserves the order of the list's elements.
Args:
input_list (list[Hashable]): The list whose duplicate entries shall be removed
Returns:
list[Hashable]: The input list without duplicate entries
"""
return list(OrderedDict.fromkeys(input_list))
class CircuitDrawer:
"""Creates a circuit diagram from the operators of a CircuitGraph in grid form.
Args:
raw_operation_grid (list[list[~.Operation]]): The CircuitGraph's operations
raw_observable_grid (list[list[qml.operation.Observable]]): The CircuitGraph's observables
wires (Wires): all wires on the device for which the circuit is drawn
charset (pennylane.circuit_drawer.CharSet, optional): The CharSet that shall be used for drawing.
show_all_wires (bool): If True, all wires, including empty wires, are printed.
"""
def __init__(
self,
raw_operation_grid,
raw_observable_grid,
wires,
charset=UnicodeCharSet,
show_all_wires=False,
):
self.operation_grid = Grid(raw_operation_grid)
self.observable_grid = Grid(raw_observable_grid)
self.wires = wires
self.active_wires = self.extract_active_wires(raw_operation_grid, raw_observable_grid)
self.charset = charset
if show_all_wires:
# if the provided wires include empty wires, make sure they are included
# as active wires
self.active_wires = wires.all_wires([wires, self.active_wires])
self.representation_resolver = RepresentationResolver(charset)
self.operation_representation_grid = Grid()
self.observable_representation_grid = Grid()
self.operation_decoration_indices = []
self.observable_decoration_indices = []
self.move_multi_wire_gates(self.operation_grid)
# Resolve operator names
self.resolve_representation(self.operation_grid, self.operation_representation_grid)
self.resolve_representation(self.observable_grid, self.observable_representation_grid)
# Add multi-wire gate lines
self.operation_decoration_indices = self.resolve_decorations(
self.operation_grid, self.operation_representation_grid
)
self.observable_decoration_indices = self.resolve_decorations(
self.observable_grid, self.observable_representation_grid
)
CircuitDrawer.pad_representation(
self.operation_representation_grid,
charset.WIRE,
"",
2 * charset.WIRE,
self.operation_decoration_indices,
)
CircuitDrawer.pad_representation(
self.operation_representation_grid,
charset.WIRE,
"",
"",
set(range(self.operation_grid.num_layers)) - set(self.operation_decoration_indices),
)
CircuitDrawer.pad_representation(
self.observable_representation_grid,
" ",
charset.MEASUREMENT + " ",
" ",
self.observable_decoration_indices,
)
CircuitDrawer.pad_representation(
self.observable_representation_grid,
charset.WIRE,
"",
"",
set(range(self.observable_grid.num_layers)) - set(self.observable_decoration_indices),
)
self.full_representation_grid = self.operation_representation_grid.copy()
self.full_representation_grid.append_grid_by_layers(self.observable_representation_grid)
def extract_active_wires(self, raw_operation_grid, raw_observable_grid):
"""Get the subset of wires on the device that are used in the circuit.
Args:
raw_operation_grid (Iterable[~.Operator]): The raw grid of operations
raw_observable_grid (Iterable[~.Operator]): The raw grid of observables
Return:
Wires: active wires on the device
"""
# pylint: disable=protected-access
all_operators = list(qml.utils._flatten(raw_operation_grid)) + list(
qml.utils._flatten(raw_observable_grid)
) | all_wires_with_duplicates = [op.wires for op in all_operators if op is not None]
# make Wires object containing all used wires
all_wires = Wires.all_wires(all_wires_with_duplicates)
# shared wires will observe the ordering of the device's wires
shared_wires = Wires.shared_wires([self.wires, all_wires])
return shared_wires
def resolve_representation(self, grid, representation_grid):
"""Resolve the string representation of the given Grid.
Args:
grid (pennylane.circuit_drawer.Grid): Grid that holds the circuit information
representation_grid (pennylane.circuit_drawer.Grid): Grid that is used to store the string representations
"""
for i in range(grid.num_layers):
representation_layer = [""] * grid.num_wires
for wire_indices, operator in enumerate(grid.layer(i)):
wire = self.active_wires[wire_indices]
representation_layer[
wire_indices
] = self.representation_resolver.element_representation(operator, wire)
representation_grid.append_layer(representation_layer)
def add_multi_wire_connectors_to_layer(self, wire_indices, decoration_layer):
"""Add multi wire connectors for the given wires to a layer.
Args:
wire_indices (list[int]): The indices of wires that are to be connected
decoration_layer (list[str]): The decoration layer to which the wires will be added
"""
min_wire = min(wire_indices)
max_wire = max(wire_indices)
decoration_layer[min_wire] = self.charset.TOP_MULTI_LINE_GATE_CONNECTOR
for k in range(min_wire + 1, max_wire):
if k in wire_indices:
decoration_layer[k] = self.charset.MIDDLE_MULTI_LINE_GATE_CONNECTOR
else:
decoration_layer[k] = self.charset.EMPTY_MULTI_LINE_GATE_CONNECTOR
decoration_layer[max_wire] = self.charset.BOTTOM_MULTI_LINE_GATE_CONNECTOR
def resolve_decorations(self, grid, representation_grid):
"""Resolve the decorations of the given Grid.
If decorations are in conflict, they are automatically spread over multiple layers.
Args:
grid (pennylane.circuit_drawer.Grid): Grid that holds the circuit information
representation_grid (pennylane.circuit_drawer.Grid): Grid that holds the string representations and into
which the decorations will be inserted
Returns:
list[int]: List with indices of inserted decoration layers
"""
j = 0
inserted_indices = []
for i in range(grid.num_layers):
layer_operators = _remove_duplicates(grid.layer(i))
decoration_layer = [""] * grid.num_wires
for op in layer_operators:
if op is None:
continue
wires = op.wires
wire_indices = self.active_wires.indices(wires)
if len(wire_indices) > 1:
min_wire = min(wire_indices)
max_wire = max(wire_indices)
# If there is a conflict between decorations, we start a new decoration_layer
if any(
[decoration_layer[wire] != "" for wire in range(min_wire, max_wire + 1)]
):
representation_grid.insert_layer(i + j, decoration_layer)
inserted_indices.append(i + j)
j += 1
decoration_layer = [""] * grid.num_wires
self.add_multi_wire_connectors_to_layer(wire_indices, decoration_layer)
representation_grid.insert_layer(i + j, decoration_layer)
inserted_indices.append(i + j)
j += 1
return inserted_indices
@staticmethod
def pad_representation(representation_grid, pad_str, prepend_str, suffix_str, skip_indices):
"""Pads the given representation so that width inside layers is constant.
Args:
representation_grid (pennylane.circuit_drawer.Grid): Grid that holds the string representations that will be padded
pad_str (str): String that shall be used for padding
prepend_str (str): String that is prepended to all representations that are not skipped
suffix_str (str): String that is appended to all representations
skip_indices (list[int]): Indices of layers that should be skipped
"""
for i in range(representation_grid.num_layers):
layer = representation_grid.layer(i)
max_width = max(map(len, layer))
if i in skip_indices:
continue
# Take the current layer and pad it with the pad_str
# and also prepend with prepend_str and append the suffix_str
# pylint: disable=cell-var-from-loop
representation_grid.replace_layer(
i,
list(
map(
lambda x: prepend_str + str.ljust(x, max_width, pad_str) + suffix_str, layer
)
),
)
def move_multi_wire_gates(self, operator_grid):
"""Move multi-wire gates so that there are no interlocking multi-wire gates in the same layer.
Args:
operator_grid (pennylane.circuit_drawer.Grid): Grid that holds the circuit information and that will be edited.
"""
n = operator_grid.num_layers
i = -1
while i < n - 1:
i += 1
this_layer = operator_grid.layer(i)
layer_ops = _remove_duplicates(this_layer)
other_layer = [None] * operator_grid.num_wires
for j in range(len(layer_ops)):
op = layer_ops[j]
if op is None:
continue
# translate wires to their indices on the device
wire_indices = self.active_wires.indices(op.wires)
if len(op.wires) > 1:
sorted_wires = wire_indices.copy()
sorted_wires.sort()
blocked_wires = list(range(sorted_wires[0], sorted_wires[-1] + 1))
for k in range(j + 1, len(layer_ops)):
other_op = layer_ops[k]
if other_op is None:
continue
# translate wires to their indices on the device
other_wire_indices = self.active_wires.indices(other_op.wires)
other_sorted_wire_indices = other_wire_indices.copy()
other_sorted_wire_indices.sort()
other_blocked_wires = list(
range(other_sorted_wire_indices[0], other_sorted_wire_indices[-1] + 1)
)
if not set(other_blocked_wires).isdisjoint(set(blocked_wires)):
op_indices = [
idx for idx, layer_op in enumerate(this_layer) if layer_op == op
]
for l in op_indices:
other_layer[l] = op
this_layer[l] = None
break
if not all([item is None for item in other_layer]):
operator_grid.insert_layer(i + 1, other_layer)
n += 1
def draw(self):
"""Draw the circuit diagram.
Returns:
str: The circuit diagram
"""
rendered_string = ""
# extract the wire labels as strings and get their maximum length
wire_names = []
padding = 0
for i in range(self.full_representation_grid.num_wires):
wire_name = str(self.active_wires.labels[i])
padding = max(padding, len(wire_name))
wire_names.append(wire_name)
for i in range(self.full_representation_grid.num_wires):
# format wire name nicely
wire = self.full_representation_grid.wire(i)
s = " {:>" + str(padding) + "}: {}"
rendered_string += s.format(wire_names[i], 2 * self.charset.WIRE)
for s in wire:
rendered_string += s
rendered_string += "\n"
for symbol, cache in [
("U", self.representation_resolver.unitary_matrix_cache),
("H", self.representation_resolver.hermitian_matrix_cache),
("M", self.representation_resolver.matrix_cache),
]:
for idx, matrix in enumerate(cache):
rendered_string += "{}{} =\n{}\n".format(symbol, idx, matrix)
return rendered_string | random_line_split | |
eventlog.go | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package logger is used to initialize the logger(main logger and event logger). This package should be imported once, usually from main, then call GetLogger.
package logger
import (
"bufio"
"fmt"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/aws/amazon-ssm-agent/agent/appconfig"
"github.com/aws/amazon-ssm-agent/agent/version"
"github.com/aws/amazon-ssm-agent/core/workerprovider/longrunningprovider/datastore/filesystem"
)
var (
eventLogInst *EventLog
singleSpacePattern = regexp.MustCompile(`\s+`)
AuditFolderName = "audits"
)
// GetEventLog returns the Event log instance and is called by the SSM Logger during app startup
func GetEventLog(logFilePath string, logFileName string) (eventLog *EventLog) {
if eventLogInst != nil {
return eventLogInst
}
var maxRollsDay int = appconfig.DefaultAuditExpirationDay
config, err := appconfig.Config(true)
if err == nil |
eventLogInstance := EventLog{
eventChannel: make(chan string, 2),
noOfHistoryFiles: maxRollsDay,
schemaVersion: "1",
eventLogPath: filepath.Join(logFilePath, AuditFolderName),
eventLogName: logFileName,
datePattern: "2006-01-02",
fileSystem: filesystem.NewFileSystem(),
timePattern: "15:04:05", // HH:MM:SS
}
eventLogInstance.init()
eventLogInstance.rotateEventLog()
eventLogInst = &eventLogInstance
return eventLogInst
}
// Creates events with the appropriate file parameters passed to this instance
type EventLog struct {
eventChannel chan string // Used for passing events to file write go routine.
noOfHistoryFiles int // Number of audit files to maintain in log folder
eventLogPath string // Log file path
eventLogName string // Event Log Name
schemaVersion string // Schema version
datePattern string // Date Pattern used for creating files
fileSystem filesystem.IFileSystem
timePattern string
currentFileName string // Name of File currently being used for logging in this instance. On app startup, it will be empty
nextFileName string // Current day's log file name
fileDelimiter string
}
// Init sets the Default value for instance
func (e *EventLog) init() {
e.currentFileName = ""
e.fileDelimiter = "-"
e.nextFileName = e.eventLogName + e.fileDelimiter + time.Now().Format(e.datePattern)
if err := e.fileSystem.MkdirAll(e.eventLogPath, appconfig.ReadWriteExecuteAccess); err != nil {
fmt.Println("Failed to create directory for audits", err)
}
e.eventWriter()
}
//Getters
// GetTodayAuditFileName will return the audit file name of currently used one
func (e *EventLog) GetTodayAuditFileName() string {
return e.nextFileName
}
// GetAuditFileName will return the audit file name without the date pattern
func (e *EventLog) GetAuditFileName() string {
return e.eventLogName
}
// GetAuditFilePath will return the audit file path
func (e *EventLog) GetAuditFilePath() string {
return e.eventLogPath
}
// GetAuditFilePath will return the file system instance
func (e *EventLog) GetFileSystem() filesystem.IFileSystem {
return e.fileSystem
}
// loadEvent loads the event to the channel to be passed to the write file go routine
func (e *EventLog) loadEvent(eventType string, agentVersion string, eventContent string) {
// Time appended to the message in the format HH:MM:SS
if agentVersion == "" {
agentVersion = version.Version
}
eventContent = eventType + " " + eventContent + " " + agentVersion + " " + time.Now().Format(e.timePattern) + "\n"
e.eventChannel <- eventContent
}
// close closes the buffered channel
func (e *EventLog) close() {
close(e.eventChannel)
}
// rotateEventLog checks for the deletion of files and deleted it
func (e *EventLog) rotateEventLog() {
validFileNames := e.getFilesWithMatchDatePattern()
deleteFileCount := len(validFileNames) - e.noOfHistoryFiles
for i := 0; i < deleteFileCount; i++ {
logFilePathWithDate := filepath.Join(e.eventLogPath, validFileNames[i])
e.fileSystem.DeleteFile(logFilePathWithDate)
}
}
// eventWriter triggers the go routine once and then waits for data from buffer channel
func (e *EventLog) eventWriter() {
go func() {
defer func() {
if r := recover(); r != nil {
fmt.Println("Event writer panic: ", r)
}
}()
for event := range e.eventChannel {
header := SchemaVersionHeader + e.schemaVersion + "\n"
if createdFlag := e.writeFile(event, header); createdFlag {
e.rotateEventLog()
}
}
}()
}
// getFilesWithMatchDatePattern gets the files matching with the date pattern
func (e *EventLog) getFilesWithMatchDatePattern() []string {
var validFileNames []string
if allFiles, err := e.fileSystem.ReadDir(e.eventLogPath); err == nil {
for _, fileInfo := range allFiles {
fileName := fileInfo.Name()
if !fileInfo.Mode().IsDir() && e.isValidFileName(fileName) {
validFileNames = append(validFileNames, fileName)
}
}
}
return validFileNames
}
// isValidFileName checks whether the file matches the Date pattern
func (e *EventLog) isValidFileName(fileName string) bool {
logFileWithDelim := e.eventLogName + e.fileDelimiter
if !strings.HasPrefix(fileName, logFileWithDelim) {
return false
}
datePart := fileName[len(logFileWithDelim):]
_, err := time.ParseInLocation(e.datePattern, datePart, time.Local)
if err != nil {
return false
}
return true
}
// writeFile writes events and header to the file.
// When the file is not available, Creates a new file and inserts the header
// When the file is available, updates the file
func (e *EventLog) writeFile(content string, header string) (createdFlag bool) {
logFilePathWithDate := filepath.Join(e.eventLogPath, e.nextFileName)
if !e.currentDateFileExists() {
createdFlag = true
content = header + content
}
if err := e.fileSystem.AppendToFile(logFilePathWithDate, content, appconfig.ReadWriteAccess); err != nil {
fmt.Println("Failed to write on the event log.", err)
return
}
e.currentFileName = e.nextFileName
return
}
// currentDateFileExists checks whether the current day file exists
func (e *EventLog) currentDateFileExists() bool {
if e.currentFileName == "" {
if _, err := e.fileSystem.Stat(filepath.Join(e.eventLogPath, e.nextFileName)); e.fileSystem.IsNotExist(err) {
return false
}
return true
}
return e.currentFileName == e.nextFileName
}
// The below functions uses the eventlog singleton instance and use only the old audit logs to work on.
// EventCounter contains the audit count, file name and the audit date to be sent to MGS
type EventCounter struct {
AuditFileName string // audit file name
AuditDate string // Can be used later. Date to which the audit file belongs to.
CountMap map[string]int // count of events found in audit log
SchemaVersion string // schema version from audit file
AgentVersion string // denotes agent version found in the audit log
LastReadTime string // denotes last read time stamp from file
LastReadByte int // denotes last read byte from file
IsFirstChunk bool // denotes first chunk taken from file
EventChunkType string // denotes message type used to send to MGS
}
// createUpdateEventCounter creates and updates event counter object based on the event line and time marker. This function creates new object when the version is new.
func createUpdateEventCounter(eventCounterObj *EventCounter, eventLine string, byteMarker int) (*EventCounter, bool) {
var eventChunkType string
eventLine = singleSpacePattern.ReplaceAllString(strings.TrimSpace(eventLine), " ")
eventSplitVal := strings.Split(eventLine, " ")
// For Invalid Data (skips lines with less than 4 words)
if len(eventSplitVal) < 4 {
eventCounterObj.LastReadByte = byteMarker
return eventCounterObj, false
}
eventChunkType, eventName, version, timeStamp := eventSplitVal[0], eventSplitVal[1], eventSplitVal[2], eventSplitVal[3]
if matched, err := regexp.MatchString(VersionRegexPattern, version); matched == false || err != nil {
eventCounterObj.LastReadByte = byteMarker
return eventCounterObj, false
}
// Will create a new object and load data for it from new chunk. For now, the chunks are divided based on version and Update events.
newlyCreated := false
if eventCounterObj.AgentVersion != "" && (version != eventCounterObj.AgentVersion ||
eventChunkType != eventCounterObj.EventChunkType ||
eventChunkType == AgentUpdateResultMessage) {
newlyCreated = true
eventCounterObj = &EventCounter{
AuditFileName: eventCounterObj.AuditFileName,
AuditDate: eventCounterObj.AuditDate,
CountMap: make(map[string]int),
SchemaVersion: eventCounterObj.SchemaVersion,
IsFirstChunk: false,
}
}
eventCounterObj.EventChunkType = eventChunkType
eventCounterObj.AgentVersion = version
eventCounterObj.CountMap[eventName]++
eventCounterObj.LastReadTime = timeStamp
eventCounterObj.LastReadByte = byteMarker
return eventCounterObj, newlyCreated
}
// WriteLastLineFile updates the file name with the Audit success status. This should be locked by the caller if called by multiple threads.
func WriteLastLineFile(eventCounter *EventCounter) error {
if eventLogInst == nil {
return nil
}
// generates byte marker with padding zeros
byteMarker := fmt.Sprintf("%0"+strconv.Itoa(BytePatternLen)+"d", eventCounter.LastReadByte)
logfilePath := filepath.Join(eventLogInst.eventLogPath, eventCounter.AuditFileName)
// Creates footer with last read byte padded by zeros
if eventCounter.IsFirstChunk {
// Appends the footer
if err := eventLogInst.fileSystem.AppendToFile(logfilePath, AuditSentSuccessFooter+byteMarker, appconfig.ReadWriteAccess); err != nil {
return err
}
return nil
}
// Updates footer of Audit file with last read byte padded by zeros
stat, err := os.Stat(logfilePath)
if err != nil {
return err
}
file, err := os.OpenFile(logfilePath, os.O_RDWR, appconfig.ReadWriteAccess)
if err != nil {
return err
}
defer file.Close()
lastReadByteBegin := stat.Size() - BytePatternLen
_, err = file.WriteAt([]byte(byteMarker), lastReadByteBegin)
if err != nil {
return err
}
return nil
}
// GetEventCounter returns the count of the audits in the previous days logs.
// Returns empty list when an exception is thrown by file handlers
func GetEventCounter() ([]*EventCounter, error) {
eventCounters := make([]*EventCounter, 0)
if eventLogInst == nil {
return eventCounters, nil
}
nextFileName := eventLogInst.eventLogName + eventLogInst.fileDelimiter + time.Now().Format(eventLogInst.datePattern)
validFileNames := eventLogInst.getFilesWithMatchDatePattern()
// Loop continues till it visits the file with Audit Sent log
validFileNamesLen := len(validFileNames) - 1
for idx := validFileNamesLen; idx >= 0 && idx >= validFileNamesLen-2; idx-- { // Considers only last two files and ignores today's file
if validFileNames[idx] == nextFileName {
continue
}
auditLogFileName := filepath.Join(eventLogInst.eventLogPath, validFileNames[idx])
isAuditFileProcessed, byteMarker, err := isAuditSentToMGS(auditLogFileName)
if err != nil || byteMarker < 0 { // byte marker is set to -1 when the file has been processed
continue
}
eventCounterObj, err := countEvent(auditLogFileName, byteMarker, isAuditFileProcessed)
if err != nil {
continue
}
eventCounters = append(eventCounters, eventCounterObj...)
}
return eventCounters, nil
}
// countEvent returns the audit counts for the given file, split into chunks.
// byteMarker (> 0) is the offset recorded in the footer of a partially
// processed file; isAuditFileProcessed marks whether any section was sent before.
func countEvent(fileName string, byteMarker int, isAuditFileProcessed bool) ([]*EventCounter, error) {
    // reads header
    eventCounterObj, offset, err := readEventLogHeaders(fileName)
    if err != nil {
        return nil, err
    }
    eventCounterObj.IsFirstChunk = !isAuditFileProcessed // denotes that the file is untouched. Not even a single section is sent
    // sets the offset value to be forwarded in file.
    // retrieved value from footer of processed file
    if byteMarker > 0 {
        offset = byteMarker
    }
    // reads body and footer
    eventCounter, err := readEventLogBodyFooter(fileName, eventCounterObj, offset)
    if err != nil {
        return nil, err
    }
    return eventCounter, nil
}
// readEventLogBodyFooter reads the body of the audit file starting at offset,
// folding lines into chunked EventCounter objects until the sent-success
// footer (or a 5-chunk limit) is reached. The returned slice is reversed
// (most recently read chunk first).
func readEventLogBodyFooter(fileName string, eventCounterObj *EventCounter, offset int) ([]*EventCounter, error) {
    eventCounter := make([]*EventCounter, 0)
    file, err := os.Open(fileName)
    if err != nil {
        return nil, err
    }
    defer file.Close()
    // seek offset bytes from beginning of file; offset then keeps tracking the
    // byte position as the split function consumes lines
    file.Seek(int64(offset), 0)
    // creates a new scanner with custom split function
    scanner := bufio.NewScanner(file)
    scanner.Split(splitAuditLine(&offset))
    for scanner.Scan() {
        // For the footer
        if strings.HasPrefix(scanner.Text(), AuditSentSuccessFooter) {
            break
        }
        //For the data part
        if newObj, created := createUpdateEventCounter(eventCounterObj, scanner.Text(), offset); created { // TODO when file grows pass line number to createUpdateEventCounter and break
            if len(eventCounter) == 4 { // Reads only 4 + 1 ( Added after for loop) chunks from the file
                break
            }
            eventCounter = append(eventCounter, eventCounterObj)
            eventCounterObj = newObj
        }
    }
    eventCounter = append(eventCounter, eventCounterObj)
    // Reverse array - For now, two elem with two versions
    for i, j := 0, len(eventCounter)-1; i < j; i, j = i+1, j-1 {
        eventCounter[i], eventCounter[j] = eventCounter[j], eventCounter[i]
    }
    return eventCounter, nil
}
// readEventLogHeaders reads the header line(s) from the audit file and returns
// an EventCounter pre-loaded with header information, plus the number of bytes
// consumed (i.e. the offset where the body starts).
func readEventLogHeaders(fileName string) (*EventCounter, int, error) {
    noOfBytesRead := 0
    file, err := os.Open(fileName)
    if err != nil {
        return nil, noOfBytesRead, err
    }
    defer file.Close()
    eventCounterObj := &EventCounter{
        CountMap:      make(map[string]int),
        AuditFileName: filepath.Base(fileName),
        AgentVersion:  "",
    }
    // The audit date is whatever follows "<eventLogName><delimiter>" in the name.
    filePrefixLen := len(eventLogInst.eventLogName + eventLogInst.fileDelimiter)
    eventCounterObj.AuditDate = eventCounterObj.AuditFileName[filePrefixLen:]
    scanner := bufio.NewScanner(file)
    scanner.Split(splitAuditLine(&noOfBytesRead))
    skipLineCount := 1 // number of header lines to consume
    for scanner.Scan() {
        // For the Header
        if skipLineCount > 0 {
            skipLineCount--
            eventCounterObj.SchemaVersion = getValidAuditFileHeaderFooterVal(scanner.Text(), SchemaVersionHeader) // Can be iterated when the headers are more
        }
        if skipLineCount == 0 {
            break
        }
    }
    return eventCounterObj, noOfBytesRead, nil
}
// splitAuditLine helps in splitting event log using default bufio.ScanLines and retrieves number of bytes read
func splitAuditLine(byteLen *int) bufio.SplitFunc {
return func(data []byte, atEOF bool) (advance int, token []byte, err error) {
advance, token, err = bufio.ScanLines(data, atEOF)
*byteLen += advance
return advance, token, err
}
}
// getValidAuditFileHeaderFooterVal extracts the value for headerFooterTag from
// the event line, substituting the current schema version when the schema
// header is present but its value is blank.
func getValidAuditFileHeaderFooterVal(event string, headerFooterTag string) string {
    value := getEventHeaderVal(event, headerFooterTag)
    if strings.HasPrefix(event, SchemaVersionHeader) && value == "" {
        value = eventLogInst.schemaVersion // fall back to the current schema version
    }
    return value
}
// getEventHeaderVal returns the value following headerFooterTag in a header or
// footer line, or "" when the line does not start with the tag.
func getEventHeaderVal(event string, headerFooterTag string) string {
    if strings.HasPrefix(event, headerFooterTag) {
        // TrimPrefix keeps the entire remainder intact even if the tag text
        // happens to recur inside the value; the previous strings.Split-based
        // extraction would truncate the value at the second occurrence.
        return strings.TrimPrefix(event, headerFooterTag)
    }
    return ""
}
// isAuditSentToMGS checks whether the audit file ends with an AuditSentSuccess
// footer. Returns (sent, byteMarker, err), where byteMarker is the last-read
// byte recorded in the footer, or -1 when the whole file has been processed.
func isAuditSentToMGS(auditFileName string) (bool, int, error) {
    byteMarker := 0
    stat, err := os.Stat(auditFileName)
    if err != nil {
        return false, byteMarker, err
    }
    file, err := os.Open(auditFileName)
    if err != nil {
        return false, byteMarker, err
    }
    defer file.Close()
    // Read exactly the footer-sized tail: "<footer tag><padded marker digits>".
    // NOTE(review): ReadAt fails (negative offset) for files shorter than this
    // buffer; callers treat the error as "skip this file" — confirm intended.
    buf := make([]byte, len(AuditSentSuccessFooter)+BytePatternLen)
    start := stat.Size() - int64(len(buf))
    _, err = file.ReadAt(buf, start)
    if err != nil {
        return false, byteMarker, err
    }
    lastLine := string(buf)
    if strings.HasPrefix(lastLine, AuditSentSuccessFooter) {
        lastLineSplitVal := strings.Split(lastLine, AuditSentSuccessFooter)
        if len(lastLineSplitVal) > 1 {
            byteMarker, _ = strconv.Atoi(lastLineSplitVal[1]) // parse failure leaves the marker at 0
            if byteMarker < 0 { // Using int instead of uint here for avoiding extra conversions
                byteMarker = 0
            }
            // when the file is processed completely (marker points at the footer itself)
            if int64(byteMarker) == start {
                return true, -1, nil
            }
        }
        return true, byteMarker, nil
    }
    return false, byteMarker, nil
}
| {
maxRollsDay = config.Agent.AuditExpirationDay
} | conditional_block |
eventlog.go | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package logger is used to initialize the logger(main logger and event logger). This package should be imported once, usually from main, then call GetLogger.
package logger
import (
"bufio"
"fmt"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/aws/amazon-ssm-agent/agent/appconfig"
"github.com/aws/amazon-ssm-agent/agent/version"
"github.com/aws/amazon-ssm-agent/core/workerprovider/longrunningprovider/datastore/filesystem"
)
// Package-level state shared by the event-log helpers below.
var (
    eventLogInst       *EventLog                   // singleton set by GetEventLog; nil until first call
    singleSpacePattern = regexp.MustCompile(`\s+`) // collapses runs of whitespace when parsing event lines
    AuditFolderName    = "audits"                  // sub-directory (under the log path) holding audit files
)
// GetEventLog returns the singleton event-log instance, creating it on first
// call; the SSM Logger calls this during app startup.
// NOTE(review): the nil check and assignment of eventLogInst are
// unsynchronized — confirm callers serialize the first call.
func GetEventLog(logFilePath string, logFileName string) (eventLog *EventLog) {
    if eventLogInst != nil {
        return eventLogInst
    }
    // Fall back to the compiled-in default when the agent config cannot be read.
    var maxRollsDay int = appconfig.DefaultAuditExpirationDay
    config, err := appconfig.Config(true)
    if err == nil {
        maxRollsDay = config.Agent.AuditExpirationDay
    }
    eventLogInstance := EventLog{
        eventChannel:     make(chan string, 2), // small buffer: producers block once two events are pending
        noOfHistoryFiles: maxRollsDay,
        schemaVersion:    "1",
        eventLogPath:     filepath.Join(logFilePath, AuditFolderName),
        eventLogName:     logFileName,
        datePattern:      "2006-01-02", // Go reference layout: YYYY-MM-DD
        fileSystem:       filesystem.NewFileSystem(),
        timePattern:      "15:04:05", // HH:MM:SS
    }
    eventLogInstance.init()
    eventLogInstance.rotateEventLog() // prune audit files beyond the retention window
    eventLogInst = &eventLogInstance
    return eventLogInst
}
// EventLog creates events with the appropriate file parameters passed to this
// instance and appends them to dated audit files via a background goroutine.
type EventLog struct {
    eventChannel     chan string // Used for passing events to file write go routine.
    noOfHistoryFiles int         // Number of audit files to maintain in log folder
    eventLogPath     string      // Log file path
    eventLogName     string      // Event Log Name
    schemaVersion    string      // Schema version
    datePattern      string      // Date Pattern used for creating files
    fileSystem       filesystem.IFileSystem
    timePattern      string      // Time layout (HH:MM:SS) appended to each event line
    currentFileName  string      // Name of File currently being used for logging in this instance. On app startup, it will be empty
    nextFileName     string      // Current day's log file name
    fileDelimiter    string      // Separator between the log name and the date suffix
}
// init sets the default values for the instance, ensures the audit directory
// exists, and starts the background writer goroutine.
func (e *EventLog) init() {
    e.currentFileName = ""
    e.fileDelimiter = "-"
    // Today's file name: "<name>-<YYYY-MM-DD>".
    e.nextFileName = e.eventLogName + e.fileDelimiter + time.Now().Format(e.datePattern)
    if err := e.fileSystem.MkdirAll(e.eventLogPath, appconfig.ReadWriteExecuteAccess); err != nil {
        // Best effort: startup proceeds and the append path surfaces failures later.
        fmt.Println("Failed to create directory for audits", err)
    }
    e.eventWriter()
}
// Getters
// GetTodayAuditFileName returns the audit file name currently in use (today's file).
func (e *EventLog) GetTodayAuditFileName() string {
    return e.nextFileName
}
// GetAuditFileName returns the audit file name without the date pattern suffix.
func (e *EventLog) GetAuditFileName() string {
    return e.eventLogName
}
// GetAuditFilePath returns the directory that holds the audit files.
func (e *EventLog) GetAuditFilePath() string {
    return e.eventLogPath
}
// GetAuditFilePath will return the file system instance
func (e *EventLog) | () filesystem.IFileSystem {
return e.fileSystem
}
// loadEvent formats an event line and queues it on the channel consumed by the
// file-writer goroutine. Line layout: "<type> <content> <version> <HH:MM:SS>\n".
func (e *EventLog) loadEvent(eventType string, agentVersion string, eventContent string) {
    // Default to the running agent's version when none is supplied.
    if agentVersion == "" {
        agentVersion = version.Version
    }
    eventContent = eventType + " " + eventContent + " " + agentVersion + " " + time.Now().Format(e.timePattern) + "\n"
    e.eventChannel <- eventContent // blocks when the (size-2) buffer is full
}
// close closes the buffered channel, which terminates the writer goroutine.
func (e *EventLog) close() {
    close(e.eventChannel)
}
// rotateEventLog deletes the oldest audit files so that at most
// noOfHistoryFiles remain. Assumes getFilesWithMatchDatePattern returns names
// oldest-first — TODO confirm directory-listing order.
func (e *EventLog) rotateEventLog() {
    validFileNames := e.getFilesWithMatchDatePattern()
    deleteFileCount := len(validFileNames) - e.noOfHistoryFiles
    for i := 0; i < deleteFileCount; i++ {
        logFilePathWithDate := filepath.Join(e.eventLogPath, validFileNames[i])
        e.fileSystem.DeleteFile(logFilePathWithDate) // return value ignored (best effort)
    }
}
// eventWriter starts the single writer goroutine, which drains the event
// channel and appends each event to today's audit file until close() is called.
func (e *EventLog) eventWriter() {
    go func() {
        defer func() {
            if r := recover(); r != nil {
                fmt.Println("Event writer panic: ", r)
            }
        }()
        for event := range e.eventChannel {
            header := SchemaVersionHeader + e.schemaVersion + "\n"
            // writeFile reports true when it created a fresh (new-day) file;
            // that is the moment to prune old files.
            if createdFlag := e.writeFile(event, header); createdFlag {
                e.rotateEventLog()
            }
        }
    }()
}
// getFilesWithMatchDatePattern lists the files in the audit directory whose
// names match "<eventLogName><delimiter><date>". Returns nil on a read error.
func (e *EventLog) getFilesWithMatchDatePattern() []string {
    var validFileNames []string
    if allFiles, err := e.fileSystem.ReadDir(e.eventLogPath); err == nil {
        for _, fileInfo := range allFiles {
            fileName := fileInfo.Name()
            if !fileInfo.Mode().IsDir() && e.isValidFileName(fileName) {
                validFileNames = append(validFileNames, fileName)
            }
        }
    }
    return validFileNames
}
// isValidFileName reports whether fileName has the form
// "<eventLogName><delimiter><date>" with a date part that parses under the
// configured date pattern.
func (e *EventLog) isValidFileName(fileName string) bool {
    prefix := e.eventLogName + e.fileDelimiter
    if !strings.HasPrefix(fileName, prefix) {
        return false
    }
    suffix := fileName[len(prefix):]
    _, parseErr := time.ParseInLocation(e.datePattern, suffix, time.Local)
    return parseErr == nil
}
// writeFile appends content to today's audit file. When the file does not
// exist yet it is created and the schema-version header is written first.
// Returns true when a new file was created.
func (e *EventLog) writeFile(content string, header string) (createdFlag bool) {
    logFilePathWithDate := filepath.Join(e.eventLogPath, e.nextFileName)
    if !e.currentDateFileExists() {
        createdFlag = true
        content = header + content
    }
    if err := e.fileSystem.AppendToFile(logFilePathWithDate, content, appconfig.ReadWriteAccess); err != nil {
        fmt.Println("Failed to write on the event log.", err)
        return
    }
    e.currentFileName = e.nextFileName // remember the file we last wrote to
    return
}
// currentDateFileExists reports whether today's audit file already exists.
// On first use (currentFileName empty) it stats the file; afterwards it just
// compares the cached name against today's name.
func (e *EventLog) currentDateFileExists() bool {
    if e.currentFileName == "" {
        if _, err := e.fileSystem.Stat(filepath.Join(e.eventLogPath, e.nextFileName)); e.fileSystem.IsNotExist(err) {
            return false
        }
        return true
    }
    return e.currentFileName == e.nextFileName
}
// The functions below use the eventlog singleton instance and work only on old audit logs.
// EventCounter contains the audit count, file name and the audit date to be sent to MGS.
type EventCounter struct {
    AuditFileName  string         // audit file name
    AuditDate      string         // Can be used later. Date to which the audit file belongs to.
    CountMap       map[string]int // count of events found in audit log, keyed by event name
    SchemaVersion  string         // schema version from audit file
    AgentVersion   string         // denotes agent version found in the audit log
    LastReadTime   string         // denotes last read time stamp from file
    LastReadByte   int            // denotes last read byte from file
    IsFirstChunk   bool           // denotes first chunk taken from file
    EventChunkType string         // denotes message type used to send to MGS
}
// createUpdateEventCounter folds one audit line into eventCounterObj and
// advances its last-read byte marker. A new counter object is started (and
// returned with created == true) when the line opens a different chunk: a
// different agent version, a different chunk/message type, or an agent-update
// result event. Invalid lines (fewer than 4 fields, or a non-version third
// field) only advance the byte marker.
func createUpdateEventCounter(eventCounterObj *EventCounter, eventLine string, byteMarker int) (*EventCounter, bool) {
    var eventChunkType string
    // Normalize whitespace so the line splits into exactly its fields.
    eventLine = singleSpacePattern.ReplaceAllString(strings.TrimSpace(eventLine), " ")
    eventSplitVal := strings.Split(eventLine, " ")
    // For Invalid Data (skips lines with less than 4 words)
    if len(eventSplitVal) < 4 {
        eventCounterObj.LastReadByte = byteMarker
        return eventCounterObj, false
    }
    eventChunkType, eventName, version, timeStamp := eventSplitVal[0], eventSplitVal[1], eventSplitVal[2], eventSplitVal[3]
    // Idiomatic negation instead of "matched == false" (gosimple S1002).
    if matched, err := regexp.MatchString(VersionRegexPattern, version); !matched || err != nil {
        eventCounterObj.LastReadByte = byteMarker
        return eventCounterObj, false
    }
    // Will create a new object and load data for it from the new chunk. For now,
    // chunks are divided based on version and Update events.
    newlyCreated := false
    if eventCounterObj.AgentVersion != "" && (version != eventCounterObj.AgentVersion ||
        eventChunkType != eventCounterObj.EventChunkType ||
        eventChunkType == AgentUpdateResultMessage) {
        newlyCreated = true
        eventCounterObj = &EventCounter{
            AuditFileName: eventCounterObj.AuditFileName,
            AuditDate:     eventCounterObj.AuditDate,
            CountMap:      make(map[string]int),
            SchemaVersion: eventCounterObj.SchemaVersion,
            IsFirstChunk:  false,
        }
    }
    eventCounterObj.EventChunkType = eventChunkType
    eventCounterObj.AgentVersion = version
    eventCounterObj.CountMap[eventName]++
    eventCounterObj.LastReadTime = timeStamp
    eventCounterObj.LastReadByte = byteMarker
    return eventCounterObj, newlyCreated
}
// WriteLastLineFile persists the last-read byte position for an audit file.
// For the first chunk it appends a new footer line
// ("<AuditSentSuccessFooter><zero-padded byte marker>"); otherwise it
// overwrites the marker digits at the end of the existing footer in place.
// This should be locked by the caller if called by multiple threads.
func WriteLastLineFile(eventCounter *EventCounter) error {
    if eventLogInst == nil {
        return nil // event logging disabled; nothing to record
    }
    // generates byte marker with padding zeros, BytePatternLen digits wide
    byteMarker := fmt.Sprintf("%0"+strconv.Itoa(BytePatternLen)+"d", eventCounter.LastReadByte)
    logfilePath := filepath.Join(eventLogInst.eventLogPath, eventCounter.AuditFileName)
    // Creates footer with last read byte padded by zeros
    if eventCounter.IsFirstChunk {
        // Appends the footer
        if err := eventLogInst.fileSystem.AppendToFile(logfilePath, AuditSentSuccessFooter+byteMarker, appconfig.ReadWriteAccess); err != nil {
            return err
        }
        return nil
    }
    // Updates footer of Audit file with last read byte padded by zeros
    stat, err := os.Stat(logfilePath)
    if err != nil {
        return err
    }
    file, err := os.OpenFile(logfilePath, os.O_RDWR, appconfig.ReadWriteAccess)
    if err != nil {
        return err
    }
    defer file.Close()
    // The marker occupies the final BytePatternLen bytes of the file.
    lastReadByteBegin := stat.Size() - BytePatternLen
    _, err = file.WriteAt([]byte(byteMarker), lastReadByteBegin)
    if err != nil {
        return err
    }
    return nil
}
// GetEventCounter returns the count of the audits in the previous days' logs.
// Returns an empty list when the event log is uninitialized or when file
// handlers fail (per-file errors are skipped, not propagated).
func GetEventCounter() ([]*EventCounter, error) {
    eventCounters := make([]*EventCounter, 0)
    if eventLogInst == nil {
        return eventCounters, nil
    }
    // Today's file name, which is excluded from counting below.
    nextFileName := eventLogInst.eventLogName + eventLogInst.fileDelimiter + time.Now().Format(eventLogInst.datePattern)
    validFileNames := eventLogInst.getFilesWithMatchDatePattern()
    // Loop continues till it visits the file with Audit Sent log
    validFileNamesLen := len(validFileNames) - 1
    for idx := validFileNamesLen; idx >= 0 && idx >= validFileNamesLen-2; idx-- { // Considers only last two files and ignores today's file
        if validFileNames[idx] == nextFileName {
            continue
        }
        auditLogFileName := filepath.Join(eventLogInst.eventLogPath, validFileNames[idx])
        isAuditFileProcessed, byteMarker, err := isAuditSentToMGS(auditLogFileName)
        if err != nil || byteMarker < 0 { // byte marker is set to -1 when the file has been processed
            continue
        }
        eventCounterObj, err := countEvent(auditLogFileName, byteMarker, isAuditFileProcessed)
        if err != nil {
            continue
        }
        eventCounters = append(eventCounters, eventCounterObj...)
    }
    return eventCounters, nil
}
// countEvent returns the audit counts for the given file, split into chunks.
// byteMarker (> 0) is the offset recorded in the footer of a partially
// processed file; isAuditFileProcessed marks whether any section was sent before.
func countEvent(fileName string, byteMarker int, isAuditFileProcessed bool) ([]*EventCounter, error) {
    // reads header
    eventCounterObj, offset, err := readEventLogHeaders(fileName)
    if err != nil {
        return nil, err
    }
    eventCounterObj.IsFirstChunk = !isAuditFileProcessed // denotes that the file is untouched. Not even a single section is sent
    // sets the offset value to be forwarded in file.
    // retrieved value from footer of processed file
    if byteMarker > 0 {
        offset = byteMarker
    }
    // reads body and footer
    eventCounter, err := readEventLogBodyFooter(fileName, eventCounterObj, offset)
    if err != nil {
        return nil, err
    }
    return eventCounter, nil
}
// readEventLogBodyFooter reads the body of the audit file starting at offset,
// folding lines into chunked EventCounter objects until the sent-success
// footer (or a 5-chunk limit) is reached. The returned slice is reversed
// (most recently read chunk first).
func readEventLogBodyFooter(fileName string, eventCounterObj *EventCounter, offset int) ([]*EventCounter, error) {
    eventCounter := make([]*EventCounter, 0)
    file, err := os.Open(fileName)
    if err != nil {
        return nil, err
    }
    defer file.Close()
    // seek offset bytes from beginning of file; offset then keeps tracking the
    // byte position as the split function consumes lines
    file.Seek(int64(offset), 0)
    // creates a new scanner with custom split function
    scanner := bufio.NewScanner(file)
    scanner.Split(splitAuditLine(&offset))
    for scanner.Scan() {
        // For the footer
        if strings.HasPrefix(scanner.Text(), AuditSentSuccessFooter) {
            break
        }
        //For the data part
        if newObj, created := createUpdateEventCounter(eventCounterObj, scanner.Text(), offset); created { // TODO when file grows pass line number to createUpdateEventCounter and break
            if len(eventCounter) == 4 { // Reads only 4 + 1 ( Added after for loop) chunks from the file
                break
            }
            eventCounter = append(eventCounter, eventCounterObj)
            eventCounterObj = newObj
        }
    }
    eventCounter = append(eventCounter, eventCounterObj)
    // Reverse array - For now, two elem with two versions
    for i, j := 0, len(eventCounter)-1; i < j; i, j = i+1, j-1 {
        eventCounter[i], eventCounter[j] = eventCounter[j], eventCounter[i]
    }
    return eventCounter, nil
}
// readEventLogHeaders reads the header line(s) from the audit file and returns
// an EventCounter pre-loaded with header information, plus the number of bytes
// consumed (i.e. the offset where the body starts).
func readEventLogHeaders(fileName string) (*EventCounter, int, error) {
    noOfBytesRead := 0
    file, err := os.Open(fileName)
    if err != nil {
        return nil, noOfBytesRead, err
    }
    defer file.Close()
    eventCounterObj := &EventCounter{
        CountMap:      make(map[string]int),
        AuditFileName: filepath.Base(fileName),
        AgentVersion:  "",
    }
    // The audit date is whatever follows "<eventLogName><delimiter>" in the name.
    filePrefixLen := len(eventLogInst.eventLogName + eventLogInst.fileDelimiter)
    eventCounterObj.AuditDate = eventCounterObj.AuditFileName[filePrefixLen:]
    scanner := bufio.NewScanner(file)
    scanner.Split(splitAuditLine(&noOfBytesRead))
    skipLineCount := 1 // number of header lines to consume
    for scanner.Scan() {
        // For the Header
        if skipLineCount > 0 {
            skipLineCount--
            eventCounterObj.SchemaVersion = getValidAuditFileHeaderFooterVal(scanner.Text(), SchemaVersionHeader) // Can be iterated when the headers are more
        }
        if skipLineCount == 0 {
            break
        }
    }
    return eventCounterObj, noOfBytesRead, nil
}
// splitAuditLine helps in splitting event log using default bufio.ScanLines and retrieves number of bytes read
func splitAuditLine(byteLen *int) bufio.SplitFunc {
return func(data []byte, atEOF bool) (advance int, token []byte, err error) {
advance, token, err = bufio.ScanLines(data, atEOF)
*byteLen += advance
return advance, token, err
}
}
// getValidAuditFileHeaderFooterVal extracts the value for headerFooterTag from
// the event line, substituting the current schema version when the schema
// header is present but its value is blank.
func getValidAuditFileHeaderFooterVal(event string, headerFooterTag string) string {
    value := getEventHeaderVal(event, headerFooterTag)
    if strings.HasPrefix(event, SchemaVersionHeader) && value == "" {
        value = eventLogInst.schemaVersion // fall back to the current schema version
    }
    return value
}
// getEventHeaderVal returns the value following headerFooterTag in a header or
// footer line, or "" when the line does not start with the tag.
func getEventHeaderVal(event string, headerFooterTag string) string {
    if strings.HasPrefix(event, headerFooterTag) {
        // TrimPrefix keeps the entire remainder intact even if the tag text
        // happens to recur inside the value; the previous strings.Split-based
        // extraction would truncate the value at the second occurrence.
        return strings.TrimPrefix(event, headerFooterTag)
    }
    return ""
}
// isAuditSentToMGS checks whether the audit file ends with an AuditSentSuccess
// footer. Returns (sent, byteMarker, err), where byteMarker is the last-read
// byte recorded in the footer, or -1 when the whole file has been processed.
func isAuditSentToMGS(auditFileName string) (bool, int, error) {
    byteMarker := 0
    stat, err := os.Stat(auditFileName)
    if err != nil {
        return false, byteMarker, err
    }
    file, err := os.Open(auditFileName)
    if err != nil {
        return false, byteMarker, err
    }
    defer file.Close()
    // Read exactly the footer-sized tail: "<footer tag><padded marker digits>".
    // NOTE(review): ReadAt fails (negative offset) for files shorter than this
    // buffer; callers treat the error as "skip this file" — confirm intended.
    buf := make([]byte, len(AuditSentSuccessFooter)+BytePatternLen)
    start := stat.Size() - int64(len(buf))
    _, err = file.ReadAt(buf, start)
    if err != nil {
        return false, byteMarker, err
    }
    lastLine := string(buf)
    if strings.HasPrefix(lastLine, AuditSentSuccessFooter) {
        lastLineSplitVal := strings.Split(lastLine, AuditSentSuccessFooter)
        if len(lastLineSplitVal) > 1 {
            byteMarker, _ = strconv.Atoi(lastLineSplitVal[1]) // parse failure leaves the marker at 0
            if byteMarker < 0 { // Using int instead of uint here for avoiding extra conversions
                byteMarker = 0
            }
            // when the file is processed completely (marker points at the footer itself)
            if int64(byteMarker) == start {
                return true, -1, nil
            }
        }
        return true, byteMarker, nil
    }
    return false, byteMarker, nil
}
| GetFileSystem | identifier_name |
eventlog.go | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package logger is used to initialize the logger(main logger and event logger). This package should be imported once, usually from main, then call GetLogger.
package logger
import (
"bufio"
"fmt"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/aws/amazon-ssm-agent/agent/appconfig"
"github.com/aws/amazon-ssm-agent/agent/version"
"github.com/aws/amazon-ssm-agent/core/workerprovider/longrunningprovider/datastore/filesystem"
)
// Package-level state shared by the event-log helpers below.
var (
    eventLogInst       *EventLog                   // singleton set by GetEventLog; nil until first call
    singleSpacePattern = regexp.MustCompile(`\s+`) // collapses runs of whitespace when parsing event lines
    AuditFolderName    = "audits"                  // sub-directory (under the log path) holding audit files
)
// GetEventLog returns the singleton event-log instance, creating it on first
// call; the SSM Logger calls this during app startup.
// NOTE(review): the nil check and assignment of eventLogInst are
// unsynchronized — confirm callers serialize the first call.
func GetEventLog(logFilePath string, logFileName string) (eventLog *EventLog) {
    if eventLogInst != nil {
        return eventLogInst
    }
    // Fall back to the compiled-in default when the agent config cannot be read.
    var maxRollsDay int = appconfig.DefaultAuditExpirationDay
    config, err := appconfig.Config(true)
    if err == nil {
        maxRollsDay = config.Agent.AuditExpirationDay
    }
    eventLogInstance := EventLog{
        eventChannel:     make(chan string, 2), // small buffer: producers block once two events are pending
        noOfHistoryFiles: maxRollsDay,
        schemaVersion:    "1",
        eventLogPath:     filepath.Join(logFilePath, AuditFolderName),
        eventLogName:     logFileName,
        datePattern:      "2006-01-02", // Go reference layout: YYYY-MM-DD
        fileSystem:       filesystem.NewFileSystem(),
        timePattern:      "15:04:05", // HH:MM:SS
    }
    eventLogInstance.init()
    eventLogInstance.rotateEventLog() // prune audit files beyond the retention window
    eventLogInst = &eventLogInstance
    return eventLogInst
}
// EventLog creates events with the appropriate file parameters passed to this
// instance and appends them to dated audit files via a background goroutine.
type EventLog struct {
    eventChannel     chan string // Used for passing events to file write go routine.
    noOfHistoryFiles int         // Number of audit files to maintain in log folder
    eventLogPath     string      // Log file path
    eventLogName     string      // Event Log Name
    schemaVersion    string      // Schema version
    datePattern      string      // Date Pattern used for creating files
    fileSystem       filesystem.IFileSystem
    timePattern      string      // Time layout (HH:MM:SS) appended to each event line
    currentFileName  string      // Name of File currently being used for logging in this instance. On app startup, it will be empty
    nextFileName     string      // Current day's log file name
    fileDelimiter    string      // Separator between the log name and the date suffix
}
// init sets the default values for the instance, ensures the audit directory
// exists, and starts the background writer goroutine.
func (e *EventLog) init() {
    e.currentFileName = ""
    e.fileDelimiter = "-"
    // Today's file name: "<name>-<YYYY-MM-DD>".
    e.nextFileName = e.eventLogName + e.fileDelimiter + time.Now().Format(e.datePattern)
    if err := e.fileSystem.MkdirAll(e.eventLogPath, appconfig.ReadWriteExecuteAccess); err != nil {
        // Best effort: startup proceeds and the append path surfaces failures later.
        fmt.Println("Failed to create directory for audits", err)
    }
    e.eventWriter()
}
// Getters
// GetTodayAuditFileName returns the audit file name currently in use (today's file).
func (e *EventLog) GetTodayAuditFileName() string {
    return e.nextFileName
}
// GetAuditFileName returns the audit file name without the date pattern suffix.
func (e *EventLog) GetAuditFileName() string {
    return e.eventLogName
}
// GetAuditFilePath will return the audit file path
func (e *EventLog) GetAuditFilePath() string |
// GetFileSystem returns the file system instance used for audit file
// operations. (Doc comment corrected: it previously repeated GetAuditFilePath.)
func (e *EventLog) GetFileSystem() filesystem.IFileSystem {
    return e.fileSystem
}
// loadEvent formats an event line and queues it on the channel consumed by the
// file-writer goroutine. Line layout: "<type> <content> <version> <HH:MM:SS>\n".
func (e *EventLog) loadEvent(eventType string, agentVersion string, eventContent string) {
    // Default to the running agent's version when none is supplied.
    if agentVersion == "" {
        agentVersion = version.Version
    }
    eventContent = eventType + " " + eventContent + " " + agentVersion + " " + time.Now().Format(e.timePattern) + "\n"
    e.eventChannel <- eventContent // blocks when the (size-2) buffer is full
}
// close closes the buffered channel, which terminates the writer goroutine.
func (e *EventLog) close() {
    close(e.eventChannel)
}
// rotateEventLog deletes the oldest audit files so that at most
// noOfHistoryFiles remain. Assumes getFilesWithMatchDatePattern returns names
// oldest-first — TODO confirm directory-listing order.
func (e *EventLog) rotateEventLog() {
    validFileNames := e.getFilesWithMatchDatePattern()
    deleteFileCount := len(validFileNames) - e.noOfHistoryFiles
    for i := 0; i < deleteFileCount; i++ {
        logFilePathWithDate := filepath.Join(e.eventLogPath, validFileNames[i])
        e.fileSystem.DeleteFile(logFilePathWithDate) // return value ignored (best effort)
    }
}
// eventWriter starts the single writer goroutine, which drains the event
// channel and appends each event to today's audit file until close() is called.
func (e *EventLog) eventWriter() {
    go func() {
        defer func() {
            if r := recover(); r != nil {
                fmt.Println("Event writer panic: ", r)
            }
        }()
        for event := range e.eventChannel {
            header := SchemaVersionHeader + e.schemaVersion + "\n"
            // writeFile reports true when it created a fresh (new-day) file;
            // that is the moment to prune old files.
            if createdFlag := e.writeFile(event, header); createdFlag {
                e.rotateEventLog()
            }
        }
    }()
}
// getFilesWithMatchDatePattern lists the files in the audit directory whose
// names match "<eventLogName><delimiter><date>". Returns nil on a read error.
func (e *EventLog) getFilesWithMatchDatePattern() []string {
    var validFileNames []string
    if allFiles, err := e.fileSystem.ReadDir(e.eventLogPath); err == nil {
        for _, fileInfo := range allFiles {
            fileName := fileInfo.Name()
            if !fileInfo.Mode().IsDir() && e.isValidFileName(fileName) {
                validFileNames = append(validFileNames, fileName)
            }
        }
    }
    return validFileNames
}
// isValidFileName reports whether fileName has the form
// "<eventLogName><delimiter><date>" with a date part that parses under the
// configured date pattern.
func (e *EventLog) isValidFileName(fileName string) bool {
    prefix := e.eventLogName + e.fileDelimiter
    if !strings.HasPrefix(fileName, prefix) {
        return false
    }
    suffix := fileName[len(prefix):]
    _, parseErr := time.ParseInLocation(e.datePattern, suffix, time.Local)
    return parseErr == nil
}
// writeFile appends content to today's audit file. When the file does not
// exist yet it is created and the schema-version header is written first.
// Returns true when a new file was created.
func (e *EventLog) writeFile(content string, header string) (createdFlag bool) {
    logFilePathWithDate := filepath.Join(e.eventLogPath, e.nextFileName)
    if !e.currentDateFileExists() {
        createdFlag = true
        content = header + content
    }
    if err := e.fileSystem.AppendToFile(logFilePathWithDate, content, appconfig.ReadWriteAccess); err != nil {
        fmt.Println("Failed to write on the event log.", err)
        return
    }
    e.currentFileName = e.nextFileName // remember the file we last wrote to
    return
}
// currentDateFileExists reports whether today's audit file already exists.
// On first use (currentFileName empty) it stats the file; afterwards it just
// compares the cached name against today's name.
func (e *EventLog) currentDateFileExists() bool {
    if e.currentFileName == "" {
        if _, err := e.fileSystem.Stat(filepath.Join(e.eventLogPath, e.nextFileName)); e.fileSystem.IsNotExist(err) {
            return false
        }
        return true
    }
    return e.currentFileName == e.nextFileName
}
// The functions below use the eventlog singleton instance and work only on old audit logs.
// EventCounter contains the audit count, file name and the audit date to be sent to MGS.
type EventCounter struct {
    AuditFileName  string         // audit file name
    AuditDate      string         // Can be used later. Date to which the audit file belongs to.
    CountMap       map[string]int // count of events found in audit log, keyed by event name
    SchemaVersion  string         // schema version from audit file
    AgentVersion   string         // denotes agent version found in the audit log
    LastReadTime   string         // denotes last read time stamp from file
    LastReadByte   int            // denotes last read byte from file
    IsFirstChunk   bool           // denotes first chunk taken from file
    EventChunkType string         // denotes message type used to send to MGS
}
// createUpdateEventCounter folds one audit line into eventCounterObj and
// advances its last-read byte marker. A new counter object is started (and
// returned with created == true) when the line opens a different chunk: a
// different agent version, a different chunk/message type, or an agent-update
// result event. Invalid lines (fewer than 4 fields, or a non-version third
// field) only advance the byte marker.
func createUpdateEventCounter(eventCounterObj *EventCounter, eventLine string, byteMarker int) (*EventCounter, bool) {
    var eventChunkType string
    // Normalize whitespace so the line splits into exactly its fields.
    eventLine = singleSpacePattern.ReplaceAllString(strings.TrimSpace(eventLine), " ")
    eventSplitVal := strings.Split(eventLine, " ")
    // For Invalid Data (skips lines with less than 4 words)
    if len(eventSplitVal) < 4 {
        eventCounterObj.LastReadByte = byteMarker
        return eventCounterObj, false
    }
    eventChunkType, eventName, version, timeStamp := eventSplitVal[0], eventSplitVal[1], eventSplitVal[2], eventSplitVal[3]
    // Idiomatic negation instead of "matched == false" (gosimple S1002).
    if matched, err := regexp.MatchString(VersionRegexPattern, version); !matched || err != nil {
        eventCounterObj.LastReadByte = byteMarker
        return eventCounterObj, false
    }
    // Will create a new object and load data for it from the new chunk. For now,
    // chunks are divided based on version and Update events.
    newlyCreated := false
    if eventCounterObj.AgentVersion != "" && (version != eventCounterObj.AgentVersion ||
        eventChunkType != eventCounterObj.EventChunkType ||
        eventChunkType == AgentUpdateResultMessage) {
        newlyCreated = true
        eventCounterObj = &EventCounter{
            AuditFileName: eventCounterObj.AuditFileName,
            AuditDate:     eventCounterObj.AuditDate,
            CountMap:      make(map[string]int),
            SchemaVersion: eventCounterObj.SchemaVersion,
            IsFirstChunk:  false,
        }
    }
    eventCounterObj.EventChunkType = eventChunkType
    eventCounterObj.AgentVersion = version
    eventCounterObj.CountMap[eventName]++
    eventCounterObj.LastReadTime = timeStamp
    eventCounterObj.LastReadByte = byteMarker
    return eventCounterObj, newlyCreated
}
// WriteLastLineFile persists the last-read byte position for an audit file.
// For the first chunk it appends a new footer line
// ("<AuditSentSuccessFooter><zero-padded byte marker>"); otherwise it
// overwrites the marker digits at the end of the existing footer in place.
// This should be locked by the caller if called by multiple threads.
func WriteLastLineFile(eventCounter *EventCounter) error {
    if eventLogInst == nil {
        return nil // event logging disabled; nothing to record
    }
    // generates byte marker with padding zeros, BytePatternLen digits wide
    byteMarker := fmt.Sprintf("%0"+strconv.Itoa(BytePatternLen)+"d", eventCounter.LastReadByte)
    logfilePath := filepath.Join(eventLogInst.eventLogPath, eventCounter.AuditFileName)
    // Creates footer with last read byte padded by zeros
    if eventCounter.IsFirstChunk {
        // Appends the footer
        if err := eventLogInst.fileSystem.AppendToFile(logfilePath, AuditSentSuccessFooter+byteMarker, appconfig.ReadWriteAccess); err != nil {
            return err
        }
        return nil
    }
    // Updates footer of Audit file with last read byte padded by zeros
    stat, err := os.Stat(logfilePath)
    if err != nil {
        return err
    }
    file, err := os.OpenFile(logfilePath, os.O_RDWR, appconfig.ReadWriteAccess)
    if err != nil {
        return err
    }
    defer file.Close()
    // The marker occupies the final BytePatternLen bytes of the file.
    lastReadByteBegin := stat.Size() - BytePatternLen
    _, err = file.WriteAt([]byte(byteMarker), lastReadByteBegin)
    if err != nil {
        return err
    }
    return nil
}
// GetEventCounter returns the count of the audits in the previous days' logs.
// Returns an empty list when the event log is uninitialized or when file
// handlers fail (per-file errors are skipped, not propagated).
func GetEventCounter() ([]*EventCounter, error) {
    eventCounters := make([]*EventCounter, 0)
    if eventLogInst == nil {
        return eventCounters, nil
    }
    // Today's file name, which is excluded from counting below.
    nextFileName := eventLogInst.eventLogName + eventLogInst.fileDelimiter + time.Now().Format(eventLogInst.datePattern)
    validFileNames := eventLogInst.getFilesWithMatchDatePattern()
    // Loop continues till it visits the file with Audit Sent log
    validFileNamesLen := len(validFileNames) - 1
    for idx := validFileNamesLen; idx >= 0 && idx >= validFileNamesLen-2; idx-- { // Considers only last two files and ignores today's file
        if validFileNames[idx] == nextFileName {
            continue
        }
        auditLogFileName := filepath.Join(eventLogInst.eventLogPath, validFileNames[idx])
        isAuditFileProcessed, byteMarker, err := isAuditSentToMGS(auditLogFileName)
        if err != nil || byteMarker < 0 { // byte marker is set to -1 when the file has been processed
            continue
        }
        eventCounterObj, err := countEvent(auditLogFileName, byteMarker, isAuditFileProcessed)
        if err != nil {
            continue
        }
        eventCounters = append(eventCounters, eventCounterObj...)
    }
    return eventCounters, nil
}
// countEvent parses one audit file: reads the header, then scans the body
// starting from byteMarker (progress recorded by a previous send) and returns
// the per-chunk event counts. isAuditFileProcessed is true when a sent-footer
// already exists for the file.
func countEvent(fileName string, byteMarker int, isAuditFileProcessed bool) ([]*EventCounter, error) {
// reads header
eventCounterObj, offset, err := readEventLogHeaders(fileName)
if err != nil {
return nil, err
}
eventCounterObj.IsFirstChunk = !isAuditFileProcessed // denotes that the file is untouched. Not even a single section is sent
// sets the offset value to be forwarded in file.
// retrieved value from footer of processed file
if byteMarker > 0 {
offset = byteMarker
}
// reads body and footer
eventCounter, err := readEventLogBodyFooter(fileName, eventCounterObj, offset)
if err != nil {
return nil, err
}
return eventCounter, nil
}
// readEventLogBodyFooter scans the body of an audit file starting at offset
// and aggregates per-chunk event counts until the AuditSentSuccess footer (or
// EOF). At most 5 chunks are collected; the returned slice is reversed so the
// most recently read chunk comes first.
//
// Fix: the original ignored the error from file.Seek — a failed seek would
// silently re-read already-processed data from the start of the file.
func readEventLogBodyFooter(fileName string, eventCounterObj *EventCounter, offset int) ([]*EventCounter, error) {
	eventCounter := make([]*EventCounter, 0)
	file, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	// seek offset bytes from beginning of file; surface a seek failure instead
	// of silently re-reading processed data.
	if _, err = file.Seek(int64(offset), 0); err != nil {
		return nil, err
	}
	// creates a new scanner with a split function that tracks bytes consumed
	scanner := bufio.NewScanner(file)
	scanner.Split(splitAuditLine(&offset))
	for scanner.Scan() {
		// Footer line marks the end of event data.
		if strings.HasPrefix(scanner.Text(), AuditSentSuccessFooter) {
			break
		}
		// Data line: created==true means this line started a new chunk, so the
		// finished chunk is collected and counting continues on the new object.
		if newObj, created := createUpdateEventCounter(eventCounterObj, scanner.Text(), offset); created { // TODO when file grows pass line number to createUpdateEventCounter and break
			if len(eventCounter) == 4 { // Reads only 4 + 1 (added after the loop) chunks from the file
				break
			}
			eventCounter = append(eventCounter, eventCounterObj)
			eventCounterObj = newObj
		}
	}
	eventCounter = append(eventCounter, eventCounterObj)
	// Reverse array - For now, two elem with two versions
	for i, j := 0, len(eventCounter)-1; i < j; i, j = i+1, j-1 {
		eventCounter[i], eventCounter[j] = eventCounter[j], eventCounter[i]
	}
	return eventCounter, nil
}
// readEventLogHeaders reads the header line(s) of an audit file and returns an
// EventCounter seeded with the file name, audit date (taken from the file-name
// suffix) and schema version, plus the byte offset just past the header.
func readEventLogHeaders(fileName string) (*EventCounter, int, error) {
noOfBytesRead := 0
file, err := os.Open(fileName)
if err != nil {
return nil, noOfBytesRead, err
}
defer file.Close()
eventCounterObj := &EventCounter{
CountMap: make(map[string]int),
AuditFileName: filepath.Base(fileName),
AgentVersion: "",
}
// The date part follows "<logName><delimiter>" in the file name.
// NOTE(review): assumes the name is at least filePrefixLen long — callers
// pre-filter via getFilesWithMatchDatePattern, which guarantees the prefix.
filePrefixLen := len(eventLogInst.eventLogName + eventLogInst.fileDelimiter)
eventCounterObj.AuditDate = eventCounterObj.AuditFileName[filePrefixLen:]
scanner := bufio.NewScanner(file)
scanner.Split(splitAuditLine(&noOfBytesRead))
// Currently exactly one header line (the schema version) is expected.
skipLineCount := 1
for scanner.Scan() {
// For the Header
if skipLineCount > 0 {
skipLineCount--
eventCounterObj.SchemaVersion = getValidAuditFileHeaderFooterVal(scanner.Text(), SchemaVersionHeader) // Can be iterated when the headers are more
}
if skipLineCount == 0 {
break
}
}
return eventCounterObj, noOfBytesRead, nil
}
// splitAuditLine wraps bufio.ScanLines in a SplitFunc that also accumulates
// the number of bytes consumed into *byteLen, so callers can track the current
// file offset while scanning.
func splitAuditLine(byteLen *int) bufio.SplitFunc {
	return func(data []byte, atEOF bool) (int, []byte, error) {
		advance, token, err := bufio.ScanLines(data, atEOF)
		*byteLen += advance
		return advance, token, err
	}
}
// getValidAuditFileHeaderFooterVal extracts the value for headerFooterTag from
// an audit header/footer line, substituting the running agent's schema version
// when a schema-version line carries no value.
func getValidAuditFileHeaderFooterVal(event string, headerFooterTag string) string {
	val := getEventHeaderVal(event, headerFooterTag)
	if val == "" && strings.HasPrefix(event, SchemaVersionHeader) {
		val = eventLogInst.schemaVersion // fall back to current schema version
	}
	return val
}
// getEventHeaderVal returns the text that follows the first occurrence of
// headerFooterTag in event, or "" when the line does not start with the tag.
func getEventHeaderVal(event string, headerFooterTag string) string {
	if !strings.HasPrefix(event, headerFooterTag) {
		return ""
	}
	if parts := strings.Split(event, headerFooterTag); len(parts) > 1 {
		return parts[1]
	}
	return ""
}
// isAuditSentToMGS checks whether the audit file ends with the
// AuditSentSuccess footer. Returns (found, byteMarker, err); byteMarker is the
// last-read byte recorded in the footer, or -1 when the footer shows the whole
// file body has already been processed.
func isAuditSentToMGS(auditFileName string) (bool, int, error) {
byteMarker := 0
stat, err := os.Stat(auditFileName)
if err != nil {
return false, byteMarker, err
}
file, err := os.Open(auditFileName)
if err != nil {
return false, byteMarker, err
}
defer file.Close()
// The footer, when present, occupies exactly the last
// len(AuditSentSuccessFooter)+BytePatternLen bytes of the file.
buf := make([]byte, len(AuditSentSuccessFooter)+BytePatternLen)
start := stat.Size() - int64(len(buf))
// NOTE(review): a file shorter than the footer makes start negative and
// ReadAt fail; that surfaces as an error here — confirm this is intended.
_, err = file.ReadAt(buf, start)
if err != nil {
return false, byteMarker, err
}
lastLine := string(buf)
if strings.HasPrefix(lastLine, AuditSentSuccessFooter) {
lastLineSplitVal := strings.Split(lastLine, AuditSentSuccessFooter)
if len(lastLineSplitVal) > 1 {
// Atoi error deliberately ignored: a malformed marker yields 0,
// i.e. "resume from the beginning of the body".
byteMarker, _ = strconv.Atoi(lastLineSplitVal[1])
if byteMarker < 0 { // Using int instead of uint here for avoiding extra conversions
byteMarker = 0
}
// when the file is processed completely
if int64(byteMarker) == start {
return true, -1, nil
}
}
return true, byteMarker, nil
}
return false, byteMarker, nil
}
| {
return e.eventLogPath
} | identifier_body |
eventlog.go | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package logger is used to initialize the logger(main logger and event logger). This package should be imported once, usually from main, then call GetLogger.
package logger
import (
"bufio"
"fmt"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/aws/amazon-ssm-agent/agent/appconfig"
"github.com/aws/amazon-ssm-agent/agent/version"
"github.com/aws/amazon-ssm-agent/core/workerprovider/longrunningprovider/datastore/filesystem"
)
var (
eventLogInst *EventLog
singleSpacePattern = regexp.MustCompile(`\s+`)
AuditFolderName = "audits"
)
// GetEventLog returns the EventLog singleton, creating it on first call; it is
// called by the SSM Logger during app startup. Subsequent calls ignore the
// arguments and return the existing instance.
// NOTE(review): initialization is not synchronized — assumes the first call
// completes before concurrent use; confirm with startup call sites.
func GetEventLog(logFilePath string, logFileName string) (eventLog *EventLog) {
if eventLogInst != nil {
return eventLogInst
}
// Retention falls back to the default when the app config can't be loaded.
var maxRollsDay int = appconfig.DefaultAuditExpirationDay
config, err := appconfig.Config(true)
if err == nil {
maxRollsDay = config.Agent.AuditExpirationDay
}
eventLogInstance := EventLog{
eventChannel: make(chan string, 2),
noOfHistoryFiles: maxRollsDay,
schemaVersion: "1",
eventLogPath: filepath.Join(logFilePath, AuditFolderName),
eventLogName: logFileName,
datePattern: "2006-01-02",
fileSystem: filesystem.NewFileSystem(),
timePattern: "15:04:05", // HH:MM:SS
}
// init starts the writer goroutine; rotate trims files beyond retention.
eventLogInstance.init()
eventLogInstance.rotateEventLog()
eventLogInst = &eventLogInstance
return eventLogInst
}
// EventLog creates events with the appropriate file parameters passed to this
// instance and writes them to daily audit files via a background goroutine.
type EventLog struct {
eventChannel chan string // Used for passing events to file write go routine.
noOfHistoryFiles int // Number of audit files to maintain in log folder
eventLogPath string // Log file path
eventLogName string // Event Log Name
schemaVersion string // Schema version
datePattern string // Date Pattern used for creating files
fileSystem filesystem.IFileSystem // File-system abstraction used for all writes
timePattern string // Time pattern for the per-event timestamp (HH:MM:SS)
currentFileName string // Name of File currently being used for logging in this instance. On app startup, it will be empty
nextFileName string // Current day's log file name
fileDelimiter string // Separator between log name and date suffix
}
// init sets the default values for the instance, ensures the audit directory
// exists, and starts the background event-writer goroutine.
func (e *EventLog) init() {
e.currentFileName = ""
e.fileDelimiter = "-"
// Today's target file: "<name>-<YYYY-MM-DD>".
e.nextFileName = e.eventLogName + e.fileDelimiter + time.Now().Format(e.datePattern)
if err := e.fileSystem.MkdirAll(e.eventLogPath, appconfig.ReadWriteExecuteAccess); err != nil {
fmt.Println("Failed to create directory for audits", err)
}
e.eventWriter()
}
// Getters
// GetTodayAuditFileName returns the name of the audit file for the current
// day ("<name>-<YYYY-MM-DD>").
func (e *EventLog) GetTodayAuditFileName() string {
return e.nextFileName
}
// GetAuditFileName returns the audit file name without the date suffix.
func (e *EventLog) GetAuditFileName() string {
return e.eventLogName
}
// GetAuditFilePath returns the directory where audit files are written.
func (e *EventLog) GetAuditFilePath() string {
return e.eventLogPath
}
// GetFileSystem returns the file-system abstraction used by this instance.
// (Original comment wrongly said "GetAuditFilePath".)
func (e *EventLog) GetFileSystem() filesystem.IFileSystem {
return e.fileSystem
}
// loadEvent formats an event line ("<type> <content> <version> <HH:MM:SS>\n")
// and queues it on the channel drained by the file-writer goroutine. An empty
// agentVersion defaults to the running agent's version.
func (e *EventLog) loadEvent(eventType string, agentVersion string, eventContent string) {
	if agentVersion == "" {
		agentVersion = version.Version
	}
	stamp := time.Now().Format(e.timePattern)
	e.eventChannel <- fmt.Sprintf("%s %s %s %s\n", eventType, eventContent, agentVersion, stamp)
}
// close closes the buffered event channel, which stops the writer goroutine
// once it drains any remaining events.
func (e *EventLog) close() {
close(e.eventChannel)
}
// rotateEventLog enforces the retention limit by deleting the oldest audit
// files (first entries of the matched-name list) beyond noOfHistoryFiles.
func (e *EventLog) rotateEventLog() {
	names := e.getFilesWithMatchDatePattern()
	if excess := len(names) - e.noOfHistoryFiles; excess > 0 {
		for _, name := range names[:excess] {
			e.fileSystem.DeleteFile(filepath.Join(e.eventLogPath, name))
		}
	}
}
// eventWriter starts a single goroutine that drains the event channel and
// appends each event to the current day's audit file; when a write creates a
// new file it triggers log rotation. The recover guard keeps a panic in the
// writer from killing the process.
func (e *EventLog) eventWriter() {
go func() {
defer func() {
if r := recover(); r != nil {
fmt.Println("Event writer panic: ", r)
}
}()
for event := range e.eventChannel {
// The header is only written when writeFile creates a new day's file.
header := SchemaVersionHeader + e.schemaVersion + "\n"
if createdFlag := e.writeFile(event, header); createdFlag {
e.rotateEventLog()
}
}
}()
}
// getFilesWithMatchDatePattern lists audit-directory entries whose names match
// "<logName><delimiter><date>"; returns nil when the directory is unreadable.
func (e *EventLog) getFilesWithMatchDatePattern() []string {
	var matched []string
	entries, err := e.fileSystem.ReadDir(e.eventLogPath)
	if err != nil {
		return matched
	}
	for _, entry := range entries {
		name := entry.Name()
		if entry.Mode().IsDir() || !e.isValidFileName(name) {
			continue
		}
		matched = append(matched, name)
	}
	return matched
}
// isValidFileName reports whether fileName has the form
// "<logName><delimiter><date>" with a date part that parses under the
// configured pattern in local time.
func (e *EventLog) isValidFileName(fileName string) bool {
	prefix := e.eventLogName + e.fileDelimiter
	if !strings.HasPrefix(fileName, prefix) {
		return false
	}
	_, err := time.ParseInLocation(e.datePattern, fileName[len(prefix):], time.Local)
	return err == nil
}
// writeFile appends content to the current day's audit file, prepending the
// header when the write is creating a fresh file. Returns createdFlag=true in
// that case so the caller can rotate old logs.
// NOTE(review): createdFlag can be true even when AppendToFile fails — confirm
// that rotating on a failed write is acceptable.
func (e *EventLog) writeFile(content string, header string) (createdFlag bool) {
logFilePathWithDate := filepath.Join(e.eventLogPath, e.nextFileName)
if !e.currentDateFileExists() {
createdFlag = true
content = header + content
}
if err := e.fileSystem.AppendToFile(logFilePathWithDate, content, appconfig.ReadWriteAccess); err != nil {
fmt.Println("Failed to write on the event log.", err)
return
}
// Cache the name so later calls can skip the Stat in currentDateFileExists.
e.currentFileName = e.nextFileName
return
}
// currentDateFileExists reports whether today's audit file already exists. On
// the first write after startup (currentFileName empty) it checks the disk;
// afterwards it only compares the cached name against today's name.
func (e *EventLog) currentDateFileExists() bool {
	if e.currentFileName != "" {
		return e.currentFileName == e.nextFileName
	}
	_, err := e.fileSystem.Stat(filepath.Join(e.eventLogPath, e.nextFileName))
	return !e.fileSystem.IsNotExist(err)
}
// The functions below use the eventlog singleton instance and operate only on
// the old (previous days') audit logs.
// EventCounter carries the audit counts, file name and audit date for one
// chunk of an audit log, to be sent to MGS.
type EventCounter struct {
AuditFileName string // audit file name
AuditDate string // Can be used later. Date to which the audit file belongs to.
CountMap map[string]int // count of events found in audit log
SchemaVersion string // schema version from audit file
AgentVersion string // denotes agent version found in the audit log
LastReadTime string // denotes last read time stamp from file
LastReadByte int // denotes last read byte from file
IsFirstChunk bool // denotes first chunk taken from file
EventChunkType string // denotes message type used to send to MGS
}
// createUpdateEventCounter creates and updates event counter object based on the event line and time marker. This function creates new object when the version is new.
func createUpdateEventCounter(eventCounterObj *EventCounter, eventLine string, byteMarker int) (*EventCounter, bool) {
var eventChunkType string
eventLine = singleSpacePattern.ReplaceAllString(strings.TrimSpace(eventLine), " ")
eventSplitVal := strings.Split(eventLine, " ")
// For Invalid Data (skips lines with less than 4 words)
if len(eventSplitVal) < 4 {
eventCounterObj.LastReadByte = byteMarker
return eventCounterObj, false
}
eventChunkType, eventName, version, timeStamp := eventSplitVal[0], eventSplitVal[1], eventSplitVal[2], eventSplitVal[3]
if matched, err := regexp.MatchString(VersionRegexPattern, version); matched == false || err != nil {
eventCounterObj.LastReadByte = byteMarker
return eventCounterObj, false | if eventCounterObj.AgentVersion != "" && (version != eventCounterObj.AgentVersion ||
eventChunkType != eventCounterObj.EventChunkType ||
eventChunkType == AgentUpdateResultMessage) {
newlyCreated = true
eventCounterObj = &EventCounter{
AuditFileName: eventCounterObj.AuditFileName,
AuditDate: eventCounterObj.AuditDate,
CountMap: make(map[string]int),
SchemaVersion: eventCounterObj.SchemaVersion,
IsFirstChunk: false,
}
}
eventCounterObj.EventChunkType = eventChunkType
eventCounterObj.AgentVersion = version
eventCounterObj.CountMap[eventName]++
eventCounterObj.LastReadTime = timeStamp
eventCounterObj.LastReadByte = byteMarker
return eventCounterObj, newlyCreated
}
// WriteLastLineFile persists the last-read byte position of an audit file as a
// zero-padded footer. When the file's first chunk is being sent (IsFirstChunk)
// the footer is appended; otherwise the trailing byte-marker digits of the
// existing footer are overwritten in place. This should be locked by the
// caller if called by multiple threads.
func WriteLastLineFile(eventCounter *EventCounter) error {
// No-op when the event-logger singleton was never initialized.
if eventLogInst == nil {
return nil
}
// generates byte marker with padding zeros, width BytePatternLen (e.g. "0000000042")
byteMarker := fmt.Sprintf("%0"+strconv.Itoa(BytePatternLen)+"d", eventCounter.LastReadByte)
logfilePath := filepath.Join(eventLogInst.eventLogPath, eventCounter.AuditFileName)
// Creates footer with last read byte padded by zeros
if eventCounter.IsFirstChunk {
// First send for this file: append footer tag + byte position at EOF.
if err := eventLogInst.fileSystem.AppendToFile(logfilePath, AuditSentSuccessFooter+byteMarker, appconfig.ReadWriteAccess); err != nil {
return err
}
return nil
}
// File already carries a footer: overwrite only the marker digits at the end.
stat, err := os.Stat(logfilePath)
if err != nil {
return err
}
file, err := os.OpenFile(logfilePath, os.O_RDWR, appconfig.ReadWriteAccess)
if err != nil {
return err
}
defer file.Close()
// The marker occupies the last BytePatternLen bytes of the file.
lastReadByteBegin := stat.Size() - BytePatternLen
_, err = file.WriteAt([]byte(byteMarker), lastReadByteBegin)
if err != nil {
return err
}
return nil
}
// GetEventCounter returns per-chunk audit counts from the previous days' logs.
// Files that fail any file-handler step are silently skipped; an empty list is
// returned when the event logger is uninitialized.
func GetEventCounter() ([]*EventCounter, error) {
eventCounters := make([]*EventCounter, 0)
if eventLogInst == nil {
return eventCounters, nil
}
// Today's file name — still being written, so it is excluded below.
nextFileName := eventLogInst.eventLogName + eventLogInst.fileDelimiter + time.Now().Format(eventLogInst.datePattern)
validFileNames := eventLogInst.getFilesWithMatchDatePattern()
// Loop continues till it visits the file with Audit Sent log
validFileNamesLen := len(validFileNames) - 1
for idx := validFileNamesLen; idx >= 0 && idx >= validFileNamesLen-2; idx-- { // Walks up to the last three entries; today's file (when present) is skipped, leaving the last two historical files
if validFileNames[idx] == nextFileName {
continue
}
auditLogFileName := filepath.Join(eventLogInst.eventLogPath, validFileNames[idx])
isAuditFileProcessed, byteMarker, err := isAuditSentToMGS(auditLogFileName)
if err != nil || byteMarker < 0 { // byte marker is set to -1 when the file has been processed
continue
}
eventCounterObj, err := countEvent(auditLogFileName, byteMarker, isAuditFileProcessed)
if err != nil {
continue
}
eventCounters = append(eventCounters, eventCounterObj...)
}
return eventCounters, nil
}
// countEvent parses one audit file: reads the header, then scans the body
// starting from byteMarker (progress recorded by a previous send) and returns
// the per-chunk event counts. isAuditFileProcessed is true when a sent-footer
// already exists for the file.
func countEvent(fileName string, byteMarker int, isAuditFileProcessed bool) ([]*EventCounter, error) {
// reads header
eventCounterObj, offset, err := readEventLogHeaders(fileName)
if err != nil {
return nil, err
}
eventCounterObj.IsFirstChunk = !isAuditFileProcessed // denotes that the file is untouched. Not even a single section is sent
// sets the offset value to be forwarded in file.
// retrieved value from footer of processed file
if byteMarker > 0 {
offset = byteMarker
}
// reads body and footer
eventCounter, err := readEventLogBodyFooter(fileName, eventCounterObj, offset)
if err != nil {
return nil, err
}
return eventCounter, nil
}
// readEventLogBodyFooter scans the body of an audit file starting at offset
// and aggregates per-chunk event counts until the AuditSentSuccess footer (or
// EOF). At most 5 chunks are collected; the returned slice is reversed so the
// most recently read chunk comes first.
//
// Fix: the original ignored the error from file.Seek — a failed seek would
// silently re-read already-processed data from the start of the file.
func readEventLogBodyFooter(fileName string, eventCounterObj *EventCounter, offset int) ([]*EventCounter, error) {
	eventCounter := make([]*EventCounter, 0)
	file, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	// seek offset bytes from beginning of file; surface a seek failure instead
	// of silently re-reading processed data.
	if _, err = file.Seek(int64(offset), 0); err != nil {
		return nil, err
	}
	// creates a new scanner with a split function that tracks bytes consumed
	scanner := bufio.NewScanner(file)
	scanner.Split(splitAuditLine(&offset))
	for scanner.Scan() {
		// Footer line marks the end of event data.
		if strings.HasPrefix(scanner.Text(), AuditSentSuccessFooter) {
			break
		}
		// Data line: created==true means this line started a new chunk, so the
		// finished chunk is collected and counting continues on the new object.
		if newObj, created := createUpdateEventCounter(eventCounterObj, scanner.Text(), offset); created { // TODO when file grows pass line number to createUpdateEventCounter and break
			if len(eventCounter) == 4 { // Reads only 4 + 1 (added after the loop) chunks from the file
				break
			}
			eventCounter = append(eventCounter, eventCounterObj)
			eventCounterObj = newObj
		}
	}
	eventCounter = append(eventCounter, eventCounterObj)
	// Reverse array - For now, two elem with two versions
	for i, j := 0, len(eventCounter)-1; i < j; i, j = i+1, j-1 {
		eventCounter[i], eventCounter[j] = eventCounter[j], eventCounter[i]
	}
	return eventCounter, nil
}
// readEventLogHeaders reads the header line(s) of an audit file and returns an
// EventCounter seeded with the file name, audit date (taken from the file-name
// suffix) and schema version, plus the byte offset just past the header.
func readEventLogHeaders(fileName string) (*EventCounter, int, error) {
noOfBytesRead := 0
file, err := os.Open(fileName)
if err != nil {
return nil, noOfBytesRead, err
}
defer file.Close()
eventCounterObj := &EventCounter{
CountMap: make(map[string]int),
AuditFileName: filepath.Base(fileName),
AgentVersion: "",
}
// The date part follows "<logName><delimiter>" in the file name.
// NOTE(review): assumes the name is at least filePrefixLen long — callers
// pre-filter via getFilesWithMatchDatePattern, which guarantees the prefix.
filePrefixLen := len(eventLogInst.eventLogName + eventLogInst.fileDelimiter)
eventCounterObj.AuditDate = eventCounterObj.AuditFileName[filePrefixLen:]
scanner := bufio.NewScanner(file)
scanner.Split(splitAuditLine(&noOfBytesRead))
// Currently exactly one header line (the schema version) is expected.
skipLineCount := 1
for scanner.Scan() {
// For the Header
if skipLineCount > 0 {
skipLineCount--
eventCounterObj.SchemaVersion = getValidAuditFileHeaderFooterVal(scanner.Text(), SchemaVersionHeader) // Can be iterated when the headers are more
}
if skipLineCount == 0 {
break
}
}
return eventCounterObj, noOfBytesRead, nil
}
// splitAuditLine wraps bufio.ScanLines in a SplitFunc that also accumulates
// the number of bytes consumed into *byteLen, so callers can track the current
// file offset while scanning.
func splitAuditLine(byteLen *int) bufio.SplitFunc {
	return func(data []byte, atEOF bool) (int, []byte, error) {
		advance, token, err := bufio.ScanLines(data, atEOF)
		*byteLen += advance
		return advance, token, err
	}
}
// getValidAuditFileHeaderFooterVal extracts the value for headerFooterTag from
// an audit header/footer line, substituting the running agent's schema version
// when a schema-version line carries no value.
func getValidAuditFileHeaderFooterVal(event string, headerFooterTag string) string {
	val := getEventHeaderVal(event, headerFooterTag)
	if val == "" && strings.HasPrefix(event, SchemaVersionHeader) {
		val = eventLogInst.schemaVersion // fall back to current schema version
	}
	return val
}
// getEventHeaderVal returns the text that follows the first occurrence of
// headerFooterTag in event, or "" when the line does not start with the tag.
func getEventHeaderVal(event string, headerFooterTag string) string {
	if !strings.HasPrefix(event, headerFooterTag) {
		return ""
	}
	if parts := strings.Split(event, headerFooterTag); len(parts) > 1 {
		return parts[1]
	}
	return ""
}
// isAuditSentToMGS checks whether AuditSentSuccess status is present or not and returns the last audit sent time if present
func isAuditSentToMGS(auditFileName string) (bool, int, error) {
byteMarker := 0
stat, err := os.Stat(auditFileName)
if err != nil {
return false, byteMarker, err
}
file, err := os.Open(auditFileName)
if err != nil {
return false, byteMarker, err
}
defer file.Close()
buf := make([]byte, len(AuditSentSuccessFooter)+BytePatternLen)
start := stat.Size() - int64(len(buf))
_, err = file.ReadAt(buf, start)
if err != nil {
return false, byteMarker, err
}
lastLine := string(buf)
if strings.HasPrefix(lastLine, AuditSentSuccessFooter) {
lastLineSplitVal := strings.Split(lastLine, AuditSentSuccessFooter)
if len(lastLineSplitVal) > 1 {
byteMarker, _ = strconv.Atoi(lastLineSplitVal[1])
if byteMarker < 0 { // Using int instead of uint here for avoiding extra conversions
byteMarker = 0
}
// when the file is processed completely
if int64(byteMarker) == start {
return true, -1, nil
}
}
return true, byteMarker, nil
}
return false, byteMarker, nil
} | }
// Will create a new object and load data for it from new chunk. For now, the chunks are divided based on version and Update events.
newlyCreated := false | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.