# Memory-core multi-output-port tests (skipped: superseded by the "Diet Lake" memory core).
import argparse
from memory_core.memory_core import gen_memory_core, Mode
from memory_core.memory_core_magma import MemCore
from lake.utils.parse_clkwork_csv import generate_data_lists
import glob
import tempfile
import shutil
import fault
import random
import magma
import os
from gemstone.common.testers import ResetTester
from gemstone.common.testers import BasicTester
from gemstone.common.util import compress_config_data
import pytest
from gemstone.generator import Const
from cgra.util import create_cgra
from canal.util import IOSide
from memory_core.memory_core_magma import config_mem_tile
from archipelago import pnr
# @pytest.fixture()
def io_sides():
    """Return the IO-side mask enabling IO tiles on all four chip edges."""
    sides = IOSide.North | IOSide.South
    sides |= IOSide.East | IOSide.West
    return sides
# @pytest.fixture(scope="module")
def cw_files():
    """Locate the ChipWare floating-point Verilog stubs under peak_core/.

    Asserts that each expected file exists and returns the list of paths.
    """
    dirname = "peak_core"
    paths = []
    for name in ("CW_fp_add.v", "CW_fp_mult.v"):
        path = os.path.join(dirname, name)
        assert os.path.isfile(path)
        paths.append(path)
    return paths
def make_memory_core():
    """Instantiate a MemCore circuit and a tester driven through reset.

    Returns ``[circuit, tester, core]`` with the tester already clocked
    through a low->high reset pulse.
    """
    core = MemCore()
    circuit = core.circuit()
    tester = MemoryCoreTester(circuit, circuit.clk, circuit.reset)
    # Full reset sequence: clock low, deassert then assert reset, one
    # step each, then let the tester perform its own reset protocol.
    tester.poke(circuit.clk, 0)
    tester.poke(circuit.reset, 0)
    tester.step(1)
    tester.poke(circuit.reset, 1)
    tester.step(1)
    tester.reset()
    return [circuit, tester, core]
class MemoryCoreTester(BasicTester):
    """BasicTester extension that can write any of the tile's config buses."""

    def configure(self, addr, data, feature=0):
        """Write ``data`` to configuration register ``addr``.

        ``feature`` selects the config bundle: 0 uses the plain ``config``
        port, N uses ``config_N``.  The write strobe is pulsed for one
        clock step and the data bus is cleared afterwards.
        """
        self.poke(self.clock, 0)
        self.poke(self.reset_port, 0)
        # The original built each poke as a source string and ran it through
        # exec(); getattr() resolves the same dynamically-named bundle
        # without string-eval overhead or injection risk, and collapses the
        # duplicated feature==0 / feature!=0 branches into one code path.
        if feature == 0:
            config = self._circuit.config
        else:
            config = getattr(self._circuit, f"config_{feature}")
        self.poke(config.config_addr, addr)
        self.poke(config.config_data, data)
        self.poke(config.write, 1)
        self.step(1)
        self.poke(config.write, 0)
        self.poke(config.config_data, 0)
# No longer applicable w/ Diet Lake
@pytest.mark.skip
def test_multiple_output_ports():
    """Double-buffer test: one write port feeding two read ports.

    Sequential words are streamed into port 0; output port 0 is expected
    to replay the low half of each pass and output port 1 the high half
    (its address generator starts 128 coarse words = 512 words later).
    Skipped: this config space predates the Diet Lake memory core.
    """
    # Regular Bootstrap
    [circuit, tester, MCore] = make_memory_core()
    # Stall the tile while configuration registers are written.
    tester.poke(circuit.stall, 1)
    tile_en = 1
    depth = 1024
    chunk = 128
    # Cycles between end of fill and first valid output (pipeline latency).
    startup_delay = 4
    mode = Mode.DB
    config_data = []
    # Port 0 application control.  Depths are in words; "coarse" fields are
    # in 4-word units — field semantics follow the lake strg_ub config
    # space (TODO confirm against lake documentation).
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_input_port_0", 0))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_output_port_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_output_port_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_read_depth_0", 3 * chunk * 4))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_write_depth_wo_0", 256 * 4))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_write_depth_ss_0", 256 * 4))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_read_depth_0", int(3 * chunk)))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_write_depth_wo_0", 256))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_write_depth_ss_0", 256))
    # Write-side address generator: 2D sequential walk (256 coarse words
    # per pass, repeated).
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_dimensionality", 2))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_ranges_0", 256))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_ranges_1", 100))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_ranges_2", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_ranges_3", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_starting_addr", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_1", 256))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_2", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_3", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_4", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_5", 0))
    # Read-side address generator for output port 0: starts at 0.
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_dimensionality", 3))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_0", 128))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_1", 3))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_2", 100))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_3", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_4", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_5", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_starting_addr", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_1", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_2", 256))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_3", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_4", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_5", 0))
    # Transpose-buffer (tba) settings for port 0.
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_range_outer", chunk))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_starting_addr", 0))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_stride", 1))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_dimensionality", 1))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_indices_0", 0))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_range_inner", 2))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_tb_height", 1))
    config_data.append(MCore.get_config_data("strg_ub_sync_grp_sync_group_0", 1))
    # Tile-global settings: enable tile, unified-buffer mode, no chaining.
    config_data.append(MCore.get_config_data("tile_en", 1))
    config_data.append(MCore.get_config_data("fifo_ctrl_fifo_depth", 0))
    config_data.append(MCore.get_config_data("mode", 0))
    config_data.append(MCore.get_config_data("flush_reg_sel", 1))
    config_data.append(MCore.get_config_data("wen_in_1_reg_sel", 1))
    config_data.append(MCore.get_config_data("strg_ub_pre_fetch_0_input_latency", 4))
    config_data.append(MCore.get_config_data("enable_chain_output", 0))
    config_data.append(MCore.get_config_data("enable_chain_input", 0))
    config_data.append(MCore.get_config_data("chain_idx_input", 0))
    config_data.append(MCore.get_config_data("chain_idx_output", 0))
    # Output port 1: same pattern as port 0 but starting 128 coarse words
    # (512 words) into the buffer.
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_input_port_1", 0))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_read_depth_1", 3 * chunk * 4))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_read_depth_1", 3 * chunk))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_dimensionality", 3))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_0", 128))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_1", 3))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_2", 100))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_3", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_4", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_5", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_starting_addr", 128))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_1", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_2", 256))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_3", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_4", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_5", 0))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_range_outer", chunk))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_starting_addr", 0))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_stride", 1))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_dimensionality", 1))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_indices_0", 0))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_range_inner", 2))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_tb_height", 1))
    config_data.append(MCore.get_config_data("strg_ub_sync_grp_sync_group_1", 1))
    config_data.append(MCore.get_config_data("strg_ub_pre_fetch_1_input_latency", 4))
    config_data = compress_config_data(config_data)
    # Configure
    for addr, data in config_data:
        tester.configure(addr, data)
    tester.poke(circuit.stall, 0)
    tester.eval()
    # Input stream: six passes of 0..depth-1.
    inputs = []
    for z in range(6):
        for i in range(depth):
            inputs.append(i)
    # Expected streams: port 0 the low half of each pass, port 1 the high
    # half (offset 512 words).
    outputs_0 = []
    outputs_1 = []
    for z in range(6):
        for i in range(depth // 2):
            outputs_0.append(i)
            outputs_1.append(i + 512)
    tester.poke(circuit.ren_in_0, 1)
    tester.poke(circuit.ren_in_1, 1)
    output_idx = 0
    for i in range(4 * depth):
        # We are just writing sequentially for this sample
        if(i >= 2 * depth + 4 * chunk):
            tester.poke(circuit.wen_in_0, 1)
        elif(i >= 2 * depth):
            # Write for two rounds
            tester.poke(circuit.wen_in_0, 0)
        else:
            tester.poke(circuit.wen_in_0, 1)
        tester.poke(circuit.data_in_0, inputs[i])
        tester.eval()
        if (i > depth + startup_delay):
            # After the first buffer fills (plus latency) both output
            # ports must stream valid data every cycle.
            tester.expect(circuit.valid_out_0, 1)
            tester.expect(circuit.valid_out_1, 1)
            tester.expect(circuit.data_out_0, outputs_0[output_idx])
            tester.expect(circuit.data_out_1, outputs_1[output_idx])
            output_idx += 1
        else:
            tester.expect(circuit.valid_out_0, 0)
            tester.expect(circuit.valid_out_1, 0)
        tester.step(2)
    # Run the generated testbench under verilator, alongside any
    # genesis-produced Verilog collateral.
    with tempfile.TemporaryDirectory() as tempdir:
        for genesis_verilog in glob.glob("genesis_verif/*.*"):
            shutil.copy(genesis_verilog, tempdir)
        tester.compile_and_run(directory=tempdir,
                               magma_output="coreir-verilog",
                               target="verilator",
                               flags=["-Wno-fatal"])
# No longer applicable w/ Diet Lake
@pytest.mark.skip
def test_multiple_output_ports_conv():
    """Double-buffer test: two read ports walking a 3x3 convolution pattern.

    Sequential data is written on port 0 while both output address
    generators traverse a 6D nest (channel, window x/y, chunk x/y, repeat)
    over the stored image; port 1 reads with a 128-coarse-word offset.
    Skipped: this config space predates the Diet Lake memory core.
    """
    # Regular Bootstrap
    [circuit, tester, MCore] = make_memory_core()
    # Stall the tile during configuration.
    tester.poke(circuit.stall, 1)
    tile_en = 1
    depth = 1024
    chunk = 128
    startup_delay = 4
    # Total reads = 6x6 output positions * 3x3 window * 2 channels * 4 words.
    num_outputs = 6 * 6 * 3 * 3 * 2 * 4
    mode = Mode.DB
    config_data = []
    # Port 0 application control (coarse fields are 4-word units).
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_input_port_0", 0))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_output_port_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_output_port_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_read_depth_0", num_outputs))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_write_depth_wo_0", 256 * 4))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_write_depth_ss_0", 256 * 4))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_read_depth_0", int(num_outputs / 4)))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_write_depth_wo_0", 256))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_write_depth_ss_0", 256))
    # Write-side address generator: sequential 2D fill.
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_dimensionality", 2))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_ranges_0", 256))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_ranges_1", 100))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_ranges_2", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_ranges_3", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_starting_addr", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_1", 256))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_2", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_3", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_4", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_5", 0))
    # Read-side address generator: 6D convolution walk (mirrors the
    # Python index-generation loops below).
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_dimensionality", 6))
    # channel
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_0", 2))
    # window x
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_1", 3))
    # window y
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_2", 3))
    # chunk x
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_3", 6))
    # chunk y
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_4", 6))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_5", 100))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_starting_addr", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_1", 2))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_2", 16))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_3", 2))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_4", 16))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_5", 0))
    # Transpose-buffer settings for port 0.
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_range_outer", chunk))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_starting_addr", 0))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_stride", 1))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_dimensionality", 1))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_indices_0", 0))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_range_inner", 2))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_tb_height", 1))
    config_data.append(MCore.get_config_data("strg_ub_sync_grp_sync_group_0", 1))
    # Tile-global settings.
    config_data.append(MCore.get_config_data("tile_en", 1))
    config_data.append(MCore.get_config_data("fifo_ctrl_fifo_depth", 0))
    config_data.append(MCore.get_config_data("mode", 0))
    config_data.append(MCore.get_config_data("flush_reg_sel", 1))
    config_data.append(MCore.get_config_data("wen_in_1_reg_sel", 1))
    config_data.append(MCore.get_config_data("strg_ub_pre_fetch_0_input_latency", 4))
    config_data.append(MCore.get_config_data("enable_chain_output", 0))
    config_data.append(MCore.get_config_data("enable_chain_input", 0))
    config_data.append(MCore.get_config_data("chain_idx_input", 0))
    config_data.append(MCore.get_config_data("chain_idx_output", 0))
    # Output port 1: same convolution walk, starting 128 coarse words in.
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_input_port_1", 0))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_read_depth_1", num_outputs))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_read_depth_1", int(num_outputs / 4)))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_dimensionality", 6))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_0", 2))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_1", 3))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_2", 3))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_3", 6))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_4", 6))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_5", 100))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_starting_addr", 128))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_1", 2))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_2", 16))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_3", 2))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_4", 16))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_5", 0))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_range_outer", chunk))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_starting_addr", 0))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_stride", 1))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_dimensionality", 1))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_indices_0", 0))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_range_inner", 2))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_tb_height", 1))
    config_data.append(MCore.get_config_data("strg_ub_sync_grp_sync_group_1", 1))
    config_data.append(MCore.get_config_data("strg_ub_pre_fetch_1_input_latency", 4))
    config_data = compress_config_data(config_data)
    # Configure
    for addr, data in config_data:
        tester.configure(addr, data)
    tester.poke(circuit.stall, 0)
    tester.eval()
    # Input stream: six passes of 0..depth-1.
    inputs = []
    for z in range(6):
        for i in range(depth):
            inputs.append(i)
    # Build the golden read sequences in software, mirroring the 6D
    # address-generator nest configured above (strides: ch=1, wx=2,
    # wy=16, x=2, y=16 coarse words; each coarse word is 4 values).
    output_index = []
    output1_index = []
    for y in range(6):
        for x in range(6):
            for wy in range(3):
                for wx in range(3):
                    for ch in range(2):
                        offset = y * 16 + x * 2 + wy * 16 + wx * 2 + ch * 1
                        output1 = 128 + offset
                        for i in range(4):
                            output_index.append((offset * 4 + i) % len(inputs))
                            output1_index.append((output1 * 4 + i) % len(inputs))
    tester.poke(circuit.ren_in_0, 1)
    tester.poke(circuit.ren_in_1, 1)
    output_idx = 0
    for i in range(depth + startup_delay + num_outputs):
        # We are just writing sequentially for this sample
        if (i < 2 * depth):
            tester.poke(circuit.wen_in_0, 1)
            tester.poke(circuit.data_in_0, inputs[i])
        else:
            tester.poke(circuit.wen_in_0, 0)
        tester.eval()
        if (i > depth + startup_delay):
            # Both ports must produce the precomputed golden sequence.
            tester.expect(circuit.valid_out_0, 1)
            tester.expect(circuit.valid_out_1, 1)
            idx0 = output_index[output_idx]
            idx1 = output1_index[output_idx]
            tester.expect(circuit.data_out_0, inputs[idx0])
            tester.expect(circuit.data_out_1, inputs[idx1])
            output_idx += 1
        else:
            tester.expect(circuit.valid_out_0, 0)
            tester.expect(circuit.valid_out_1, 0)
        tester.step(2)
    # Run the generated testbench under verilator.
    with tempfile.TemporaryDirectory() as tempdir:
        for genesis_verilog in glob.glob("genesis_verif/*.*"):
            shutil.copy(genesis_verilog, tempdir)
        tester.compile_and_run(directory=tempdir,
                               magma_output="coreir-verilog",
                               target="verilator",
                               flags=["-Wno-fatal"])
# No longer applicable w/ Diet Lake
@pytest.mark.skip
def test_mult_ports_mult_aggs_double_buffer_conv():
    """Double-buffer convolution test with two write ports and aggregators.

    Both input ports write (through scheduled aggregators) into separate
    halves of the buffer, and both output ports traverse the same 6D
    convolution nest as test_multiple_output_ports_conv.
    Skipped: this config space predates the Diet Lake memory core.
    """
    # Regular Bootstrap
    [circuit, tester, MCore] = make_memory_core()
    # Stall the tile during configuration.
    tester.poke(circuit.stall, 1)
    tile_en = 1
    depth = 128 * 4
    # 4 is normal start up delay, 1 is due to mult input port agg scheduling
    startup_delay = 4 + 1
    # Total reads = 6x6 positions * 3x3 window * 2 channels * 4 words.
    num_outputs = 6 * 6 * 3 * 3 * 2 * 4
    mode = Mode.DB
    config_data = []
    # Port 0 application control (coarse fields are 4-word units).
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_input_port_0", 0))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_output_port_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_output_port_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_read_depth_0", num_outputs))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_write_depth_wo_0", depth))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_write_depth_ss_0", depth))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_read_depth_0", int(num_outputs / 4)))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_write_depth_wo_0", int(depth / 4)))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_write_depth_ss_0", int(depth / 4)))
    # Aggregator scheduling for input port 0: two-slot round-robin
    # (presumably interleaving the two write ports — TODO confirm).
    config_data.append(MCore.get_config_data("strg_ub_agg_in_0_in_period", 2))
    config_data.append(MCore.get_config_data("strg_ub_agg_in_0_in_sched_0", 0))
    config_data.append(MCore.get_config_data("strg_ub_agg_in_0_in_sched_1", 1))
    config_data.append(MCore.get_config_data("strg_ub_agg_in_0_out_period", 2))
    config_data.append(MCore.get_config_data("strg_ub_agg_in_0_out_sched_0", 0))
    config_data.append(MCore.get_config_data("strg_ub_agg_in_0_out_sched_1", 1))
    # Write-side address generator for port 0: sequential fill from 0.
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_dimensionality", 2))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_ranges_0", int(depth / 4)))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_ranges_1", 100))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_ranges_2", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_ranges_3", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_starting_addr", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_1", 256))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_2", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_3", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_4", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_0_strides_5", 0))
    # Read-side address generator: 6D convolution walk.
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_dimensionality", 6))
    # channel
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_0", 2))
    # window x
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_1", 3))
    # window y
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_2", 3))
    # chunk x
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_3", 6))
    # chunk y
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_4", 6))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_ranges_5", 100))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_starting_addr", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_1", 2))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_2", 16))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_3", 2))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_4", 16))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_0_strides_5", 0))
    # Transpose-buffer settings for port 0.
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_range_outer", depth))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_starting_addr", 0))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_stride", 1))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_dimensionality", 1))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_indices_0", 0))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_range_inner", 2))
    config_data.append(MCore.get_config_data("strg_ub_tba_0_tb_0_tb_height", 1))
    config_data.append(MCore.get_config_data("strg_ub_sync_grp_sync_group_0", 1))
    # Tile-global settings (note: wen_in_1_reg_sel is NOT set here since
    # port 1's write enable is driven externally in this test).
    config_data.append(MCore.get_config_data("tile_en", 1))
    config_data.append(MCore.get_config_data("fifo_ctrl_fifo_depth", 0))
    config_data.append(MCore.get_config_data("mode", 0))
    config_data.append(MCore.get_config_data("flush_reg_sel", 1))
    config_data.append(MCore.get_config_data("strg_ub_pre_fetch_0_input_latency", 4))
    config_data.append(MCore.get_config_data("enable_chain_output", 0))
    config_data.append(MCore.get_config_data("enable_chain_input", 0))
    config_data.append(MCore.get_config_data("chain_idx_input", 0))
    config_data.append(MCore.get_config_data("chain_idx_output", 0))
    # Port 1: full read/write configuration, writing and reading 128
    # coarse words (512 words) past port 0's region.
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_input_port_1", 1))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_read_depth_1", num_outputs))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_write_depth_wo_1", depth))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_write_depth_ss_1", depth))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_read_depth_1", int(num_outputs / 4)))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_write_depth_wo_1", int(depth / 4)))
    config_data.append(MCore.get_config_data("strg_ub_app_ctrl_coarse_write_depth_ss_1", int(depth / 4)))
    # Aggregator scheduling for input port 1 (same two-slot pattern).
    config_data.append(MCore.get_config_data("strg_ub_agg_in_1_in_period", 2))
    config_data.append(MCore.get_config_data("strg_ub_agg_in_1_in_sched_0", 0))
    config_data.append(MCore.get_config_data("strg_ub_agg_in_1_in_sched_1", 1))
    config_data.append(MCore.get_config_data("strg_ub_agg_in_1_out_period", 2))
    config_data.append(MCore.get_config_data("strg_ub_agg_in_1_out_sched_0", 0))
    config_data.append(MCore.get_config_data("strg_ub_agg_in_1_out_sched_1", 1))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_1_dimensionality", 2))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_1_ranges_0", int(depth / 4)))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_1_ranges_1", 100))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_1_ranges_2", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_1_ranges_3", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_1_starting_addr", 128))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_1_strides_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_1_strides_1", 256))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_1_strides_2", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_1_strides_3", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_1_strides_4", 0))
    config_data.append(MCore.get_config_data("strg_ub_input_addr_ctrl_address_gen_1_strides_5", 0))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_dimensionality", 6))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_0", 2))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_1", 3))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_2", 3))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_3", 6))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_4", 6))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_ranges_5", 100))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_starting_addr", 128))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_0", 1))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_1", 2))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_2", 16))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_3", 2))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_4", 16))
    config_data.append(MCore.get_config_data("strg_ub_output_addr_ctrl_address_gen_1_strides_5", 0))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_range_outer", depth))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_starting_addr", 0))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_stride", 1))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_dimensionality", 1))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_indices_0", 0))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_range_inner", 2))
    config_data.append(MCore.get_config_data("strg_ub_tba_1_tb_0_tb_height", 1))
    config_data.append(MCore.get_config_data("strg_ub_sync_grp_sync_group_1", 1))
    config_data.append(MCore.get_config_data("strg_ub_pre_fetch_1_input_latency", 4))
    config_data = compress_config_data(config_data)
    # Configure
    for addr, data in config_data:
        tester.configure(addr, data)
    tester.poke(circuit.stall, 0)
    tester.eval()
    # Two distinct input streams so the ports' data can be told apart:
    # port 1's values are port 0's shifted by 16.
    inputs = []
    inputs1 = []
    for j in range(3):
        for i in range(depth):
            inputs.append(i + (j + 1) * 16)
            inputs1.append((i + 16) + (j + 1) * 16)
    # Golden read sequences mirroring the 6D address-generator nest.
    output_index = []
    output1_index = []
    for y in range(6):
        for x in range(6):
            for wy in range(3):
                for wx in range(3):
                    for ch in range(2):
                        offset = y * 16 + x * 2 + wy * 16 + wx * 2 + ch * 1
                        output1 = 128 + offset
                        for i in range(4):
                            output_index.append((offset * 4 + i) % len(inputs))
                            output1_index.append((output1 * 4 + i) % len(inputs))
    tester.poke(circuit.ren_in_0, 1)
    tester.poke(circuit.ren_in_1, 1)
    output_idx = 0
    for i in range(depth + startup_delay + num_outputs):
        # Drive both write ports for the first three buffer fills, then stop.
        if(i >= 3 * depth):
            tester.poke(circuit.wen_in_0, 0)
            tester.poke(circuit.wen_in_1, 0)
        else:
            tester.poke(circuit.wen_in_0, 1)
            tester.poke(circuit.data_in_0, inputs[i])
            tester.poke(circuit.wen_in_1, 1)
            tester.poke(circuit.data_in_1, inputs1[i])
        tester.eval()
        if (i > depth + startup_delay):
            tester.expect(circuit.valid_out_0, 1)
            tester.expect(circuit.valid_out_1, 1)
            idx0 = output_index[output_idx]
            idx1 = output1_index[output_idx]
            tester.expect(circuit.data_out_0, inputs[idx0])
            tester.expect(circuit.data_out_1, inputs[idx1])
            output_idx += 1
        else:
            tester.expect(circuit.valid_out_0, 0)
            tester.expect(circuit.valid_out_1, 0)
        tester.step(2)
    # Run the generated testbench under verilator.
    with tempfile.TemporaryDirectory() as tempdir:
        for genesis_verilog in glob.glob("genesis_verif/*.*"):
            shutil.copy(genesis_verilog, tempdir)
        tester.compile_and_run(directory=tempdir,
                               magma_output="coreir-verilog",
                               target="verilator",
                               flags=["-Wno-fatal"])
# No longer applicable w/ Diet Lake
@pytest.mark.skip
def test_mult_ports_mult_aggs_double_buffer():
    """Double-buffer test driving both data ports with multiple aggregators.

    Writes three images per port (port 1 offset by 16) and checks that the
    two ports stream back the first two images in lockstep.
    """
    # Regular bootstrap: fresh core, stalled during configuration.
    [circuit, tester, MCore] = make_memory_core()
    tester.poke(circuit.stall, 1)
    tile_en = 1
    depth = 128 * 4
    # 4 is normal start up delay, 1 is due to mult input port agg scheduling
    startup_delay = 4 + 1
    num_outputs = 6 * 6 * 3 * 3 * 2 * 4
    mode = Mode.DB
    coarse = depth // 4
    # (register name, value) pairs, in the original programming order.
    reg_values = [
        ("strg_ub_app_ctrl_input_port_0", 0),
        ("strg_ub_app_ctrl_output_port_0", 1),
        ("strg_ub_app_ctrl_coarse_output_port_0", 1),
        ("strg_ub_app_ctrl_read_depth_0", depth),
        ("strg_ub_app_ctrl_write_depth_wo_0", depth),
        ("strg_ub_app_ctrl_write_depth_ss_0", depth),
        ("strg_ub_app_ctrl_coarse_read_depth_0", coarse),
        ("strg_ub_app_ctrl_coarse_write_depth_wo_0", coarse),
        ("strg_ub_app_ctrl_coarse_write_depth_ss_0", coarse),
        ("strg_ub_agg_in_0_in_period", 2),
        ("strg_ub_agg_in_0_in_sched_0", 0),
        ("strg_ub_agg_in_0_in_sched_1", 1),
        ("strg_ub_agg_in_0_out_period", 2),
        ("strg_ub_agg_in_0_out_sched_0", 0),
        ("strg_ub_agg_in_0_out_sched_1", 1),
        ("strg_ub_input_addr_ctrl_address_gen_0_dimensionality", 2),
        ("strg_ub_input_addr_ctrl_address_gen_0_ranges_0", coarse),
        ("strg_ub_input_addr_ctrl_address_gen_0_ranges_1", 100),
        ("strg_ub_input_addr_ctrl_address_gen_0_ranges_2", 0),
        ("strg_ub_input_addr_ctrl_address_gen_0_ranges_3", 0),
        ("strg_ub_input_addr_ctrl_address_gen_0_starting_addr", 0),
        ("strg_ub_input_addr_ctrl_address_gen_0_strides_0", 1),
        ("strg_ub_input_addr_ctrl_address_gen_0_strides_1", 256),
        ("strg_ub_input_addr_ctrl_address_gen_0_strides_2", 0),
        ("strg_ub_input_addr_ctrl_address_gen_0_strides_3", 0),
        ("strg_ub_input_addr_ctrl_address_gen_0_strides_4", 0),
        ("strg_ub_input_addr_ctrl_address_gen_0_strides_5", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_dimensionality", 2),
        ("strg_ub_output_addr_ctrl_address_gen_0_ranges_0", coarse),
        ("strg_ub_output_addr_ctrl_address_gen_0_ranges_1", 100),
        ("strg_ub_output_addr_ctrl_address_gen_0_ranges_2", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_ranges_3", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_ranges_4", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_ranges_5", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_starting_addr", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_strides_0", 1),
        ("strg_ub_output_addr_ctrl_address_gen_0_strides_1", 256),
        ("strg_ub_output_addr_ctrl_address_gen_0_strides_2", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_strides_3", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_strides_4", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_strides_5", 0),
        ("strg_ub_tba_0_tb_0_range_outer", depth),
        ("strg_ub_tba_0_tb_0_starting_addr", 0),
        ("strg_ub_tba_0_tb_0_stride", 1),
        ("strg_ub_tba_0_tb_0_dimensionality", 1),
        ("strg_ub_tba_0_tb_0_indices_0", 0),
        ("strg_ub_tba_0_tb_0_range_inner", 2),
        ("strg_ub_tba_0_tb_0_tb_height", 1),
        ("strg_ub_sync_grp_sync_group_0", 1),
        ("tile_en", 1),
        ("fifo_ctrl_fifo_depth", 0),
        ("mode", 0),
        ("flush_reg_sel", 1),
        ("strg_ub_pre_fetch_0_input_latency", 4),
        ("enable_chain_output", 0),
        ("enable_chain_input", 0),
        ("chain_idx_input", 0),
        ("chain_idx_output", 0),
        ("strg_ub_app_ctrl_input_port_1", 1),
        ("strg_ub_app_ctrl_read_depth_1", depth),
        ("strg_ub_app_ctrl_write_depth_wo_1", depth),
        ("strg_ub_app_ctrl_write_depth_ss_1", depth),
        ("strg_ub_app_ctrl_coarse_read_depth_1", coarse),
        ("strg_ub_app_ctrl_coarse_write_depth_wo_1", coarse),
        ("strg_ub_app_ctrl_coarse_write_depth_ss_1", coarse),
        ("strg_ub_agg_in_1_in_period", 2),
        ("strg_ub_agg_in_1_in_sched_0", 0),
        ("strg_ub_agg_in_1_in_sched_1", 1),
        ("strg_ub_agg_in_1_out_period", 2),
        ("strg_ub_agg_in_1_out_sched_0", 0),
        ("strg_ub_agg_in_1_out_sched_1", 1),
        ("strg_ub_input_addr_ctrl_address_gen_1_dimensionality", 2),
        ("strg_ub_input_addr_ctrl_address_gen_1_ranges_0", coarse),
        ("strg_ub_input_addr_ctrl_address_gen_1_ranges_1", 100),
        ("strg_ub_input_addr_ctrl_address_gen_1_ranges_2", 0),
        ("strg_ub_input_addr_ctrl_address_gen_1_ranges_3", 0),
        ("strg_ub_input_addr_ctrl_address_gen_1_starting_addr", 128),
        ("strg_ub_input_addr_ctrl_address_gen_1_strides_0", 1),
        ("strg_ub_input_addr_ctrl_address_gen_1_strides_1", 256),
        ("strg_ub_input_addr_ctrl_address_gen_1_strides_2", 0),
        ("strg_ub_input_addr_ctrl_address_gen_1_strides_3", 0),
        ("strg_ub_input_addr_ctrl_address_gen_1_strides_4", 0),
        ("strg_ub_input_addr_ctrl_address_gen_1_strides_5", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_dimensionality", 2),
        ("strg_ub_output_addr_ctrl_address_gen_1_ranges_0", coarse),
        ("strg_ub_output_addr_ctrl_address_gen_1_ranges_1", 100),
        ("strg_ub_output_addr_ctrl_address_gen_1_ranges_2", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_ranges_3", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_ranges_4", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_ranges_5", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_starting_addr", 128),
        ("strg_ub_output_addr_ctrl_address_gen_1_strides_0", 1),
        ("strg_ub_output_addr_ctrl_address_gen_1_strides_1", 256),
        ("strg_ub_output_addr_ctrl_address_gen_1_strides_2", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_strides_3", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_strides_4", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_strides_5", 0),
        ("strg_ub_tba_1_tb_0_range_outer", depth),
        ("strg_ub_tba_1_tb_0_starting_addr", 0),
        ("strg_ub_tba_1_tb_0_stride", 1),
        ("strg_ub_tba_1_tb_0_dimensionality", 1),
        ("strg_ub_tba_1_tb_0_indices_0", 0),
        ("strg_ub_tba_1_tb_0_range_inner", 2),
        ("strg_ub_tba_1_tb_0_tb_height", 1),
        ("strg_ub_sync_grp_sync_group_1", 1),
        ("strg_ub_pre_fetch_1_input_latency", 4),
    ]
    config_data = compress_config_data(
        [MCore.get_config_data(name, value) for name, value in reg_values])
    # Program the tile, then release the stall.
    for addr, data in config_data:
        tester.configure(addr, data)
    tester.poke(circuit.stall, 0)
    tester.eval()
    # Three input images per port; expected outputs are the first two.
    inputs = [i + (j + 1) * 16 for j in range(3) for i in range(depth)]
    inputs1 = [(i + 16) + (j + 1) * 16 for j in range(3) for i in range(depth)]
    outputs = [i + (j + 1) * 16 for j in range(2) for i in range(depth)]
    outputs1 = [(i + 16) + (j + 1) * 16 for j in range(2) for i in range(depth)]
    tester.poke(circuit.ren_in_0, 1)
    tester.poke(circuit.ren_in_1, 1)
    output_idx = 0
    for cycle in range(4 * depth):
        if cycle < 3 * depth:
            # Still writing: drive both ports.
            tester.poke(circuit.wen_in_0, 1)
            tester.poke(circuit.data_in_0, inputs[cycle])
            tester.poke(circuit.wen_in_1, 1)
            tester.poke(circuit.data_in_1, inputs1[cycle])
        else:
            tester.poke(circuit.wen_in_0, 0)
            tester.poke(circuit.wen_in_1, 0)
        if cycle >= 3 * depth + startup_delay:
            tester.poke(circuit.ren_in_0, 0)
            tester.poke(circuit.ren_in_1, 0)
        tester.eval()
        if depth + startup_delay < cycle < 3 * depth + startup_delay:
            tester.expect(circuit.valid_out_0, 1)
            tester.expect(circuit.valid_out_1, 1)
            tester.expect(circuit.data_out_0, outputs[output_idx])
            tester.expect(circuit.data_out_1, outputs1[output_idx])
            output_idx += 1
        else:
            tester.expect(circuit.valid_out_0, 0)
            tester.expect(circuit.valid_out_1, 0)
        tester.step(2)
    with tempfile.TemporaryDirectory() as tempdir:
        for genesis_verilog in glob.glob("genesis_verif/*.*"):
            shutil.copy(genesis_verilog, tempdir)
        tester.compile_and_run(directory=tempdir,
                               magma_output="coreir-verilog",
                               target="verilator",
                               flags=["-Wno-fatal"])
# No longer applicable w/ Diet Lake
@pytest.mark.skip
def test_multiple_input_ports_identity_stream_mult_aggs():
    """Identity stream through both ports with multiple aggregators.

    Writes two images per port and expects them echoed back unchanged.
    """
    # Regular bootstrap: fresh core, stalled during configuration.
    [circuit, tester, MCore] = make_memory_core()
    tester.poke(circuit.stall, 1)
    tile_en = 1
    depth = 256
    # 4 is normal start up delay, 1 is due to mult input port agg scheduling
    startup_delay = 4 + 1
    mode = Mode.DB
    coarse = depth // 4
    # (register name, value) pairs, in the original programming order.
    reg_values = [
        ("strg_ub_app_ctrl_input_port_0", 0),
        ("strg_ub_app_ctrl_output_port_0", 1),
        ("strg_ub_app_ctrl_read_depth_0", depth),
        ("strg_ub_app_ctrl_write_depth_wo_0", depth),
        ("strg_ub_app_ctrl_write_depth_ss_0", depth),
        ("strg_ub_app_ctrl_coarse_read_depth_0", coarse),
        ("strg_ub_app_ctrl_coarse_write_depth_wo_0", coarse),
        ("strg_ub_app_ctrl_coarse_write_depth_ss_0", coarse),
        ("strg_ub_agg_in_0_in_period", 2),
        ("strg_ub_agg_in_0_in_sched_0", 0),
        ("strg_ub_agg_in_0_in_sched_1", 1),
        ("strg_ub_agg_in_0_out_period", 2),
        ("strg_ub_agg_in_0_out_sched_0", 0),
        ("strg_ub_agg_in_0_out_sched_1", 1),
        ("strg_ub_input_addr_ctrl_address_gen_0_dimensionality", 2),
        ("strg_ub_input_addr_ctrl_address_gen_0_ranges_0", coarse),
        ("strg_ub_input_addr_ctrl_address_gen_0_ranges_1", 100),
        ("strg_ub_input_addr_ctrl_address_gen_0_ranges_2", 0),
        ("strg_ub_input_addr_ctrl_address_gen_0_ranges_3", 0),
        ("strg_ub_input_addr_ctrl_address_gen_0_starting_addr", 0),
        ("strg_ub_input_addr_ctrl_address_gen_0_strides_0", 1),
        ("strg_ub_input_addr_ctrl_address_gen_0_strides_1", 0),
        ("strg_ub_input_addr_ctrl_address_gen_0_strides_2", 0),
        ("strg_ub_input_addr_ctrl_address_gen_0_strides_3", 0),
        ("strg_ub_input_addr_ctrl_address_gen_0_strides_4", 0),
        ("strg_ub_input_addr_ctrl_address_gen_0_strides_5", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_dimensionality", 2),
        ("strg_ub_output_addr_ctrl_address_gen_0_ranges_0", coarse),
        ("strg_ub_output_addr_ctrl_address_gen_0_ranges_1", 100),
        ("strg_ub_output_addr_ctrl_address_gen_0_ranges_2", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_ranges_3", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_ranges_4", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_ranges_5", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_starting_addr", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_strides_0", 1),
        ("strg_ub_output_addr_ctrl_address_gen_0_strides_1", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_strides_2", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_strides_3", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_strides_4", 0),
        ("strg_ub_output_addr_ctrl_address_gen_0_strides_5", 0),
        ("strg_ub_tba_0_tb_0_range_outer", depth),
        ("strg_ub_tba_0_tb_0_starting_addr", 0),
        ("strg_ub_tba_0_tb_0_stride", 1),
        ("strg_ub_tba_0_tb_0_dimensionality", 1),
        ("strg_ub_tba_0_tb_0_indices_0", 0),
        ("strg_ub_tba_0_tb_0_range_inner", 2),
        ("strg_ub_tba_0_tb_0_tb_height", 1),
        ("strg_ub_sync_grp_sync_group_0", 1),
        ("tile_en", 1),
        ("fifo_ctrl_fifo_depth", 0),
        ("mode", 0),
        ("flush_reg_sel", 1),
        ("strg_ub_pre_fetch_0_input_latency", 4),
        ("enable_chain_output", 0),
        ("enable_chain_input", 0),
        ("chain_idx_input", 0),
        ("chain_idx_output", 0),
        ("strg_ub_app_ctrl_input_port_1", 1),
        ("strg_ub_app_ctrl_read_depth_1", depth),
        ("strg_ub_app_ctrl_write_depth_wo_1", depth),
        ("strg_ub_app_ctrl_write_depth_ss_1", depth),
        ("strg_ub_app_ctrl_coarse_read_depth_1", coarse),
        ("strg_ub_app_ctrl_coarse_write_depth_wo_1", coarse),
        ("strg_ub_app_ctrl_coarse_write_depth_ss_1", coarse),
        ("strg_ub_agg_in_1_in_period", 2),
        ("strg_ub_agg_in_1_in_sched_0", 0),
        ("strg_ub_agg_in_1_in_sched_1", 1),
        ("strg_ub_agg_in_1_out_period", 2),
        ("strg_ub_agg_in_1_out_sched_0", 0),
        ("strg_ub_agg_in_1_out_sched_1", 1),
        ("strg_ub_input_addr_ctrl_address_gen_1_dimensionality", 2),
        ("strg_ub_input_addr_ctrl_address_gen_1_ranges_0", coarse),
        ("strg_ub_input_addr_ctrl_address_gen_1_ranges_1", 100),
        ("strg_ub_input_addr_ctrl_address_gen_1_ranges_2", 0),
        ("strg_ub_input_addr_ctrl_address_gen_1_ranges_3", 0),
        ("strg_ub_input_addr_ctrl_address_gen_1_starting_addr", 64),
        ("strg_ub_input_addr_ctrl_address_gen_1_strides_0", 1),
        ("strg_ub_input_addr_ctrl_address_gen_1_strides_1", 0),
        ("strg_ub_input_addr_ctrl_address_gen_1_strides_2", 0),
        ("strg_ub_input_addr_ctrl_address_gen_1_strides_3", 0),
        ("strg_ub_input_addr_ctrl_address_gen_1_strides_4", 0),
        ("strg_ub_input_addr_ctrl_address_gen_1_strides_5", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_dimensionality", 2),
        ("strg_ub_output_addr_ctrl_address_gen_1_ranges_0", coarse),
        ("strg_ub_output_addr_ctrl_address_gen_1_ranges_1", 100),
        ("strg_ub_output_addr_ctrl_address_gen_1_ranges_2", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_ranges_3", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_ranges_4", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_ranges_5", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_starting_addr", 64),
        ("strg_ub_output_addr_ctrl_address_gen_1_strides_0", 1),
        ("strg_ub_output_addr_ctrl_address_gen_1_strides_1", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_strides_2", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_strides_3", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_strides_4", 0),
        ("strg_ub_output_addr_ctrl_address_gen_1_strides_5", 0),
        ("strg_ub_tba_1_tb_0_range_outer", depth),
        ("strg_ub_tba_1_tb_0_starting_addr", 0),
        ("strg_ub_tba_1_tb_0_stride", 1),
        ("strg_ub_tba_1_tb_0_dimensionality", 1),
        ("strg_ub_tba_1_tb_0_indices_0", 0),
        ("strg_ub_tba_1_tb_0_range_inner", 2),
        ("strg_ub_tba_1_tb_0_tb_height", 1),
        ("strg_ub_sync_grp_sync_group_1", 1),
        ("strg_ub_pre_fetch_1_input_latency", 4),
    ]
    config_data = compress_config_data(
        [MCore.get_config_data(name, value) for name, value in reg_values])
    # Program the tile, then release the stall.
    for addr, data in config_data:
        tester.configure(addr, data)
    tester.poke(circuit.stall, 0)
    tester.eval()
    # Identity stream: the same two images are written and read back.
    inputs = [i for _ in range(2) for i in range(depth)]
    inputs1 = [i + 16 for _ in range(2) for i in range(depth)]
    outputs = [i for _ in range(2) for i in range(depth)]
    outputs1 = [i + 16 for _ in range(2) for i in range(depth)]
    tester.poke(circuit.ren_in_0, 1)
    tester.poke(circuit.ren_in_1, 1)
    output_idx = 0
    for cycle in range(4 * depth):
        if cycle < 2 * depth:
            # Still writing: drive both ports.
            tester.poke(circuit.wen_in_0, 1)
            tester.poke(circuit.data_in_0, inputs[cycle])
            tester.poke(circuit.wen_in_1, 1)
            tester.poke(circuit.data_in_1, inputs1[cycle])
        else:
            tester.poke(circuit.wen_in_0, 0)
            tester.poke(circuit.wen_in_1, 0)
        if cycle >= 2 * depth + startup_delay:
            tester.poke(circuit.ren_in_0, 0)
            tester.poke(circuit.ren_in_1, 0)
        tester.eval()
        if depth + startup_delay < cycle < 2 * depth + startup_delay:
            tester.expect(circuit.valid_out_0, 1)
            tester.expect(circuit.valid_out_1, 1)
            tester.expect(circuit.data_out_0, outputs[output_idx])
            tester.expect(circuit.data_out_1, outputs1[output_idx])
            output_idx += 1
        else:
            tester.expect(circuit.valid_out_0, 0)
            tester.expect(circuit.valid_out_1, 0)
        tester.step(2)
    with tempfile.TemporaryDirectory() as tempdir:
        for genesis_verilog in glob.glob("genesis_verif/*.*"):
            shutil.copy(genesis_verilog, tempdir)
        tester.compile_and_run(directory=tempdir,
                               magma_output="coreir-verilog",
                               target="verilator",
                               flags=["-Wno-fatal"])
def basic_tb(config_path,
             stream_path,
             in_file_name="input",
             out_file_name="output",
             xcelium=False,
             tempdir_override=False,
             trace=False):
    """Build and simulate a 2x2 CGRA routing a stream through one mem tile.

    config_path / stream_path are joined onto the LAKE_CONTROLLERS and
    LAKE_STREAM environment variables (both must be set).  in_file_name /
    out_file_name select the csvs inside the controller recipe.  xcelium
    switches the simulator from verilator to xcelium; tempdir_override
    redirects build products into ./dump; trace enables waveform dumping.
    """
    # These need to be set to refer to certain csvs....
    lake_controller_path = os.getenv("LAKE_CONTROLLERS")
    lake_stream_path = os.getenv("LAKE_STREAM")
    assert lake_controller_path is not None and lake_stream_path is not None,\
        f"Please check env vars:\nLAKE_CONTROLLERS: {lake_controller_path}\nLAKE_STREAM: {lake_stream_path}"
    config_path = lake_controller_path + "/" + config_path
    stream_path = lake_stream_path + "/" + stream_path
    # Build a 2x2 CGRA with IO on all four sides.
    chip_size = 2
    interconnect = create_cgra(chip_size, chip_size, io_sides(),
                               num_tracks=3,
                               add_pd=True,
                               mem_ratio=(1, 2))
    # Route IO tile I0 -> mem tile m0 -> IO tile I1 over 16-bit edges.
    netlist = {
        "e0": [("I0", "io2f_16"), ("m0", "data_in_0")],
        "e1": [("m0", "data_out_0"), ("I1", "f2io_16")]
    }
    bus = {"e0": 16, "e1": 16}
    placement, routing = pnr(interconnect, (netlist, bus))
    config_data = interconnect.get_route_bitstream(routing)
    # Regular Bootstrap
    [circuit, tester, MCore] = make_memory_core()
    # Get configuration
    configs_mem = MCore.get_static_bitstream(config_path=config_path,
                                             in_file_name=in_file_name,
                                             out_file_name=out_file_name)
    # Tag each (addr, data) pair with feature index 0 for config_mem_tile.
    config_final = []
    for (f1, f2) in configs_mem:
        config_final.append((f1, f2, 0))
    mem_x, mem_y = placement["m0"]
    memtile = interconnect.tile_circuits[(mem_x, mem_y)]
    mcore = memtile.core
    config_mem_tile(interconnect, config_data, config_final, mem_x, mem_y, mcore)
    # Re-bind circuit/tester to the full interconnect (not just the core).
    circuit = interconnect.circuit()
    tester = BasicTester(circuit, circuit.clk, circuit.reset)
    tester.reset()
    tester.zero_inputs()
    # Stall the fabric while the bitstream is written.
    tester.poke(circuit.interface["stall"], 1)
    for addr, index in config_data:
        tester.configure(addr, index)
        tester.config_read(addr)
        tester.eval()
        # tester.expect(circuit.read_config_data, index)
    tester.done_config()
    tester.poke(circuit.interface["stall"], 0)
    tester.eval()
    in_data, out_data, valids = generate_data_lists(csv_file_name=stream_path,
                                                    data_in_width=MCore.num_data_inputs(),
                                                    data_out_width=MCore.num_data_outputs())
    # NOTE(review): num_in_data / num_out_data are never used below.
    num_in_data = 1
    num_out_data = 2
    # Resolve the pad names for the placed IO tiles.
    data_in_x, data_in_y = placement["I0"]
    data_in = f"glb2io_16_X{data_in_x:02X}_Y{data_in_y:02X}"
    data_out_x, data_out_y = placement["I1"]
    data_out = f"io2glb_16_X{data_out_x:02X}_Y{data_out_y:02X}"
    # Drive one input and check one output per clock.
    for i in range(len(out_data)):
        tester.poke(circuit.interface[data_in], in_data[0][i])
        tester.eval()
        tester.expect(circuit.interface[data_out], out_data[0][i])
        # toggle the clock
        tester.step(2)
    with tempfile.TemporaryDirectory() as tempdir:
        # When overridden, build in ./dump so artifacts survive the run.
        if tempdir_override:
            tempdir = "dump"
        for genesis_verilog in glob.glob("genesis_verif/*.*"):
            shutil.copy(genesis_verilog, tempdir)
        for filename in cw_files():
            shutil.copy(filename, tempdir)
        # Stand in for the real SRAM macro with the behavioral stub.
        shutil.copy(os.path.join("tests", "test_memory_core",
                                 "sram_stub.v"),
                    os.path.join(tempdir, "sram_512w_16b.v"))
        for aoi_mux in glob.glob("tests/*.sv"):
            shutil.copy(aoi_mux, tempdir)
        target = "verilator"
        runtime_kwargs = {"magma_output": "coreir-verilog",
                          "magma_opts": {"coreir_libs": {"float_CW"},
                                         "disable_ndarray": True},
                          "directory": tempdir,
                          "flags": []}
        if xcelium is False:
            runtime_kwargs["flags"].append("-Wno-fatal")
            if trace:
                runtime_kwargs["flags"].append("--trace")
        else:
            # Xcelium consumes SystemVerilog sources from the build dir.
            target = "system-verilog"
            runtime_kwargs["simulator"] = "xcelium"
            runtime_kwargs["flags"].append("-sv")
            runtime_kwargs["flags"].append("./*.*v")
            if trace:
                runtime_kwargs["dump_vcd"] = True
        tester.compile_and_run(target=target,
                               tmp_dir=False,
                               **runtime_kwargs)
def test_conv_3_3():
    """Run the default conv_3_3 recipe through the basic testbench."""
    recipe = "conv_3_3_recipe/buf_inst_input_10_to_buf_inst_output_3_ubuf"
    basic_tb(config_path=recipe,
             stream_path=recipe + "_0_top_SMT.csv",
             in_file_name="input",
             out_file_name="output")
if __name__ == "__main__":
    # conv_3_3 - default tb - use command line to override
    parser = argparse.ArgumentParser(description='Tile_MemCore TB Generator')
    parser.add_argument('--config_path',
                        type=str,
                        default="conv_3_3_recipe/buf_inst_input_10_to_buf_inst_output_3_ubuf")
    parser.add_argument('--stream_path',
                        type=str,
                        default="conv_3_3_recipe/buf_inst_input_10_to_buf_inst_output_3_ubuf_0_top_SMT.csv")
    parser.add_argument('--in_file_name', type=str, default="input")
    parser.add_argument('--out_file_name', type=str, default="output")
    # Boolean switches share the same shape; register them in one pass.
    for switch in ('--xcelium', '--tempdir_override', '--trace'):
        parser.add_argument(switch, action="store_true")
    args = parser.parse_args()
    # Argument names match basic_tb's signature exactly.
    basic_tb(**vars(args))
|
import os
from os.path import join
import numpy as np
import pandas as pd
from numpy.linalg import pinv
from sklearn.base import TransformerMixin
from sklearn.externals.joblib import load
from sklearn.model_selection import GroupShuffleSplit
from sklearn.preprocessing import LabelBinarizer, StandardScaler, LabelEncoder
idx = pd.IndexSlice
def get_output_dir(data_dir=None):
    """Return the directory in which cogspaces stores results.

    Parameters
    ----------
    data_dir: str, optional
        Path of the data directory. Used to force data storage in a
        specified location. Default: None

    Returns
    -------
    output_dir: str
        Path of the output directory.

    Notes
    -----
    The directory is resolved with the following priority:

    1. the keyword argument data_dir
    2. the global environment variable OUTPUT_COGSPACES_DIR
    3. output/cogspaces in the user home folder
    """
    # data_dir forces storage in a specific location.
    if data_dir is not None:
        return data_dir
    # Otherwise fall back to the environment, then the home default.
    output_dir = os.getenv('OUTPUT_COGSPACES_DIR')
    if output_dir is not None:
        return output_dir
    return os.path.expanduser('~/output/cogspaces')
def make_data_frame(datasets, source,
                    reduced_dir=None, unmask_dir=None):
    """Aggregate and curate reduced/non reduced datasets."""
    frames = []
    labels = []
    for dataset in datasets:
        # 'unmasked' reads raw pickles; anything else reads reduced ones.
        if source == 'unmasked':
            this_X = pd.read_pickle(join(unmask_dir, dataset, 'imgs.pkl'))
        else:
            this_X = pd.read_pickle(join(reduced_dir, source, dataset,
                                         'Xt.pkl'))
        # Curation: the 'direction' index level is dropped everywhere.
        this_X = this_X.reset_index(level=['direction'], drop=True)
        if dataset == 'brainomics':
            this_X = this_X.drop(['effects_of_interest'], level='contrast')
        if dataset == 'brainpedia':
            # Keep only *baseline contrasts, then split per sub-dataset.
            contrasts = this_X.index.get_level_values('contrast').values
            keep = [pos for pos, contrast in enumerate(contrasts)
                    if contrast.endswith('baseline')]
            this_X = this_X.iloc[keep]
            for sub_dataset, sub_X in this_X.groupby(level='dataset'):
                if sub_dataset == 'ds102':
                    continue
                frames.append(sub_X.loc[sub_dataset].astype(np.float32))
                labels.append(sub_dataset)
        else:
            frames.append(this_X)
            labels.append(dataset)
    X = pd.concat(frames, keys=labels, names=['dataset'])
    X.sort_index(inplace=True)
    return X
def split_folds(X, test_size=0.2, train_size=None, random_state=None):
    """Split each dataset into train/test folds, grouped by subject."""
    datasets = X.index.get_level_values('dataset').unique().values
    # Normalize scalar sizes into per-dataset dicts.
    if not isinstance(test_size, dict):
        test_size = dict.fromkeys(datasets, test_size)
    if not isinstance(train_size, dict):
        train_size = dict.fromkeys(datasets, train_size)
    train_parts = []
    test_parts = []
    for dataset, this_X in X.groupby(level='dataset'):
        # Subjects are the grouping unit: no subject straddles the split.
        subjects = this_X.index.get_level_values('subject').values
        splitter = GroupShuffleSplit(n_splits=1,
                                     test_size=test_size.get(dataset, .5),
                                     train_size=train_size.get(dataset, .5),
                                     random_state=random_state)
        train_idx, test_idx = next(splitter.split(this_X, groups=subjects))
        train_parts.append(this_X.iloc[train_idx])
        test_parts.append(this_X.iloc[test_idx])
    # WTF autocast in pandas
    X_train = pd.concat(train_parts, axis=0).astype(np.float32)
    X_test = pd.concat(test_parts, axis=0).astype(np.float32)
    X_train.sort_index(inplace=True)
    X_test.sort_index(inplace=True)
    return X_train, X_test
class MultiDatasetTransformer(TransformerMixin):
    """Utility transformer.

    Scales features and encodes contrast labels for a DataFrame indexed by
    a MultiIndex containing 'dataset' and 'contrast' levels, producing one
    (X, y) pair per dataset.
    """
    def __init__(self, with_std=False, with_mean=True,
                 per_dataset=True, integer_coding=False):
        # with_std / with_mean are forwarded to StandardScaler.
        self.with_std = with_std
        self.with_mean = with_mean
        # per_dataset: fit one scaler per dataset instead of one global one.
        self.per_dataset = per_dataset
        # integer_coding: LabelEncoder (int targets) vs LabelBinarizer
        # (one-hot targets).
        self.integer_coding = integer_coding
    def fit(self, df):
        """Fit per-dataset label encoders and feature scaler(s) on df."""
        self.lbins_ = {}
        if self.per_dataset:
            self.scs_ = {}
        else:
            # Single global scaler fit on the whole frame.
            self.sc_ = StandardScaler(with_std=self.with_std,
                                      with_mean=self.with_mean)
            self.sc_.fit(df.values)
        for dataset, sub_df in df.groupby(level='dataset'):
            if self.integer_coding:
                lbin = LabelEncoder()
            else:
                lbin = LabelBinarizer()
            # Labels come from the 'contrast' index level.
            this_y = sub_df.index.get_level_values('contrast')
            if self.per_dataset:
                sc = StandardScaler(with_std=self.with_std,
                                    with_mean=self.with_mean)
                sc.fit(sub_df.values)
                self.scs_[dataset] = sc
            lbin.fit(this_y)
            self.lbins_[dataset] = lbin
        return self
    def transform(self, df):
        """Return (tuple of X arrays, tuple of y arrays), one per dataset."""
        X = []
        y = []
        if not self.per_dataset:
            # Scale globally once, then split per dataset below.
            df = df.copy()
            df[:] = self.sc_.transform(df.values)
        for dataset, sub_df in df.groupby(level='dataset'):
            lbin = self.lbins_[dataset]
            if self.per_dataset:
                sc = self.scs_[dataset]
                this_X = sc.transform(sub_df.values)
            else:
                this_X = sub_df.values
            this_y = sub_df.index.get_level_values('contrast')
            this_y = lbin.transform(this_y)
            # LabelBinarizer emits a single column for a binary problem;
            # expand it to two complementary columns so every dataset has
            # one column per class.
            if not self.integer_coding and this_y.shape[1] == 1:
                this_y = np.hstack([this_y, np.logical_not(this_y)])
            y.append(this_y)
            X.append(this_X)
        return tuple(X), tuple(y)
    def inverse_transform(self, df, ys):
        """Decode target arrays ys back into one Series of contrast labels.

        Assumes ys is ordered like df.groupby(level='dataset') -- one
        target array per dataset, in groupby order.
        """
        contrasts = []
        for (dataset, sub_df), this_y in zip(df.groupby(level='dataset'), ys):
            lbin = self.lbins_[dataset]
            these_contrasts = lbin.inverse_transform(this_y)
            these_contrasts = pd.Series(these_contrasts, index=sub_df.index)
            contrasts.append(these_contrasts)
        contrasts = pd.concat(contrasts, axis=0)
        return contrasts
def make_projection_matrix(bases, scale_bases=True):
    """Builds projection/reconstruction matrices from one or more bases.

    Returns (proj, proj_inv, rec): proj stacks the pseudo-inverses of the
    (optionally variance-scaled) bases, rec stacks the scaled bases, and
    proj_inv maps from the projected space back to feature space.
    """
    if not isinstance(bases, list):
        bases = [bases]
    projections = []
    reconstructions = []
    for basis in bases:
        if scale_bases:
            # Scale each basis row to unit std; zero-variance rows stay as-is.
            stds = np.std(basis, axis=1)
            stds[stds == 0] = 1
            basis = basis / stds[:, np.newaxis]
        projections.append(pinv(basis))
        reconstructions.append(basis)
    proj = np.concatenate(projections, axis=1)
    rec = np.concatenate(reconstructions, axis=0)
    proj_inv = np.linalg.inv(proj.T.dot(rec.T)).T.dot(rec)
    return proj, proj_inv, rec
|
from tweepy import Stream, OAuthHandler
from tweepy.streaming import StreamListener
import json
# SECURITY: hard-coded Twitter API credentials committed to source control.
# These should be revoked and loaded from the environment instead.
ckey="2AvbG84msbL34BEHslMsWZTUR"
csecret="gzEENqmZoMl2hhHvDIdaWzIF9ShMSLO0o7gh8csZnfqKdK6Y9H"
atoken="14113114-1we8sJQs1z54dWfjWbUwZtDtkQYf3kDOrXLUMBFkZ"
asecret="6Sq95ezVVRNTuLw7grKzm4czA32VqmlM0QwvaLjWLNl5A"
class TwitterHelper():
    """Thin wrapper around tweepy OAuth setup and keyword streaming."""

    def __init__(self):
        super().__init__()
        # Authenticate with the module-level consumer/access credentials.
        self.auth = OAuthHandler(ckey, csecret)
        self.auth.set_access_token(atoken, asecret)

    def get_tweet(self, keyword):
        """Starts streaming tweets matching keyword into twitter-out.txt."""
        numberOfLines = 50
        tweetConn = TwitterConn(numberOfLines)
        twitterStream = Stream(self.auth, tweetConn)
        # BUG FIX: `async=True` is a SyntaxError on Python 3.7+ because
        # `async` became a keyword; tweepy 3.x renamed the flag to
        # `is_async` (tweepy 4.x uses `threaded`) -- confirm against the
        # installed tweepy version.
        twitterStream.filter(track=[keyword], is_async=True)

    def file_len(fname):
        """Counts lines in fname; called unbound as TwitterHelper.file_len(path)."""
        # BUG FIX: the old enumerate loop raised NameError on an empty file;
        # start from 0 so an empty file reports 0 lines.
        count = 0
        with open(fname) as f:
            for count, _line in enumerate(f, 1):
                pass
        return count
class TwitterConn(StreamListener):
    """Stream listener that appends tweet texts to twitter-out.txt."""

    def __init__(self, numberOfLines):
        super().__init__()
        # Stop streaming once this many lines have been collected.
        self.numberOfLines = numberOfLines
        self.count = 0

    def on_data(self, data):
        """Handles one raw stream payload; returns False to disconnect."""
        try:
            all_data = json.loads(data)
            tweet = all_data["text"]
            self.count = self.count + 1
            # Context manager guarantees the file is closed even on errors.
            with open("twitter-out.txt", "a") as output:
                output.write(tweet)
                # BUG FIX: was '/n' (a literal slash-n); a real newline is
                # required so file_len counts one line per tweet.
                output.write('\n')
            return True
        except Exception:
            # Payloads without "text" (e.g. delete notices) and JSON errors
            # land here; stop once enough lines have been written.
            if self.numberOfLines <= TwitterHelper.file_len("twitter-out.txt"):
                print('[+] other exception')
                return False
            print(TwitterHelper.file_len("twitter-out.txt"))
            print(self.count)
            return True

    def on_error(self, status):
        print(status)
        if status == 420:
            # 420 = rate limited; returning False disconnects the stream.
            return False

    def on_status(self, status):
        print(status.text)
|
# Q1: product of two numbers via a lambda.
x=lambda a,b:a*b
print(x(2,2))
print()
# Q2: print the first n Fibonacci numbers, using a lambda for the sum step.
n=int(input("Enter number of elements in fibonacci series "))
lst=[0,1]
y=lambda c,d:int(c+d)
print(lst[0],"",lst[1],end=" ")
for i in range(n-2):
    z=y(lst[i],lst[i+1])
    print(z,end=" ")
    lst.append(z)
print()
# Q3: double every list element with map().
li=[1,2,3,4,5]
e=list(map(lambda x:x*2,li))
print("After multiplying list is : ",e)
print()
# Q4: keep the elements divisible by 9 with filter().
l=[9,18,20,27,30]
f=tuple(filter(lambda x:(x%9==0),l))
print("Elemens divisible by 9 : ",f)
print()
# Q5: count the even elements with filter().
l1=[9,18,20,27,30,2,3,4,5,66,7]
f1=tuple(filter(lambda x:(x%2==0),l1))
print("number of even elements : ",len(f1))
|
# 作者: 迷路的小怪兽
# 创建时间: 2021/10/28 23:00
from src.common.mytest import MyTest
from src.common.baseflow import Para
from src.flows.prepare_flow import PrepareFlow
from src.flows.aftermath_flow import AfterMathFlow
from src.flows.baidu_home_flow import SearchFlow
class TestBaiduFlows(MyTest):
    """UI test composing prepare/search/aftermath flows for a Baidu search."""

    # Shared search parameters for every test in this class.
    para = Para(
        keywords='selenium'
    )

    @classmethod
    def setUpClass(cls):
        # Run the environment-preparation flow once before all tests.
        # NOTE(review): cls.driver is presumably provided by MyTest -- confirm.
        cls.prepare_flow = PrepareFlow(name='环境准备流程', driver=cls.driver)
        cls.prepare_flow.execute()

    @classmethod
    def tearDownClass(cls):
        # Run the browser-quit flow once after all tests.
        cls.aftermath_flow = AfterMathFlow(name='浏览器退出流程', driver=cls.driver)
        cls.aftermath_flow.execute()

    def test_search_flow(self):
        # Start the search flow from the page where the prepare flow ended.
        search_flow = SearchFlow(name='搜索流程', driver=self.driver, para=self.para)
        search_flow.with_start_page(self.prepare_flow.end_page).execute()
        # assertEqual_ appears to be a MyTest helper that also records driver
        # state on failure -- TODO confirm its contract.
        self.assertEqual_('test', 'test', msg='测试消息', driver=self.driver)
# INPUT: read the two operands.
print('A seguir, digite dois números:')
numero1 = float(input('Digite o 1º número: '))
numero2 = float(input('Digite o 2º número: '))
# PROCESSING: (sum / difference). Equal inputs would divide by zero,
# so guard that case instead of crashing with ZeroDivisionError.
if numero1 == numero2:
    print('')
    print('Os números não podem ser iguais (divisão por zero).')
else:
    divisao = (numero1 + numero2) / (numero1 - numero2)
    # OUTPUT
    print('')
    print('O resultado da divisão da soma pela diferença desses números é: {:.3f}'.format(divisao))
    print('')
import arviz as az
import matplotlib.pyplot as plt
import numpy as np
import pymc3 as pm
import pandas as pd
from prior_evaluation_tool.main import create_app
# Synthetic linear-regression dataset: y = 1 + 2*x + N(0, 0.5).
size = 200
true_intercept = 1
true_slope = 2
x = np.linspace(0, 1, size)
# y = a + b*x
true_regression_line = true_intercept + true_slope * x
# add noise
y = true_regression_line + np.random.normal(scale=.5, size=size)
# Pack into a DataFrame with the column names model_method expects.
data = pd.DataFrame()
data['y'] = y
data['x'] = x
def model_method(data, **prior_kwargs):
    """Builds a Bayesian linear-regression model with priors from prior_kwargs."""
    with pm.Model() as model:
        # Priors for observation noise, intercept and slope.
        sigma = pm.HalfCauchy('sigma', beta=prior_kwargs['sigma beta'],
                              testval=prior_kwargs['sigma testval'])
        intercept = pm.Normal('intercept', mu=prior_kwargs['intercept mu'],
                              sigma=prior_kwargs['intercept sigma'])
        x_coeff = pm.Normal('x', mu=prior_kwargs['x mu'], sigma=prior_kwargs['x sigma'])
        # Likelihood: creating the observed variable registers it on `model`.
        pm.Normal('y', mu=intercept + x_coeff * data['x'], sigma=sigma, observed=data['y'])
    return model
# Prior hyperparameters forwarded to model_method via **prior_kwargs.
prior_params = {
    'sigma beta':10,
    'sigma testval':1.,
    'intercept mu':0,
    'intercept sigma':20,
    'x mu':0,
    'x sigma':20,
}
# Launch the prior-evaluation UI with the model factory and data.
create_app(
    model_method=model_method,
    data=data,
    prior_kwargs=prior_params,
)
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
import config
# Command-line configuration (see config.parse_args for available flags).
args = config.parse_args()
# Hidden-layer widths for the primary classifier and the adversary network.
primary_hidden_units = [64, 32]
adversary_hidden_units=[32]
class Primary_NN(nn.Module):
    """Primary classifier: two ReLU hidden layers plus a sigmoid output head.

    forward returns (hidden, logits, hard_labels) where hard_labels are the
    sigmoid outputs thresholded at 0.5 into {0, 1}.
    """

    def __init__(self, indim, primary_hidden_units):
        super().__init__()
        # Removed leftover debug `print(indim)` and dead commented-out code.
        self.model = nn.Sequential(
            nn.Linear(indim, primary_hidden_units[0]),
            nn.ReLU(),
            nn.Linear(primary_hidden_units[0], primary_hidden_units[1]),
            nn.ReLU(),
        )
        # Single-logit head with a sigmoid for binary classification.
        self.classlayer = nn.Linear(primary_hidden_units[1], 1)
        self.sigmoidlayer = nn.Sigmoid()

    def forward(self, x):
        z = self.model(x)
        logits = self.classlayer(z)
        sigmoid_output = self.sigmoidlayer(logits)
        # Threshold in place: probabilities become hard 0/1 labels.
        # NOTE: non-differentiable; use `logits` for training losses.
        sigmoid_output[sigmoid_output > 0.5] = 1
        sigmoid_output[sigmoid_output <= 0.5] = 0
        return z, logits, sigmoid_output
class Adversary_NN(nn.Module):
    """Adversary that maps features to per-example reweighting factors."""

    def __init__(self, indim, adversary_hidden_units):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(indim, adversary_hidden_units[0]),
            nn.Linear(adversary_hidden_units[0], 1, bias=True),
        )

    def forward(self, x):
        """Applies sigmoid to adversary output layer and returns normalized example weight."""
        raw = torch.sigmoid(self.model(x))
        # Normalize by the batch mean (floored at 1e-4 to avoid dividing by
        # a near-zero mean), then shift by one so every example keeps at
        # least unit weight.
        normalizer = torch.maximum(torch.mean(raw), torch.tensor(1e-4))
        return torch.ones_like(raw) + raw / normalizer
|
"""Contains methods for node setups creation"""
from typing import Any, Optional
from pyviews.core.binding import Bindable
from pyviews.core.rendering import Node, NodeGlobals, RenderingContext
from pyviews.core.xml import XmlNode
from pyviews.pipes import apply_attributes, render_children
from pyviews.rendering.context import get_child_context
from pyviews.rendering.pipeline import RenderingPipeline, render, render_view
class Container(Node):
    """Node used only to group xml elements; adds no behavior of its own."""
def get_container_pipeline() -> RenderingPipeline:
    """Creates the rendering pipeline used for Container nodes."""
    pipes = [apply_attributes, render_container_children]
    return RenderingPipeline(pipes=pipes, name='container pipeline')
def render_container_children(node, context: RenderingContext):
    """Renders every child of a container node in a child context."""
    render_children(node, context, get_child_context)
class View(Container, Bindable):
    """Renders xml content loaded from another view file."""

    def __init__(self, xml_node: XmlNode, node_globals: Optional[NodeGlobals] = None):
        Bindable.__init__(self)
        Container.__init__(self, xml_node, node_globals=node_globals)
        self._name: Optional[str] = None

    @property
    def name(self) -> Optional[str]:
        """Name of the view to load and render."""
        return self._name

    @name.setter
    def name(self, value: Optional[str]):
        previous = self._name
        self._name = value
        self._notify('name', value, previous)
def get_view_pipeline() -> RenderingPipeline:
    """Creates the rendering pipeline used for View nodes."""
    pipes = [apply_attributes, render_view_content, rerender_on_view_change]
    return RenderingPipeline(pipes=pipes, name='view pipeline')
def render_view_content(node: View, context: RenderingContext):
    """Finds view by name attribute and renders it as view node child."""
    if not node.name:
        # No name set yet -- nothing to render.
        return
    child_context = get_child_context(node.xml_node, node, context)
    node.add_child(render_view(node.name, child_context))
def rerender_on_view_change(node: View, context: RenderingContext):
    """Subscribes to name changes and re-renders the named view."""
    def _handler(*_):
        _rerender_view(node, context)
    node.observe('name', _handler)
def _rerender_view(node: View, context: RenderingContext):
    """Drops current children, then renders the currently named view."""
    node.destroy_children()
    render_view_content(node, context)
class For(Container, Bindable):
    """Renders its children once for every item in the items collection."""

    def __init__(self, xml_node: XmlNode, node_globals: Optional[NodeGlobals] = None):
        Bindable.__init__(self)
        Container.__init__(self, xml_node, node_globals=node_globals)
        self._items = []

    @property
    def items(self):
        """Collection the children are rendered for."""
        return self._items

    @items.setter
    def items(self, value):
        previous = self._items
        self._items = value
        self._notify('items', value, previous)
def get_for_pipeline() -> RenderingPipeline:
    """Creates the rendering pipeline used for For nodes."""
    pipes = [apply_attributes, render_for_items, rerender_on_items_change]
    return RenderingPipeline(pipes=pipes, name='for pipeline')
def render_for_items(node: For, context: RenderingContext):
    """Renders the node's children once per item in node.items."""
    _render_for_children(node, node.items, context)
def _render_for_children(node: For, items: list, context: RenderingContext, index_shift=0):
    """Renders one copy of the node's xml children for every item."""
    templates = node.xml_node.children
    for offset, item in enumerate(items):
        index = offset + index_shift
        for template in templates:
            child_context = _get_for_child_args(template, index, item, node, context)
            node.add_child(render(child_context))
def _get_for_child_args(xml_node: XmlNode, index: int, item: Any, parent_node: For, context: RenderingContext):
    """Builds a child context exposing 'index' and 'item' as node globals."""
    child_context = get_child_context(xml_node, parent_node, context)
    child_context.node_globals['index'] = index
    child_context.node_globals['item'] = item
    return child_context
def rerender_on_items_change(node: For, context: RenderingContext):
    """Subscribes to items changes and synchronizes rendered children."""
    def _handler(*_):
        _on_items_changed(node, context)
    node.observe('items', _handler)
def _on_items_changed(node: For, context: RenderingContext):
    """Syncs rendered children with the new items value."""
    _destroy_overflow(node)              # drop children of removed items
    _update_existing(node)               # refresh globals on kept children
    _create_not_existing(node, context)  # render children for new items
def _destroy_overflow(node: For):
    """Destroys children rendered for items that no longer exist."""
    try:
        keep = len(node.xml_node.children) * len(node.items)
        for child in node.children[keep:]:
            child.destroy()
        node._children = node.children[:keep]
    except IndexError:
        pass
def _update_existing(node: For):
    """Updates 'item'/'index' globals of already rendered children."""
    per_item = len(node.xml_node.children)
    try:
        for index, item in enumerate(node.items):
            for child_index in range(index * per_item, (index + 1) * per_item):
                child_globals = node.children[child_index].node_globals
                child_globals['item'] = item
                child_globals['index'] = index
    except IndexError:
        pass
def _create_not_existing(node: For, context: RenderingContext):
    """Renders children for items that have no rendered children yet."""
    per_item = len(node.xml_node.children)
    rendered_count = len(node.children) // per_item
    new_items = [node.items[i] for i in range(rendered_count, len(node.items))]
    _render_for_children(node, new_items, context, rendered_count)
class If(Container, Bindable):
    """Renders its children only while the condition is True."""

    def __init__(self, xml_node: XmlNode, node_globals: Optional[NodeGlobals] = None):
        Bindable.__init__(self)
        Container.__init__(self, xml_node, node_globals=node_globals)
        self._condition = False

    @property
    def condition(self):
        """Whether children should be rendered."""
        return self._condition

    @condition.setter
    def condition(self, value):
        previous = self._condition
        self._condition = value
        self._notify('condition', value, previous)
def get_if_pipeline() -> RenderingPipeline:
    """Creates the rendering pipeline used for If nodes."""
    pipes = [apply_attributes, render_if, rerender_on_condition_change]
    return RenderingPipeline(pipes=pipes, name='if pipeline')
def render_if(node: If, context: RenderingContext):
    """Renders children nodes only when the condition is truthy."""
    if not node.condition:
        return
    render_children(node, context, get_child_context)
def rerender_on_condition_change(node: If, context: RenderingContext):
    """Re-renders the node whenever its condition changes."""
    def _handler(*_):
        _on_condition_change(node, context)
    node.observe('condition', _handler)
def _on_condition_change(node: If, context: RenderingContext):
    """Drops current children, then renders them again if condition holds."""
    node.destroy_children()
    render_if(node, context)
|
def binary_search(data, target, low, high):
    """Return True if target is found in indicated portion of a Python list.

    The search only considers the portion from data[low] to data[high]
    inclusive; data is assumed sorted. Iterative rather than recursive.
    """
    while low <= high:
        mid = (low + high) // 2
        if data[mid] == target:
            return True
        if target < data[mid]:
            high = mid - 1  # continue in the left half
        else:
            low = mid + 1   # continue in the right half
    return False
import pandas as pd
import matplotlib.pyplot as plt

# Columns: x coordinate, initial state, final state (whitespace separated).
# Raw string for the regex separator avoids the invalid-escape warning
# ('\s' is not a valid Python escape); read_csv already returns a DataFrame,
# so the redundant pd.DataFrame(...) wrap was dropped.
data = pd.read_csv('LINADV_data.txt', sep=r'\s+', header=None)
x = data[0]
y = data[1]
z = data[2]
# w = data[3]
plt.plot(x, y, 'r', label="Initial State")
plt.plot(x, z, 'b', label="Final State")
# plt.plot(x, w, 'g', label="Initial Height Tendency")
plt.xlabel('1/1000 (m)')
plt.ylabel('Height')  # fixed typo: was 'Heigth'
plt.legend()
plt.show()
|
# OCR of Hand-written Digits
# Our goal is to build an application which can read the handwritten digits. For this we need some train_data and test_data. OpenCV comes with an image digits.png (in the folder opencv/samples/python2/data/) which has 5000 handwritten digits (500 for each digit). Each digit is a 20x20 image. So our first step is to split this image into 5000 different digits. For each digit, we flatten it into a single row with 400 pixels. That is our feature set, ie intensity values of all pixels. It is the simplest feature set we can create. We use first 250 samples of each digit as train_data, and next 250 samples as test_data. So let’s prepare them first.
import numpy as np
from cv2 import cv2
img = cv2.imread('resource/digits.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# digits.png is a 50x100 grid of 20x20-pixel handwritten digits.
rows = np.vsplit(gray, 50)
cells = [np.hsplit(row, 100) for row in rows]
x = np.array(cells)
# Left half of each row -> training set, right half -> test set,
# flattened into 400-dimensional pixel-intensity vectors.
train = x[:, :50].reshape(-1, 400).astype(np.float32)  # (2500, 400)
test = x[:, 50:100].reshape(-1, 400).astype(np.float32)  # (2500, 400)
# Labels 0..9, 250 samples per digit, identical for train and test.
k = np.arange(10)
train_labels = np.repeat(k, 250)[:, np.newaxis]
test_labels = train_labels.copy()
# Train kNN and classify the test set with 5 neighbours.
knn = cv2.ml.KNearest_create()
knn.train(train, 0, train_labels)  # 0 == cv2.ml.ROW_SAMPLE -- confirm
ret, result, neighbours, dist = knn.findNearest(test, k=5)
# Accuracy: fraction of predictions matching test_labels.
matches = result == test_labels
correct = np.count_nonzero(matches)
accuracy = correct * 100.0 / result.size
# BUG FIX: the Python 2 `print accuracy` statement is a SyntaxError on
# Python 3; also removed the leftover debug `print 'hello'`.
print(accuracy)
# To avoid rebuilding the training data every run, persist it:
#   np.savez('knn_data.npz', train=train, train_labels=train_labels)
#   with np.load('knn_data.npz') as data:
#       train = data['train']
#       train_labels = data['train_labels']
# Storing as np.uint8 shrinks the file (~1.1 MB vs ~4.4 MB); convert back
# to float32 after loading.
from django.conf.urls.defaults import *
# Legacy Django URLconf: patterns() with string view names was removed in
# Django 1.10, so this file targets an old Django release.
urlpatterns = patterns(
    'areas.views',
    #(r'^map/view/(?P<mapId>\d+)/', 'renderMap'),
    #(r'^map/edit/(?P<mapId>\d+)/', 'editMap'),
    #(r'^map/move/(?P<mapId>\d+)/(?P<direction>\w+)/', 'move'),
    (r'^area/list/', 'area_list'),
    (r'^area/create/', 'area_create'),
    (r'^area/read/(?P<areaId>\d+)/', 'area_read'),
    (r'^area/update/(?P<areaId>\d+)/', 'area_update'),
    # NOTE(review): delete captures <mapId> while the other area views use
    # <areaId> -- looks inconsistent; confirm against areas.views.area_delete.
    (r'^area/delete/(?P<mapId>\d+)/', 'area_delete'),
    (r'^wizard/get_or_create/(?P<name>\w+)/', 'wizard_get_or_create'),
    (r'^realm/get_or_create/(?P<name>\w+)/', 'realm_get_or_create'),
)
|
from __future__ import absolute_import
from tests.integration.flask_app.app import sleep_task
from time import sleep
import pytest
from celery_once import AlreadyQueued
import pytest
@pytest.mark.framework
def test_flask():
    """celery_once must reject a duplicate task while the first still runs."""
    sleep_task.delay(1)
    sleep(0.5)
    # The first task is still sleeping, so queueing the same task again
    # must raise AlreadyQueued.
    with pytest.raises(AlreadyQueued):
        sleep_task.delay(1)
    sleep(2)  # Task should have completed by now.
    sleep_task.delay(1)
|
# functools.reduce folds a sequence into a single value by repeatedly
# applying a two-argument function to an accumulator and the next element.
from functools import reduce

mylist = [1, 3, 5, 6, 7]
# Fold with multiplication: the product of every element in mylist.
mul = reduce(lambda accumulator, element: accumulator * element, mylist)
print(mul)
print(type(mul))
|
import pymongo
import requests
import json
class Table(object):
    """Container for one statistics table: metadata plus lazily loaded data."""

    def __init__(self, meta):
        # Metadata document describing the table (as stored in the database).
        self.meta = meta
        # Both are populated later by get_options / fetch_data.
        self.option_dict = None
        self.data = None

    def get_options(self):
        # Placeholder: option parsing is filled in by a later revision.
        pass

    def fetch_data(self):
        # Placeholder: data retrieval is filled in by a later revision.
        pass
class SpiderDB(object):
    """Convenience wrapper over the spider MongoDB collections."""

    def __init__(self, client, db_info):
        self.client = client
        # db_info maps logical names to collection names.
        self.spider_db = self.client[db_info['db_name']]
        self.coll_meta = self.spider_db[db_info['meta']]
        self.coll_data = self.spider_db[db_info['data']]
        self.coll_desc = self.spider_db[db_info['desc']]

    def docs_readable(self, docs):
        """Prints a compact (query_type, names, identifiers) view of docs."""
        rows = [
            (doc['query_type'],
             (doc['title'], doc['trace_name'], doc['id_name']),
             (doc.get('section'), doc['trace'], doc['id']))
            for doc in docs
        ]
        for row in rows:
            print(row)
        print('total find=%d' % len(rows))

    def find_table_by_keyword(self, keyword):
        """Finds meta documents whose id_name or trace_name matches keyword."""
        return self.coll_meta.find({'$or': [
            {'id_name': {'$regex': keyword}},
            {'trace_name': {'$regex': keyword}},
        ]})

    def all_docs(self):
        """Returns a cursor over every meta document."""
        return self.coll_meta.find()

    def docs_count(self):
        """Counts meta documents.

        NOTE(review): Cursor.count() was removed in pymongo 4; migrate to
        coll_meta.count_documents({}) when upgrading -- confirm driver version.
        """
        return self.coll_meta.find().count()

    def get_table(self, section=None, trace=None, id=None):
        """Builds a Table from the meta document matching the given keys."""
        query = []
        if section is not None:
            query.append({'section': section})
        if trace is not None:
            query.append({'trace': trace})
        if id is not None:
            query.append({'id': id})
        data = self.coll_meta.find_one({'$and': query})
        # BUG FIX: was `self.Table(data)` -- Table is a module-level class,
        # not an attribute of SpiderDB, so that raised AttributeError.
        return Table(data)
|
import os
import importlib.util as imp
from azureml.api.schema.schemaUtil import *
from azureml.api.exceptions.BadRequest import BadRequestException
from azureml.api.realtime.swagger_spec_generator import generate_service_swagger
# Load the user's scoring script (score.py) as module 'service_driver';
# exec_module runs it, so score.py's top-level code executes at import time.
driver_module_spec = imp.spec_from_file_location('service_driver', 'score.py')
driver_module = imp.module_from_spec(driver_module_spec)
driver_module_spec.loader.exec_module(driver_module)
def run(http_body):
    """Dispatches an HTTP body to the driver's run(), decoding it via the
    service schema when one was loaded by init()."""
    if aml_service_schema is None:
        # No schema: hand the raw body straight to the driver.
        return driver_module.run(http_body)
    arguments = parse_service_input(http_body, aml_service_schema.input)
    try:
        return driver_module.run(**arguments)
    except TypeError as exc:
        # Decoded arguments did not match the driver's signature.
        raise BadRequestException(str(exc))
def init():
    """Initializes the service: loads the schema (if configured), writes
    swagger.json and delegates to the driver's own init()."""
    global aml_service_schema
    # The schema path is intentionally empty in this generated template, so
    # the load branch below is skipped and run() receives raw bodies.
    schema_file = ""
    service_name = os.getenv('SERVICE_NAME', 'ML service')
    service_path_prefix = os.getenv('SERVICE_PATH_PREFIX', '')
    service_version = os.getenv('SERVICE_VERSION', '1.0')
    if schema_file:
        aml_service_schema = load_service_schema(schema_file)
    else:
        aml_service_schema = None
    swagger_json = generate_service_swagger(service_name=service_name,
                                            service_schema_file_path=schema_file,
                                            service_version=service_version,
                                            service_path_prefix=service_path_prefix)
    # NOTE(review): `json` is presumably re-exported by the schemaUtil
    # star-import -- confirm, otherwise this NameErrors.
    with open('swagger.json', 'w') as swagger_file:
        json.dump(swagger_json, swagger_file)
    driver_module.init()
|
# -*- coding: cp1252 -*-
"""Enrique Ibáñez Orero - 1º DAW - Practica 7 - Ejercicio 4 - Pràctica 7.
Escribe un programa que pida una frase, y le pase como parámetro a una
función dicha frase. La función debe sustituir todos los espacios
en blanco de una frase por un asterisco, y devolver el resultado
para que el programa principal la imprima por pantalla. """
def sustituto(cadena):
    """Returns `cadena` with every space replaced by an asterisk."""
    # str.replace performs the single-character substitution in one pass,
    # replacing the original character-by-character accumulation loop.
    return cadena.replace(" ", "*")
# Python 2 entry point: read a phrase and print it with spaces replaced
# by asterisks.
frase=raw_input("Escribe una frase: ")
asteriscos=sustituto(frase)
print asteriscos
|
#!/bin/python
import math
import os
import random
import re
import sys
# Complete the biggerIsGreater function below.
def biggerIsGreater(w):
    """Returns the next lexicographic permutation of w, or "no answer".

    Standard next-permutation: find the rightmost ascent (pivot), swap the
    pivot with the smallest larger character to its right, then sort the
    suffix.
    """
    chars = list(w)
    # BUG FIX: the old code used `if not idx`, which wrongly treated a
    # pivot at index 0 as "no answer" (e.g. "ab" must yield "ba") and
    # raised NameError when no ascent existed; use a None sentinel.
    pivot = None
    for i in range(len(chars) - 2, -1, -1):
        if chars[i] < chars[i + 1]:
            pivot = i
            break
    if pivot is None:
        return "no answer"
    # BUG FIX: the successor must be the smallest character GREATER than
    # chars[pivot]; the old code picked the overall minimum of the suffix,
    # which could produce a smaller string (e.g. "dkhc" -> "cdhk").
    successor = pivot + 1
    for j in range(pivot + 2, len(chars)):
        if chars[pivot] < chars[j] < chars[successor]:
            successor = j
    chars[pivot], chars[successor] = chars[successor], chars[pivot]
    return "".join(chars[:pivot + 1] + sorted(chars[pivot + 1:]))
if __name__ == '__main__':
    # fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # Python 2 driver: read T test cases and print each next permutation.
    T = int(raw_input())
    for T_itr in xrange(T):
        w = raw_input()
        print biggerIsGreater(w)
    # fptr.write(result + '\n')
|
from calvin import *
#from postprocessor import *
# specify data file
calvin = CALVIN('calvin/data/links_infeasible.csv')
# create pyomo model from specified data file
calvin.create_pyomo_model(debug_mode=False)
# Solve with GLPK using 10 processes.
calvin.solve_pyomo_model(solver='glpk', nproc=10, debug_mode=False)
# postprocess results to create time-series files
# NOTE(review): postprocess presumably comes from the calvin star-import,
# since the postprocessor import is commented out -- confirm.
postprocess(calvin.df, calvin.model, resultdir='result')
|
# Reference FASTA and k-mer signal model paths.
refFilePath = "../../../data/sapIngB1.fa"
# kmer model
kmerModelFilePath = "../../../data/kmer_model.hdf5"
# positive and negative reads folder
readsPosFilePath = "goodReadsDebug.txt"
readsNegFilePath = "../../../data/neg-basecalled"
# Reference region of interest.
targetContig = "contig1"
targetBeg, targetEnd = 0, 50000
# Experiment sizes and signal-processing parameters.
posTestCases, negTestCases = 40, 40
levels = 6  # number of discretization levels for signal strings
repeatSignal = 10  # signal samples emitted per reference base
kmerLength = 23  # k for signal-level k-mers
overflow = 0.30
smoothParam = 5
refWindowSize = 1000  # sliding-window size over the signal
refWindowJump = 700  # sliding-window stride
fromRead, toRead = 5000, 20000  # signal slice taken from each read
contigNum = 1
################################################################################
import sys
import glob
import copy
import numpy as np
import mappy as mp
from pyfaidx import Fasta
from nadavca.dtw import KmerModel
sys.path.append("../")
from signalHelper import (
stringToSignal,
getLevels,
getSignalFromRead,
getSeqfromRead,
produceRandom,
)
from signalHelper import (
computeNorm,
computeString,
smoothSignal,
buildDictionary,
overlappingKmers,
)
import matplotlib
import matplotlib.pyplot as plt
def overlap(dict1, dict2):
    """Counts the keys of dict1 that also appear in dict2."""
    return sum(1 for kmer in dict1 if kmer in dict2)
def plotAOC(src):
    """Plots a ROC-style step curve from (score, label) pairs, sorting the
    list in place so the highest scores come first."""
    src.sort()
    src.reverse()
    X, Y = [], []
    false_count, true_count = 0, 0
    for entry in src:
        X.append(false_count)
        Y.append(true_count)
        if entry[1] == 1:
            true_count += 1
        else:
            false_count += 1
    print(X)
    print(Y)
    plt.scatter(X, Y)
    plt.show()
def getDictFromSequence(signal, refWindowSize, refWindowJump):
    """Builds a combined k-mer count dictionary over sliding windows of signal."""
    counts = {}
    for winBeg in range(0, len(signal) - refWindowSize + 1, refWindowJump):
        window = np.array(copy.deepcopy(signal[winBeg:winBeg + refWindowSize]), float)
        window = smoothSignal(window, smoothParam)
        shift, scale = computeNorm(window, 0, refWindowSize)
        levelString = computeString(
            window,
            0,
            refWindowSize,
            shift,
            scale,
            levels,
            overflow=overflow,
        )
        # Merge this window's k-mer counts into the running totals.
        for kmer, count in buildDictionary(levelString, kmerLength).items():
            counts[kmer] = counts.get(kmer, 0) + count
    return counts
def intervalOverlap(b, e, c, d):
    """True when the half-open intervals [b, e) and [c, d) share any point."""
    return b < d and c < e
################################################################################
# Build the reference aligner index and load the k-mer signal model.
referenceIdx = mp.Aligner(refFilePath)
assert referenceIdx, "failed to load/build reference index"
mod = KmerModel.load_from_hdf5(kmerModelFilePath)
# load filenames of all positive and negative reads
# The positive list alternates fast5-path lines and basecalled-sequence lines.
data = [i.strip() for i in open(readsPosFilePath, "r").readlines() if i.strip() != ""]
posFast5 = [data[i] for i in range(0, len(data), 2)]
basecalledFast5 = [data[i] for i in range(1, len(data), 2)]
negFast5 = glob.glob(readsNegFilePath + "/*.fast5", recursive=True)
assert len(posFast5) >= posTestCases, "Not enough positive testcases!"
assert len(negFast5) >= negTestCases, "Not enough negative testcases!"
posFast5 = posFast5[:posTestCases]
negFast5 = negFast5[:negTestCases]
################################################################################
# Hash the target region of the reference: simulate its signal and build a
# k-mer count table over sliding windows.
hashTable = {}
for contig in Fasta(refFilePath):
    if contig.name != targetContig:
        continue
    ref = str(contig)
    ref = ref[targetBeg:targetEnd]
    contigSignal = stringToSignal(ref, mod, repeatSignal=repeatSignal)
    hashTable = getDictFromSequence(contigSignal, refWindowSize, refWindowJump)
# Inspect the most frequent k-mers (debug aid; loop currently does nothing).
for k in sorted(hashTable, key=hashTable.get, reverse=True)[:100]:
    pass
    # print("{0} {1}".format(k, hashTable[k]))
    # del hashTable[k]
def processRead(path, readFromRef=False):
    """Scores one read: the number of k-mers shared with the reference hashTable."""
    rawSignal = np.array(getSignalFromRead(path), dtype=float)
    rawSignal = rawSignal[fromRead:toRead]
    readDict = getDictFromSequence(rawSignal, refWindowSize, refWindowJump)
    sharedKmers = overlap(readDict, hashTable)
    print("Number of hits is {0}".format(sharedKmers))
    return sharedKmers
# Collect (score, label) pairs: label 1 for positive reads, 0 for negative.
helper = []
print("Positive:")
for i in range(len(posFast5)):
    filePath = posFast5[i]
    # try:
    # readSeq, basecallTable = getSeqfromRead(filePath)
    # except:
    # continue
    # if len(readSeq) < (toRead // repeatSignal):
    # continue
    readSeq = basecalledFast5[i]
    # Keep only near-full-length forward alignments that overlap the target
    # region of the target contig.
    hits = [
        aln
        for aln in referenceIdx.map(readSeq)
        if (aln.q_en - aln.q_st > 0.95 * len(readSeq))
        and aln.strand == 1
        and aln.ctg == targetContig
        and intervalOverlap(aln.r_st, aln.r_en, targetBeg, targetEnd)
    ]
    if len(hits) == 0:
        # NOTE(review): this decrements negTestCases while skipping a
        # POSITIVE read -- looks like it should adjust posTestCases; confirm.
        negTestCases -= 1
        continue
    if not (hits[0].r_st > targetBeg and hits[0].r_en < targetEnd):
        negTestCases -= 1
        continue
    print(f"Len is {len(hits)}")
    print(hits[0].r_st)
    print(hits[0].r_en)
    # print(f"{len(hits)}")
    # if len(hits) == 0:
    # print("Zle je")
    # continue
    helper.append((processRead(filePath, readFromRef=True), 1))
print("\n\nNegative:")
for i in range(negTestCases):
    filePath = negFast5[i]
    helper.append((processRead(filePath), 0))
plotAOC(helper)
|
import re
import numpy as np
from prody import *
def extract_pockets(filepath, pdbid, selection):
    """Returns dictionaries where the key is pocket ID (starting at zero) and the value is a list of residue indices or numbers/chains in the pocket."""
    # pocket ID -> residue indices (prody resindices within `selection`)
    pockets_res = {}
    # pocket ID -> 'resnum:chain' residue labels
    pockets_res_name = {}
    pocket_count = count_pockets(filepath, pdbid)
    for pocket_no in range(pocket_count):
        pockets_res[pocket_no] = []
        pockets_res_name[pocket_no] = []
        # Context manager ensures the PDB file is closed even if parsing or
        # selection raises (the old code leaked the handle on errors).
        with open(filepath + pdbid + '_out/pockets/pocket' + str(pocket_no) + '_atm.pdb') as in_file:
            for line in in_file:
                if line[:4] == 'ATOM':
                    # add residue name (PDB columns: 22-26 resnum, 21 chain)
                    name = line[22:26].lstrip() + ':' + line[21]
                    if name not in pockets_res_name[pocket_no]:
                        pockets_res_name[pocket_no].append(name)
                    # add residue ID
                    selector = 'chain ' + line[21] + ' and resnum ' + line[22:26].lstrip()
                    try:
                        index = selection.select(selector).getResindices()[0]
                        if index not in pockets_res[pocket_no]:
                            pockets_res[pocket_no].append(index)
                    # selector might not have a resindex
                    except AttributeError:
                        pass
                    # cannot select negative resnums
                    except atomic.select.SelectionError:
                        pass
    return pockets_res, pockets_res_name
def extract_info(filepath, pdbid, info_id_list):
    """Returns a dictionary where the key is pocket ID (starting at zero) and the value is a dictionary of information points."""
    pockets_info = {}
    # Context manager ensures the info file is always closed.
    with open(filepath + pdbid + '_out/' + pdbid + '_info.txt') as pocket_file:
        pocket_lines = pocket_file.readlines()
    # create inner dictionaries, one per 'Pocket N' header line
    counter = 0
    for line in pocket_lines:
        if line[:6] == 'Pocket':
            pockets_info[counter] = {}
            counter += 1
    # populate inner dictionaries: the last whitespace-separated token of a
    # line starting with info_id is parsed as the float value
    for info_id in info_id_list:
        counter = 0
        for line in pocket_lines:
            if line.lstrip()[:len(info_id)] == info_id:
                split = re.split(r'\s+', line.rstrip())
                pockets_info[counter][info_id] = float(split[-1])
                counter += 1
    return pockets_info
def count_pockets(filepath, pdbid):
    """Counts the number of pockets found by fpocket."""
    pocket_count = 0
    # Context manager ensures the file handle is released even on errors.
    with open(filepath + pdbid + '_out/' + pdbid + '_info.txt') as pocket_file:
        for line in pocket_file:
            if line[:6] == 'Pocket':
                pocket_count += 1
    return pocket_count
def dist_to_active(calphas, active_indices, pockets_res):
    """Returns a dictionary where the key is pocket index and the value is distance to the active site in Angstrom."""
    active_selector = 'resindex ' + ' or resindex '.join(str(i) for i in active_indices)
    # Centroid of the active-site C-alpha coordinates.
    active_center = sum(calphas.select(active_selector).getCoords()) / float(len(active_indices))
    pockets_dist = {}
    for pocket, residues in pockets_res.items():
        pocket_selector = 'resindex ' + ' or resindex '.join(str(i) for i in residues)
        pocket_center = sum(calphas.select(pocket_selector).getCoords()) / float(len(residues))
        offset = pocket_center - active_center
        # Euclidean distance between centroids, rounded to 0.1 A.
        pockets_dist[pocket] = round(np.sqrt(offset.dot(offset)), 1)
    return pockets_dist
|
import PySimpleGUI as sg
'''
this python file is for another pop out window which shows the maths of
how the cumulative probability function works
'''
def explanation2():
    """Shows a pop-up window with the cumulative-probability (beta CDF) figure."""
    image_rows = [
        [sg.Image(filename='betacdf.PNG')]
    ]
    popup = sg.Window('Probability Densities', image_rows)
    # Event loop: block until the user closes or cancels the window.
    while True:
        event, values = popup.read()
        if event in (sg.WIN_CLOSED, 'Cancel'):
            break
    popup.close()
|
from . import *
def FermiDirac(en, T):
    """
    The Fermi-Dirac distribution.

    Parameters
    ----------
    en: array-like
        Energy of state in units J
    T: scalar
        Temperature in units K

    Returns
    ----------
    FD: array-like
        Fermi-Dirac probability of state at energy en
    """
    boltzmann = 1.38064852*10**-23  # J/K
    # 1/(1+exp(x)) computed as exp(-logaddexp(x, 0)) to reduce the chance of
    # underflow; the tiny additive constant keeps T=0 from dividing by zero.
    reduced_energy = en / (boltzmann * (T + 0.000000000001))
    return np.exp(-np.logaddexp(reduced_energy, 0))
from django import forms
import random
from .models import Guitar, Story, Photos, Specs, Appearances, Videos
# Module-level default guitar id; referenced once at class-creation time below.
master_id = 0
class AppearForm(forms.ModelForm):
    """Form for a guitar's tour/album appearances."""
    # NOTE(review): this runs once at import time and mutates the Appearances
    # model class attribute -- looks like a side effect rather than per-form
    # state; confirm intent.
    Appearances.guitar_id = master_id
    tour_name = forms.CharField(required=False)
    album_name = forms.CharField(required=False)
    class Meta:
        model = Appearances
        fields = ('tour_name', 'album_name')
class GuitarForm(forms.ModelForm):
    """Form for the core guitar identification fields."""
    # serial_number = forms.CharField(required=False)
    class Meta:
        model = Guitar
        fields = ("manufacturer_name", "guitar_name", "guitar_model", "serial_number")
class PhotosForm(forms.ModelForm):
    """Form for attaching a photo path to a guitar."""
    photo_path = forms.CharField(required=False)
    class Meta:
        model = Photos
        fields = ('photo_path',)
class SpecsForm(forms.ModelForm):
    """Form for a guitar's physical and hardware specifications."""
    # OPTIONS = (
    # ("1", 'True'),
    # ("0", 'False'),
    #
    # )
    # repairs = forms.ChoiceField(widget=forms.CheckboxSelectMultiple,
    # choices=OPTIONS)
    # print(repairs)
    class Meta:
        model = Specs
        # fields = '__all__'
        fields = ('production_year', 'weight', 'finish',
                  'body_wood', 'neck_wood', 'fretboard_wood', 'cap_wood', 'neck_pickup',
                  'middle_pickup', 'bridge_pickup', 'repairs')
class StoryForm(forms.ModelForm):
    """Form for a guitar's provenance story."""
    # Field label intentionally differs from the model field name.
    where_purchased = forms.CharField(label='Current Location', required=False)
    # story_text = forms.CharField(widget=forms.Textarea, required=False)
    # custom_built = forms.IntegerField(required=False)
    class Meta:
        model = Story
        fields = ('where_purchased', 'custom_built', 'story_text')
class VideosForm(forms.ModelForm):
    """Form for attaching a video path to a guitar."""
    video_path = forms.CharField(required=False)
    class Meta:
        model = Videos
        fields = ('video_path',)
|
#!/usr/bin/env python
import glob
import os
from itertools import chain
from sys import argv
import pycodestyle
# File extensions to check.
EXTS = ('.py', '.pyx')
# Per-file pycodestyle error codes to ignore.
SKIP_ERRORS = {'a_intro/sequences.py': ('E402', 'E711', 'E712')}
# Executable scripts (under bin/) checked in addition to the package files.
SCRIPTS = ('sci_py_example', 'sci_py_import_all', 'sci_py_test_style')
def check_style(fpath):
    """Run pycodestyle over a single file and return its report.

    Files whose path ends with an entry of SKIP_ERRORS are checked with
    that entry's error codes ignored.
    """
    ignored = None
    for exception_path, codes in SKIP_ERRORS.items():
        if fpath.endswith(exception_path):
            ignored = codes
            break
    if ignored is None:
        style = pycodestyle.StyleGuide()
    else:
        style = pycodestyle.StyleGuide(ignore=ignored)
    return style.check_files([fpath])
if __name__ == '__main__':
    dir_name = argv[1]
    # Scripts live under <dir>/bin; package modules under scientific_python.
    script_dir = os.path.join(dir_name, 'bin')
    package_dir = os.path.join(dir_name, 'scientific_python')
    fpaths = [os.path.join(script_dir, script) for script in SCRIPTS]
    for ext in EXTS:
        fpaths.extend(glob.glob(os.path.join(package_dir, '**/*' + ext)))
    # Keep only files whose style report contains at least one violation.
    results = {}
    for fpath in fpaths:
        report = check_style(fpath)
        if report.total_errors != 0:
            results[fpath] = report
    if results:
        formatted = []
        for fpath, report in results.items():
            formatted.append(' {}:\n{}'.format(
                fpath, '\n'.join(report.get_statistics())))
        msg = 'Style checker has found error{s}:\n{errors}'.format(
            s='s' if len(results) != 1 else '',
            errors='\n'.join(formatted)
        )
        print(msg)
        raise RuntimeError(msg)
    else:
        print("Style check hasn't found any errors")
|
import logging
import re
import serial
import sys
import time
# Send all DEBUG-and-above records from this module's logger to stdout,
# with a timestamped single-line format.
logging_handler = logging.StreamHandler(sys.stdout)
logging_handler.setFormatter(logging.Formatter(
    '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'
))
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging_handler)
def connect(device, baud=115200):
    """Open and return a pyserial connection to *device* at *baud*."""
    return serial.Serial(device, baud)
class Coordinates(object):
    """An (x, y, z) machine-space triple with component-wise arithmetic."""
    @classmethod
    def parse(cls, value):
        """Build Coordinates from a comma-separated 'x,y,z' string."""
        parts = [float(part) for part in value.split(',')]
        return cls(parts[0], parts[1], parts[2])
    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z
    def __eq__(self, other):
        return (self.x, self.y, self.z) == (other.x, other.y, other.z)
    def __add__(self, other):
        return self.__class__(self.x + other.x, self.y + other.y,
                              self.z + other.z)
    def __sub__(self, other):
        return self.__class__(self.x - other.x, self.y - other.y,
                              self.z - other.z)
    def __repr__(self):
        return '%f,%f,%f' % (self.x, self.y, self.z)
class Response(object):
    """A single Grbl reply line: either 'ok' or 'error:<code>'."""
    # Raw string fixes the invalid '\d' escape in the original pattern.
    error_regex = re.compile(r'^error:(\d+)')
    @classmethod
    def is_response(cls, value):
        """Return True when *value* is a terminating Grbl response line."""
        return cls.error_regex.match(value) is not None or value == 'ok'
    def __init__(self, value):
        self.value = value
    def is_success(self):
        """True when the response is the plain 'ok' acknowledgement."""
        return self.value == 'ok'
    def is_error(self):
        """True when the response is an 'error:<n>' line."""
        return self.error_regex.match(self.value) is not None
    def error_code(self):
        """Return the integer error code, or None for non-error responses."""
        if self.is_error():
            match = self.error_regex.match(self.value)
            return int(match.group(1))
        return None
class Status(object):
    """A parsed Grbl status report, e.g. '<Idle|MPos:0.000,0.000,0.000>'.

    Exposes the machine state and a dict of the report's name:value segments.
    """
    # Raw string fixes the invalid '\<', '\|', '\>', '\w' escapes.
    regex = re.compile(r'\<(\w+)\|(.+)\>')
    @classmethod
    def is_status(cls, value):
        """Return True when *value* looks like a status report line."""
        return cls.regex.match(value) is not None
    def __init__(self, value):
        """Parse *value*; raises Exception on a malformed report."""
        self.value = value
        if not self.is_status(value):
            raise Exception('invalid status format')
        match = self.regex.match(self.value)
        self.state = match.group(1)
        self.segments = {}
        # Segments look like 'Name:val'; assumes val contains no ':'.
        for segment in match.group(2).split('|'):
            name, value = segment.split(':')
            self.segments[name] = value
    def is_idle(self):
        """True when the machine reports the Idle state."""
        return self.state == 'Idle'
    def mpos(self):
        """Machine position as Coordinates, or None if not reported."""
        if 'MPos' in self.segments:
            return Coordinates.parse(self.segments['MPos'])
        return None
    def wco(self):
        """Work coordinate offset as Coordinates, or None if not reported."""
        if 'WCO' in self.segments:
            return Coordinates.parse(self.segments['WCO'])
        return None
class PendingMessages(Exception):
    """Raised when a command is sent while unread messages are still queued."""
    pass
class SendError(Exception):
    """Raised when Grbl answers a command with an error:<code> response."""
    pass
class Sender(object):
    """Synchronous command/response driver for a Grbl 1.1 controller on a
    serial port. Tracks machine position (mpos) and work coordinate offset
    (wco) from status reports, and queues unsolicited lines in self.messages.
    """
    # Seconds to sleep between status polls in wait().
    polling_interval = 0.2
    def __init__(self, serial):
        # NOTE(review): 'serial' is the open port object here, shadowing the
        # pyserial module imported at file level.
        self.serial = serial
        self.messages = []
        # Discard everything up to the Grbl startup banner.
        lines = self._read_until(lambda line: re.match(r'^Grbl.*', line))
        if not re.match(r'^Grbl 1\.1.*', lines[-1]):
            raise Exception('Unsupported Grbl version')
        self.mpos = Coordinates(0.0, 0.0, 0.0)
        self.wco = Coordinates(0.0, 0.0, 0.0)
    def receive(self):
        """Read until an ok/error line; earlier lines become messages."""
        lines = self._read_until(lambda line: Response.is_response(line))
        self.messages = lines[0:-1]
        return Response(lines[-1])
    def read_messages(self):
        """Return and clear the queued unsolicited messages."""
        messages = self.messages
        self.messages = []
        return messages
    def message(self):
        """Return the single queued message; asserts exactly one is queued."""
        messages = self.read_messages()
        assert len(messages) == 1
        return messages[0]
    def send_gcode(self, value):
        """Send one gcode line and wait for its response.

        Raises PendingMessages if unread messages are queued, and SendError
        (carrying the error code) on an error response.
        """
        if len(self.messages) > 0:
            raise PendingMessages()
        logger.info('send: %s' % value)
        # NOTE(review): writes str, not bytes -- assumes a Python 2 style
        # pyserial; confirm before porting to Python 3.
        self.serial.write('%s\n' % value)
        response = self.receive()
        if response.is_error():
            raise SendError(response.error_code())
        return response
    def status(self):
        """Request ('?') and parse a realtime status report."""
        self.serial.write('?')
        self.serial.flush()
        lines = self._read_until(lambda line: Status.is_status(line))
        self.messages += lines[0:-1]
        return self._update_status(Status(lines[-1]))
    def wait(self):
        """Block, polling status, until the controller reports Idle."""
        while True:
            if self.status().is_idle():
                break
            time.sleep(self.polling_interval)
    def position(self):
        """Current work position: machine position minus work offset."""
        return self.mpos - self.wco
    def _read_until(self, f):
        # Collect non-empty lines until predicate f matches one; the matching
        # line is included as the last element.
        lines = []
        while True:
            line = self.serial.readline().strip()
            if len(line) > 0:
                logger.info('recv: %s' % line)
                lines.append(line)
                if f(line):
                    break
        return lines
    def _update_status(self, status):
        # Cache mpos from every report; WCO only appears periodically, so
        # keep the last seen value when it is absent.
        self.mpos = status.mpos()
        wco = status.wco()
        if wco:
            self.wco = wco
        return status
|
#!/usr/bin/env python
#
# Copyright (C) 2012 Space Monkey, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
LevelDB Python interface via C-Types.
http://code.google.com/p/leveldb-py/
Missing still (but in progress):
* custom comparators, filter policies, caches
This interface requires nothing more than the leveldb shared object with
the C api being installed.
Now requires LevelDB 1.6 or newer.
For most usages, you are likely to only be interested in the "DB" and maybe
the "WriteBatch" classes for construction. The other classes are helper
classes that you may end up using as part of those two root classes.
* DBInterface - This class wraps a LevelDB. Created by either the DB or
MemoryDB constructors
* Iterator - this class is created by calls to DBInterface::iterator.
Supports range requests, seeking, prefix searching, etc
* WriteBatch - this class is a standalone object. You can perform writes
and deletes on it, but nothing happens to your database until you
write the writebatch to the database with DB::write
"""
# Module authorship metadata.
__author__ = "JT Olds"
__email__ = "jt@spacemonkey.com"
import bisect
import ctypes
import ctypes.util
import weakref
import threading
from collections import namedtuple
# Locate and load the LevelDB shared library exposing the C API.
_pth = ctypes.util.find_library('leveldb')
if _pth is None:
    # BUGFIX: error message previously misspelled "library" as "librairy".
    raise ImportError('Missing leveldb library on the system')
_ldb = ctypes.CDLL(_pth)
# ctypes prototypes for the LevelDB C API (leveldb/c.h). Declaring
# argtypes/restype for every function we call stops ctypes from defaulting
# to int-sized returns, which would truncate 64-bit pointers.
# -- filter policy and block cache lifetime --
_ldb.leveldb_filterpolicy_create_bloom.argtypes = [ctypes.c_int]
_ldb.leveldb_filterpolicy_create_bloom.restype = ctypes.c_void_p
_ldb.leveldb_filterpolicy_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_filterpolicy_destroy.restype = None
_ldb.leveldb_cache_create_lru.argtypes = [ctypes.c_size_t]
_ldb.leveldb_cache_create_lru.restype = ctypes.c_void_p
_ldb.leveldb_cache_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_cache_destroy.restype = None
# -- database options --
_ldb.leveldb_options_create.argtypes = []
_ldb.leveldb_options_create.restype = ctypes.c_void_p
_ldb.leveldb_options_set_filter_policy.argtypes = [ctypes.c_void_p,
    ctypes.c_void_p]
_ldb.leveldb_options_set_filter_policy.restype = None
_ldb.leveldb_options_set_create_if_missing.argtypes = [ctypes.c_void_p,
    ctypes.c_ubyte]
_ldb.leveldb_options_set_create_if_missing.restype = None
_ldb.leveldb_options_set_error_if_exists.argtypes = [ctypes.c_void_p,
    ctypes.c_ubyte]
_ldb.leveldb_options_set_error_if_exists.restype = None
_ldb.leveldb_options_set_paranoid_checks.argtypes = [ctypes.c_void_p,
    ctypes.c_ubyte]
_ldb.leveldb_options_set_paranoid_checks.restype = None
_ldb.leveldb_options_set_write_buffer_size.argtypes = [ctypes.c_void_p,
    ctypes.c_size_t]
_ldb.leveldb_options_set_write_buffer_size.restype = None
_ldb.leveldb_options_set_max_open_files.argtypes = [ctypes.c_void_p,
    ctypes.c_int]
_ldb.leveldb_options_set_max_open_files.restype = None
_ldb.leveldb_options_set_cache.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_options_set_cache.restype = None
_ldb.leveldb_options_set_block_size.argtypes = [ctypes.c_void_p,
    ctypes.c_size_t]
_ldb.leveldb_options_set_block_size.restype = None
_ldb.leveldb_options_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_options_destroy.restype = None
# -- open/close and basic key-value operations --
_ldb.leveldb_open.argtypes = [ctypes.c_void_p, ctypes.c_char_p,
    ctypes.c_void_p]
_ldb.leveldb_open.restype = ctypes.c_void_p
_ldb.leveldb_close.argtypes = [ctypes.c_void_p]
_ldb.leveldb_close.restype = None
_ldb.leveldb_put.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
    ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p, ctypes.c_size_t,
    ctypes.c_void_p]
_ldb.leveldb_put.restype = None
_ldb.leveldb_delete.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
    ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p]
_ldb.leveldb_delete.restype = None
_ldb.leveldb_write.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
    ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_write.restype = None
_ldb.leveldb_get.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
    ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_get.restype = ctypes.POINTER(ctypes.c_char)
# -- read/write options --
_ldb.leveldb_writeoptions_create.argtypes = []
_ldb.leveldb_writeoptions_create.restype = ctypes.c_void_p
_ldb.leveldb_writeoptions_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_writeoptions_destroy.restype = None
_ldb.leveldb_writeoptions_set_sync.argtypes = [ctypes.c_void_p,
    ctypes.c_ubyte]
_ldb.leveldb_writeoptions_set_sync.restype = None
_ldb.leveldb_readoptions_create.argtypes = []
_ldb.leveldb_readoptions_create.restype = ctypes.c_void_p
_ldb.leveldb_readoptions_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_readoptions_destroy.restype = None
_ldb.leveldb_readoptions_set_verify_checksums.argtypes = [ctypes.c_void_p,
    ctypes.c_ubyte]
_ldb.leveldb_readoptions_set_verify_checksums.restype = None
_ldb.leveldb_readoptions_set_fill_cache.argtypes = [ctypes.c_void_p,
    ctypes.c_ubyte]
_ldb.leveldb_readoptions_set_fill_cache.restype = None
_ldb.leveldb_readoptions_set_snapshot.argtypes = [ctypes.c_void_p,
    ctypes.c_void_p]
_ldb.leveldb_readoptions_set_snapshot.restype = None
# -- iterators --
_ldb.leveldb_create_iterator.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_create_iterator.restype = ctypes.c_void_p
_ldb.leveldb_iter_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_destroy.restype = None
_ldb.leveldb_iter_valid.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_valid.restype = ctypes.c_bool
_ldb.leveldb_iter_key.argtypes = [ctypes.c_void_p,
    ctypes.POINTER(ctypes.c_size_t)]
_ldb.leveldb_iter_key.restype = ctypes.c_void_p
_ldb.leveldb_iter_value.argtypes = [ctypes.c_void_p,
    ctypes.POINTER(ctypes.c_size_t)]
_ldb.leveldb_iter_value.restype = ctypes.c_void_p
_ldb.leveldb_iter_next.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_next.restype = None
_ldb.leveldb_iter_prev.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_prev.restype = None
_ldb.leveldb_iter_seek_to_first.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_seek_to_first.restype = None
_ldb.leveldb_iter_seek_to_last.argtypes = [ctypes.c_void_p]
_ldb.leveldb_iter_seek_to_last.restype = None
_ldb.leveldb_iter_seek.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
    ctypes.c_size_t]
_ldb.leveldb_iter_seek.restype = None
_ldb.leveldb_iter_get_error.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_iter_get_error.restype = None
# -- write batches --
_ldb.leveldb_writebatch_create.argtypes = []
_ldb.leveldb_writebatch_create.restype = ctypes.c_void_p
_ldb.leveldb_writebatch_destroy.argtypes = [ctypes.c_void_p]
_ldb.leveldb_writebatch_destroy.restype = None
_ldb.leveldb_writebatch_clear.argtypes = [ctypes.c_void_p]
_ldb.leveldb_writebatch_clear.restype = None
_ldb.leveldb_writebatch_put.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
    ctypes.c_size_t, ctypes.c_void_p, ctypes.c_size_t]
_ldb.leveldb_writebatch_put.restype = None
_ldb.leveldb_writebatch_delete.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
    ctypes.c_size_t]
_ldb.leveldb_writebatch_delete.restype = None
# -- maintenance: sizes, compaction, snapshots, memory --
_ldb.leveldb_approximate_sizes.argtypes = [ctypes.c_void_p, ctypes.c_int,
    ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p,
    ctypes.c_void_p]
_ldb.leveldb_approximate_sizes.restype = None
_ldb.leveldb_compact_range.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
    ctypes.c_size_t, ctypes.c_void_p, ctypes.c_size_t]
_ldb.leveldb_compact_range.restype = None
_ldb.leveldb_create_snapshot.argtypes = [ctypes.c_void_p]
_ldb.leveldb_create_snapshot.restype = ctypes.c_void_p
_ldb.leveldb_release_snapshot.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ldb.leveldb_release_snapshot.restype = None
_ldb.leveldb_free.argtypes = [ctypes.c_void_p]
_ldb.leveldb_free.restype = None
# A single iterator result: (key, value).
Row = namedtuple('Row', 'key value')
class Error(Exception):
    """Raised when the LevelDB C API reports an error string."""
    pass
class Iterator(object):
    """This class is created by calling __iter__ or iterator on a DB interface
    """
    __slots__ = ["_prefix", "_impl", "_keys_only"]
    def __init__(self, impl, keys_only=False, prefix=None):
        self._impl = impl
        self._prefix = prefix
        self._keys_only = keys_only
    def valid(self):
        """Returns whether the iterator is valid or not
        @rtype: bool
        """
        valid = self._impl.valid()
        if not valid or self._prefix is None:
            return valid
        # With a prefix scope, the iterator is only valid while the
        # underlying key still starts with that prefix.
        key = self._impl.key()
        return key[:len(self._prefix)] == self._prefix
    def seekFirst(self):
        """
        Jump to first key in database
        @return: self
        @rtype: Iter
        """
        if self._prefix is not None:
            self._impl.seek(self._prefix)
        else:
            self._impl.seekFirst()
        return self
    def seekLast(self):
        """
        Jump to last key in database
        @return: self
        @rtype: Iter
        """
        # if we have no prefix or the last possible prefix of this length, just
        # seek to the last key in the db.
        if self._prefix is None or self._prefix == "\xff" * len(self._prefix):
            self._impl.seekLast()
            return self
        # we have a prefix. see if there's anything after our prefix.
        # there's probably a much better way to calculate the next prefix.
        # NOTE(review): str.encode('hex') and the long builtin are Python 2
        # only -- this method needs porting before running on Python 3.
        hex_prefix = self._prefix.encode('hex')
        next_prefix = hex(long(hex_prefix, 16) + 1)[2:].rstrip("L")
        next_prefix = next_prefix.rjust(len(hex_prefix), "0")
        next_prefix = next_prefix.decode("hex").rstrip("\x00")
        self._impl.seek(next_prefix)
        if self._impl.valid():
            # there is something after our prefix. we're on it, so step back
            self._impl.prev()
        else:
            # there is nothing after our prefix, just seek to the last key
            self._impl.seekLast()
        return self
    def seek(self, key):
        """Move the iterator to key. This may be called after StopIteration,
        allowing you to reuse an iterator safely.
        @param key: Where to position the iterator.
        @type key: str
        @return: self
        @rtype: Iter
        """
        if self._prefix is not None:
            key = self._prefix + key
        self._impl.seek(key)
        return self
    def key(self):
        """Returns the iterator's current key. You should be sure the iterator
        is currently valid first by calling valid()
        @rtype: string
        """
        key = self._impl.key()
        if self._prefix is not None:
            # Keys are exposed relative to the scope prefix.
            return key[len(self._prefix):]
        return key
    def value(self):
        """Returns the iterator's current value. You should be sure the
        iterator is currently valid first by calling valid()
        @rtype: string
        """
        return self._impl.val()
    def __iter__(self):
        return self
    def next(self):
        """Advances the iterator one step. Also returns the current value prior
        to moving the iterator
        @rtype: Row (namedtuple of key, value) if keys_only=False, otherwise
            string (the key)
        @raise StopIteration: if called on an iterator that is not valid
        """
        # Python 2 iterator protocol; Python 3 would require __next__.
        if not self.valid():
            raise StopIteration()
        if self._keys_only:
            rv = self.key()
        else:
            rv = Row(self.key(), self.value())
        self._impl.next()
        return rv
    def prev(self):
        """Backs the iterator up one step. Also returns the current value prior
        to moving the iterator.
        @rtype: Row (namedtuple of key, value) if keys_only=False, otherwise
            string (the key)
        @raise StopIteration: if called on an iterator that is not valid
        """
        if not self.valid():
            raise StopIteration()
        if self._keys_only:
            rv = self.key()
        else:
            rv = Row(self.key(), self.value())
        self._impl.prev()
        return rv
    def stepForward(self):
        """Same as next but does not return any data or check for validity"""
        self._impl.next()
    def stepBackward(self):
        """Same as prev but does not return any data or check for validity"""
        self._impl.prev()
    def range(self, start_key=None, end_key=None, start_inclusive=True,
            end_inclusive=False):
        """A generator for some range of rows"""
        # NOTE(review): the row.key accesses below assume keys_only=False
        # (rows are namedtuples) -- confirm callers never mix the two.
        if start_key is not None:
            self.seek(start_key)
            if not start_inclusive and self.key() == start_key:
                self._impl.next()
        else:
            self.seekFirst()
        for row in self:
            if end_key is not None and (row.key > end_key or (
                    not end_inclusive and row.key == end_key)):
                break
            yield row
    def keys(self):
        """Generator over remaining keys from the current position."""
        while self.valid():
            yield self.key()
            self.stepForward()
    def values(self):
        """Generator over remaining values from the current position."""
        while self.valid():
            yield self.value()
            self.stepForward()
    def close(self):
        self._impl.close()
class _OpaqueWriteBatch(object):
"""This is an opaque write batch that must be written to using the putTo
and deleteFrom methods on DBInterface.
"""
def __init__(self):
self._puts = {}
self._deletes = set()
self._private = True
def clear(self):
self._puts = {}
self._deletes = set()
class WriteBatch(_OpaqueWriteBatch):
"""This class is created stand-alone, but then written to some existing
DBInterface
"""
def __init__(self):
_OpaqueWriteBatch.__init__(self)
self._private = False
def put(self, key, val):
self._deletes.discard(key)
self._puts[key] = val
def delete(self, key):
self._puts.pop(key, None)
self._deletes.add(key)
class DBInterface(object):
    """This class is created through a few different means:
    Initially, it can be created using either the DB() or MemoryDB()
    module-level methods. In almost every case, you want the DB() method.
    You can then get new DBInterfaces from an existing DBInterface by calling
    snapshot or scope.
    """
    __slots__ = ["_impl", "_prefix", "_allow_close", "_default_sync",
        "_default_verify_checksums", "_default_fill_cache"]
    def __init__(self, impl, prefix=None, allow_close=False,
            default_sync=False, default_verify_checksums=False,
            default_fill_cache=True):
        self._impl = impl
        self._prefix = prefix
        self._allow_close = allow_close
        self._default_sync = default_sync
        self._default_verify_checksums = default_verify_checksums
        self._default_fill_cache = default_fill_cache
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def close(self):
        # Scoped/snapshot interfaces share the impl; only the owning
        # interface (allow_close=True) actually closes it.
        if self._allow_close:
            self._impl.close()
    def newBatch(self):
        """Return a batch only putTo/deleteFrom on this interface may fill."""
        return _OpaqueWriteBatch()
    def put(self, key, val, sync=None):
        """Write one key/value pair under this interface's prefix."""
        if sync is None:
            sync = self._default_sync
        if self._prefix is not None:
            key = self._prefix + key
        self._impl.put(key, val, sync=sync)
    # pylint: disable=W0212
    def putTo(self, batch, key, val):
        """Record a put in *batch*, applying this interface's key prefix."""
        if not batch._private:
            raise ValueError("batch not from DBInterface.newBatch")
        if self._prefix is not None:
            key = self._prefix + key
        batch._deletes.discard(key)
        batch._puts[key] = val
    def delete(self, key, sync=None):
        """Delete one key under this interface's prefix."""
        if sync is None:
            sync = self._default_sync
        if self._prefix is not None:
            key = self._prefix + key
        self._impl.delete(key, sync=sync)
    # pylint: disable=W0212
    def deleteFrom(self, batch, key):
        """Record a delete in *batch*, applying this interface's key prefix."""
        if not batch._private:
            raise ValueError("batch not from DBInterface.newBatch")
        if self._prefix is not None:
            key = self._prefix + key
        batch._puts.pop(key, None)
        batch._deletes.add(key)
    def get(self, key, verify_checksums=None, fill_cache=None):
        """Return the value for key (prefix applied), or None if absent."""
        if verify_checksums is None:
            verify_checksums = self._default_verify_checksums
        if fill_cache is None:
            fill_cache = self._default_fill_cache
        if self._prefix is not None:
            key = self._prefix + key
        return self._impl.get(key, verify_checksums=verify_checksums,
                fill_cache=fill_cache)
    # pylint: disable=W0212
    def write(self, batch, sync=None):
        """Apply a batch. Public WriteBatch keys are unprefixed, so they are
        rewritten into a private batch under this interface's prefix first.
        """
        if sync is None:
            sync = self._default_sync
        # NOTE(review): iteritems() is Python 2 only.
        if self._prefix is not None and not batch._private:
            unscoped_batch = _OpaqueWriteBatch()
            for key, value in batch._puts.iteritems():
                unscoped_batch._puts[self._prefix + key] = value
            for key in batch._deletes:
                unscoped_batch._deletes.add(self._prefix + key)
            batch = unscoped_batch
        return self._impl.write(batch, sync=sync)
    def iterator(self, verify_checksums=None, fill_cache=None, prefix=None,
            keys_only=False):
        """Return an Iterator, optionally scoped to a (combined) prefix."""
        if verify_checksums is None:
            verify_checksums = self._default_verify_checksums
        if fill_cache is None:
            fill_cache = self._default_fill_cache
        if self._prefix is not None:
            if prefix is None:
                prefix = self._prefix
            else:
                prefix = self._prefix + prefix
        return Iterator(
                self._impl.iterator(verify_checksums=verify_checksums,
                        fill_cache=fill_cache),
                keys_only=keys_only, prefix=prefix)
    def snapshot(self, default_sync=None, default_verify_checksums=None,
            default_fill_cache=None):
        """Return a read-only interface over a point-in-time view."""
        if default_sync is None:
            default_sync = self._default_sync
        if default_verify_checksums is None:
            default_verify_checksums = self._default_verify_checksums
        if default_fill_cache is None:
            default_fill_cache = self._default_fill_cache
        return DBInterface(self._impl.snapshot(), prefix=self._prefix,
                allow_close=False, default_sync=default_sync,
                default_verify_checksums=default_verify_checksums,
                default_fill_cache=default_fill_cache)
    def __iter__(self):
        return self.iterator().seekFirst()
    def __getitem__(self, k):
        v = self.get(k)
        if v is None:
            raise KeyError(k)
        return v
    def __setitem__(self, k, v):
        self.put(k, v)
    def __delitem__(self, k):
        self.delete(k)
    def __contains__(self, key):
        return self.has(key)
    def has(self, key, verify_checksums=None, fill_cache=None):
        """Return True if key exists (a get that discards the value)."""
        return self.get(key, verify_checksums=verify_checksums,
                fill_cache=fill_cache) is not None
    def scope(self, prefix, default_sync=None, default_verify_checksums=None,
            default_fill_cache=None):
        """Return an interface whose keys are all transparently prefixed."""
        if default_sync is None:
            default_sync = self._default_sync
        if default_verify_checksums is None:
            default_verify_checksums = self._default_verify_checksums
        if default_fill_cache is None:
            default_fill_cache = self._default_fill_cache
        if self._prefix is not None:
            prefix = self._prefix + prefix
        return DBInterface(self._impl, prefix=prefix, allow_close=False,
                default_sync=default_sync,
                default_verify_checksums=default_verify_checksums,
                default_fill_cache=default_fill_cache)
    def range(self, start_key=None, end_key=None, start_inclusive=True,
            end_inclusive=False, verify_checksums=None, fill_cache=None):
        """Generator over rows between start_key and end_key."""
        if verify_checksums is None:
            verify_checksums = self._default_verify_checksums
        if fill_cache is None:
            fill_cache = self._default_fill_cache
        return self.iterator(verify_checksums=verify_checksums,
                fill_cache=fill_cache).range(start_key=start_key,
                end_key=end_key, start_inclusive=start_inclusive,
                end_inclusive=end_inclusive)
    def keys(self, verify_checksums=None, fill_cache=None, prefix=None):
        """Generator over all keys (optionally under prefix)."""
        if verify_checksums is None:
            verify_checksums = self._default_verify_checksums
        if fill_cache is None:
            fill_cache = self._default_fill_cache
        return self.iterator(verify_checksums=verify_checksums,
                fill_cache=fill_cache, prefix=prefix).seekFirst().keys()
    def values(self, verify_checksums=None, fill_cache=None, prefix=None):
        """Generator over all values (optionally under prefix)."""
        if verify_checksums is None:
            verify_checksums = self._default_verify_checksums
        if fill_cache is None:
            fill_cache = self._default_fill_cache
        return self.iterator(verify_checksums=verify_checksums,
                fill_cache=fill_cache, prefix=prefix).seekFirst().values()
    def approximateDiskSizes(self, *ranges):
        return self._impl.approximateDiskSizes(*ranges)
    def compactRange(self, start_key, end_key):
        return self._impl.compactRange(start_key, end_key)
def MemoryDB(*_args, **kwargs):
    """Build an in-memory DBInterface, primarily for unit testing.

    Positional arguments are ignored; keyword arguments are ignored except
    that create_if_missing must not be explicitly False. If you are doing
    anything serious, you definitely are more interested in the standard DB
    class.
    TODO: if the LevelDB C api ever allows for other environments, actually
        use LevelDB code for this, instead of reimplementing it all in
        Python.
    """
    create_if_missing = kwargs.get("create_if_missing", True)
    assert create_if_missing
    return DBInterface(_MemoryDBImpl(), allow_close=True)
class _IteratorMemImpl(object):
__slots__ = ["_data", "_idx"]
def __init__(self, memdb_data):
self._data = memdb_data
self._idx = -1
def valid(self):
return 0 <= self._idx < len(self._data)
def key(self):
return self._data[self._idx][0]
def val(self):
return self._data[self._idx][1]
def seek(self, key):
self._idx = bisect.bisect_left(self._data, (key, ""))
def seekFirst(self):
self._idx = 0
def seekLast(self):
self._idx = len(self._data) - 1
def prev(self):
self._idx -= 1
def next(self):
self._idx += 1
def close(self):
self._data = []
self._idx = -1
class _MemoryDBImpl(object):
__slots__ = ["_data", "_lock", "_is_snapshot"]
def __init__(self, data=None, is_snapshot=False):
if data is None:
self._data = []
else:
self._data = data
self._lock = threading.RLock()
self._is_snapshot = is_snapshot
def close(self):
with self._lock:
self._data = []
def put(self, key, val, **_kwargs):
if self._is_snapshot:
raise TypeError("cannot put on leveldb snapshot")
assert isinstance(key, str)
assert isinstance(val, str)
with self._lock:
idx = bisect.bisect_left(self._data, (key, ""))
if 0 <= idx < len(self._data) and self._data[idx][0] == key:
self._data[idx] = (key, val)
else:
self._data.insert(idx, (key, val))
def delete(self, key, **_kwargs):
if self._is_snapshot:
raise TypeError("cannot delete on leveldb snapshot")
with self._lock:
idx = bisect.bisect_left(self._data, (key, ""))
if 0 <= idx < len(self._data) and self._data[idx][0] == key:
del self._data[idx]
def get(self, key, **_kwargs):
with self._lock:
idx = bisect.bisect_left(self._data, (key, ""))
if 0 <= idx < len(self._data) and self._data[idx][0] == key:
return self._data[idx][1]
return None
# pylint: disable=W0212
def write(self, batch, **_kwargs):
if self._is_snapshot:
raise TypeError("cannot write on leveldb snapshot")
with self._lock:
for key, val in batch._puts.iteritems():
self.put(key, val)
for key in batch._deletes:
self.delete(key)
def iterator(self, **_kwargs):
# WARNING: huge performance hit.
# leveldb iterators are actually lightweight snapshots of the data. in
# real leveldb, an iterator won't change its idea of the full database
# even if puts or deletes happen while the iterator is in use. to
# simulate this, there isn't anything simple we can do for now besides
# just copy the whole thing.
with self._lock:
return _IteratorMemImpl(self._data[:])
def approximateDiskSizes(self, *ranges):
if self._is_snapshot:
raise TypeError("cannot calculate disk sizes on leveldb snapshot")
return [0] * len(ranges)
def compactRange(self, start_key, end_key):
pass
def snapshot(self):
if self._is_snapshot:
return self
with self._lock:
return _MemoryDBImpl(data=self._data[:], is_snapshot=True)
class _PointerRef(object):
__slots__ = ["ref", "_close", "_referrers", "__weakref__"]
def __init__(self, ref, close_cb):
self.ref = ref
self._close = close_cb
self._referrers = weakref.WeakValueDictionary()
def addReferrer(self, referrer):
self._referrers[id(referrer)] = referrer
def close(self):
ref, self.ref = self.ref, None
close, self._close = self._close, None
referrers = self._referrers
self._referrers = weakref.WeakValueDictionary()
for referrer in referrers.valuerefs():
referrer = referrer()
if referrer is not None:
referrer.close()
if ref is not None and close is not None:
close(ref)
__del__ = close
def _checkError(error):
if bool(error):
message = ctypes.string_at(error)
_ldb.leveldb_free(ctypes.cast(error, ctypes.c_void_p))
raise Error(message)
class _IteratorDbImpl(object):
    """Thin ctypes wrapper around a leveldb iterator handle."""
    __slots__ = ["_ref"]
    def __init__(self, iterator_ref):
        self._ref = iterator_ref
    def valid(self):
        return _ldb.leveldb_iter_valid(self._ref.ref)
    def key(self):
        # Pointer returned by the C API is owned by the iterator; copy the
        # bytes out before it can move.
        length = ctypes.c_size_t(0)
        val_p = _ldb.leveldb_iter_key(self._ref.ref, ctypes.byref(length))
        assert bool(val_p)
        return ctypes.string_at(val_p, length.value)
    def val(self):
        # Same ownership rules as key(): copy out immediately.
        length = ctypes.c_size_t(0)
        val_p = _ldb.leveldb_iter_value(self._ref.ref, ctypes.byref(length))
        assert bool(val_p)
        return ctypes.string_at(val_p, length.value)
    def seek(self, key):
        _ldb.leveldb_iter_seek(self._ref.ref, key, len(key))
        self._checkError()
    def seekFirst(self):
        _ldb.leveldb_iter_seek_to_first(self._ref.ref)
        self._checkError()
    def seekLast(self):
        _ldb.leveldb_iter_seek_to_last(self._ref.ref)
        self._checkError()
    def prev(self):
        _ldb.leveldb_iter_prev(self._ref.ref)
        self._checkError()
    def next(self):
        _ldb.leveldb_iter_next(self._ref.ref)
        self._checkError()
    def _checkError(self):
        # Raises Error (via the module-level _checkError) if the iterator
        # has entered an error state.
        error = ctypes.POINTER(ctypes.c_char)()
        _ldb.leveldb_iter_get_error(self._ref.ref, ctypes.byref(error))
        _checkError(error)
    def close(self):
        self._ref.close()
def DB(path, bloom_filter_size=10, create_if_missing=False,
       error_if_exists=False, paranoid_checks=False,
       write_buffer_size=(4 * 1024 * 1024), max_open_files=1000,
       block_cache_size=(8 * 1024 * 1024), block_size=(4 * 1024),
       default_sync=False, default_verify_checksums=False,
       default_fill_cache=True):
    """This is the expected way to open a database. Returns a DBInterface.

    @param path: filesystem path of the database directory
    @param bloom_filter_size: bits per key for the bloom filter policy
    @param create_if_missing: create the database if it does not exist
    @param error_if_exists: fail when the database already exists
    @param paranoid_checks: enable aggressive corruption checking
    @param write_buffer_size: memtable size in bytes
    @param max_open_files: max file handles LevelDB may keep open
    @param block_cache_size: LRU block cache size in bytes
    @param block_size: approximate uncompressed block size in bytes
    @param default_sync/default_verify_checksums/default_fill_cache:
        defaults applied by the returned DBInterface
    @raise Error: if LevelDB reports a problem opening the database
    """
    filter_policy = _PointerRef(
        _ldb.leveldb_filterpolicy_create_bloom(bloom_filter_size),
        _ldb.leveldb_filterpolicy_destroy)
    cache = _PointerRef(
        _ldb.leveldb_cache_create_lru(block_cache_size),
        _ldb.leveldb_cache_destroy)
    options = _ldb.leveldb_options_create()
    _ldb.leveldb_options_set_filter_policy(
        options, filter_policy.ref)
    _ldb.leveldb_options_set_create_if_missing(options, create_if_missing)
    _ldb.leveldb_options_set_error_if_exists(options, error_if_exists)
    _ldb.leveldb_options_set_paranoid_checks(options, paranoid_checks)
    _ldb.leveldb_options_set_write_buffer_size(options, write_buffer_size)
    _ldb.leveldb_options_set_max_open_files(options, max_open_files)
    _ldb.leveldb_options_set_cache(options, cache.ref)
    _ldb.leveldb_options_set_block_size(options, block_size)
    error = ctypes.POINTER(ctypes.c_char)()
    # BUGFIX: removed stray debug output ("print('PATH TYPE: ...')") that
    # polluted stdout on every database open.
    db = _ldb.leveldb_open(options, path, ctypes.byref(error))
    _ldb.leveldb_options_destroy(options)
    _checkError(error)
    # Tie the filter policy and cache lifetimes to the db handle so they are
    # closed together with it.
    db = _PointerRef(db, _ldb.leveldb_close)
    filter_policy.addReferrer(db)
    cache.addReferrer(db)
    return DBInterface(_LevelDBImpl(db, other_objects=(filter_policy, cache)),
                       allow_close=True, default_sync=default_sync,
                       default_verify_checksums=default_verify_checksums,
                       default_fill_cache=default_fill_cache)
class _LevelDBImpl(object):
__slots__ = ["_objs", "_db", "_snapshot"]
    def __init__(self, db_ref, snapshot_ref=None, other_objects=()):
        # other_objects are resources (filter policy, cache) whose lifetime
        # must track the db handle's.
        self._objs = other_objects
        self._db = db_ref
        self._snapshot = snapshot_ref
    def close(self):
        """Close the db handle and its associated resources (idempotent)."""
        db, self._db = self._db, None
        objs, self._objs = self._objs, ()
        if db is not None:
            db.close()
        for obj in objs:
            obj.close()
    def put(self, key, val, sync=False):
        """Write one key/value pair; raises Error (via _checkError) on failure.

        sync=True forces the write to be flushed to the OS before returning.
        """
        if self._snapshot is not None:
            raise TypeError("cannot put on leveldb snapshot")
        error = ctypes.POINTER(ctypes.c_char)()
        options = _ldb.leveldb_writeoptions_create()
        _ldb.leveldb_writeoptions_set_sync(options, sync)
        _ldb.leveldb_put(self._db.ref, options, key, len(key), val, len(val),
            ctypes.byref(error))
        _ldb.leveldb_writeoptions_destroy(options)
        _checkError(error)
    def delete(self, key, sync=False):
        """Delete one key; raises Error (via _checkError) on failure."""
        if self._snapshot is not None:
            raise TypeError("cannot delete on leveldb snapshot")
        error = ctypes.POINTER(ctypes.c_char)()
        options = _ldb.leveldb_writeoptions_create()
        _ldb.leveldb_writeoptions_set_sync(options, sync)
        _ldb.leveldb_delete(self._db.ref, options, key, len(key),
            ctypes.byref(error))
        _ldb.leveldb_writeoptions_destroy(options)
        _checkError(error)
def get(self, key, verify_checksums=False, fill_cache=True):
error = ctypes.POINTER(ctypes.c_char)()
options = _ldb.leveldb_readoptions_create()
_ldb.leveldb_readoptions_set_verify_checksums(options,
verify_checksums)
_ldb.leveldb_readoptions_set_fill_cache(options, fill_cache)
if self._snapshot is not None:
_ldb.leveldb_readoptions_set_snapshot(options, self._snapshot.ref)
size = ctypes.c_size_t(0)
val_p = _ldb.leveldb_get(self._db.ref, options, key, len(key),
ctypes.byref(size), ctypes.byref(error))
if bool(val_p):
val = ctypes.string_at(val_p, size.value)
_ldb.leveldb_free(ctypes.cast(val_p, ctypes.c_void_p))
else:
val = None
_ldb.leveldb_readoptions_destroy(options)
_checkError(error)
return val
# pylint: disable=W0212
def write(self, batch, sync=False):
if self._snapshot is not None:
raise TypeError("cannot delete on leveldb snapshot")
real_batch = _ldb.leveldb_writebatch_create()
for key, val in batch._puts.iteritems():
_ldb.leveldb_writebatch_put(real_batch, key, len(key), val,
len(val))
for key in batch._deletes:
_ldb.leveldb_writebatch_delete(real_batch, key, len(key))
error = ctypes.POINTER(ctypes.c_char)()
options = _ldb.leveldb_writeoptions_create()
_ldb.leveldb_writeoptions_set_sync(options, sync)
_ldb.leveldb_write(self._db.ref, options, real_batch,
ctypes.byref(error))
_ldb.leveldb_writeoptions_destroy(options)
_ldb.leveldb_writebatch_destroy(real_batch)
_checkError(error)
def iterator(self, verify_checksums=False, fill_cache=True):
options = _ldb.leveldb_readoptions_create()
if self._snapshot is not None:
_ldb.leveldb_readoptions_set_snapshot(options, self._snapshot.ref)
_ldb.leveldb_readoptions_set_verify_checksums(
options, verify_checksums)
_ldb.leveldb_readoptions_set_fill_cache(options, fill_cache)
it_ref = _PointerRef(
_ldb.leveldb_create_iterator(self._db.ref, options),
_ldb.leveldb_iter_destroy)
_ldb.leveldb_readoptions_destroy(options)
self._db.addReferrer(it_ref)
return _IteratorDbImpl(it_ref)
def approximateDiskSizes(self, *ranges):
if self._snapshot is not None:
raise TypeError("cannot calculate disk sizes on leveldb snapshot")
assert len(ranges) > 0
key_type = ctypes.c_void_p * len(ranges)
len_type = ctypes.c_size_t * len(ranges)
start_keys, start_lens = key_type(), len_type()
end_keys, end_lens = key_type(), len_type()
sizes = (ctypes.c_uint64 * len(ranges))()
for i, range_ in enumerate(ranges):
assert isinstance(range_, tuple) and len(range_) == 2
assert isinstance(range_[0], str) and isinstance(range_[1], str)
start_keys[i] = ctypes.cast(range_[0], ctypes.c_void_p)
end_keys[i] = ctypes.cast(range_[1], ctypes.c_void_p)
start_lens[i], end_lens[i] = len(range_[0]), len(range_[1])
_ldb.leveldb_approximate_sizes(self._db.ref, len(ranges), start_keys,
start_lens, end_keys, end_lens, sizes)
return list(sizes)
def compactRange(self, start_key, end_key):
assert isinstance(start_key, str) and isinstance(end_key, str)
_ldb.leveldb_compact_range(self._db.ref, start_key, len(start_key),
end_key, len(end_key))
def snapshot(self):
snapshot_ref = _PointerRef(
_ldb.leveldb_create_snapshot(self._db.ref),
lambda ref: _ldb.leveldb_release_snapshot(self._db.ref, ref))
self._db.addReferrer(snapshot_ref)
return _LevelDBImpl(self._db, snapshot_ref=snapshot_ref,
other_objects=self._objs)
|
import numpy as np
import matplotlib.pyplot as plt
from grid_world import standard_grid, negative_grid
# Discount factor applied to future rewards.
GAMMA = 0.9
# The four grid-world moves: up, down, left, right.
ALL_POSSIBLE_ACTIONS = ('U','D','L','R')
# This script implements the monte carlo exploring-starts method for finding optimal policy.
def print_values(V, g):
    """Pretty-print the state-value dict V as a grid laid out like g."""
    for row in range(g.width):
        print("----------------------------")
        for col in range(g.height):
            value = V.get((row, col), 0)
            # Positive values get a leading space so columns line up with
            # the minus sign of negative values.
            fmt = ' %.2f|' if value >= 0 else '%.2f|'
            print(fmt % value, end=" ")
        print("")
def print_policy(P, g):
    """Pretty-print the policy dict P (state -> action char) as a grid."""
    for row in range(g.width):
        print("----------------------------")
        # States without an action (terminals/walls) render as a blank cell.
        cells = [' %s |' % P.get((row, col), ' ') for col in range(g.height)]
        for cell in cells:
            print(cell, end=' ')
        print()
def play_game(grid, policy, manual=False):
    """Play one episode with exploring starts and return first-visit returns.

    The start state and the FIRST action are chosen uniformly at random
    (exploring starts); every later action follows `policy`.  Revisiting a
    state ends the episode early with a shrinking penalty so episodes stay
    finite.

    :param grid: grid-world object exposing actions, set_state,
        current_state, move and game_over (interface assumed from
        grid_world -- confirm there).
    :param policy: dict mapping state -> action.
    :param manual: pause for keyboard input at each step when True.
    :return: list of (state, action, G) triples in visit order.
    """
    if manual:
        input()
    print('**************Start One Game**************')
    start_states = list(grid.actions.keys())
    start_idx = np.random.choice(len(start_states))
    # exploring start method. Because MC needs to start at any state.
    grid.set_state(start_states[start_idx])
    if manual:
        print('start_states: ' + str(start_states))
        print_policy(policy, grid)
        print('start_idx: ' + str(start_idx))
        print('Start State: ' + str(grid.current_state()))
        print()
        input()
    s = grid.current_state()
    # First action is random regardless of the policy (exploring starts).
    a = np.random.choice(ALL_POSSIBLE_ACTIONS)
    print('First random Action: ' + str(a))
    # Triples of (state we were in, action we took there, reward on arrival).
    states_actions_rewards = [(s, a, 0)]
    seen_states = set()
    seen_states.add(grid.current_state())
    num_steps = 0
    while True:
        r = grid.move(a)
        s = grid.current_state()
        print('Moved to State: ' + str(s) + ' with reward: ' + str(r))
        num_steps += 1
        if s in seen_states:
            # hack so that we don't end up in an infinitely long episode
            # bumping into the wall repeatedly
            reward = -10 / num_steps
            print('State already seen, reward changed to : ' + str(reward))
            states_actions_rewards.append((s, None, reward))
            print('Game is Over')
            break
        elif grid.game_over():
            print('Game is Over')
            states_actions_rewards.append((s, None, r))
            break
        else:
            a = policy[s]
            states_actions_rewards.append((s, a, r))
            print('New Action According to Policy: ' + str(a))
        seen_states.add(s)
        print('state: ' + str(grid.current_state()))
        print('states_actions_rewards: ' + str(states_actions_rewards))
        print()
        if manual:
            input()
    # Walk the episode backwards accumulating discounted returns G.
    # The terminal entry is skipped (its return is 0 by definition).
    G = 0
    states_actions_returns = []
    first = True
    for s, a, r in reversed(states_actions_rewards):
        if first:
            first = False
        else:
            states_actions_returns.append((s, a, G))
        G = r + GAMMA * G
    states_actions_returns.reverse()  # we want it to be in order of state visited
    print('states_and_returns: ' + str(states_actions_returns))
    return states_actions_returns
def max_dict(d):
    """Return the (key, value) pair with the largest value in dict d.

    Returns (None, -inf) for an empty dict.  Kept as a helper because the
    argmax-over-actions operation is used throughout this script.
    """
    best_key, best_val = None, float('-inf')
    for key, val in d.items():
        if val > best_val:
            best_key, best_val = key, val
    return best_key, best_val
if __name__ == '__main__':
    # Monte Carlo Exploring Starts on a grid world with a steep per-step
    # cost, so the agent is pushed toward short paths.
    grid = negative_grid(step_cost=-0.9)
    print('rewards: ')
    print_values(grid.rewards, grid)
    # state -> action
    # initialize a random policy
    policy = {}
    for s in grid.actions.keys():
        policy[s] = np.random.choice(ALL_POSSIBLE_ACTIONS)
    # initialize Q(s,a) and returns
    Q = {}
    returns = {}  # dictionary of (state, action) -> list of returns we've received
    states = grid.all_states()
    for s in states:
        if s in grid.actions:
            Q[s] = {}
            for a in ALL_POSSIBLE_ACTIONS:
                Q[s][a] = 0  # needs to be initialized to something so we can argmax
                returns[(s, a)] = []
        else:
            # terminal states and states we can't otherwise get to
            pass
    # repeat until convergence
    deltas = []
    manual = False
    for t in range(2000):
        if t % 1000 == 0:
            print(t)
        # generate an episode using pi
        biggest_change = 0
        states_actions_returns = play_game(grid, policy, manual)
        seen_state_action_pairs = set()
        print('***Policy improvement***')
        for s, a, G in states_actions_returns:
            # check if we have already seen s
            # called 'first-visit' MC policy evaluation
            sa = (s, a)
            print('State: ' + str(s))
            print('Action: ' + str(a))
            if sa not in seen_state_action_pairs:
                old_q = Q[s][a]
                print('old_q: ' + str(Q[s][a]))
                returns[sa].append(G)
                # Q(s,a) is the running mean of all first-visit returns.
                Q[s][a] = np.mean(returns[sa])
                print('new Q[s][a]: ' + str(Q[s][a]))
                biggest_change = max(biggest_change, np.abs(old_q - Q[s][a]))
                print("biggest_change: " + str(biggest_change))
                if manual:
                    input()
                seen_state_action_pairs.add(sa)
            print('Q[s]: ' + str(Q[s]))
        deltas.append(biggest_change)
        # update policy: greedy with respect to the freshly updated Q
        for s in policy.keys():
            policy[s] = max_dict(Q[s])[0]
    # Plot per-episode largest Q change to eyeball convergence.
    plt.plot(deltas)
    plt.show()
    print('final policy: ')
    print_policy(policy, grid)
    # find V: a state's value is its best action value
    V = {}
    for s, Qs in Q.items():
        V[s] = max_dict(Q[s])[1]
    print('final values:')
    print_values(V, grid)
print_values(V, grid) |
#!/usr/bin/env python
import roslib
import rospy
import math
import tf
import geometry_msgs.msg
import turtlesim.srv
from sensor_msgs.msg import CameraInfo
# https://github.com/carla-simulator/scenario_runner/blob/master/srunner/challenge/autoagents/ros_agent.py
def build_camera_info(attributes):
    """
    Private function to compute camera info

    camera info doesn't change over time

    :param attributes: dict with 'width', 'height' and 'fov' (degrees).
    :return: a populated sensor_msgs CameraInfo message.
    """
    info = CameraInfo()
    # store info without header
    info.header.frame_id = "velodyne"
    width = int(attributes['width'])
    height = int(attributes['height'])
    info.width = width
    info.height = height
    info.distortion_model = 'plumb_bob'
    # Principal point at the image centre.
    cx = width / 2.0
    cy = height / 2.0
    # Focal length from the horizontal field of view (fov/2 in radians).
    fx = width / (2.0 * math.tan(float(attributes['fov']) * math.pi / 360.0))
    fy = fx
    info.K = [fx, 0, cx, 0, fy, cy, 0, 0, 1]
    info.D = [0, 0, 0, 0, 0]
    info.R = [1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]
    info.P = [fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1.0, 0]
    return info
if __name__ == '__main__':
    # Gimbal controller: tracks /map -> /base_link, publishes the gimbal
    # angle, broadcasts the gimbal frame and republishes static camera info.
    rospy.init_node('gimbal_control')
    listener = tf.TransformListener()
    gimbal_angle_pub = rospy.Publisher('/sim/gimbal_angle', geometry_msgs.msg.Vector3Stamped, queue_size=1)
    camera_info_pub = rospy.Publisher('/sim/camera_info', CameraInfo, queue_size=1)
    rate = rospy.Rate(100.0)
    br = tf.TransformBroadcaster()
    # Camera parameters never change, so the message is built once.
    attributes = dict()
    attributes['width'] = 1920
    attributes['height'] = 1080
    attributes['fov'] = 26.9914
    camera_info = build_camera_info(attributes)
    while not rospy.is_shutdown():
        try:
            # listener.waitForTransform('/map', '/base_link', rospy.Time.now(), rospy.Duration(2.0))
            now = rospy.Time.now()
            (trans, rot) = listener.lookupTransform('/map', '/base_link', rospy.Time(0))
            (roll, pitch, yaw) = tf.transformations.euler_from_quaternion(rot)
            msg = geometry_msgs.msg.Vector3Stamped()
            msg.header.stamp = now
            msg.vector.x = 0.0  # math.degrees(roll)
            msg.vector.y = 90.0  # math.degrees(pitch)
            msg.vector.z = 90 - math.degrees(yaw)
            gimbal_angle_pub.publish(msg)
            br.sendTransform(trans,
                             tf.transformations.quaternion_from_euler(0, math.radians(30), yaw),
                             now,
                             "gimbal",
                             "map")
        except tf.Exception:
            # BUG FIX: the original `continue` skipped rate.sleep(), so while
            # the transform was unavailable the loop busy-spun at 100% CPU.
            rate.sleep()
            continue
        rate.sleep()
        camera_info.header.stamp = rospy.Time.now()
        camera_info_pub.publish(camera_info)
|
def coordenadaZ(x, y, steps=3):
    """Shift a 2D coordinate along the diagonal.

    The original implementation incremented x and y one unit at a time in a
    fixed 3-iteration loop; this is equivalent and lets callers choose the
    shift (default 3 keeps the original behaviour).

    :param x: x coordinate.
    :param y: y coordinate.
    :param steps: number of unit increments to apply (default 3).
    :return: tuple (x + steps, y + steps).
    """
    return x + steps, y + steps
# Main program: read the starting coordinates from the user and print the
# diagonally shifted result.
x = int(input("Ingrese la coordenada del eje x: "))
y = int(input("Ingrese la coordenada del eje y: "))
print("Coordenadas finales:", coordenadaZ(x, y))
|
import os
import torch
import argparse
import numpy as np
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torchvision.utils import save_image
# Use GPU tensors when CUDA is available.
cuda = True if torch.cuda.is_available() else False
# Directory where generated sample grids are written during training.
os.makedirs('../images', exist_ok=True)
class Generator(nn.Module):
    # language=rst
    """
    Generator network of the generative adversarial network: maps a latent
    noise vector to an image through a stack of fully connected layers.
    """

    def __init__(self, image_shape, n_latent):
        # language=rst
        """
        Constructor of the generator network.

        :param image_shape: Dimensionality of the input images.
        :param n_latent: Number of neuron in latent vector space.
        """
        super(Generator, self).__init__()
        self.image_shape = image_shape
        # Input projection, two widening hidden stages, output projection.
        modules = [nn.Linear(n_latent, 128), nn.LeakyReLU(0.2, inplace=True)]
        for n_in, n_out in ((128, 256), (256, 512)):
            modules += [
                nn.Linear(n_in, n_out),
                nn.BatchNorm1d(n_out, 0.8),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        modules += [nn.Linear(512, int(np.prod(image_shape))), nn.Tanh()]
        self.model = nn.Sequential(*modules)

    def forward(self, z):
        # language=rst
        """
        Forward pass of the generator network.

        :param z: Sample(s) from the latent vector space.
        """
        flat = self.model(z)
        return flat.view(flat.size(0), *self.image_shape)
class Discriminator(nn.Module):
    # language=rst
    """
    Discriminator network of the generative adversarial network: maps a
    flattened image to a probability of it being real.
    """

    def __init__(self, image_shape):
        # language=rst
        """
        Constructor for discriminator network.
        """
        super(Discriminator, self).__init__()
        n_inputs = int(np.prod(image_shape))
        self.model = nn.Sequential(
            nn.Linear(n_inputs, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        )

    def forward(self, img):
        # Flatten each image to a vector before classification.
        flattened = img.view(img.size(0), -1)
        return self.model(flattened)
def main(image_size=(28, 28), channels=1, n_latent=50, batch_size=50, n_epochs=25, sample_interval=400):
    """Train a GAN on MNIST.

    :param image_size: (height, width) of the training images.
    :param channels: number of image channels (MNIST is grayscale: 1).
    :param n_latent: dimensionality of the latent noise vector.
    :param batch_size: minibatch size.
    :param n_epochs: passes over the training set.
    :param sample_interval: save a grid of generated samples every this many
        batches.
    """
    image_shape = (channels, image_size[0], image_size[1])
    # Initialize generator and discriminator networks.
    generator = Generator(image_shape, n_latent)
    discriminator = Discriminator(image_shape)
    # Initialize adversarial loss.
    loss = nn.BCELoss()
    if cuda:
        generator.cuda()
        discriminator.cuda()
        loss.cuda()
    # Configure data loader.
    # BUG FIX: the original passed 3-channel statistics ((0.5, 0.5, 0.5)) to
    # Normalize, but MNIST tensors have a single channel, which makes
    # transforms.Normalize fail; the stats are now sized from `channels`.
    os.makedirs('../../data/mnist', exist_ok=True)
    dataloader = torch.utils.data.DataLoader(
        datasets.MNIST(
            '../../data/mnist', train=True, download=True,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.5,) * channels, (0.5,) * channels)
            ])
        ), batch_size=batch_size, shuffle=True
    )
    # Optimizers (standard GAN hyperparameters).
    generator_optimizer = torch.optim.Adam(generator.parameters(), lr=0.0002, betas=(0.5, 0.999))
    discriminator_optimizer = torch.optim.Adam(discriminator.parameters(), lr=0.0002, betas=(0.5, 0.999))
    Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
    # ----------
    #  Training
    # ----------
    for epoch in range(n_epochs):
        for i, (images, _) in enumerate(dataloader):
            # Adversarial ground truths.
            valid = Tensor(images.size(0), 1).fill_(1.0)
            fake = Tensor(images.size(0), 1).fill_(0.0)
            # Configure input.
            real_imgs = images.type(Tensor)
            # -----------------
            #  Train Generator
            # -----------------
            generator_optimizer.zero_grad()
            # Sample noise as generator input.
            z = Tensor(np.random.normal(0, 1, (images.shape[0], n_latent)))
            # Generate a batch of images.
            generated = generator(z)
            # Loss measures generator's ability to fool the discriminator.
            g_loss = loss(discriminator(generated), valid)
            g_loss.backward()
            generator_optimizer.step()
            # ---------------------
            #  Train Discriminator
            # ---------------------
            discriminator_optimizer.zero_grad()
            # Measure discriminator's ability to classify real vs generated;
            # detach() keeps generator gradients out of this update.
            real_loss = loss(discriminator(real_imgs), valid)
            fake_loss = loss(discriminator(generated.detach()), fake)
            d_loss = (real_loss + fake_loss) / 2
            d_loss.backward()
            discriminator_optimizer.step()
            print(
                f'[Epoch {epoch}/{n_epochs}] [Batch {i}/{len(dataloader)}] '
                f'[D loss: {d_loss.item()}] [G loss: {g_loss.item()}]'
            )
            batches_done = epoch * len(dataloader) + i
            if batches_done % sample_interval == 0:
                save_image(generated.data[:25], f'../images/{batches_done}.png', nrow=5, normalize=True)
if __name__ == '__main__':
    # Command-line front end: each flag mirrors a keyword argument of main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_size', type=int, nargs=2, default=[28, 28])
    parser.add_argument('--channels', type=int, default=1)
    parser.add_argument('--n_latent', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--n_epochs', type=int, default=10)
    parser.add_argument('--sample_interval', type=int, default=400)
    args = parser.parse_args()
    main(
        image_size=args.image_size, channels=args.channels,
        n_latent=args.n_latent, batch_size=args.batch_size,
        n_epochs=args.n_epochs, sample_interval=args.sample_interval
    )
|
#import model_b.layers as layers
import layers_cc as layers
def baseline(x, view_type, nodropout_probability=None, gaussian_noise_std=None):
    """Build the baseline multi-view conv net and return the BIRADS softmax.

    :param x: input views tensor(s).
    :param view_type: which mammography views are present.
    :param nodropout_probability: kept for interface compatibility
        (the dropout layer is currently disabled below).
    :param gaussian_noise_std: std of input noise; None disables it.
    """
    # x,view_type = input
    if gaussian_noise_std is not None:
        x = layers.all_views_gaussian_noise_layer(x, gaussian_noise_std)
    # first conv sequence
    h = layers.all_views_conv_layer(x, view_type, 'conv1', number_of_filters=32, filter_size=[3, 3], stride=[2, 2])
    # Remaining conv sequences, each: max-pool then three conv layers.
    # (pool_stride, [(layer_name, n_filters, conv_stride), ...])
    conv_sequences = [
        ([3, 3], [('conv2a', 64, [2, 2]), ('conv2b', 64, [1, 1]), ('conv2c', 64, [1, 1])]),
        ([2, 2], [('conv3a', 128, [1, 1]), ('conv3b', 128, [1, 1]), ('conv3c', 128, [1, 1])]),
        ([2, 2], [('conv4a', 128, [1, 1]), ('conv4b', 128, [1, 1]), ('conv4c', 128, [1, 1])]),
        ([2, 2], [('conv5a', 256, [1, 1]), ('conv5b', 256, [1, 1]), ('conv5c', 256, [1, 1])]),
    ]
    for pool_stride, convs in conv_sequences:
        h = layers.all_views_max_pool(h, view_type, stride=pool_stride)
        for layer_name, n_filters, conv_stride in convs:
            h = layers.all_views_conv_layer(
                h, view_type, layer_name,
                number_of_filters=n_filters, filter_size=[3, 3], stride=conv_stride)
    # Pool, flatten, and fully connected layers
    h = layers.all_views_global_avg_pool(h, view_type)
    h = layers.all_views_flattening_layer(h, view_type)  # flattening and concatenation
    h = layers.fc_layer(h, number_of_units=1024)
    # h = layers.dropout_layer(h, nodropout_probability)
    y_prediction_birads = layers.softmax_layer(h, number_of_outputs=3)
    print(y_prediction_birads)
    return y_prediction_birads
class BaselineBreastModel:
    """Thin wrapper that builds the baseline graph at construction time and
    exposes the BIRADS prediction tensor as an attribute."""

    def __init__(self, x, view_type, nodropout_probability=0.5, gaussian_noise_std=1):
        prediction = baseline(x, view_type, nodropout_probability, gaussian_noise_std)
        self.y_prediction_birads = prediction
from flask import Flask, render_template, Response

app = Flask(__name__)


class Config(object):
    # Enable debug mode (auto-reload, interactive tracebacks); dev only.
    DEBUG = True


app.config.from_object(Config)


@app.route('/index')
def index():
    """Render the landing page."""
    return render_template('index.html')


# BUG FIX: the original module defined add_user and user_list three times
# each.  Re-registering a route with a view function of the same name makes
# Flask raise "View function mapping is overwriting an existing endpoint
# function" at import time, so only one definition of each is kept.
@app.route('/add_user')
def add_user():
    """Placeholder handler for adding a user."""
    return Response('........')


@app.route('/user_list')
def user_list():
    """Placeholder handler for listing users."""
    return Response('........')


if __name__ == '__main__':
    app.run()
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView

# URL routes for the todos app; view names are strings resolved against the
# 'todos.views' module prefix.
# NOTE(review): patterns() and string view references were deprecated in
# Django 1.8 and removed in 1.10 -- this module assumes an old Django;
# confirm the project's Django version before upgrading.
urlpatterns = patterns('todos.views',
    url(r'^$', 'index', name='index'),
    url(r'^lists/$', 'lists', name='lists'),
    url(r'^lists/(?P<pk>\d+)/$', 'list', name='list'),
    url(r'^item/(?P<pk>\d+)/$', 'item', name='item'),
)
|
import time
import numpy as np
from .base_env import BaseEnv
from .blue_team.blue_base import BlueTeam
from .red_team.red_base import RedTeam # noqa
from .sensors import Sensors
from .interaction_manager import InteractionManager
class EnhanceEnv(BaseEnv):
    """Simulation environment that loads the map URDF and runs the blue
    (and, when enabled, red) team inside the physics engine provided by
    BaseEnv (exposed as ``self.p`` -- presumably pybullet; confirm in
    BaseEnv)."""

    def __init__(self, config):
        # Initialise the base environment
        super().__init__(config)
        self.config = config
        # Load the environment
        # NOTE(review): both branches below build the identical path, so the
        # 'detailed_model' flag currently has no effect -- confirm whether
        # the detailed branch should load a different URDF file.
        if self.config['simulation']['detailed_model']:
            path = '/'.join([
                self.config['urdf_data_path'],
                self.config['simulation']['map_to_use'],
                'environment_collision_free.urdf'
            ])
        else:
            path = '/'.join([
                self.config['urdf_data_path'],
                self.config['simulation']['map_to_use'],
                'environment_collision_free.urdf'
            ])
        # Map is rotated upright (pi/2 about x) and fixed in place.
        self.p.loadURDF(path, [0, 0, 0],
                        self.p.getQuaternionFromEuler([np.pi / 2, 0, 0]),
                        flags=self.p.URDF_USE_MATERIAL_COLORS_FROM_MTL,
                        useFixedBase=True)
        # Initial step of blue and red team
        self._initial_team_setup()
        # Initialize interaction manager
        self.sensors = Sensors(self.p)
        self.interaction_manager = InteractionManager(config)
        return None

    def _initial_team_setup(self):
        """Instantiate the teams (red team currently disabled)."""
        # Blue team
        self.blue_team = BlueTeam(self.p, self.config)
        # # Red team
        # self.red_team = RedTeam(self.p, self.config)
        return None

    def reset(self):
        """Let the simulation settle for 50 steps at ~240 Hz."""
        for i in range(50):
            time.sleep(1 / 240)
            self.p.stepSimulation()

    def step(self, blue_actions, red_actions):
        """Allocate actions to the teams and roll the simulation.

        :param blue_actions: dict with 'uav' and 'ugv' action entries.
        :param red_actions: currently unused (red team disabled).
        :return: mean simulation step rate (steps per second).
        """
        # Roll the actions
        done_rolling_actions = False
        simulation_count = 0
        start_time = time.time()
        current_time = 0
        duration = self.config['simulation']['total_time']
        # Perform action allocation for blue and red team respectively
        self.blue_team.action_manager.perform_action_allocation(
            blue_actions['uav'], blue_actions['ugv'])
        # self.red_team.action_manager.perform_action_allocation(
        #     red_actions['uav'], red_actions['ugv'])
        step_time = []
        # Run the simulation until the actions finish or time runs out.
        while not done_rolling_actions and current_time <= duration:
            simulation_count += 1
            current_time = time.time() - start_time
            # Run the blue team (these can be made parallel)
            action_time = time.time()
            done_rolling_actions = self.blue_team.execute()
            # Perform a step in simulation to update
            self.base_env_step()
            step_time.append(time.time() - action_time)
            # self.sensors.get_camera_image([0, 0, 10], image_type='rgb')
            # # Run the red team (these can be made parallel)
            # self.red_team.execute()
            # # Perform a step in simulation to update
            # self.base_env_step()
            # # Interaction Manager (this over-rides the given actions)
            # self.interaction_manager.update_actions(self.blue_team,
            #                                         self.red_team)
        # Perform a step in simulation to update
        # self.base_env_step()
        # TODO: Need to implement state, action, and reward
        return 1 / np.mean(step_time)

    def get_reward(self):
        """Update reward of all the agents
        """
        # Calculate the shared mission reward and assign it to every vehicle.
        total_reward = self.reward.mission_reward(self.state_manager.ugv,
                                                  self.state_manager.uav,
                                                  self.config)
        for vehicle in self.state_manager.uav:
            vehicle.reward = total_reward
        for vehicle in self.state_manager.ugv:
            vehicle.reward = total_reward
        return total_reward

    def check_episode_done(self):
        """Episode ends when time runs out or the goal has been found."""
        done = False
        if self.current_time >= self.config['simulation']['total_time']:
            done = True
        if self.state_manager.found_goal:
            done = True
        return done
|
# coding=utf-8
import re
class BaseSite:
    """Fallback site handler: matches no URLs and extracts no site params."""

    url_patterns = []

    def __init__(self):
        super(BaseSite, self).__init__()

    def process(self, url, html):
        """Return the page payload with an empty site-specific params field."""
        return {
            'url': url,
            'html': html,
            'params': ''
        }
class Douban(BaseSite):
    """Handler for movie.douban.com pages."""

    url_patterns = ['https://movie.douban.com']

    def __init__(self):
        # BUG FIX: the original called super(BaseSite, self).__init__(),
        # which starts the MRO lookup *after* BaseSite and therefore skips
        # BaseSite.__init__ entirely; the zero-argument form is correct.
        super().__init__()

    def process(self, url, html):
        """Return the page payload tagged with the 'douban' parser name."""
        params = {
            'url': url,
            'html': html,
            'params': 'douban'
        }
        return params
class Tencent(BaseSite):
    """Handler for www.qq.com pages (http or https)."""

    url_patterns = ['https?://www.qq.com']

    def __init__(self):
        # BUG FIX: the original called super(BaseSite, self).__init__(),
        # which starts the MRO lookup *after* BaseSite and therefore skips
        # BaseSite.__init__ entirely; the zero-argument form is correct.
        super().__init__()

    def process(self, url, html):
        """Return the page payload tagged with the 'qq' parser name."""
        params = {
            'url': url,
            'html': html,
            'params': 'qq'
        }
        return params
class Factory:
    """Registry mapping a URL to the first site handler whose pattern matches."""

    def __init__(self):
        self.site_list = []

    def init_factory(self):
        """Register all known site handlers."""
        self.site_list.extend([Douban(), Tencent()])

    def get_site(self, url):
        """Return the first registered handler matching url, else a BaseSite."""
        matches = (
            site
            for site in self.site_list
            for pattern in site.url_patterns
            if re.search(pattern, url)
        )
        return next(matches, BaseSite())
if __name__ == '__main__':
    # Smoke test: route a QQ URL through the factory and print the
    # handler-specific params it produces.
    factory = Factory()
    factory.init_factory()
    url = 'https://www.qq.com'
    html = '<html></html>'
    site = factory.get_site(url)
    params = site.process(url, html)
    print(params)
"""
My constellation of social media sites that can be fetched and published.
"""

# Profile URLs for accounts that are currently maintained, keyed by site name.
ACTIVELY_USED = {
    'Delicious': 'http://delicious.com/palewire',
    'Digg': 'http://digg.com/users/palewire',
    'Facebook': 'http://www.facebook.com/palewire',
    'Flickr': 'http://www.flickr.com/photos/77114776@N00/',
    'Flixster': 'http://www.flixster.com/user/palewire/',
    'Github': 'http://github.com/palewire/',
    'Kiva': 'http://www.kiva.org/lender/palewire',
    'Last.FM': 'http://www.last.fm/user/palewire',
    'LinkedIn': 'http://www.linkedin.com/pub/ben-welsh/9/3b4/256',
    'Readernaut': 'http://readernaut.com/palewire/',
    'Twitter': 'http://twitter.com/palewire',
}
import tabula
import pandas as pd
# import time
# start_time = time.time()

# Base name (without extension) of the PDF to extract tables from.
tb_name = "test"
# Extract every table on every page; tabula returns a *list* of DataFrames.
df = tabula.read_pdf(tb_name + ".pdf", pages='all', multiple_tables=True)
# NOTE(review): pd.DataFrame(list_of_dataframes) nests the frames into a
# single-column object frame rather than concatenating the rows --
# pd.concat(df, ignore_index=True) is probably what was intended; confirm
# against the downstream consumer before changing.
df = pd.DataFrame(df)
# df.to_excel('test.xlsx', header=False, index=False)
# df=pd.read_csv(tb_name+".csv", error_bad_lines=False, header=None)
# res = pd.DataFrame()
# n=len(df)
# arr = df.values
# idx = []
# for i in range(0,n):
#     t=str(arr[i][0])[0:2]
#     if (t!="31"):
#         idx.append(i)
# df.drop(df.index[idx], inplace=True)
# df.to_csv(tb_name+'.csv', header=False, index=False)
# Printing Time
# print("--- %s seconds ---" % (time.time() - start_time))
"""
This package describe all periodic tasks
"""
from celery import shared_task
from celery.utils import log
from sqlalchemy.orm import Session
from app.helper import get_config
from app.models import Microcontroller
from app.tasks import DBTask
from app.tasks.normal_tasks import check_host
_configs = get_config()
logger = log.get_task_logger(__name__)
@shared_task(bind=True, base=DBTask, ignore_result=True)
def actuators_check(self):
    """Periodic task: enqueue an online-check for every known microcontroller."""
    db: Session = self.get_db_session()
    _hosts = Microcontroller.get_all(db=db)
    logger.info(f'check if {len(_hosts)} Hosts are online')
    # Fan out: every host is probed by its own asynchronous check_host task.
    for microcontroller in _hosts:
        check_host.delay(microcontroller.url)
|
import os
from abc import ABC, abstractmethod
import matplotlib as mpl
if os.environ.get('DISPLAY','') == '':
print('no display found')
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import sys
import pickle
from keras.models import load_model
from sklearn.metrics import roc_curve, auc
class NetworkTrain(ABC):
    '''Abstract trainer base class: subclasses specialise training for 3-axis
    input or z-axis input, while the save/plot helpers here are shared.'''

    def save_folder_w_datetime(self, date_time):
        """Build a timestamped save-folder path for this training run."""
        date_now = str(date_time.date())
        time_now = str(date_time.time())
        sf = "saved_models/model_" + date_now + "_" + time_now + "_" + os.path.basename(__file__).split('.')[0]
        return sf

    def save_model(self, save_folder, model, model_fit):
        """
        Output: Saves the Keras model (.h5) and a dictionary of model
        training history as a pickle file.
        """
        model.save(save_folder + '/savedmodel' + '.h5')
        with open(save_folder + '/history.pickle', 'wb') as f:
            pickle.dump(model_fit.history, f)

    def plot_accuracy(self, model_fit, save_folder, history=None):
        """
        Output: Plots and saves graph of accuracy at each epoch.

        `history` overrides model_fit.history when given (e.g. metrics
        reloaded from a pickle instead of a live training run).
        """
        # Metric names follow the two-output model heads: call type ('aud')
        # and focal type ('foctype').
        train_accuracy_names = ['output_aud_binary_accuracy', 'output_foctype_binary_accuracy']
        val_accuracy_names = ['val_output_aud_binary_accuracy', 'val_output_foctype_binary_accuracy']
        plot_titles = ['Call Types', 'Focal Types']
        (fig, ax) = plt.subplots(2, 1, figsize=(8, 8))
        for idx, (train_binary_acc, val_binary_acc, plot_title) in enumerate(zip(train_accuracy_names, val_accuracy_names,
                                                                                 plot_titles)):
            train_acc = history[train_binary_acc] if history is not None else model_fit.history[train_binary_acc]
            val_acc = history[val_binary_acc] if history is not None else model_fit.history[val_binary_acc]
            epoch_axis = np.arange(1, len(train_acc) + 1)
            ax[idx].set_title('Train vs Validation Accuracy for ' + plot_title)
            ax[idx].plot(epoch_axis, train_acc, 'b', label='Train Acc')
            ax[idx].plot(epoch_axis, val_acc, 'r', label='Val Acc')
            ax[idx].set_xlim([1, len(train_acc)])
            # Roughly 10 x-ticks regardless of the number of epochs.
            ax[idx].set_xticks(np.arange(min(epoch_axis), max(epoch_axis) + 1, round((len(train_acc) / 10) + 0.5)))
            ax[idx].legend(loc='lower right')
            ax[idx].set_ylabel('Accuracy')
            ax[idx].set_xlabel('Epochs')
        plt.savefig(save_folder + '/accuracy.png')
        plt.show()
        plt.close()

    def plot_loss(self, model_fit, save_folder, history=None):
        """
        Output: Plots and saves graph of loss at each epoch.

        Mirrors plot_accuracy but for the per-output loss curves.
        """
        train_loss_names = ['output_aud_loss', 'output_foctype_loss']
        val_loss_names = ['val_output_aud_loss', 'val_output_foctype_loss']
        plot_titles = ['Call Types', 'Focal Types']
        (fig, ax) = plt.subplots(2, 1, figsize=(8, 8))
        for idx, (train_op_loss, val_op_loss, plot_title) in enumerate(
                zip(train_loss_names, val_loss_names,
                    plot_titles)):
            train_loss = history[train_op_loss] if history is not None else model_fit.history[train_op_loss]
            val_loss = history[val_op_loss] if history is not None else model_fit.history[val_op_loss]
            epoch_axis = np.arange(1, len(train_loss) + 1)
            ax[idx].set_title('Train vs Validation Loss for ' + plot_title)
            ax[idx].plot(epoch_axis, train_loss, 'b', label='Train Loss')
            ax[idx].plot(epoch_axis, val_loss, 'r', label='Val Loss')
            ax[idx].set_xlim([1, len(train_loss)])
            ax[idx].set_xticks(np.arange(min(epoch_axis), max(epoch_axis) + 1, round((len(train_loss) / 10) + 0.5)))
            ax[idx].legend(loc='upper right')
            ax[idx].set_ylabel('Loss')
            ax[idx].set_xlabel('Epochs')
        plt.savefig(save_folder + '/loss.png')
        plt.show()
        plt.close()

    def plot_ROC(self, model, x, y, save_folder):
        """
        Output: Plots and saves overall ROC graph
        for the validation set (one subplot per model output, micro-averaged
        by flattening predictions and labels).
        """
        predicted = model.predict(x)
        plot_titles = ['Call Types', 'Focal Types']
        (fig, ax) = plt.subplots(2, 1, figsize=(8, 8))
        for i in range(len(predicted)):
            fpr, tpr, thresholds = roc_curve(y[i].ravel(), predicted[i].ravel(), pos_label=None)
            roc_auc = auc(fpr, tpr)
            ax[i].set_title('Receiver Operating Characteristic Overall for ' + plot_titles[i])
            ax[i].plot(fpr, tpr, 'b', label='AUC = %0.3f' % roc_auc)
            ax[i].legend(loc='lower right')
            ax[i].plot([0, 1], [0, 1], 'r--')
            ax[i].set_xlim([0.0, 1.0])
            ax[i].set_ylim([0.0, 1.0])
            ax[i].set_ylabel('True Positive Rate')
            ax[i].set_xlabel('False Positive Rate')
        plt.savefig(save_folder + '/ROC.png')
        plt.show()
        plt.close()

    def plot_class_ROC(self, model, x_val, y_val, save_folder):
        """
        Output: Plots and saves one ROC graph per class
        for the validation set.

        Assumes y_val[i] is one-hot over the class axis (last dim) --
        TODO confirm against the data pipeline.
        """
        class_names = [['GIG', 'SQL', 'GRL', 'GRN', 'SQT', 'MOO', 'RUM', 'WHP'], ['NON-FOC', 'NOTDEF', 'FOC']]
        predicted = model.predict(x_val)
        for i in range(len(y_val)):
            for class_idx in range(len(class_names[i])):
                fpr, tpr, thresholds = roc_curve(y_val[i][:, :, class_idx].ravel(),
                                                 predicted[i][:, :, class_idx].ravel(), pos_label=None)
                roc_auc = auc(fpr, tpr)
                plt.title('Receiver Operating Characteristic ' + class_names[i][class_idx])
                plt.plot(fpr, tpr, 'b', label='AUC = %0.3f' % roc_auc)
                plt.legend(loc='lower right')
                plt.plot([0, 1], [0, 1], 'r--')
                plt.xlim([0.0, 1.0])
                plt.ylim([0.0, 1.0])
                plt.ylabel('True Positive Rate')
                plt.xlabel('False Positive Rate')
                plt.savefig(os.path.join(save_folder, 'class_ROC_' + class_names[i][class_idx] + '.png'))
                plt.show()
                plt.close()

    def save_arch(self, model, save_folder):
        """Write the model's layer summary to a text file in save_folder."""
        # NOTE: 'archiecture.txt' is a typo but it is a runtime filename;
        # renaming it would break anything reading existing runs.
        with open(save_folder + '/archiecture.txt', 'w') as f:
            # Pass the file handle in as a lambda function to make it callable
            model.summary(print_fn=lambda x: f.write(x + '\n'))

    def count_labels(self, file):
        """Count occurrences of each sound label in a one-hot .npy file."""
        count = {}
        sound_label = {0: 'GIG', 1: 'SQL', 2: 'GRL', 3: 'GRN', 4: 'SQT', 5: 'MOO', 6: 'RUM', 7: 'WHP', 8: 'OTH'}
        y = np.load(file)
        # Indices along the class axis where the one-hot value is 1.
        data = np.where(y == 1)[2]
        for label_idx in data:
            label = sound_label[label_idx]
            count[label] = count.get(label, 0) + 1
        return count

    def create_save_folder(self, save_folder):
        """Create the save folder (and parents) if it does not exist yet."""
        if not os.path.isdir(save_folder):
            os.makedirs(save_folder)

    @abstractmethod
    def train_network(self):
        # Subclasses run the actual training loop.
        pass

    @abstractmethod
    def create_model(self, xtrain, filters, gru_units, dense_neurons, dropout):
        # Subclasses build and return the Keras model to train.
        pass
|
"""
sim_anneal.py
Author: Olivier Vadiavaloo
Description:
This file contains the code implementing the simulated annealing
algorithm.
"""
import copy as cp
from time import time
import random
import multiprocessing as mp
from math import exp
from src.dsl import *
from src.Evaluation.evaluation import *
from src.Optimizer.optimizer import *
from src.Optimizer.start_optimizer import *
from src.SA.plotter import *
from statistics import *
class SimulatedAnnealing:
    """Program synthesizer based on simulated annealing (SA).

    Repeatedly mutates a candidate program, evaluates it with an evaluation
    function, and accepts or rejects the mutation using the SA acceptance
    criterion under a decaying temperature.  Candidate solutions can
    optionally be batched through an external optimizer.  Score histories
    are collected so runs can be plotted/saved afterwards.
    """

    def __init__(self, time_limit, logger, optimizer, program_mutator):
        # time_limit: wall-clock budget (seconds) for synthesize()
        # logger: sink exposing log()/log_program() for progress reports
        # optimizer: optional optimizer object; None disables optimization
        # program_mutator: supplies generate_random() and mutate()
        self.time_limit = time_limit
        self.logger = logger
        if optimizer is None:
            self.run_optimizer = False
        else:
            self.run_optimizer = True
        self.optimizer = optimizer
        self.program_mutator = program_mutator

    def reduce_temp(self, current_t, epoch):
        """Cooling schedule: t / (1 + alpha * epoch)."""
        return current_t / (1 + self.alpha * epoch)

    def is_accept(self, j_diff, temp):
        """Metropolis acceptance test for a score change of j_diff at temp."""
        rand = random.uniform(0, 1)
        # beta scales the score difference before the Boltzmann factor
        if rand < min(1, exp(j_diff * (self.beta / temp))):
            return True
        return False

    def check_new_best(self, candidate, candidate_eval, candidate_scores, best_eval, eval_funct):
        """Decide whether *candidate* beats the best program found so far.

        Candidates scoring above eval_funct.STRONG_SCORE are re-evaluated
        with a longer run before acceptance, to filter lucky evaluations.

        :returns: (is_new_best, eval, scores) — eval/scores may come from
            the more accurate re-evaluation.
        """
        if candidate_eval > best_eval:
            if candidate_eval > eval_funct.STRONG_SCORE:
                print('before run longer', candidate_eval)
                more_accurate_scores, more_accurate_eval = self.run_longer_eval(eval_funct, candidate)
                print('after run longer', more_accurate_eval)
                if more_accurate_eval > best_eval:
                    return True, more_accurate_eval, more_accurate_scores
                else:
                    return False, more_accurate_eval, more_accurate_scores
            return True, candidate_eval, candidate_scores
        return False, candidate_eval, candidate_scores

    def init_attributes(self, eval_funct):
        """Reset SA hyper-parameters and the per-run bookkeeping dicts."""
        self.alpha = 0.9   # cooling rate (see reduce_temp)
        self.beta = 100    # acceptance-probability scale (see is_accept)
        self.ppool = []    # for storing solutions to be optimized
        if self.run_optimizer:
            if self.optimizer.get_parallel():
                # Change this number to change the number of
                # solutions to be optimized in parallel
                self.ppool_max_size = 5
            else:
                self.ppool_max_size = 1

        # Initialize variables used to generate plots later on
        self.scores_dict = {}
        self.best_pscore_dict = {}
        self.optimized_pscore_dict = {}
        self.unoptimized_pscore_dict = {}

    def get_timestamp(self):
        """Minutes elapsed since synthesize() started, rounded to 2 dp."""
        return round((time() - self.start) / 60, 2)

    def synthesize(
        self,
        current_t,
        final_t,
        eval_funct,
        plot_filename,
        option=1,
        verbose_opt=False,
        generate_plot=False,
        save_data=False
    ):
        """
        This method implements the simulated annealing algorithm that can be used
        to generate strategies given a grammar and an evaluation function.
            - CFG: grammar
            - current_t: initial temperature
            - final_t: final temperature
            - option: 1 or 2
                -- Option 1: Does not generate a random program each time simulated annealing
                    finishes to run. More likely to get stuck on a local min/max.
                -- Option 2: Generates a random program after each simulated annealing run.
        """
        self.start = time()
        self.init_attributes(eval_funct)
        initial_t = current_t
        iterations = 0
        self.closed_list = {}  # maps program string -> (eval, timestamp); avoids re-evaluation

        # Option 2: Generate random program only once
        if option == 2:
            best = self.program_mutator.generate_random(self.closed_list)
            timestamp = self.get_timestamp()
            scores, best_eval = eval_funct.evaluate(best, verbose=True)
            self.closed_list[best.to_string()] = (best_eval, timestamp)
            eval_funct.set_best(best, best_eval, scores)  # update best score in eval object

            # Set baseline for optimizer
            if self.run_optimizer:
                self.optimizer.set_baseline_eval(best_eval)

            if best_eval != Evaluation.MIN_SCORE:
                self.best_pscore_dict[iterations] = (best_eval, timestamp)
        else:
            best = None
            best_eval = None

        '''
        Run Simulated Annealing until time limit is reached
        If option 1 is specified, generate a random program for
        the initial program. If option 2 is specified, use best
        as the initial program.
        '''
        while time() - self.start < self.time_limit:
            current_t = initial_t  # restart the temperature for each outer run
            timestamp = self.get_timestamp()

            # Option 1: Generate random program and compare with best
            if option == 1:
                current = self.program_mutator.generate_random(self.closed_list)
                scores, current_eval = eval_funct.evaluate(current, verbose=True)
                self.closed_list[current.to_string()] = (current_eval, timestamp)  # save to closed_list

                if best is not None:
                    new_best, current_eval, scores = self.check_new_best(current, current_eval, scores, best_eval, eval_funct)

                if best is None or new_best:
                    best, best_eval = current, current_eval
                    eval_funct.set_best(best, best_eval, scores)  # update best score in eval object

                    # Set baseline for optimizer
                    if self.run_optimizer:
                        self.optimizer.set_baseline_eval(best_eval)

                    if best_eval != Evaluation.MIN_SCORE:
                        self.best_pscore_dict[iterations] = (best_eval, timestamp)

            # Option 2: Assign current to best solution in previous iteration
            elif option == 2 and best is not None:
                current = best
                current_eval = best_eval

            if verbose_opt or iterations == 0:
                # Log initial program to file
                pdescr = {
                    'header': 'Initial Program',
                    'psize': current.get_size(),
                    'score': current_eval,
                    'timestamp': timestamp
                }
                self.logger.log_program(current.to_string(), pdescr)
                self.logger.log('Scores: ' + str(scores).strip('()'), end='\n\n')

            if current_eval != Evaluation.MIN_SCORE:
                self.scores_dict[iterations] = (current_eval, timestamp)

            iterations += 1

            # Call simulated annealing
            best, best_eval, epochs = self.simulated_annealing(
                current_t,
                final_t,
                current,
                best,
                current_eval,
                best_eval,
                iterations,
                eval_funct,
                verbose_opt,
            )
            iterations += epochs

        self.logger.log('Running Time: ' + str(round(time() - self.start, 2)) + 'seconds')
        self.logger.log('Iterations: ' + str(iterations), end='\n\n')

        # Log best program
        pdescr = {
            'header': 'Best Program Found By SA',
            'psize': best.get_size(),
            'score': best_eval,
            'timestamp': self.closed_list[best.to_string()][1]
        }
        self.logger.log_program(best.to_string(), pdescr)

        # Plot data if required
        if generate_plot:
            self.plot(plot_filename)

        # Save data
        if save_data:
            self.save(plot_filename)

        return best, best_eval

    def save(self, plot_filename):
        """Persist the collected score histories as .dat files."""
        plotter = Plotter()
        data_filenames_dict = plotter.construct_dat_filenames(plot_filename)

        # Bundle values of dict into a list
        data_filenames = []
        data_filenames.extend(list(data_filenames_dict.values()))

        # Optimizer runs also produce (un)optimized score histories.
        if self.run_optimizer:
            plotter.save_data(
                self.scores_dict,
                self.best_pscore_dict,
                self.unoptimized_pscore_dict,
                self.optimized_pscore_dict,
                names=data_filenames
            )
        else:
            plotter.save_data(
                self.scores_dict,
                self.best_pscore_dict,
                names=data_filenames
            )

    def plot(self, plot_filename):
        """Plot current/best program scores against total iterations."""
        plotter = Plotter()  # Plotter object
        plot_names = {
            'x': 'Iterations',
            'y': 'Program Score',
            'z': 'Iterations',
            'title': 'SA Program Scores vs Total Iterations',
            'filename': plot_filename,
            'legend': ['current program', 'best program', 'unoptimized program']
        }
        plotter.plot_from_data(self.scores_dict, self.best_pscore_dict, names=plot_names)  # plot all scores

    def run_longer_eval(self, eval_funct, program):
        """Re-evaluate *program* with RUN_LONGER_TOTAL_GAMES games.

        Temporarily swaps the evaluation configuration, runs a parallel
        evaluation, then restores the original configuration.
        """
        # Change the evaluation object's configuration
        new_config_attributes = form_basic_attr_dict(
            False,
            eval_funct.get_random_var_bound(),
            eval_funct.get_confidence_value(),
            eval_funct.RUN_LONGER_TOTAL_GAMES,
            eval_funct.get_best()[1],
            eval_funct.MIN_SCORE,
            None
        )
        original_eval_config = eval_funct.change_config("NORMAL", new_config_attributes)
        scores, program_eval = eval_funct.evaluate_parallel(program, verbose=True)
        eval_funct.set_config(original_eval_config)
        return scores, program_eval

    def simulated_annealing(
        self,
        current_t,
        final_t,
        current,
        best,
        current_eval,
        best_eval,
        iterations,
        eval_funct,
        verbose_opt,
    ):
        """One annealing run: mutate/evaluate until current_t <= final_t.

        :returns: (best, best_eval, epochs_run).
        """
        epoch = 0
        mutations = 0
        while current_t > final_t:
            best_updated = False
            header = 'Mutated Program'
            timestamp = self.get_timestamp()

            # Mutate current program
            candidate = self.program_mutator.mutate(cp.deepcopy(current), self.closed_list)
            mutations += 1

            # Evaluate the mutated program
            scores, candidate_eval = eval_funct.evaluate(candidate, verbose=True)

            # Run optimizer if flag was specified
            if self.run_optimizer:
                self.ppool.append((candidate, candidate_eval, scores))
                # print('self.ppool_len', len(self.ppool))
                if len(self.ppool) >= self.ppool_max_size:
                    unoptimized_candidate_eval = candidate_eval
                    candidate, candidate_eval, scores, is_optimized = start_optimizer(
                        self.optimizer,
                        self.ppool,
                        self.logger,
                        self.get_timestamp,
                        verbose=verbose_opt
                    )
                    if is_optimized:
                        timestamp = self.get_timestamp()
                        self.unoptimized_pscore_dict[iterations + epoch] = (unoptimized_candidate_eval, timestamp)
                        self.optimized_pscore_dict[iterations + epoch] = (candidate_eval, timestamp)
                    self.ppool = []

            new_best, candidate_eval, scores = self.check_new_best(candidate, candidate_eval, scores, best_eval, eval_funct)
            if new_best:
                header = 'New Best Program'
                best_updated = True
                best, best_eval = candidate, candidate_eval

                # Set the best program and its score in eval_funct
                # Since triage is used, the best score in eval_funct must be updated
                eval_funct.set_best(best, best_eval, scores)

                # Update the baseline score of the optimizer
                if self.run_optimizer:
                    self.optimizer.set_baseline_eval(best_eval)
                self.best_pscore_dict[iterations + epoch] = (best_eval, timestamp)

            # If candidate program does not raise an error, store scores
            if candidate_eval != Evaluation.MIN_SCORE:
                self.scores_dict[iterations + epoch] = (candidate_eval, timestamp)
                self.closed_list[candidate.to_string()] = (candidate_eval, timestamp)

            # Log program to file
            if best_updated or verbose_opt:
                pdescr = {
                    'header': header,
                    'psize': candidate.get_size(),
                    'score': candidate_eval,
                    'timestamp': timestamp
                }
                self.logger.log_program(candidate.to_string(), pdescr)
                self.logger.log('Scores: ' + str(scores).strip('()'))
                self.logger.log('Mutations: ' + str(mutations), end='\n\n')

            j_diff = candidate_eval - current_eval

            # Decide whether to accept the candidate program
            if j_diff > 0 or self.is_accept(j_diff, current_t):
                current, current_eval = candidate, candidate_eval
            current_t = self.reduce_temp(current_t, epoch)
            epoch += 1

        return best, best_eval, epoch+1
# Demonstration of for-loops: iterating a collection, a range, and the
# break/continue statements.

# BUG FIX: the original header said "tuple" but {1,2,3,4,5} is a set literal.
print("Using the values from a set")
for i in {1, 2, 3, 4, 5}:
    print(i, " ", end='\n')
print("---------------------------------------------------")

print("Print out values in a range")
for i in range(1, 10, 2):
    print(i, " ", end="\n")
print("---------------------------------------------------")

print("Using break in a loop")
for i in range(1, 10):
    if i == 6:
        break  # stop the loop entirely at 6
    print(i, " ", sep=' ', end="\n")
print("done")
print("---------------------------------------------------")

# BUG FIX: this header said "break" in the original but the loop uses continue.
print("Using continue in a loop")
for i in range(1, 10):
    if i == 6:
        continue  # skip 6, keep looping
    print(i, " ", sep=' ', end="\n")
|
#!/usr/bin/env python
import glob
import numpy as np
import os
import pandas as pd
import sys
import trimesh
here = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(here, ".."))
from parameters import (PipeFlowParams,
load_parameters)
def load_mesh(fname: str):
    """Load a mesh from *fname*; process=False skips trimesh's cleanup so
    the vertex order and count are preserved."""
    return trimesh.load(fname, process=False)
def compute_mean_rbc_length_width(simdir: str,
                                  show_plot: bool=False):
    """Compute the time-averaged length and width of the RBC in a pipe-flow
    simulation.

    Reads every 'ply/rbc_*.ply' mesh dump in *simdir*, measures the extent of
    the vertex cloud along x (length) and y (width), rescales to micrometers
    with the simulation's length scale, and averages over the second half of
    the dumps (the first half is discarded as transient).

    :param simdir: simulation directory containing 'ply/' and 'settings.pkl'.
    :param show_plot: if True, plot the RBC length against the dump id.
    :return: (mean_length_um, mean_width_um)
    """
    ply_list = sorted(glob.glob(os.path.join(simdir, 'ply', 'rbc_*.ply')))
    params = load_parameters(os.path.join(simdir, 'settings.pkl'))

    # Dead code removed: the original kept an unused `start` variable
    # (`start = 0` before the loop, `start = 7*n//8` never read afterwards).
    lengths = []
    widths = []
    for fname in ply_list:
        mesh = load_mesh(fname)
        # np.ptp = peak-to-peak: the bounding-box extent along each axis.
        L = np.ptp(mesh.vertices[:, 0]) * params.length_scale_
        W = np.ptp(mesh.vertices[:, 1]) * params.length_scale_
        lengths.append(L.to('um').magnitude)
        widths.append(W.to('um').magnitude)

    lengths = np.array(lengths)
    widths = np.array(widths)
    n = len(lengths)

    if show_plot:
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots()
        ax.plot(range(n), lengths)
        ax.set_xlabel("dump id")
        ax.set_ylabel(r"$l [\mu m]$")
        plt.show()

    # Average over the second half of the dumps only.
    return np.mean(lengths[n//2:]), np.mean(widths[n//2:])
def get_param(basedir: str, param_name: str):
    """Extract the numeric value of *param_name* encoded in a path name.

    The path is expected to contain exactly one '<param_name>_<number>'
    occurrence, e.g. get_param("run_pg_1.5", "pg") == 1.5.

    :raises AssertionError: if zero or multiple matches are found.
    """
    import re
    # BUG FIX: use a raw string for the regex — '\d' in a plain literal is an
    # invalid escape sequence (DeprecationWarning, SyntaxError in future).
    rexf = r'[-+]?\d*\.\d+|\d+'
    matches = re.findall(f"{param_name}_({rexf})", basedir)
    assert len(matches) == 1
    return float(matches[0])
def main(argv):
    """CLI entry point: measure each simulation directory and report results.

    For every directory given, computes the mean RBC length/width, extracts
    the varying parameter value from the directory name, prints a summary
    line, and optionally dumps everything to a CSV file.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Compute the mean length and width of the RBC flowing in the pipe.')
    parser.add_argument('simdirs', type=str, nargs='+', help="The simulation directories.")
    parser.add_argument('--out', type=str, default=None, help="output csv file.")
    parser.add_argument('--against', type=str, default="pg", help="Varying input parameter.")
    parser.add_argument('--show-plot', action='store_true', default=False, help="If true, show the RBC length over time.")
    args = parser.parse_args(argv)

    param_name = args.against
    lengths, widths, param_values = [], [], []

    for simdir in args.simdirs:
        length, width = compute_mean_rbc_length_width(simdir,
                                                      show_plot=args.show_plot)
        value = get_param(simdir, param_name)
        print(f"{param_name} = {value:.4e}, l = {length:.4e} um, w = {width:.4e} um")
        lengths.append(length)
        widths.append(width)
        param_values.append(value)

    if args.out is not None:
        table = pd.DataFrame({param_name: param_values,
                              "l": lengths,
                              "w": widths})
        table.to_csv(args.out, index=False)


if __name__ == '__main__':
    main(sys.argv[1:])
|
import cv2
import numpy as np
# Record clips from the default webcam: press 'c' to start a clip,
# 'd' to stop it, 'q' to quit.  Each clip is written to wave_<id>.mp4.
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Error opening video stream")

id_counter = 0
started = False    # a VideoWriter exists for the current clip
recording = False  # frames are currently being written
out = None         # current VideoWriter, if any

frame_width = int(cap.get(3))   # CAP_PROP_FRAME_WIDTH
frame_height = int(cap.get(4))  # CAP_PROP_FRAME_HEIGHT

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('Frame', frame)

    if recording and not started:
        # 0x7634706d is the fourcc code for 'mp4v'
        out = cv2.VideoWriter('wave_' + str(id_counter) + '.mp4',
                              0x7634706d, 10, (frame_width, frame_height))
        started = True
    if recording:
        out.write(frame)

    # BUG FIX: read the keyboard ONCE per frame.  The original called
    # cv2.waitKey(25) for every key test, so each call consumed the pending
    # keystroke (later tests never saw it) and added 25 ms delay per check.
    key = cv2.waitKey(25) & 0xFF
    if key == ord('d') and recording:
        print("Done recording")
        recording = False
        started = False
        # BUG FIX: release the writer so the finished clip is flushed/closed.
        out.release()
        out = None
    if key == ord('c') and not started:
        print("Starting recording")
        id_counter += 1
        recording = True
        started = False
    if key == ord('q'):
        break

# Release any writer left open when quitting mid-recording.
if out is not None:
    out.release()
cap.release()
cv2.destroyAllWindows()
|
# This Python file uses the following encoding: utf-8
# Demo of exception handling around interactive input.
try:
    text = input('Enter something --> ')
except EOFError:
    # raised when the user sends end-of-file (Ctrl+D / Ctrl+Z)
    print('\nWhy did you do an EOF on me?')
except KeyboardInterrupt:
    # raised when the user presses Ctrl+C
    print('\nYou cancelled the operation.')
except NameError:
    # Python 2's input() evaluates the text, so a bare word raises NameError
    print('\nSo you use python2, you should use \'my words\' as input')
else:
    # runs only when no exception occurred
    print('\nYou entered {}'.format(text))
# Try ctrl + d(EOF) ctrl + c(Interrupt) and normal input for this.
from django.conf.urls import patterns, include, url
from test_app.views import AppIndexView

# URL configuration for the test app: the root URL serves AppIndexView.
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10;
# on modern Django, urlpatterns should be a plain list of url()/path() entries.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'burstSMS.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^$', AppIndexView.as_view(), name='home'),
)
|
import warnings
warnings.filterwarnings('ignore')
import torchvision
import torch
from jetbot import Robot
import time

# --- Model setup: ResNet-18 with a 2-output regression head (x, y target) ---
print("loading model")
model = torchvision.models.resnet18(pretrained=False)
model.fc = torch.nn.Linear(512, 2)
print("loading floor")
model.load_state_dict(torch.load('floor.pth'))
device = torch.device('cuda')
model = model.to(device)
# half precision for faster inference on the Jetson GPU
model = model.eval().half()
print("model loaded")
robot = Robot()

import torchvision.transforms as transforms
import torch.nn.functional as F
import cv2
import PIL.Image
import numpy as np

# ImageNet normalization constants, on the GPU in half precision.
mean = torch.Tensor([0.485, 0.456, 0.406]).cuda().half()
std = torch.Tensor([0.229, 0.224, 0.225]).cuda().half()

def preprocess(image):
    """Convert a uint8 camera frame into a normalized half-precision CUDA
    tensor of shape (1, C, H, W) ready for the model.

    NOTE(review): no BGR->RGB channel swap is performed before applying the
    ImageNet statistics — confirm the camera/model channel order matches.
    """
    image = PIL.Image.fromarray(image)
    image = transforms.functional.to_tensor(image).to(device).half()
    # in-place normalize: (x - mean) / std, broadcast over H and W
    image.sub_(mean[:, None, None]).div_(std[:, None, None])
    return image[None, ...]

from IPython.display import display
import ipywidgets
import traitlets
from jetbot import Camera, bgr8_to_jpeg

camera = Camera()
#image_widget = ipywidgets.Image()
prevTime = 0  # timestamp of the previously processed frame (for FPS)
def execute4(change):
    """Camera callback: steer the robot toward the model's predicted point.

    Runs the regression model on each new frame and applies simple bang-bang
    steering based on the sign of the predicted x-offset.

    :param change: traitlets change dict; change['new'] is the new frame.
    """
    # Only prevTime is actually assigned here; the original also declared
    # angle/angle_last global, but they are used only in commented-out code.
    global prevTime
    curTime = time.time()
    sec = curTime - prevTime
    prevTime = curTime
    xfps = 1 / sec
    # BUG FIX: the original bound this text to the name `str`, shadowing the
    # builtin and breaking any later str() call in this scope.
    fps_text = "FPS : %0.1f" % xfps
    print(fps_text)
    print("running..")

    image = change['new']
    preprocessed = preprocess(image)
    output = model(preprocessed).detach().cpu().numpy().flatten()
    xx = output[0]
    yy = output[1]
    # Map the normalized predictions into pixel coordinates (kept for the
    # commented-out debug overlay below).
    x = int(camera.width * (xx / 2.0 + 0.5))
    y = int(camera.height * (yy / 2.0 + 0.5))

    # Bang-bang steering: turn toward the predicted side.
    if xx < 0:
        robot.left_motor.value = 0.1
        robot.right_motor.value = 0.16
    else:
        robot.left_motor.value = 0.15
        robot.right_motor.value = 0.1
    # angle = np.arctan2(x, y)
    # pid = angle * steering_gain_slider.value + (angle - angle_last) * steering_dgain_slider.value
    # angle_last = angle
    # steering_slider.value = pid + steering_bias_slider.value
    # robot.left_motor.value = max(min(speed_slider.value + steering_slider.value, 1.0), 0.0)
    # robot.right_motor.value = max(min(speed_slider.value - steering_slider.value, 1.0), 0.0)
camera.observe(execute4, names='value')
|
'''2. Write a program to accept a two-dimensional array containing integers as the parameter and determine the following from the elements of the array:
a. Element with minimum value in the entire array
b. Element with maximum value in the entire array
c. The elements with minimum and maximum values in each column
d. The elements with minimum and maximum values in each row'''
# Build and print an n x n matrix with 1s on the diagonal,
# 2s below it and 0s above it.
n = 4
a = [[2 if j < i else (1 if j == i else 0) for j in range(n)]
     for i in range(n)]
for row in a:
    print(' '.join(str(elem) for elem in row))
import random
class bcolors:
    """ANSI escape sequences for coloured terminal output."""
    # NOTE(review): '\033[033m' is parsed as SGR 33 (yellow); the usual
    # bcolors HEADER value is '\033[95m' — confirm intent.
    HEADER = '\033[033m'
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class Person:
    """A combatant (player or enemy) in the battle game.

    Holds HP/MP stats, spells and items, and renders the terminal UI for
    choosing actions, spells, items and targets.
    """

    def __init__(self,name,hp,mp,atk,df,magic,items):
        # atk is the mean attack value; damage rolls fall in [atk-10, atk+10)
        self.name=name
        self.max_hp=hp
        self.hp=hp
        self.max_mp=mp
        self.mp=mp
        self.atkl=atk-10   # lower damage bound
        self.atkh=atk+10   # upper damage bound (exclusive in randrange)
        self.df=df         # defense stat
        self.magic=magic   # list of spell objects (name, cost, ...)
        self.actions=["Attack","Magic","Items"]
        self.items=items   # list of {"item": ..., "quantity": ...} dicts

    def generate_damage(self):
        """Roll a random attack damage in [atkl, atkh)."""
        return random.randrange(self.atkl,self.atkh)

    def take_damage(self,dmg):
        """Subtract *dmg* from HP, clamped at 0; return the remaining HP."""
        self.hp-=dmg
        if self.hp < 0:
            self.hp=0
        return self.hp

    def heal(self,dmg):
        """Add *dmg* to HP, clamped at max_hp."""
        self.hp +=dmg
        if self.hp > self.max_hp:
            self.hp= self.max_hp

    def get_hp(self):
        return self.hp

    def get_max_hp(self):
        return self.max_hp

    def get_mp(self):
        return self.mp

    def get_max_mp(self):
        return self.max_mp

    def reduce_mp(self,cost):
        """Spend *cost* MP (no clamping is performed here)."""
        self.mp -= cost

    def reduce_item(self,choice):
        """Consume one unit of the item at index *choice*."""
        self.items[choice]["quantity"] -= 1

    def choose_action(self):
        """Print the numbered list of top-level actions."""
        i=1
        print(bcolors.OKBLUE + self.name + bcolors.ENDC)
        print(bcolors.OKBLUE + "ACTIONS" + bcolors.ENDC)
        for item in self.actions:
            print(" "+ str(i) + ". " + item)
            i+=1

    def choose_magic(self):
        """Print the numbered list of spells with their MP costs."""
        i=1
        print("\n"+ bcolors.OKBLUE+"MAGIC"+bcolors.ENDC)
        for item in self.magic:
            print(" "+ str(i) + ". " + item.name + " " + str(item.cost))
            i+=1

    def choose_item(self):
        """Print the numbered list of items with descriptions and quantities."""
        i=1
        print("\n"+ bcolors.OKBLUE+"ITEMS"+bcolors.ENDC)
        for item in self.items:
            print(" "+ str(i) + ". " + item["item"].name + ": " + item["item"].description + " - " + bcolors.FAIL + str(item["quantity"]) + bcolors.ENDC)
            i+=1

    def choose_target(self, enemies):
        """List living enemies and prompt for a selection.

        Returns the 0-based index typed by the user.
        NOTE(review): dead enemies are skipped in the display but the raw
        input index is returned, so numbering and list indices can diverge
        once an enemy has died — confirm intended.
        """
        i = 1
        print("\n" + bcolors.FAIL + "ENEMIES" + bcolors.ENDC)
        for enemy in enemies:
            if enemy.hp > 0:
                print(" " + str(i) + ". " + enemy.name + ": " + bcolors.ENDC)
                i += 1
        return int(input("select enemy:"))-1

    def get_enemy_stats(self):
        """Render the enemy HP (50-char) and MP (10-char) bars."""
        bar_hp = ""
        bar_ticks_hp = (self.hp / self.max_hp) * 50
        for x in range (0, 50):
            if x < int(bar_ticks_hp):
                bar_hp+="█"
            else:
                bar_hp+=" "
        bar_mp = ""
        bar_ticks_mp = (self.mp / self.max_mp) * 10
        for x in range(0, 10):
            if x < int(bar_ticks_mp):
                bar_mp += "█"
            else:
                bar_mp += " "
        # right-align "hp/max_hp" into an 11-character field
        hp_range=str(self.max_hp)
        hp_range="/"+hp_range
        hp_range =str(self.hp) + hp_range
        if len(hp_range) < 11:
            for x in range(0, 11 - len(hp_range)):
                hp_range = " " + hp_range
        # right-align "mp/max_mp" into a 7-character field
        mp_range = str(self.max_mp)
        mp_range = "/" + mp_range
        mp_range = str(self.mp) + mp_range
        if len(mp_range) < 7:
            for x in range(0, 7 - len(mp_range)):
                mp_range = " " + mp_range
        print(" __________________________________________________ __________")
        print(bcolors.BOLD + self.name+": " +
              hp_range + " |" + bcolors.FAIL + bar_hp + bcolors.ENDC + "| " +
              mp_range + " |" + bcolors.WARNING + bar_mp + bcolors.ENDC + "|")

    def get_stats(self):
        """Render the player HP (25-char) and MP (10-char) bars."""
        bar_hp = ""
        bar_ticks_hp = (self.hp / self.max_hp) * 25
        for x in range (0, 25):
            if x < int(bar_ticks_hp):
                bar_hp+="█"
            else:
                bar_hp+=" "
        bar_mp = ""
        bar_ticks_mp = (self.mp / self.max_mp) * 10
        for x in range(0, 10):
            if x < int(bar_ticks_mp):
                bar_mp += "█"
            else:
                bar_mp += " "
        # right-align "hp/max_hp" into a 9-character field
        hp_range=str(self.max_hp)
        hp_range="/"+hp_range
        hp_range =str(self.hp) + hp_range
        if len(hp_range) < 9:
            for x in range(0, 9 - len(hp_range)):
                hp_range = " " + hp_range
        # right-align "mp/max_mp" into a 7-character field
        mp_range = str(self.max_mp)
        mp_range = "/" + mp_range
        mp_range = str(self.mp) + mp_range
        if len(mp_range) < 7:
            for x in range(0, 7 - len(mp_range)):
                mp_range = " " + mp_range
        print(" _________________________ __________")
        print(bcolors.BOLD + self.name+": " +
              hp_range + " |" + bcolors.OKGREEN + bar_hp + bcolors.ENDC + "| " +
              mp_range + " |" + bcolors.OKBLUE + bar_mp + bcolors.ENDC + "|")

    def choose_enemy_spell(self):
        """Pick a random castable spell for an enemy turn.

        Re-rolls (recursively) when the spell is unaffordable or is a
        healing ('white') spell while HP is still above a third of max.
        NOTE(review): if no spell is ever castable this recurses without
        bound — confirm enemies always have an affordable spell.
        """
        spell = self.magic[random.randrange(0, len(self.magic))]
        current_mp = self.get_mp()
        if spell.cost > current_mp or (spell.type == "white" and self.hp > self.max_hp // 3):
            return self.choose_enemy_spell()
        self.reduce_mp(spell.cost)
        return spell
"""Computation class for Concord
.. module:: Computation
:synopsis: Computation class and helper function
"""
import sys
import os
import types
import threading
from thrift import Thrift
from thrift.transport import (
TSocket, TTransport
)
from thrift.server import TServer
from thrift.protocol import TBinaryProtocol
from concord.internal.thrift import (
ComputationService,
BoltProxyService
)
from concord.internal.thrift.ttypes import (
Record,
ComputationTx,
ComputationMetadata,
Endpoint,
StreamMetadata,
StreamGrouping
)
from concord.internal.thrift.constants import (
kConcordEnvKeyClientListenAddr,
kConcordEnvKeyClientProxyAddr
)
import logging
import logging.handlers
# Logging setup: one shared format, a daily-rotating file for this module.
logging_format_string='%(levelname)s:%(asctime)s %(filename)s:%(lineno)d] %(message)s'
# Basic Config is needed for thrift and default loggers
logging.basicConfig(format=logging_format_string)
concord_formatter = logging.Formatter(logging_format_string)
concord_logging_handle = logging.handlers.TimedRotatingFileHandler("concord_py.log")
concord_logging_handle.setFormatter(concord_formatter)
# Apply the same format to the handlers basicConfig installed on the root.
for h in logging.getLogger().handlers: h.setFormatter(concord_formatter)
ccord_logger = logging.getLogger('concord.computation')
ccord_logger.setLevel(logging.DEBUG)
# Don't double-log through the root logger; use only the file handler.
ccord_logger.propagate = False
ccord_logger.addHandler(concord_logging_handle)
class Metadata:
    """High-level wrapper for `ComputationMetadata`
    """
    def __init__(self, name=None, istreams=None, ostreams=None):
        """Create a new Metadata object
        :param name: The globally unique identifier of the computation.
        :type name: str.
        :param istreams: The list of streams to subscribe this computation to.
        If `istreams` is a string, use the default grouping (shuffle). If it
        is a pair, use the grouping passed as the second item.
        :type istreams: list(str), (str, StreamGrouping).
        :param ostreams: The list of streams this computation may produce on.
        :type ostreams: list(str).
        :raises Exception: if both stream lists are empty.
        """
        self.name = name
        # BUG FIX: the original used mutable default arguments ([]), which
        # are shared across every Metadata instance that relies on the
        # default; use None sentinels and create a fresh list per instance.
        self.istreams = [] if istreams is None else istreams
        self.ostreams = [] if ostreams is None else ostreams
        if len(self.istreams) == 0 and len(self.ostreams) == 0:
            raise Exception("Both input and output streams are empty")
def new_computation_context(tcp_proxy):
    """Creates a context object wrapping a transaction.
    :returns: (ComputationContext, ComputationTx)
    """
    tx = ComputationTx()
    tx.records = []
    tx.timers = {}

    class ComputationContext:
        """Convenience API bridging user computations and the proxy;
        record/timer calls accumulate on the enclosing transaction.
        """
        def produce_record(self, stream, key, data):
            """Queue a record to be emitted down stream.
            :param stream: The stream to emit the record on.
            :type stream: str.
            :param key: The key to route this message by (only used when
            using GROUP_BY routing).
            :type key: str.
            :param data: The binary blob to emit down stream.
            :type data: str.
            """
            record = Record()
            record.key = key
            record.data = data
            record.userStream = stream
            tx.records.append(record)

        def set_timer(self, key, time):
            """Schedule a timer callback for some point in the future.
            :name key: The name of the timer.
            :type key: str.
            :name time: The time (in ms) at which the callback should trigger.
            :type time: int.
            """
            tx.timers[key] = time

        def set_state(self, key, value):
            """Persist a key/value pair through the proxy."""
            tcp_proxy.setState(key, value)

        def get_state(self, key):
            """Fetch a previously stored value through the proxy."""
            return tcp_proxy.getState(key)

    return ComputationContext(), tx
class Computation:
    """Abstract class for users to extend when making computations.

    NOTE(review): the stub methods below are declared without `self`; they
    document the signatures users are expected to implement rather than
    being directly callable on an instance — confirm subclasses define them
    as normal instance methods.
    """
    def init(ctx):
        """Called when the framework has registered the computation
        successfully. Gives users a first opportunity to schedule
        timer callbacks and produce records.
        :param ctx: The computation context object provided by the system.
        :type ctx: ComputationContext.
        """
        pass

    def destroy():
        """Called right before the concord proxy is ready to shutdown.
        Gives users an opportunity to perform some cleanup before the
        process is killed."""
        pass

    def process_record(ctx, record):
        """Process an incoming record on one of the computation's `istreams`.
        :param ctx: The computation context object provided by the system.
        :type ctx: ComputationContext.
        :param record: The `Record` to emit downstream.
        :type record: Record.
        :raises Exception: always, unless overridden.
        """
        raise Exception('process_record not implemented')

    def process_timer(ctx, key, time):
        """Process a timer callback previously set via `set_timer`.
        :param ctx: The computation context object provided by the system.
        :type ctx: ComputationContext.
        :param key: The name of the timer.
        :type key: str.
        :param time: The time (in ms) for which the callback was scheduled.
        :type time: int.
        :raises Exception: always, unless overridden.
        """
        raise Exception('process_timer not implemented')

    def metadata():
        """The metadata defining this computation.
        :returns: Metadata.
        :raises Exception: always, unless overridden.
        """
        raise Exception('metadata not implemented')
class ComputationServiceWrapper(ComputationService.Iface):
    """Thrift service adapter around a user-defined `Computation`.

    Translates thrift callbacks (init/destroy/record/timer/metadata) into
    calls on the wrapped handler, giving each call a fresh
    ComputationContext whose transaction is returned to the proxy.  Any
    handler exception is logged and terminates the process.
    """

    def __init__(self, handler):
        # handler: the user's Computation implementation
        self.handler = handler
        self.proxy_client = None  # lazily created BoltProxyService client

    def init(self):
        """Run the handler's one-time init and return its transaction."""
        ctx, transaction = new_computation_context(self.proxy())
        try:
            self.handler.init(ctx)
        except Exception as e:
            ccord_logger.exception(e)
            ccord_logger.critical("Exception in client init")
            sys.exit(1)
        return transaction

    def destroy(self):
        """Give the handler a chance to clean up before shutdown."""
        try:
            self.handler.destroy()
        except Exception as e:
            ccord_logger.exception(e)
            ccord_logger.critical("Exception in client destroy")
            sys.exit(1)

    def boltProcessRecords(self, records):
        """Process a batch of records, one transaction per record."""
        def txfn(record):
            ctx, transaction = new_computation_context(self.proxy())
            try:
                self.handler.process_record(ctx, record)
            except Exception as e:
                ccord_logger.exception(e)
                ccord_logger.critical("Exception in process_record")
                sys.exit(1)
            return transaction
        # BUG FIX: materialize the map.  On Python 3 `map` is lazy, so the
        # handler would only run (or not at all) when thrift consumed it.
        return list(map(txfn, records))

    def boltProcessTimer(self, key, time):
        """Dispatch a timer callback to the handler."""
        ctx, transaction = new_computation_context(self.proxy())
        try:
            self.handler.process_timer(ctx, key, time)
        except Exception as e:
            ccord_logger.exception(e)
            ccord_logger.critical("Exception in process_timer")
            sys.exit(1)
        return transaction

    def boltMetadata(self):
        """Build the thrift ComputationMetadata from the handler's metadata."""
        def enrich_stream(stream):
            # Accept either "name" or ("name", grouping) entries.
            defaultGrouping = StreamGrouping.SHUFFLE
            sm = StreamMetadata()
            # BUG FIX: `types.TupleType` exists only on Python 2 (it raises
            # AttributeError on Python 3); the builtin `tuple` is equivalent
            # on both versions.
            if isinstance(stream, tuple):
                stream_name, grouping = stream
                sm.name = stream_name
                sm.grouping = grouping
            else:
                sm.name = stream
                sm.grouping = defaultGrouping
            return sm
        try:
            ccord_logger.info("Getting client metadata")
            md = self.handler.metadata()
        except Exception as e:
            ccord_logger.exception(e)
            ccord_logger.critical("Exception in metadata")
            sys.exit(1)
        metadata = ComputationMetadata()
        metadata.name = md.name
        metadata.istreams = list(map(enrich_stream, md.istreams))
        metadata.ostreams = md.ostreams
        ccord_logger.info("Got metadata: %s", metadata)
        return metadata

    def proxy(self):
        """Return the proxy client, creating it on first use."""
        if not self.proxy_client:
            self.proxy_client = self.new_proxy_client()
        return self.proxy_client

    def new_proxy_client(self):
        """Open a framed binary-protocol thrift connection to the proxy."""
        host, port = self.proxy_address
        socket = TSocket.TSocket(host, port)
        transport = TTransport.TFramedTransport(socket)
        protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
        client = BoltProxyService.Client(protocol)
        transport.open()
        return client

    def set_proxy_address(self, host, port):
        """Record the proxy address and register this computation with it."""
        md = self.boltMetadata()
        proxy_endpoint = Endpoint()
        proxy_endpoint.ip = host
        proxy_endpoint.port = port
        md.proxyEndpoint = proxy_endpoint
        # Must be set before proxy() opens the connection below.
        self.proxy_address = (host, port)
        proxy = self.proxy()
        proxy.registerWithScheduler(md)
def serve_computation(handler):
    """Helper function. Parses environment variables and starts a thrift service
    wrapping the user-defined computation.
    :param handler: The user computation.
    :type handler: Computation.
    """
    ccord_logger.info("About to serve computation and service")

    # Give the handler a logger attribute unless it already defines one.
    if 'concord_logger' not in dir(handler):
        handler.concord_logger = ccord_logger

    def address_str(address):
        # "host:port" -> (host, int_port)
        host, port = address.split(':')
        return (host, int(port))

    comp = ComputationServiceWrapper(handler)
    _, listen_port = address_str(
        os.environ[kConcordEnvKeyClientListenAddr])
    proxy_host, proxy_port = address_str(
        os.environ[kConcordEnvKeyClientProxyAddr])

    processor = ComputationService.Processor(comp)
    transport = TSocket.TServerSocket(host="127.0.0.1", port=listen_port)
    tfactory = TTransport.TFramedTransportFactory()
    pfactory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
    try:
        ccord_logger.info("Starting python service port: %d", listen_port)
        server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
        ccord_logger.info("registering with framework at: %s:%d",
                          proxy_host, proxy_port)
        comp.set_proxy_address(proxy_host, proxy_port)
        server.serve()
        # BUG FIX: the original logged through the undefined name
        # `concord_logger` (a NameError at runtime) with the misspelled
        # message "Exciting service".
        ccord_logger.error("Exiting service")
    except Exception as exception:
        ccord_logger.exception(exception)
        ccord_logger.critical("Exception in python client")
        sys.exit(1)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 25 11:50:05 2018
@author: kennedy
"""
__author__ = "kennedy Czar"
__email__ = "kennedyczar@gmail.com"
__version__ = '1.0'
import os
import time
from datetime import datetime
from os import chdir
from selenium import webdriver
from TICKERS import TICKER
class Data_collector(object):
    """Creates the project folder layout and downloads stock data via the
    Yahoo Finance API (with a Selenium fallback for unavailable tickers)."""

    def __init__(self, path):
        '''
        :Argument:
          :path:
            The working directory in which the dataset folder
            structure should be created.
        :example:
          create_folder('D:\\YourDirectory')
          Creates the DATASET FOLDER @D:\\MyDirectory\\DATASET
        :Complexity for file Creation:
          The Function runs in:
            Time Complexity: O(N*logN)
            Space Complexity: O(1)
        '''
        self.path = path
        try:
            if os.path.exists(self.path):
                try:
                    # Sub-folders created under self.path.
                    self.FOLDERS = ['\\DATASET',
                                    '\\TICKERS',
                                    '\\PREDICTED',
                                    '\\MODEL']
                    # NOTE(review): the loop variable `folders` is unused; the
                    # loop indexes via FOLDER_COUNT instead.
                    FOLDER_COUNT = 0
                    for folders in self.FOLDERS:
                        '''If folder is not created or created but deleted..Recreate/Create the folder.
                        Check for all folders in the FOLDERS list'''
                        if not os.path.exists(self.path + self.FOLDERS[FOLDER_COUNT]):
                            os.makedirs(path + self.FOLDERS[FOLDER_COUNT])
                            print('====== 100% Completed ==== : {}'.format(self.path + self.FOLDERS[FOLDER_COUNT]))
                            FOLDER_COUNT += 1
                        elif os.path.exists(self.path + self.FOLDERS[FOLDER_COUNT]):
                            '''OR check if the file is already existing using a boolean..if true return'''
                            print('File Already Existing : {}'.format(self.path + self.FOLDERS[FOLDER_COUNT]))
                            FOLDER_COUNT += 1
                except OSError as e:
                    '''raise OSError('File Already Existing {}'.format(e))'''
                    print('File Already existing: {}'.format(e))
            elif not os.path.exists(self.path):
                raise OSError('File path: {} does not exist\n\t\tPlease check the path again'.format(self.path))
            else:
                print('File Already Existing')
        except Exception as e:
            raise(e)
        finally:
            print('Process completed...Exiting')

    def STOCK_EXTRACTOR(self, url, start, end):
        '''
        :Functionality:
          Collects stock data using the yahoo API
          Collects all excel data and stores in DATASET FOLDER
          append .csv to all files downloaded
          self.END = datetime.today().date()
        '''
        import fix_yahoo_finance as yahoo
        import pandas as pd
        from datetime import datetime
        self.url = url
        '''Set the start date'''
        self.START = start
        self.END = end
        # start_date = pd.Timestamp(2010, 12, 29)
        # end_date =
        '''Create a list of stocks to download'''
        self.TICKERS = []
        self.SYMBOLS = TICKER(self.url).parse()
        for ii in self.SYMBOLS['IB_Symbol'].values:
            # '.MX' suffix targets the Mexican exchange listings.
            self.TICKERS.append(ii + '{}'.format('.MX'))
        '''write the stock data to specific format by
        appending the right extension'''
        # NOTE(review): to_csv is used for every extension, so the .xlsx and
        # .json files also contain CSV data — confirm intended.
        STOCK_TICKER_ = pd.DataFrame(self.TICKERS)
        self.FORMAT = ['.csv', '.xlsx', '.json']
        for extension in self.FORMAT:
            STOCK_TICKER_.to_csv('../TICKERS/STOCK_TICKER{}'.format(extension))
        print('======= Begin downloading stock dataset ======')
        try:
            self.unavailable_ticks = []  # tickers yahoo could not serve
            for self.TICK_SYMBOLS in self.TICKERS:
                '''just in case your connection breaks,
                we'd like to save our progress! by appending
                downloaded dataset to DATASET FOLDER'''
                if not os.path.exists('../DATASET/{}.csv'.format(self.TICK_SYMBOLS)):
                    try:
                        df = yahoo.download(self.TICK_SYMBOLS, start = self.START, end = self.END)
                        df.reset_index(inplace = True)
                        df.set_index("Date", inplace = True)
                        #check size of file before saving
                        import sys
                        # skip near-empty downloads (<= 1 KiB in memory)
                        if sys.getsizeof(df) <= 1024:
                            pass
                        else:
                            df.to_csv('../DATASET/{}.csv'.format(self.TICK_SYMBOLS))
                    except ValueError:
                        print('{} is unavailable'.format(self.TICK_SYMBOLS))
                        self.unavailable_ticks.append(self.TICK_SYMBOLS)
                        pass
                else:
                    #this section redownloads the file even though it
                    #is already existing..
                    try:
                        df = yahoo.download(self.TICK_SYMBOLS, start = self.START, end = datetime.now())
                        df.reset_index(inplace = True)
                        df.set_index("Date", inplace = True)
                        #check size of file before saving
                        import sys
                        if sys.getsizeof(df) <= 1024:
                            pass
                        else:
                            df.to_csv('../DATASET/{}.csv'.format(self.TICK_SYMBOLS))
                    except ValueError:
                        print('{} is unavaible'.format(self.TICK_SYMBOLS))
                        self.unavailable_ticks.append(self.TICK_SYMBOLS)
                        pass
                    # print('File Already existing: {}'.format(self.TICK_SYMBOLS))
        except OSError as e:
            raise OSError('Something wrong with destination path: {}'.format(e))
        finally:
            print('API Download Completed..!')
            print('*'*40)
            print('External Download Begin..!')
            print('Unavailable tickers are \n{}\n'.format(self.unavailable_ticks))
            print('*'*60)
            print('Downloading Unavailable data from YAHOO..')
            print('*'*40)
            print('External Download Completed..!')
            print('*'*40)
            print('Process Completed..!')
            print('A total of {} unavailable tickers'.format(len(self.unavailable_ticks)))
def YAHOO_(path, start, end, stock_):
    """Best-effort bulk download of Yahoo Finance history CSVs via Chrome.

    path       -- browser download directory
    start, end -- anything pandas can parse into a DatetimeIndex
    stock_     -- iterable of ticker symbols

    Failures on individual tickers are reported and skipped; the browser
    is always closed, even on error (the original leaked the driver and
    silently swallowed every exception with a bare `except: pass`).
    """
    import pandas as pd
    import numpy as np
    # Yahoo's history URL wants POSIX epoch seconds
    date_epoch = pd.DatetimeIndex([start, end]).astype(np.int64) // 10**9
    chrom_options_ = webdriver.ChromeOptions()
    prefer_ = {'download.default_directory': path,
               'profile.default_content_settings.popups': 0,
               'directory_upgrade': True}
    chrom_options_.add_experimental_option('prefs', prefer_)
    for ii in stock_:
        driver = None
        try:
            yahoo_page_ = 'https://finance.yahoo.com/quote/{}/history?period1={}&period2={}&interval=1d&filter=history&frequency=1d'.format(ii, date_epoch[0], date_epoch[1])
            # NOTE: `chrome_options=` kept for the selenium version in use here
            driver = webdriver.Chrome("C:/chromedriver.exe", chrome_options=chrom_options_)
            driver.minimize_window()
            driver.get(yahoo_page_)
            time.sleep(2)
            driver.find_element_by_css_selector('.btn.primary').click()
            time.sleep(2)
            driver.find_element_by_css_selector('.Fl\(end\).Mt\(3px\).Cur\(p\)').click()
            time.sleep(10)
        except Exception as exc:
            # still best-effort, but record why a ticker was skipped
            print('Skipping {}: {}'.format(ii, exc))
        finally:
            if driver is not None:
                try:
                    driver.quit()
                except Exception:
                    pass
if __name__ == '__main__':
    # specify the start and end dates for stocks
    import pandas as pd
    start = pd.Timestamp(2010, 1, 1)
    # NOTE(review): `datetime` is not imported at module scope in this view
    # (only locally inside methods) -- confirm a top-of-file import exists.
    end = datetime.today().date()
    '''Define a path on your drive where this project folder is located'''
    path = 'D:\\BITBUCKET_PROJECTS\\Forecasting 1.0'
    base_url = "https://www.interactivebrokers.com/en/index.php?f=2222&exch=mexi&showcategories=STK&p=&cc=&limit=100"
    # run the full scrape-and-download pipeline
    Data_collector(path).STOCK_EXTRACTOR(base_url, start, end)
|
__author__ = 'dowling'
from mongokit import Document
from model.db import connection
class User(Document):
    """MongoKit document describing an application user (Python 2: `unicode`)."""
    structure = {
        'username': unicode,
        'device': unicode
    }
    # follow DBRefs automatically and allow attribute-style field access
    use_autorefs = True
    use_dot_notation = True
# make the document class available on the shared connection
connection.register([User])
|
# converts ok cupid profiles into a csv file with the features we want to
# include, empty neighbors and linear regression file
import pandas as pd

# the entire dataset (renamed from `all`, which shadowed the builtin all())
profiles = pd.read_csv("okcupid_profiles.csv")
features_to_include = {'name', 'neighbors', 'age', 'status', 'sex', 'orientation', 'body_type', 'diet', 'drinks', 'drugs', 'education',
                       'ethnicity', 'height', 'income', 'job', 'offspring', 'pets', 'religion', 'smokes', 'speaks', 'linear classifier'}
# delete unwanted features
for feature in [col for col in profiles if col not in features_to_include]:
    del profiles[feature]
# create the list of full names from the first/last name columns
names_file = pd.read_csv("60knames.csv")
names = [names_file.loc[i][0] + ' ' + names_file.loc[i][1] for i in range(names_file.shape[0])]
# insert name and (empty) neighbors as the first two columns
profiles.insert(0, 'neighbors', [{} for i in range(profiles.shape[0])])
profiles.insert(0, 'name', names[:profiles.shape[0]])
profiles['linear classifier'] = ['' for i in range(profiles.shape[0])]
# check that exactly the wanted features are left
print(set(profiles.keys()) == features_to_include, '\n')
print(profiles.head())
print(profiles.shape)
# create the population output file
# first 2 elements are name and neighbors, rest are the trainable features
profiles.to_csv("population.csv", index = False)
|
# -*- coding: utf-8 -*-
# LOC Map designer
import pygame
from config import ConfigManager
import maps
cfg = ConfigManager()
# grid origin (x, y) and cell size, in pixels
gridSize = dict(x=10,y=100,size=80)
pygame.init()
# size the window to the current display, minus a 100px margin
scrinfo = pygame.display.Info()
x = scrinfo.current_w - 100
y = scrinfo.current_h - 100
display = pygame.display.set_mode((x,y))
pygame.display.set_caption(u'LOC Designer')
pygame.display.update()
def designerLoop():
    """Run the designer's event loop until the window is closed."""
    palette = []
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.MOUSEBUTTONDOWN:
                # report where the user clicked
                print(pygame.mouse.get_pos())
    pygame.quit()
    quit()
if __name__ == '__main__':
    # start the interactive designer
    designerLoop()
__version__ = "2.6.0"
from pyquil.quil import Program
from pyquil.api import list_quantum_computers, get_qc
|
"""
Alexander Moriarty
Google Code Jam 2016
Problem A. Counting Sheep
"""
import sys
def f7(seq):
    """Return seq's elements as a list, first-seen order, duplicates removed."""
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def count200(N):
    """Google Code Jam 2016 "Counting Sheep".

    Return str(n*N) for the first n in 1..199 by which every decimal digit
    0-9 has appeared among the digits of N, 2N, ..., nN; return "INSOMNIA"
    if the digit set never completes (e.g. N == 0).

    Rewritten with a set accumulator: the original re-deduplicated the whole
    digit list via f7() on every iteration (quadratic) and used the
    Python-2-only `xrange`; `range` works on both 2 and 3 for this size.
    """
    seen = set()
    for n in range(1, 200):
        seen.update(str(n * N))
        if len(seen) == 10:
            return str(n * N)
    return "INSOMNIA"
if __name__ == "__main__":
    # Python 2 driver: read T test cases from stdin, or from a file given as
    # argv[1] ('-' means stdin)
    f = sys.stdin
    if len(sys.argv) >= 2:
        fn = sys.argv[1]
        if fn != '-':
            f = open(fn)
    T = int(f.readline())
    for _t in xrange(T):
        N = int(f.readline())
        n = count200(N)
        print "Case #%d: %s" % (_t+1, n)
|
def solution(numbers, hand):
    """Keypad-thumb problem: return a string of 'L'/'R' per pressed number.

    1/4/7 always use the left thumb, 3/6/9 the right; the middle column goes
    to whichever thumb is closer (row distance on the keypad, with a -1
    bonus for a thumb already sitting in the middle column), ties broken by
    the `hand` preference. 0 is treated as key 11 (bottom middle).
    """
    lhand, rhand = 10, 12          # starting positions (* and #)
    lmoved = rmoved = False        # thumb currently in the middle column?
    pressed = ''
    for raw in numbers:
        num = 11 if raw == 0 else raw
        if raw in (1, 4, 7):
            pressed += 'L'
            lhand = raw
            lmoved = False
        elif raw in (3, 6, 9):
            pressed += 'R'
            rhand = raw
            rmoved = False
        else:
            # int(key / 3.1) maps a key number to its keypad row
            row = int(num / 3.1)
            ldist = abs(row - int(lhand / 3.1))
            rdist = abs(row - int(rhand / 3.1))
            if lmoved:
                ldist -= 1
            if rmoved:
                rdist -= 1
            # right thumb wins when strictly closer, or on a tie with a
            # right-handed preference; otherwise the left thumb presses
            if ldist > rdist or (ldist == rdist and hand != 'left'):
                pressed += 'R'
                rhand = num
                rmoved = True
            else:
                pressed += 'L'
                lhand = num
                lmoved = True
    return pressed
import threading
import keyboard
import time
try:
from MainAssistant import main
except:
import main
import subprocess as s
print("started")
hotkey = "shift + alt"
Killhotkey = "ctrl + alt"
def killProcess(pid):
    """Forcefully terminate the Windows process with the given PID."""
    # argument list with shell=False: no shell parsing / injection via `pid`
    s.Popen(['taskkill', '/F', '/PID', str(pid)])
def kill():
    """Poll the keyboard forever; when the kill hotkey is held, terminate
    the assistant process (main.PID) via taskkill."""
    while True:
        if keyboard.is_pressed(Killhotkey):
            print("Killed")
            killProcess(main.PID)
        # poll twice a second to keep CPU usage low
        time.sleep(.5)
# Remember that the order in which the hotkey is set up is the order you
# need to press the keys.
# background watcher thread for the kill hotkey
x = threading.Thread(target = kill)
x.start()
# main loop: launch the assistant whenever the start hotkey is held
while True:
    # if main.kill:
    #     sys.exit("Kill Executed")
    # if main.paused:
    #     continue
    if keyboard.is_pressed(hotkey):
        print("Hotkey is being pressed")
        main.Initialization()
        # debounce: don't re-trigger while the key is still held
        time.sleep(5)
    # print('not pressed')
    time.sleep(1)
|
# Bonnet's thm: d/dm E[f(z)] = E[d/dz f(z)] for z ~ N(m,v)
#Price's thm: d/dv E[f(z)] = 0.5 E[d^2/dz^2 f(z)] for z ~ N(m,v)
# Note that we are taking derivatives wrt the parameters of the sampling distribution
# We rely on the fact that TFP Gaussian samples are reparameterizable
import numpy as np
import jax
import jax.numpy as jnp
import tensorflow_probability as tfp
tfp = tfp.substrates.jax
tfd = tfp.distributions
key = jax.random.PRNGKey(0)
nsamples = 10000
def f(z):
    """Arbitrary smooth test function (elementwise z^2)."""
    return jnp.square(z)  # arbitrary fn
def expect_f(params):
    """Monte Carlo estimate of E[f(z)] for z ~ N(m, v); params = (m, v).

    Uses the module-level `key`/`nsamples`; the sampler is reparameterized,
    so jax.grad of this function is meaningful.
    """
    m, v = params
    dist = tfd.Normal(m, jnp.sqrt(v))
    zs = dist.sample(nsamples, key)
    return jnp.mean(f(zs))
def expect_grad(params):
    """Monte Carlo estimate of E[f'(z)], z ~ N(m, v).

    By Bonnet's theorem this equals d/dm E[f(z)].
    """
    m, v = params
    dist = tfd.Normal(m, jnp.sqrt(v))
    zs = dist.sample(nsamples, key)
    grads = jax.vmap(jax.grad(f))(zs)
    return jnp.mean(grads)
def expect_grad2(params):
    """Monte Carlo estimate of E[f''(z)], z ~ N(m, v).

    By Price's theorem, 0.5 * this equals d/dv E[f(z)].
    """
    m, v = params
    dist = tfd.Normal(m, jnp.sqrt(v))
    zs = dist.sample(nsamples, key)
    # jax.hessian(f) == grad-of-grad for the scalar f
    grads = jax.vmap(jax.hessian(f))(zs)
    return jnp.mean(grads)
params = (1.0, 2.0)
# sample-based estimates of E[f'] and E[f'']
e1 = expect_grad(params)
e2 = expect_grad2(params)
print([e1, 0.5*e2])
# autodiff through the reparameterized sampler: (d/dm, d/dv) of E[f]
grads = jax.grad(expect_f)(params)
print(grads)
# Bonnet: d/dm E[f] == E[f'];  Price: d/dv E[f] == 0.5 E[f'']
assert np.allclose(e1, grads[0], atol=1e-1)
assert np.allclose(0.5 * e2, grads[1], atol=1e-1)
|
from django.shortcuts import render
from .models import Song,Audio
from django.views.decorators.csrf import csrf_exempt
import os
from pathlib import Path
from django.core.files.storage import FileSystemStorage
import ffmpeg_streaming
from ffmpeg_streaming import Formats
from django.http import HttpResponse
import shutil
BASE_DIR = Path(__file__).resolve().parent.parent
def home(request):
    """Render the video list page with every Song."""
    return render(request, 'videos/list.html', {'songs': Song.objects.all()})
@csrf_exempt
def upload(request):
    """Upload a video, create a Song row, and transcode it to MPEG-DASH.

    POST: saves the raw upload, runs ffmpeg-streaming to produce
    media/<song_id>/<name>.mpd, then deletes the raw file.
    GET: renders the upload form.
    NOTE(review): paths are built with hard-coded '\\' separators, so this
    only works on Windows -- confirm before porting.
    """
    if request.method=="POST":
        uploaded_file = request.FILES['myFile']
        # strip the extension from the stored name
        name_array=uploaded_file.name.split('.')
        uploaded_file.name=name_array[0]
        song=Song.objects.create(title=request.POST.get('title'),description=request.POST.get('description'),artist=request.POST.get('artist'),path=f'{uploaded_file.name}')
        song.save()
        # re-fetch to get the assigned primary key
        song=Song.objects.all().last()
        song_id=song.id
        os.mkdir(f'{BASE_DIR}\\media\\{song_id}\\')
        fs=FileSystemStorage(BASE_DIR)
        fs.save(uploaded_file.name,uploaded_file)
        # transcode the raw upload into DASH segments + manifest
        video = ffmpeg_streaming.input(f'{BASE_DIR}\\{uploaded_file.name}')
        dash = video.dash(Formats.h264())
        dash.auto_generate_representations()
        dash.output(f'{BASE_DIR}\\media\\{song_id}\\{uploaded_file.name}.mpd')
        # the raw source is no longer needed after transcoding
        os.remove(f'{BASE_DIR}\\{uploaded_file.name}')
        return HttpResponse("uploaded successfully")
    return render(request,'videos/upload.html')
@csrf_exempt
def delete_video(request):
    """POST: delete a Song row and its transcoded media directory."""
    if request.method=="POST":
        song_id=request.POST.get('id')
        song=Song.objects.get(id=song_id)
        song.delete()
        # The original wrote '\media\' -- '\m' is an invalid escape that only
        # works by accident. The doubled backslashes below produce the exact
        # same path string, explicitly.
        shutil.rmtree(f'{BASE_DIR}\\media\\{song_id}\\')
        return HttpResponse("Deleted Successfully!")
    return HttpResponse("Method not allowed")
def player(request, id):
    """Render the watch page for one Song."""
    song = Song.objects.get(id=id)
    return render(request, 'videos/watch.html', {'song': song})
def audio_player(request):
    """Render the audio list page with every Audio row."""
    return render(request, 'videos/list2.html', {'songs': Audio.objects.all()})
@csrf_exempt
def upload_mp3(request):
    """Upload an MP3, create an Audio row, and store it as media/<id>.mp3.

    GET renders the upload form.
    """
    if request.method=="POST":
        uploaded_file = request.FILES['myFile']
        audio=Audio.objects.create(title=request.POST.get('title'),artist=request.POST.get('artist'),path=uploaded_file.name)
        audio.save()
        # re-fetch to get the assigned primary key, used as the filename
        audio=Audio.objects.all().last()
        song_id=audio.id
        fs=FileSystemStorage(f'{BASE_DIR}\\media')
        fs.save(f'{song_id}.mp3',uploaded_file)
        return HttpResponse("Uploaded Successfully!")
    return render(request,'videos/upload_audio.html')
def audio_play_play_by_id(request, id):
    """Render the listen page for one Audio row."""
    track = Audio.objects.get(id=id)
    return render(request, 'videos/listen.html', {'song': track})
@csrf_exempt
def delete_audio(request):
    """POST: delete an Audio row and its media/<id>.mp3 file."""
    if request.method=="POST":
        song_id=request.POST.get('id')
        audio=Audio.objects.get(id=song_id)
        audio.delete()
        os.remove(f'{BASE_DIR}\\media\\{song_id}.mp3')
        return HttpResponse("Deleted Successfully!")
    return HttpResponse("Method not allowed")
from testing import *
from testing.tests import *
from testing.assertions import *
# reference-test the tree() exercise; all_or_nothing makes the runs
# pass or fail as a single unit
with all_or_nothing():
    with tested_function_name("tree"):
        tree = reftest()
        tree('.')
        tree('testdata')
        tree('..')
|
import sqlite3
from employee import Employee
# conn = sqlite3.connect(':memory:')
conn = sqlite3.connect('employee.db')
c = conn.cursor()
# since the database already exists, we don't need this
# c.execute("""CREATE TABLE employees (
#           first text,
#           last text,
#           pay integer
#           )""")
# ways to insert data into the table
# 1.
#c.execute("INSERT INTO employees VALUES ('Jozo', 'Leko', 60000)")
# 2.
#c.execute("INSERT INTO employees VALUES (:first, :last, :pay)", {'first': emp_1.first, 'last': emp_1.last, 'pay': emp_1.pay})
# 3. as a function
def insert_emp(emp):
    """Insert one Employee row; `with conn` commits (or rolls back) the write."""
    with conn:
        c.execute("INSERT INTO employees VALUES (:first, :last, :pay)", {'first': emp.first, 'last': emp.last, 'pay': emp.pay})
# ways to select data from the table
# 1.
#c.execute("SELECT * FROM employees WHERE last='Leko'")
#print(c.fetchone())
# 2. (buggy example: ('Leko') is not a tuple -- a trailing comma is needed)
#c.execute("SELECT * FROM employees WHERE last=?",('Leko'))
# 3.
# named-placeholder query style
c.execute("SELECT * FROM employees WHERE last=:last",{'last':'Leko'})
print(c.fetchmany())
# 4. as a function
def get_emps_by_name(lastname):
    """Return all employee rows with the given last name (list of tuples)."""
    c.execute("SELECT * FROM employees WHERE last=:last", {'last': lastname})
    return c.fetchall()
def update_pay(emp, pay):
    """Set the pay of the row matching emp's first+last name; auto-commits."""
    with conn:
        c.execute("""UPDATE employees SET pay = :pay
                    WHERE first = :first AND last = :last""",
                  {'first': emp.first, 'last': emp.last, 'pay': pay})
def remove_emp(emp):
    """Delete the row matching emp's first+last name; auto-commits."""
    with conn:
        c.execute("DELETE from employees WHERE first = :first AND last = :last",
                  {'first': emp.first, 'last': emp.last})
# sample employees (not persisted -- the insert calls are commented out)
emp_1 = Employee('John', 'Doe', 80000)
emp_2 = Employee('Jane', 'Doe', 90000)
emp_3 = Employee('Mirela', 'Doe', 80000)
emp_4 = Employee('Klara', 'Doe', 75000)
emp_5 = Employee('Ema', 'Doe', 85000)
emp_6 = Employee('Ante', 'Leko', 95000)
# insert_emp(emp_1)
# insert_emp(emp_2)
# insert_emp(emp_3)
# insert_emp(emp_4)
# insert_emp(emp_5)
# insert_emp(emp_6)
print(emp_3.first)
# exercise the query / update / delete helpers
emps = get_emps_by_name('Doe')
print(emps)
update_pay(emp_2, 95000)
remove_emp(emp_1)
emps = get_emps_by_name('Doe')
print(emps)
conn.commit()
conn.close()
|
# Exercício Python 045: Crie um programa que faça o computador jogar Jokenpô com você.
# EXTRA: Crie diferentes modos de jogo.
from random import choice as ch
from time import sleep as sl
lis = ['pedra', 'papel', 'tesoura']
def cls():
    """Crudely 'clear' the console by printing a run of blank lines."""
    print(5 * '\n')
def jogo():
    """Play rock-paper-scissors rounds against the module-global computer
    move `random`; `md == '1'` (single match) exits after one round."""
    while True:
        acao = str(input('''Escolha sua ação:
        [ 1 ] PEDRA
        [ 2 ] PAPEL
        [ 3 ] TESOURA
        Será que você conseguirá ganhar do RNG? ''').lower().strip())
        if acao == 'pedra' or acao == '1':
            # rock >>> scissors >>> paper >>> rock
            if random == 'pedra':
                print('e o computador escolhe... {}!'.format(random))
                sl(0.7)
                print('EMPATE!!!')
                print('\n\nObrigado por ter jogado!!')
                sl(1.5)
                if md == '1':
                    exit()
                continue
            elif random == 'papel':
                print('e o computador escolhe... {}!'.format(random))
                sl(0.7)
                print('OH NOOOO! VOCÊ PERDEU!')
                print('\n\nObrigado por ter jogado!!')
                sl(1.5)
                if md == '1':
                    exit()
                continue
            elif random == 'tesoura':
                print('e o computador escolhe... {}!'.format(random))
                sl(0.7)
                print('PARABÉNS!! VOCÊ GANHOU!')
                print('\nObrigado por ter jogado!!')
                sl(1.5)
                if md == '1':
                    exit()
                continue
        if acao == 'papel' or acao == '2':
            if random == 'pedra':
                print('e o computador escolhe... {}!'.format(random))
                sl(0.7)
                print('PARABÉNS!! VOCÊ GANHOU!')
                print('\n\nObrigado por ter jogado!!')
                sl(1.5)
                if md == '1':
                    exit()
                continue
            elif random == 'papel':
                print('e o computador escolhe... {}!'.format(random))
                sl(0.7)
                print('EMPATE!!!')
                print('\n\nObrigado por ter jogado!!')
                sl(1.5)
                if md == '1':
                    exit()
                continue
            elif random == 'tesoura':
                # NOTE(review): this branch never reveals the computer's move
                print('OH NOOOO! VOCÊ PERDEU!')
                print('\n\nObrigado por ter jogado!!')
                sl(1.5)
                if md == '1':
                    exit()
                continue
        elif acao == 'tesoura' or acao == '3':
            if random == 'pedra':
                print('OH NOOOO! VOCÊ PERDEU!')
                print('\n\nObrigado por ter jogado!!')
                sl(1.5)
                if md == '1':
                    exit()
                continue
            elif random == 'papel':
                print('PARABÉNS!! VOCÊ GANHOU!')
                print('\n\nObrigado por ter jogado!!')
                sl(1.5)
                # NOTE(review): unlike every other branch there is no
                # `if md == '1': exit()` here -- single-match mode keeps
                # looping after this outcome; confirm whether intentional
                continue
            elif random == 'tesoura':
                print('EMPATE!!!')
                print('\n\nObrigado por ter jogado!!')
                sl(1.5)
                if md == '1':
                    exit()
                continue
        elif acao == 'sair':
            print('Obrigado por ter jogado!!')
            sl(1.5)
            exit()
# outer loop: pick the computer's move, choose a match style, then play
while True:
    # computer's move for the next game. NOTE(review): printing it shows the
    # move to the player -- looks like a debug leftover; confirm.
    random = ch(lis)
    print(random)
    print('=' * 7, 'JOGUIN DO JO KEN PO', '=' * 7)
    sl(1.3)
    cls()
    md = str(input('''Escolha o estilo de partida:
    [ 1 ] Partida única
    [ 2 ] MD3
    [ 3 ] MD5
    [ 4 ] Infinito '''))  # author's TODO: remaining modes unimplemented
    if md == '1':
        jogo()
        exit()
|
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import tensorflow.keras.layers as layers
__all__ = ['VGG','vgg16', 'vgg19']
class VGG(tf.keras.Model):
    """VGG-style classifier: a convolutional feature extractor followed by
    the standard Flatten + 4096 + 4096 + `classes` dense head."""
    def __init__(self, features, classes=1000):
        super(VGG, self).__init__()
        # `features` is a pre-built tf.keras.Sequential of conv/pool blocks
        self.features = features
        self.classifier = tf.keras.Sequential(
            layers=[
                layers.Flatten(),
                layers.Dense(4096, activation='relu'),
                layers.Dense(4096, activation='relu'),
                layers.Dense(classes)],  # logits (no softmax)
            name='classifier')

    def call(self, x):
        """Forward pass: features then classifier head."""
        x = self.features(x)
        x = self.classifier(x)
        return x
def vgg16(pretrained):
    """Build a VGG-16 model; if `pretrained`, copy ImageNet weights into the
    convolutional feature extractor (classifier head stays random)."""
    features = tf.keras.Sequential(
        layers=[
            # Block 1
            layers.Conv2D(64, (3, 3), activation='relu', padding="same", name='block1_conv1'),
            layers.Conv2D(64, (3, 3), activation='relu', padding="same", name='block1_conv2'),
            layers.MaxPooling2D((2, 2), (2, 2), name='block1_pool'),
            # Block 2
            layers.Conv2D(128, (3, 3), activation='relu', padding="same", name='block2_conv1'),
            layers.Conv2D(128, (3, 3), activation='relu', padding="same", name='block2_conv2'),
            layers.MaxPooling2D((2, 2), (2, 2), name='block2_pool'),
            # Block 3
            layers.Conv2D(256, (3, 3), activation='relu', padding="same", name='block3_conv1'),
            layers.Conv2D(256, (3, 3), activation='relu', padding="same", name='block3_conv2'),
            layers.Conv2D(256, (3, 3), activation='relu', padding="same", name='block3_conv3'),
            layers.MaxPooling2D((2, 2), (2, 2), name='block3_pool'),
            # Block 4
            layers.Conv2D(512, (3, 3), activation='relu', padding="same", name='block4_conv1'),
            layers.Conv2D(512, (3, 3), activation='relu', padding="same", name='block4_conv2'),
            layers.Conv2D(512, (3, 3), activation='relu', padding="same", name='block4_conv3'),
            layers.MaxPooling2D((2, 2), (2, 2), name='block4_pool'),
            # Block 5
            layers.Conv2D(512, (3, 3), activation='relu', padding="same", name='block5_conv1'),
            layers.Conv2D(512, (3, 3), activation='relu', padding="same", name='block5_conv2'),
            layers.Conv2D(512, (3, 3), activation='relu', padding="same", name='block5_conv3'),
            layers.MaxPooling2D((2, 2), (2, 2), name='block5_pool')],
        name='vgg16_features')
    model = VGG(features)
    if pretrained:
        # run one dummy batch so all layer variables are created before
        # set_weights can be called
        input = tf.random.normal([1, 224, 224, 3])
        model(input)
        model.features.summary()
        model.classifier.summary()
        base_model = tf.keras.applications.VGG16(include_top=False,
                                                 weights='imagenet')  # load the pretrained model
        weights = base_model.get_weights()
        # include_top=False -> these weights cover exactly the conv stack
        model.features.set_weights(weights)
        del base_model, weights
    return model
def vgg19(pretrained):
    """Build a VGG-19 model; if `pretrained`, copy ImageNet weights.

    NOTE(review): unlike vgg16() above, this uses include_top=True and calls
    model.set_weights() on the whole model (features + classifier). Confirm
    the flattened weight ordering of keras' VGG19 actually matches this
    custom VGG container before relying on it.
    """
    features = tf.keras.Sequential(
        layers=[
            # Block 1
            layers.Conv2D(64, (3, 3), activation='relu', padding="same", name='block1_conv1'),
            layers.Conv2D(64, (3, 3), activation='relu', padding="same", name='block1_conv2'),
            layers.MaxPooling2D((2, 2), padding="valid", name='block1_pool'),
            # Block 2
            layers.Conv2D(128, (3, 3), activation='relu', padding="same", name='block2_conv1'),
            layers.Conv2D(128, (3, 3), activation='relu', padding="same", name='block2_conv2'),
            layers.MaxPooling2D((2, 2), padding="valid", name='block2_pool'),
            # Block 3
            layers.Conv2D(256, (3, 3), activation='relu', padding="same", name='block3_conv1'),
            layers.Conv2D(256, (3, 3), activation='relu', padding="same", name='block3_conv2'),
            layers.Conv2D(256, (3, 3), activation='relu', padding="same", name='block3_conv3'),
            layers.Conv2D(256, (3, 3), activation='relu', padding="same", name='block3_conv4'),
            layers.MaxPooling2D((2, 2), padding="valid", name='block3_pool'),
            # Block 4
            layers.Conv2D(512, (3, 3), activation='relu', padding="same", name='block4_conv1'),
            layers.Conv2D(512, (3, 3), activation='relu', padding="same", name='block4_conv2'),
            layers.Conv2D(512, (3, 3), activation='relu', padding="same", name='block4_conv3'),
            layers.Conv2D(512, (3, 3), activation='relu', padding="same", name='block4_conv4'),
            layers.MaxPooling2D((2, 2), padding="valid", name='block4_pool'),
            # Block 5
            layers.Conv2D(512, (3, 3), activation='relu', padding="same", name='block5_conv1'),
            layers.Conv2D(512, (3, 3), activation='relu', padding="same", name='block5_conv2'),
            layers.Conv2D(512, (3, 3), activation='relu', padding="same", name='block5_conv3'),
            layers.Conv2D(512, (3, 3), activation='relu', padding="same", name='block5_conv4'),
            layers.MaxPooling2D((2, 2), padding="valid", name='block5_pool')],
        name='vgg19_features')
    model = VGG(features)
    if pretrained:
        # dummy batch builds all variables before set_weights
        input = tf.random.normal([1, 224, 224, 3])
        model(input)
        model.features.summary()
        model.classifier.summary()
        base_model = tf.keras.applications.VGG19(include_top=True,
                                                 weights='imagenet')  # load the pretrained model
        weights = base_model.get_weights()
        model.set_weights(weights)
        del base_model, weights
    return model
|
"""Subclass of frmMorph, which is generated by wxFormBuilder."""
import wx
import TextmorphGui
from mylib import word_count
# Implementing frmMorph
# Implementing frmMorph
class frmMorph(TextmorphGui.frmMorph):
    """Main window: applies an ordered pipeline of str transforms
    (lower/upper/title/replace) to the input text and shows the result."""

    def __init__(self, parent):
        TextmorphGui.frmMorph.__init__(self, parent)
        # ordered list of str methods applied by process_text()
        self.process_list = []

    def tell_word_count(self):
        # "input (processed) words" in status bar field 0
        text_message = "%s (%s) words" % (word_count(self.txt_input.Value), word_count(self.txt_processed.Value))
        self.status_bar.SetStatusText(text_message, i=0)

    def tell_char_count(self):
        # "input (processed) characters" in status bar field 1
        text_message = "%s (%s) characters" % (len(self.txt_input.Value), len(self.txt_processed.Value))
        self.status_bar.SetStatusText(text_message, i=1)

    def process_text(self):
        """Run the whole pipeline over the input box and refresh the output
        box and both status-bar counters."""
        buffer = str(self.txt_input.Value)
        for func in self.process_list:
            if func == str.replace:
                # replace needs the search/replace box contents as arguments
                buffer = func(buffer, str(self.txt_search_text.Value), str(self.txt_replace_text.Value))
            else:
                buffer = func(buffer)
        self.txt_processed.Value = buffer
        self.tell_word_count()
        self.tell_char_count()

    def on_search_text(self, event):
        self.process_text()

    def on_replace_text(self, event):
        self.process_text()

    def on_input_text_changed(self, event):
        self.process_text()

    def on_toggle_search_replace(self, event):
        # enable/disable the search+replace boxes and (de)register str.replace
        if self.chk_search_replace.Value:
            self.txt_search_text.Enable()
            self.txt_replace_text.Enable()
            self.process_list.append(str.replace)
        else:
            self.txt_search_text.Disable()
            self.txt_replace_text.Disable()
            if str.replace in self.process_list:
                self.process_list.remove(str.replace)

    def clear_lowercase(self):
        # uncheck the lowercase box and drop str.lower from the pipeline
        if self.chk_modify_text_lower.Value:
            self.chk_modify_text_lower.Value = False
        if str.lower in self.process_list:
            self.process_list.remove(str.lower)

    def clear_uppercase(self):
        # uncheck the uppercase box and drop str.upper from the pipeline
        if self.chk_modify_text_upper.Value:
            self.chk_modify_text_upper.Value = False
        if str.upper in self.process_list:
            self.process_list.remove(str.upper)

    def clear_titlecase(self):
        # uncheck the titlecase box and drop str.title from the pipeline
        if self.chk_modify_text_proper.Value:
            self.chk_modify_text_proper.Value = False
        if str.title in self.process_list:
            self.process_list.remove(str.title)

    def on_toggle_lowercase(self, event):
        # the three case transforms are mutually exclusive
        self.clear_titlecase()
        self.clear_uppercase()
        if self.chk_modify_text_lower.Value:
            self.process_list.append(str.lower)
        else:
            self.clear_lowercase()
        self.process_text()

    def on_toggle_titlecase(self, event):
        self.clear_lowercase()
        self.clear_uppercase()
        if self.chk_modify_text_proper.Value:
            self.process_list.append(str.title)
        else:
            self.clear_titlecase()
        self.process_text()

    def on_toggle_uppercase(self, event):
        self.clear_lowercase()
        self.clear_titlecase()
        if self.chk_modify_text_upper.Value:
            self.process_list.append(str.upper)
        else:
            self.clear_uppercase()
        self.process_text()

    def on_close(self, event):
        self.Destroy()
# application bootstrap: create the wx app and show the main frame
app = wx.App(redirect=False)
# app = wx.App()
frame = frmMorph(None)
frame.Show(True)
app.MainLoop()
class Solution(object):
    """LeetCode 480: median of every length-k sliding window.

    Keeps the current window as a sorted list; insert/delete positions come
    from the stdlib `bisect` module instead of the hand-rolled binary search
    (which is kept below for backward compatibility). O(n*k) overall since
    list insert/pop are O(k).
    """

    def medianSlidingWindow(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: List[float]
        """
        import bisect  # stdlib replacement for the hand-rolled search
        if not nums:
            return []
        res = list()
        windows = list()
        # seed the first window
        for i in range(k):
            bisect.insort(windows, nums[i])
        # for odd k both indices coincide; for even k this averages the middle pair
        res.append((windows[k >> 1] + windows[(k - 1) >> 1]) / 2)
        for idx in range(k, len(nums)):
            # drop the element leaving the window, add the one entering it
            windows.pop(bisect.bisect_left(windows, nums[idx - k]))
            bisect.insort(windows, nums[idx])
            res.append((windows[k >> 1] + windows[(k - 1) >> 1]) / 2)
        return res

    @staticmethod
    def binary_search(nums, size, target):
        """Return an index of `target` in nums[:size], or its insertion
        point if absent. Retained for compatibility; the main path now
        uses bisect."""
        l = 0
        r = size - 1
        while l <= r:
            m = l + ((r - l) >> 1)
            if nums[m] > target:
                r = m - 1
            elif nums[m] < target:
                l = m + 1
            else:
                return m
        return l
class HashHeap:
    """Binary heap with an item->index hash for O(log n) removal by value.

    Items must be unique and comparable; desc=True makes it a max-heap.
    """
    def __init__(self, desc=False):
        self.hash = dict()
        self.heap = [-1]  # root stored at index 1; slot 0 is a sentinel
        self.desc = desc
        self.size = 0

    def push(self, item):
        """Add an item and restore the heap invariant."""
        self.heap.append(item)
        self.size += 1
        self.hash[item] = self.size
        self._sift_up(self.size)

    def pop(self):
        """Remove and return the root (min, or max when desc=True)."""
        item = self.heap[1]
        self.remove(item)
        return item

    def top(self):
        """Return the root without removing it."""
        return self.heap[1]

    def remove(self, item):
        """Remove an arbitrary item by value in O(log n)."""
        # in case of heap is empty or bad remove request
        if item not in self.hash:
            return
        index = self.hash[item]
        # move the last element into the vacated slot, then shrink
        self._swap(index, self.size)
        del self.hash[item]
        self.heap.pop()
        self.size -= 1
        # in case of the removed item is the last item
        if index < self.size:
            # the moved element may need to go either direction
            self._sift_up(index)
            self._sift_down(index)

    def _smaller(self, left, right):
        # comparison flipped for a max-heap
        return right < left if self.desc else left < right

    def _sift_up(self, index):
        # bubble toward the root while the parent is "larger"
        while index > 1:
            parent = index // 2
            if self._smaller(self.heap[parent], self.heap[index]):
                break
            self._swap(parent, index)
            index = parent

    def _sift_down(self, index):
        # bubble toward the leaves, swapping with the "smaller" child
        while index * 2 <= self.size:
            child = index * 2
            if child != self.size and self._smaller(self.heap[child + 1], self.heap[child]):
                child = child + 1
            if self._smaller(self.heap[child], self.heap[index]):
                self._swap(index, child)
            else:
                break
            index = child

    def _swap(self, i, j):
        # swap two slots and keep the hash index map consistent
        elem1 = self.heap[i]
        elem2 = self.heap[j]
        self.heap[i] = elem2
        self.heap[j] = elem1
        self.hash[elem1] = j
        self.hash[elem2] = i
class Solution1:
    """Sliding-window median via two HashHeaps: max-heap for the lower half,
    min-heap for the upper half. Items are (value, index) pairs so that
    duplicate values stay unique in the heaps' hash maps."""
    def __init__(self):
        self.max_heap = HashHeap(desc=True)  # lower half (holds the extra element)
        self.min_heap = HashHeap()           # upper half
        self.window_size_odd = True

    def medianSlidingWindow(self, nums, k):
        """Return the median of each length-k window of nums as floats."""
        if not nums or len(nums) < k:
            return []
        self.window_size_odd = (k % 2 == 1)
        # pre-load the first k-1 elements
        for i in range(0, k - 1):
            self.add((nums[i], i))
        medians = []
        for i in range(k - 1, len(nums)):
            self.add((nums[i], i))
            medians.append(self.median)
            # evict the element sliding out of the window
            self.remove((nums[i - k + 1], i - k + 1))
        return medians

    def add(self, item):
        """Insert into the currently-smaller heap, then rebalance tops."""
        if self.max_heap.size > self.min_heap.size:
            self.min_heap.push(item)
        else:
            self.max_heap.push(item)
        if self.max_heap.size == 0 or self.min_heap.size == 0:
            return
        # invariant: every max_heap item <= every min_heap item
        if self.max_heap.top() > self.min_heap.top():
            item1 = self.min_heap.pop()
            item2 = self.max_heap.pop()
            self.max_heap.push(item1)
            self.min_heap.push(item2)

    def remove(self, item):
        """Remove from whichever heap holds the item (decided by comparison)."""
        if item > self.max_heap.top():
            self.min_heap.remove(item)
        else:
            self.max_heap.remove(item)

    @property
    def median(self):
        # odd window: max_heap holds the extra (middle) element
        if self.window_size_odd:
            return self.max_heap.top()[0] * 1.0
        else:
            return (self.max_heap.top()[0] + self.min_heap.top()[0]) / 2
if __name__ == '__main__':
    # smoke tests (LeetCode 480 examples)
    s = Solution()
    assert s.medianSlidingWindow([1, 3, -1, -3, 5, 3, 6, 7], 3) == [1.0, -1.0, -1.0, 3.0, 5.0, 6.0]
    s = Solution()
    assert s.medianSlidingWindow([1, 4, 2, 3], 4) == [2.5]
    s = Solution()
    assert s.medianSlidingWindow([1], 1) == [1.0]
    s = Solution()
    assert s.medianSlidingWindow([5, 5, 8, 1, 4, 7, 1, 3, 8, 4], 8) == [4.5, 4.5, 4.0]
|
import sys
import re

# Read one integer per line from the file named in argv[1].
args = sys.argv
# exactly one argument (the input file) is required
if len(args) != 2:
    print("Usage: Python task1.py [filename]")
    exit(0)
# `with` closes the file; the original called `fd.close` without parentheses,
# which never actually closed it
with open(args[1], 'r') as fd:
    contains = fd.read()
# \d+ instead of \d{0,}\n: the old pattern produced empty matches on blank
# lines (crashing int()) and silently dropped a final line without '\n'
regulars = re.findall(r"\d+", contains)
if len(regulars) == 0:
    print("Error: no numbers")
    exit(0)
numbers = [int(num) for num in regulars]
# the straightforward solution
#import numpy as np
#fifty = np.mean(numbers)
#ninety = np.percentile(numbers, 90)
#print (sum([nbr for nbr in numbers if ninety > nbr > fifty]))
# solution with sorting and a hand-computed percentile
def quicksort(array):
    """Recursive quicksort (first element as pivot); duplicates preserved,
    input list untouched."""
    if len(array) <= 1:
        return array
    pivot = array[0]
    below = [x for x in array if x < pivot]
    same = [x for x in array if x == pivot]
    above = [x for x in array if x > pivot]
    return quicksort(below) + same + quicksort(above)
import math

def percentile(array, percent):
    """Value at the `percent` percentile using the ceil(len*percent/100)
    rank rule (1-based), i.e. the same result as the original quicksort-based
    version but via the stdlib sort."""
    rank = int(math.ceil((len(array) * percent) / 100)) - 1
    return sorted(array)[rank]
def average(array):
    """Arithmetic mean of array (true division, like the original)."""
    total = sum(array)
    return total / len(array)
# sum of numbers strictly between the mean and the 90th percentile
mean = average(numbers)
ninety = percentile(numbers, 90)
print (sum([nbr for nbr in numbers if ninety > nbr > mean]))
from django.shortcuts import render, HttpResponse, redirect
from django.contrib import messages
import bcrypt
from .models import User, Message, Comment
def index(request):
    """Render the login/registration page."""
    return render(request, 'login.html')
def registerUser(request):
    """Validate and create a new User; hash the password with bcrypt and
    log the user in via session['userid']. On validation errors, flash
    them and return to the login page."""
    errors = User.objects.basic_validator(request.POST)
    if len(errors) > 0:
        for key, value in errors.items():
            messages.error(request, value)
        return redirect('/')
    else:
        password = request.POST['password']
        # bcrypt works on bytes; decode back to str for DB storage
        pw_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode()
        print(pw_hash)
        user = User.objects.create(first_name=request.POST['first_name'], last_name=request.POST['last_name'], email=request.POST['email'], password=pw_hash)
        request.session['userid'] = user.id
        return redirect('/myWall')
def validateUser(request):
    """Log a user in: validate the form, check the bcrypt password hash, and
    store the id in the session. Any failure redirects back to '/'."""
    errors = User.objects.user_validator(request.POST)
    if len(errors) > 0:
        for key, value in errors.items():
            messages.error(request, value)
        return redirect('/')
    else:
        user = User.objects.filter(email=request.POST['email'])
        if user:
            logged_user = user[0]
            # compare the submitted password against the stored hash
            if bcrypt.checkpw(request.POST['password'].encode(), logged_user.password.encode()):
                request.session['userid'] = logged_user.id
                return redirect('/myWall')
        # unknown email or wrong password
        return redirect('/')
def myWall(request):
    """Render the wall with the logged-in user's info and every post.

    NOTE(review): assumes session['userid'] is set; a logged-out visit
    would raise KeyError -- confirm routing guards this.
    """
    context = {
        'userInfo': User.objects.get(id=request.session['userid']),
        'allPosts': Message.objects.all(),
    }
    return render(request, 'index.html', context)
def newPost(request):
    """Validate and create a wall post for the logged-in user."""
    errors = Message.objects.message_validator(request.POST)
    if len(errors) > 0:
        for key, value in errors.items():
            messages.error(request, value)
        return redirect('/myWall')
    else:
        # NOTE(review): user_id is being given a User *instance*, not a pk;
        # normally that keyword should be `user=` -- confirm the model field.
        Message.objects.create(user_id=User.objects.get(id=request.session['userid']) ,post=request.POST['newMessage'])
        return redirect('/myWall')
def delete_session(request):
    """Log out by dropping 'userid' from the session, then go home."""
    if 'userid' in request.session:
        del request.session['userid']
    else:
        print("Can't Delete from session")
    return redirect('/')
import json
import socket
from confluent_kafka import Producer
class WebScrapingProducer:
    """Thin Kafka producer wrapper for publishing scraped company data."""
    def __init__(self):
        # NOTE(review): the bootstrap list repeats the same broker twice
        # ("localhost:9092,localhost:9092") -- looks like a copy/paste slip.
        conf = {'bootstrap.servers': "localhost:9092,localhost:9092",
                'client.id': socket.gethostname()}
        self.__producer = Producer(conf)

    def criar_mensagem_producer(self, topic, dados_empresa):
        """Serialize `dados_empresa` as UTF-8 JSON and publish it to `topic`."""
        dados_empresa = json.dumps(dados_empresa).encode('utf-8')
        self.__producer.produce(topic=topic,
                                key="key",
                                value=dados_empresa,
                                callback=WebScrapingProducer.__retorno_resultado_mensagem_producer)
        # Wait up to 1 second for events. Callbacks will be invoked during
        # this method call if the message is acknowledged.
        self.__producer.poll(1)

    @staticmethod
    def __retorno_resultado_mensagem_producer(err, dados_empresa):
        """Delivery callback: log success or failure of the publish."""
        if err is not None:
            print("Failed to deliver message: %s: %s" % (str(dados_empresa), str(err)))
        else:
            print("Message produced: %s" % (str(dados_empresa)))
|
import os
import sys
from configparser import ConfigParser, ExtendedInterpolation
def base_dir():
    """Absolute path of the project root: two levels above this file."""
    here = os.path.realpath(__file__)
    return os.path.dirname(os.path.dirname(here))
def get_setting(section, setting, func=str):
    """Read `setting` from config/settings.ini, converted through `func`.

    Re-reads the file on each call. The 'Common.base' key is set to the
    computed project root so ${Common:base} interpolation resolves.
    """
    config = ConfigParser(interpolation=ExtendedInterpolation())
    config.read(os.path.join(base_dir(), 'config', 'settings.ini'))
    config.set('Common', 'base', base_dir())
    return func(config.get(section, setting))
|
prizes = [20, 30, 40, 1000]

# loop method: double each prize.
# BUG FIX: the original appended `prize // 2`, which HALVED the value --
# contradicting both the variable name and the comprehension version below.
double_prizes = []
for prize in prizes:
    double_prizes.append(prize * 2)
print(double_prizes)

# comprehension method
double_prizes = [prize * 2 for prize in prizes]
print(double_prizes)

# squaring numbers, keeping only the even squares
nums = [1, 2, 3, 4, 5, 6, 7, 8, 7]
squared_nums = []
for num in nums:
    if (num ** 2) % 2 == 0:
        squared_nums.append(num ** 2)
print(squared_nums)

# comprehension method
nums = [1, 2, 3, 4, 5, 6, 7, 8]
squared_nums = [num ** 2 for num in nums if (num ** 2) % 2 == 0]
print(squared_nums)
import re
import os
import datetime
from .andhra import AndhraArchive
from ..utils import utils
class Maharashtra(AndhraArchive):
    def __init__(self, name, storage):
        """Configure the Maharashtra e-gazette scraper on top of the shared
        AndhraArchive ASP.NET search flow."""
        AndhraArchive.__init__(self, name, storage)
        self.baseurl = 'https://egazzete.mahaonline.gov.in/Forms/GazetteSearch.aspx'
        self.hostname = 'egazzete.mahaonline.gov.in'
        self.search_endp = 'GazetteSearch.aspx'
        # id of the results <table> in the search response
        self.result_table = 'CPH_GridView2'
        # earliest date the archive is scraped from
        self.start_date = datetime.datetime(2010, 1, 1)
    def get_post_data(self, tags, dateobj):
        """Build the (name, value) POST pairs for the search form.

        Walks the form's <input>/<select> tags, filling both date fields
        with `dateobj`, pressing the Search button, and leaving the two
        dropdowns at their '-----Select----' placeholders. Image buttons
        and the Reset button are skipped.
        """
        datestr = utils.dateobj_to_str(dateobj, '/')
        postdata = []

        for tag in tags:
            name = None
            value = None
            if tag.name == 'input':
                name = tag.get('name')
                value = tag.get('value')
                t = tag.get('type')
                # skip image buttons and the Reset control
                if t == 'image' or name == 'ctl00$CPH$btnReset':
                    continue
                # both ends of the date range are set to the same day
                if name == 'ctl00$CPH$txtToDate' or \
                        name == 'ctl00$CPH$txtfromDate':
                    value = datestr
                elif name == 'ctl00$CPH$btnSearch':
                    value = 'Search'
            elif tag.name == 'select':
                name = tag.get('name')
                # note: the two placeholders really do differ by one dash
                if name == 'ctl00$CPH$ddldivision':
                    value = '-----Select----'
                elif name == 'ctl00$CPH$ddlSection':
                    value = '-----Select-----'
            if name:
                if value == None:
                    value = ''
                postdata.append((name, value))
        return postdata
def get_column_order(self, tr):
order = []
for th in tr.find_all('th'):
txt = utils.get_tag_contents(th)
if txt and re.search('Division\s+Name', txt):
order.append('division')
elif txt and re.search('Subject', txt):
order.append('subject')
elif txt and re.search('View\s+Gazette', txt):
order.append('download')
elif txt and re.search('Section\s+Name', txt):
order.append('partnum')
elif txt and re.search('Gazette\s+Type', txt):
order.append('gztype')
else:
order.append('')
return order
def process_result_row(self, tr, metainfos, dateobj, order):
metainfo = utils.MetaInfo()
metainfo.set_date(dateobj)
i = 0
for td in tr.find_all('td'):
if len(order) > i:
col = order[i]
txt = utils.get_tag_contents(td)
if txt:
txt = txt.strip()
else:
continue
if col == 'gztype':
metainfo.set_gztype(txt)
elif col == 'download':
link = td.find('a')
if link:
href = link.get('href')
if href:
metainfo['download'] = href
elif col in ['partnum', 'division', 'subject']:
metainfo[col] = txt
i += 1
if 'download' not in metainfo:
self.logger.warning('No download link, ignoring: %s', tr)
else:
metainfos.append(metainfo)
def download_metainfos(self, relpath, metainfos, search_url, \
postdata, cookiejar):
dls = []
for metainfo in metainfos:
if 'download' not in metainfo or 'gztype' not in metainfo:
self.logger.warning('Required fields not present. Ignoring- %s' % metainfo)
continue
href = metainfo.pop('download')
reobj = re.search('javascript:__doPostBack\(\'(?P<event_target>[^\']+)\'', href)
if not reobj:
self.logger.warning('No event_target in the gazette link. Ignoring - %s' % metainfo)
continue
groupdict = reobj.groupdict()
event_target = groupdict['event_target']
newpost = []
for t in postdata:
if t[0] == 'ctl00$CPH$btnSearch':
continue
if t[0] == '__EVENTTARGET':
t = (t[0], event_target)
newpost.append(t)
gztype = metainfo['gztype']
if 'division' in metainfo:
gztype = '%s_%s' % (gztype, metainfo['division'])
if 'partnum' in metainfo:
gztype = '%s_%s' % (gztype, metainfo['partnum'])
gztype, n = re.subn('[()\s-]+', '-', gztype)
relurl = os.path.join(relpath, gztype)
if self.save_gazette(relurl, search_url, metainfo, \
postdata = newpost, cookiefile = cookiejar, \
validurl = False):
dls.append(relurl)
return dls
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:summer_han
"""
`return` hands a value back to the caller and ends the function.
What can be returned?  Anything: the count, type and value are all up
to the function — no return gives None, one value comes back as-is,
and several values come back packed into a single tuple.
"""


def test1():
    # No return statement: the call implicitly evaluates to None.
    print("in the test1")


def test2():
    # A single value is returned unchanged.
    print("in the test2")
    return 0


def test3():
    # Several comma-separated values are packed into one tuple.
    print("in the test3")
    return 1, "hello", ['xi', 'han'], {"dabao": "erbao"}


x = test1()
y = test2()
z = test3()
print(type(x), x)  # no return -> implicitly None
print(type(y), y)  # one return value -> that object itself
print(type(z), z)  # multiple return values -> one tuple holding them all
# Why have return values at all?  Later program logic needs the result
# of a call to keep working with it.
|
# Find the longest substring whose characters appear in alphabetical
# (non-decreasing) order.
# BUGFIX vs. the original: `s[i:j]` excluded the character at index j, so the
# last character of the string could never end the best substring, and the
# length test `j-i > len(max)` was off by one.  Also avoids shadowing the
# builtins `max`/`str` and no longer crashes on empty input.


def check(chunk):
    """Return True if chunk's characters are in non-decreasing order."""
    for a in range(len(chunk) - 1):
        if chunk[a + 1] < chunk[a]:
            return False
    return True


def longest_alpha_substring(s):
    """Return the first, longest alphabetical-order substring of s ('' if s is empty)."""
    best = s[:1]  # '' for empty input; otherwise the first character
    for i in range(len(s)):          # index of the first char
        for j in range(i, len(s)):   # index of the last char (inclusive)
            candidate = s[i:j + 1]
            if len(candidate) > len(best) and check(candidate):
                best = candidate
    return best


if __name__ == '__main__':
    s = input()
    print('Longest substring in alphabetical order is:')
    print(longest_alpha_substring(s))
import grpc
import kvstore_pb2
import kvstore_pb2_grpc
import random
import logging
import json
import time
import os
import sys
import pickle as pkl
import threading
from threading import Condition
from enum import Enum
from KThread import *
from chaosmonkey import CMServer
class LogMod(Enum):
    """Mutation kinds accepted by KVServer.logModify."""
    ADDITION = 1     # extend the log with a batch of entries (list +=)
    APPEND = 2       # append one single entry
    REPLACEMENT = 3  # replace the entire log
    DELETION = 4     # truncate the log from a given index onward
class KVServer(kvstore_pb2_grpc.KeyValueStoreServicer):
    """A Raft consensus node exposing a replicated key-value store over gRPC."""
    # Role constants stored in self.role; each role has a dedicated thread loop
    # (follower/candidate/leader methods below) coordinated via Conditions.
    follower = 0
    candidate = 1
    leader = 2
    def __init__(self, addresses: list, id: int, server_config: dict):
        """Initialize one Raft node.

        Args:
            addresses: address of every cluster node; a node's id is its index.
            id: this node's index into `addresses`.
            server_config: dict with keys 'request_timeout', 'election_timeout',
                'key_size', 'value_size', 'app_entries_timeout'.
        """
        ### Persistent state on all servers, update on stable storage before responding to RPCs
        self.currentTerm = 0
        self.votedFor = -1
        self.log = []  # first index is 1
        # load persistent state from json file
        self.id = id
        self.persistent_file = 'log/config-%d' % self.id
        self.diskLog = "log/log-%d.pkl" % self.id
        # self.loggerFile = "log/log-%d.txt" % self.id
        self.diskStateMachine = "log/state_machine-%d.pkl" % self.id
        # Todo: will re-enable load later
        # self.load()
        self.stateMachine = {}  # used to be storage
        # Config
        self.requestTimeout = server_config["request_timeout"]  # in ms
        self.maxElectionTimeout = server_config["election_timeout"]
        self.keySizeLimit = server_config["key_size"]
        self.valSizeLimit = server_config["value_size"]
        # heartbeat/replication period, converted ms -> seconds
        self.appendEntriesTimeout = float(server_config["app_entries_timeout"])/1000
        ### Volatile state on all servers
        self.commitIndex = -1  # known to be commited, init to 0
        # if larger than lastApplied, apply log to state machine
        self.lastApplied = -1  # index of highest log entry applied to state machine, init to 0
        self.role = KVServer.candidate
        self.leaderID = -1
        self.peers = []
        self.lastUpdate = time.time()  # refreshed on every heartbeat/vote grant
        # Condition variables coordinating the role threads and RPC handlers
        self.appendEntriesCond = Condition()
        self.appliedStateMachCond = Condition()
        self.lastCommittedTermCond = Condition()
        self.leaderCond = Condition()
        self.candidateCond = Condition()
        self.followerCond = Condition()
        # Client related
        self.registeredClients = []
        self.clientReqResults = {}  # clientID: [stateMachineOutput, sequenceNum]
        # current state: randomized election timeout in [T/2, T] ms, kept in seconds
        self.currElectionTimeout = random.uniform(self.maxElectionTimeout / 2, self.maxElectionTimeout) / 1000  # in sec
        for idx, addr in enumerate(addresses):
            if idx != self.id:
                self.peers.append(idx)
        self.majority = int(len(addresses) / 2) + 1
        self.lastLogIndex = -1
        self.lastLogTerm = 0
        self.addresses = addresses  # number of nodes implied here
        # ChaosMonkey failure-injection matrix (probability of dropping messages)
        self.cmserver = CMServer(num_server=len(addresses))
        # create logger with 'raft'
        self.logger = logging.getLogger('raft')
        self.logger.setLevel(logging.DEBUG)
        # create formatter and add it to the handlers
        formatter = logging.Formatter('[%(asctime)s,%(msecs)d %(levelname)s]: %(message)s',
                                      datefmt='%M:%S')
        # create file handler which logs even debug messages
        os.makedirs(os.path.dirname('log/logger-%d.txt' % self.id), exist_ok=True)
        fh = logging.FileHandler('log/logger-%d.txt' % self.id)
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)
        # create console handler with a higher log level
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)
        # # Last version logging setting
        # logging.basicConfig(filename='log/logger-%d.txt' % self.id,
        #                     filemode='a',
        #                     format='%(asctime)s,%(msecs)d %(levelname)s %(message)s',
        #                     datefmt='%H:%M:%S')
        # self.logger = logging.getLogger('raft')
        # self.logger.setLevel(logging.NOTSET)
        self.logger.debug(f'[Chaos]: Initial ChaosMonkey matrix: \n<{self.cmserver}>')
        ### Volatile state on leaders
        self.nextIndex = [0] * len(addresses)  # index of next log entry to send to that server
        self.matchIndex = [-1] * len(addresses)  # highest log entry known to be replicated
        # if there exists such N that N> commitIndex and majority of matchIndex[i] >= N
        # and log[N].term ==currentTerm, set commitIndex = N
        self.numVotes = 0
        # Todo: for debugging only
        self.debug1 = 0
def load(self):
if os.path.isfile(self.persistent_file):
with open(self.persistent_file, 'r') as f:
data_store = json.load(f)
self.currentTerm = data_store["currentTerm"]
self.votedFor = data_store["votedFor"]
# Todo: check if all currentTerm and votedFor has .save() save persistent state to json file
def save(self, current_term, voted_for):
self.currentTerm = current_term
self.votedFor = voted_for
persistent = {"currentTerm": self.currentTerm, "votedFor": self.votedFor}
with open(self.persistent_file, 'w') as f:
json.dump(persistent, f)
    def follower(self):
        """Follower role loop; runs forever on its own thread.

        Sleeps on followerCond; if no heartbeat refreshes lastUpdate within the
        election timeout, promotes this node to candidate, wakes the candidate
        loop, then parks until notified back into the follower role.
        """
        while True:
            # self.role = KVServer.follower # Todo: is this correct
            # NOTE(review): f-prefix is redundant here; %-formatting does the work
            self.logger.critical(f'[Role]: Running as a follower, elec timeout <%.4f>' % self.currElectionTimeout)
            self.save(current_term=self.currentTerm, voted_for=-1)
            self.lastUpdate = time.time()
            # stay follower while heartbeats keep refreshing lastUpdate elsewhere
            while time.time() - self.lastUpdate <= self.currElectionTimeout:
                with self.followerCond:
                    self.followerCond.wait(self.currElectionTimeout - (time.time() - self.lastUpdate))  # -elapsed time
            # self.logger.debug(f'Current time <{time.time()}>, last update <{self.lastUpdate}>, deduct to '
            #                   f'<{time.time() - self.lastUpdate}>election timeout <{self.currElectionTimeout}>')
            # election timeout elapsed: become candidate and wake the candidate loop
            self.role = KVServer.candidate  # Todo: change to candidate here?
            with self.candidateCond:
                self.candidateCond.notify_all()
            # park until some transition notifies this node back to follower
            with self.followerCond:
                self.followerCond.wait()
    def candidate(self):
        """Candidate role loop; runs forever on its own thread.

        Blocks until the follower loop notifies candidateCond, then repeatedly:
        increments the term, votes for itself, spawns vote-request threads,
        and waits out the (re-randomized) election timeout.  Transitions to
        leader on a majority of votes or back to follower if out-voted.
        """
        with self.candidateCond:
            self.candidateCond.wait()
        while True:
            self.logger.critical(f'[Role]: Running as a candidate, elec timeout <%.4f>' % self.currElectionTimeout)
            # Upon conversion to candidate, start election
            # Increment current term, vote for self, reset election timer, send requestVote RPCs to all other servers
            # self.logger.critical(f'RAFT[Vote]: Server <{self.id}> initiated voting for term <{self.currentTerm}> '
            #                      f'took <%.4f> seconds' % (time.time()-start_time))
            self.save(current_term=self.currentTerm+1, voted_for=self.id)
            self.numVotes = 1
            # re-randomize the timeout each round to break split votes
            self.currElectionTimeout = random.uniform(self.maxElectionTimeout / 2, self.maxElectionTimeout) / 1000
            self.election = KThread(target=self.initiateVote, args=())
            self.election.start()
            self.logger.info(f'[Vote]: Start, voted for self <{self.id}> term <{self.currentTerm}> '
                             f'election timeout: <%.4f>' % self.currElectionTimeout)
            self.lastUpdate = time.time()
            # wait until the election resolves or the timeout expires
            while time.time() - self.lastUpdate <= self.currElectionTimeout and self.role == KVServer.candidate:
                with self.candidateCond:
                    self.candidateCond.wait(self.currElectionTimeout-(time.time() - self.lastUpdate))  # - elapse time
                if self.numVotes >= self.majority or self.role == KVServer.follower:
                    break
            self.save(current_term=self.currentTerm, voted_for=-1)
            if self.role == KVServer.leader:
                # won: wake the leader loop, then park until demoted back to candidate
                with self.leaderCond:
                    self.leaderCond.notify_all()
                with self.candidateCond:
                    # self.logger.critical(f"in candidate, larger than majority")
                    self.candidateCond.wait()
            elif self.role == KVServer.follower:
                # lost / saw higher term: wake the follower loop and park
                with self.followerCond:
                    self.followerCond.notify_all()
                with self.candidateCond:
                    self.candidateCond.wait()
            # Todo: is this needed?
            # self.lastUpdate = time.time()
            # if time.time() - self.lastUpdate <= self.currElectionTimeout:
            #     with self.candidateCond:
            #         self.candidateCond.wait(self.currElectionTimeout-(time.time() - self.lastUpdate))
    def leader(self):
        """Leader role loop; runs forever on its own thread.

        Blocks on leaderCond until elected.  On becoming leader it reinitializes
        nextIndex/matchIndex, appends a no-op entry (so the new term can commit),
        and enters append_entries(), which sends heartbeats until demotion.
        """
        while True:
            # mcip: Use condition to control instead
            with self.leaderCond:
                # self.logger.critical(f"reached leader111, larger than majority")
                self.leaderCond.wait()
            if self.role == KVServer.follower:
                # spurious wakeup while not leader: hand control back and re-park
                with self.followerCond:
                    self.followerCond.notify_all()
                with self.leaderCond:
                    self.leaderCond.wait()
            elif self.role == KVServer.candidate:
                with self.candidateCond:
                    self.candidateCond.notify_all()
                with self.leaderCond:
                    self.leaderCond.wait()
            # self.role = KVServer.leader # Todo: is this correct?
            self.logger.critical(f'[Role]: Running as a leader')
            self.save(current_term=self.currentTerm, voted_for=-1)
            self.leaderID = self.id
            # for each server it's the index of next log entry to send to that server
            # init to leader last log index + 1
            self.nextIndex = [self.lastLogIndex + 1] * len(self.addresses)
            # for each server, index of highest log entry known to be replicated on server
            # init to 0, increase monotonically
            self.matchIndex = [0] * len(self.addresses)
            # Todo: might need debugging?
            # Upon becoming leader, append no-op entry to log (6.4)
            self.logModify([self.currentTerm, f"no-op: leader-{self.id}", "no-op"], LogMod.APPEND)
            self.append_entries()
def initiateVote(self):
# Todo: mcip, make sure the term is the same while request vote????
req_term = self.currentTerm
for idx, addr in enumerate(self.addresses):
if idx == self.id:
continue
# Create a thread for each request vote
# Todo: mcip: req_term should be the same
election_thread = KThread(target=self.thread_election, args=(idx, addr, req_term, ))
election_thread.start()
# Todo: All servers: If RPC request or response contains term T> currentTerm, set current term = T,
# convert to follower
def convToFollowerIfHigherTerm(self, term, voted_for):
if term > self.currentTerm:
if self.role == KVServer.candidate:
self.save(current_term=term, voted_for=voted_for)
self.role = KVServer.follower
with self.candidateCond:
self.candidateCond.notify_all()
elif self.role == KVServer.leader: # leader
self.save(current_term=term, voted_for=voted_for)
self.role = KVServer.follower
with self.leaderCond:
self.leaderCond.notify_all()
    # Todo: Add chaos monkey?
    def thread_election(self, idx, addr, req_term):
        """Send one requestVote RPC to peer `idx` at `addr` and tally the result.

        Runs on its own thread (one per peer per election round).  On a granted
        vote, increments numVotes and promotes this node to leader at majority;
        on rejection with a higher term, steps down to follower.
        Network errors are logged, not raised.
        """
        try:
            # Todo: shouldn't always increment term here ???
            vote_request = kvstore_pb2.VoteRequest(term=self.currentTerm, candidateID=self.id,
                                                  lastLogIndex=self.lastLogIndex, lastLogTerm=req_term)
            # with grpc.insecure_channel(addr) as channel:
            channel = grpc.insecure_channel(addr)
            grpc.channel_ready_future(channel).result()
            stub = kvstore_pb2_grpc.KeyValueStoreStub(channel)
            # self.logger.debug(f'Send vote request to server: <{idx}>')
            req_vote_resp = stub.requestVote(vote_request, timeout=self.requestTimeout)  # timeout keyword ok?
            # Todo: mcip, does this improve?
            # Todo: Add lock here to consider concurrency
            if req_vote_resp.voteGranted:
                self.logger.info(f'[Vote]: received from <{idx}>, vote count: <{self.numVotes}>')
                if self.role == KVServer.candidate:
                    # NOTE(review): numVotes += 1 is not lock-protected; racy across threads
                    self.numVotes += 1
                    if self.numVotes >= self.majority:
                        self.role = KVServer.leader
                        with self.candidateCond:
                            # self.logger.critical(f"thread_election, larger than majority")
                            self.candidateCond.notify_all()
            else:
                self.logger.info(f'[Vote]: rejected from <{idx}> its term: {req_vote_resp.term}')
                # Todo: added by mcip, does this actually improve?
                if self.role == KVServer.follower and req_vote_resp.term > self.currentTerm:
                    self.save(current_term=req_vote_resp.term, voted_for=-1)
                # Todo: All servers: If RPC request or response contains term T> currentTerm, set current term = T,
                # convert to follower
                self.convToFollowerIfHigherTerm(req_vote_resp.term, voted_for=-1)
                # self.role = KVServer.follower
                # with self.candidateCond:
                #     self.candidateCond.notify_all()
        # elif num_rej_votes > self.majority:
        #     self.save(current_term=self.currentTerm, votedFor=-1)
        except Exception as e:
            self.logger.error("[Vote]: f() thread_election:")
            self.logger.error(e)
    def requestVote(self, request, context):  # Receiving vote request and process
        """gRPC handler: decide whether to grant our vote to a candidate.

        Grants the vote unless the candidate's term is stale, its log is behind
        ours, or we already voted for someone else this term.  Seeing a higher
        term also forces a step-down to follower.
        Returns a VoteResponse(term, voteGranted).
        """
        # Todo: not needed?
        # self.lastUpdate = time.time()
        try:
            req_term = request.term
            req_candidate_id = request.candidateID
            req_last_log_ind = request.lastLogIndex
            req_last_log_term = request.lastLogTerm
            # self.logger.debug(f'RAFT[Vote]: Receive request vote from <{req_candidate_id}>')
            vote_granted = True
            # Todo: not sure if req_last_log_term < self.lastLogTerm is needed
            # Reply false if term < currentTerm
            # If votedFor is null/candidateID, and candidate's log is at least as updated as receiver's log, grant vote
            if req_term < self.currentTerm or req_last_log_ind < self.lastLogIndex or \
                    req_last_log_term < self.lastLogTerm or \
                    (self.votedFor != -1 and self.votedFor != req_candidate_id):
                vote_granted = False
                self.logger.info(f'[Vote]: reject vote request from <{req_candidate_id}>, '
                                 f'currentTerm <{self.currentTerm}>'
                                 f'\n reason: <{req_term < self.currentTerm}>, <{req_last_log_ind < self.lastLogIndex}>'
                                 f', <{req_last_log_term < self.lastLogTerm}> or voted for another')
                # even when rejecting, a higher term still forces a step-down
                if self.role == KVServer.follower and req_term > self.currentTerm:
                    self.save(current_term=req_term, voted_for=-1)
                # Todo: All servers: If RPC request or response contains term T> currentTerm, set current term = T,
                # convert to follower
                self.convToFollowerIfHigherTerm(req_term, voted_for=req_candidate_id)
            elif req_term == self.currentTerm:
                # granting a vote resets our election timer
                self.lastUpdate = time.time()
                self.save(current_term=self.currentTerm, voted_for=req_candidate_id)  # TODO: Add lock here?
                self.logger.info(f'[Vote]: vote granted for <{req_candidate_id}> w term <{self.currentTerm}>')
            # Find higher term in RequestVote message
            elif req_term > self.currentTerm:
                self.lastUpdate = time.time()
                if self.role == KVServer.follower:
                    self.save(current_term=req_term, voted_for=req_candidate_id)
                # Todo: All servers: If RPC request or response contains term T> currentTerm, set current term = T,
                # convert to follower
                self.convToFollowerIfHigherTerm(req_term, voted_for=req_candidate_id)
                self.logger.critical(f'[Vote]: vote granted for <{req_candidate_id}> '
                                     f'due to higher term <{req_term}>')
            # Todo: mcip: if granting the vote to someone, should set back the lastUpdate time?
            return kvstore_pb2.VoteResponse(term=self.currentTerm, voteGranted=vote_granted)
        except Exception as e:
            self.logger.error("[Vote]: f() requestVote:")
            self.logger.error(e)
    # Leader sends append_entry message as log replication and heart beat
    def append_entries(self):
        """Leader replication loop: fan out AppendEntries to every peer.

        Spawns one thread_append_entry per peer, then sleeps up to
        appendEntriesTimeout (or until notified, e.g. by a new client write)
        before the next round.  Exits when this node is no longer leader.
        """
        while self.role == KVServer.leader:
            # Todo: for debugging only
            # # self.debug1 += 1
            # self.logModify([self.debug1, "aa", "bb"], LogMod.APPEND)
            # self.logModify([self.debug1, "bb", "cc"], LogMod.APPEND)
            # snapshot the term so one round uses a consistent value
            app_ent_term = self.currentTerm
            for idx, addr in enumerate(self.addresses):
                if idx == self.id:
                    continue
                # Create a thread for each append_entry message
                # Todo: mcip: append entries term is the same
                append_thread = KThread(target=self.thread_append_entry, args=(idx, addr, app_ent_term,))
                append_thread.start()
            # Send append entry every following seconds, or be notified and wake up
            # Todo: will release during wait
            with self.appendEntriesCond:
                self.appendEntriesCond.wait(timeout=self.appendEntriesTimeout)
    def thread_append_entry(self, idx, addr, app_ent_term):
        """Send one AppendEntries RPC to peer `idx` and process its response.

        Ships log entries from nextIndex[idx] onward (an empty list acts as a
        heartbeat).  On failure due to log inconsistency, decrements
        nextIndex[idx] for retry; on a higher term, steps down.  On success,
        advances matchIndex and commits the median-replicated index N.
        ChaosMonkey may probabilistically suppress the send.
        """
        try:
            append_request = kvstore_pb2.AppendRequest()
            append_request.term = app_ent_term  # int32 term = 1;
            append_request.leaderID = self.id  # int32 leaderID = 2;
            append_request.prevLogIndex = self.nextIndex[idx]  # int32 prevLogIndex = 3;
            # int32 prevLogTerm = 4;
            if 0 <= self.nextIndex[idx] < len(self.log):
                append_request.prevLogTerm = self.log[self.nextIndex[idx]][0]
            else:
                append_request.prevLogTerm = 0
            append_request.leaderCommit = self.commitIndex  # int32 leaderCommit = 6;
            # remember what we attempted to replicate, for matchIndex on success
            last_req_log_idx = self.lastLogIndex
            self.logger.info(f"[AP_En]: thread_append_entry to <{idx}> prevLogInd <{append_request.prevLogIndex}> "
                            f"prevLogTerm <{append_request.prevLogTerm}>")
            if self.nextIndex[idx] < len(self.log):
                for row in self.log[self.nextIndex[idx]:]:  # repeated LogEntry entries = 5;
                    entry = append_request.entries.add()
                    entry.term = row[0]
                    entry.key = row[1]
                    entry.val = row[2]
                # optimistically advance; rolled back below on failure
                self.nextIndex[idx] = self.lastLogIndex + 1  # Todo: should inc to +1 here?
            # with grpc.insecure_channel(addr) as channel:
            channel = grpc.insecure_channel(addr)
            grpc.channel_ready_future(channel).result()
            # int32 term = 1;
            # bool success = 2;
            stub = kvstore_pb2_grpc.KeyValueStoreStub(channel)
            if random.uniform(0, 1) < self.cmserver.fail_mat[self.leaderID][self.id]:
                self.logger.warning(f'[ABORTED]: we will not receive from <{self.leaderID}> '
                                    f'because of ChaosMonkey')
            else:
                # self.logger.info(f'[AP_En]: thread_append_entry to <{idx}>, '
                #                  f'req last log <{last_req_log_idx}>')
                #                  f'req entries \n<{append_request.entries}>')
                append_entry_response = stub.appendEntries(
                    append_request, timeout=self.requestTimeout)
                if not append_entry_response.success:
                    self.logger.info(f"[AP_En]: thread_append_entry to <{idx}> failed, "
                                     f"its term <{append_entry_response.term}>, leader's <{self.currentTerm}>")
                    # Failed because of log inconsistency, decrement nextIndex and retry
                    if append_entry_response.term <= self.currentTerm:
                        self.logger.info(f"[AP_En]: log inconsistency, nextIndex for <{idx}> dec from "
                                         f"<{self.nextIndex[idx]}> to <{max(append_request.prevLogIndex - 1, 0) }>")
                        # Todo: how to decrement correctly
                        self.nextIndex[idx] = max(append_request.prevLogIndex - 1, 0)
                    else:
                        # Todo: All servers: If RPC request or response contains term T> currentTerm,
                        # set current term = T, convert to follower
                        self.convToFollowerIfHigherTerm(append_entry_response.term, voted_for=-1)
                # Success
                else:
                    self.logger.info(f"[AP_En]: thread_append_entry to <{idx}> success")
                    self.matchIndex[idx] = last_req_log_idx
                    self.logger.debug(f'[KVStore]: matchIndex: <{self.matchIndex}>')
                    # median of sorted matchIndex == highest index replicated on a majority
                    n_list = sorted(self.matchIndex)
                    # TODO: write to disk upon majority
                    # if there exists such N that N> commitIndex and majority of matchIndex[i] >= N
                    # and log[N].term ==currentTerm, set commitIndex = N
                    N = n_list[int(len(n_list) / 2)]
                    if N >= 0 and N > self.commitIndex and self.log[N][0] == self.currentTerm:
                        self.commitIndex = N
                        self.logger.info(f"RAFT: Commit index on leader updates to: {N}")
                        disk_write_kth = KThread(target=self.applyToStateMachine, args=(self.lastApplied,))
                        disk_write_kth.start()
        except Exception as e:
            self.logger.error("[Vote]: f() thread_append_entry, most likely name resolution error")
            self.logger.error(e)  # Todo: Name resolution error
    def appendEntries(self, request, context):  # receiving/server side
        """gRPC handler: accept AppendEntries (heartbeat / log replication) from the leader.

        Resets the election timer, demotes a candidate, rejects stale terms or
        mismatched prevLogIndex/prevLogTerm, truncates conflicting entries,
        appends new ones, and applies newly committed entries to the state
        machine.  Returns AppendResponse(term, success).
        NOTE(review): if ChaosMonkey drops the message, the function falls
        through with no explicit return (the caller receives a default/None).
        """
        # int32 term = 1;
        # int32 leaderID = 2;
        # int32 prevLogIndex = 3;
        # int32 prevLogTerm = 4;
        # repeated LogEntry entries = 5;
        # int32 leaderCommit = 6;
        self.leaderID = request.leaderID
        if random.uniform(0, 1) < self.cmserver.fail_mat[self.leaderID][self.id]:
            self.logger.warning(f'[ABORTED]: append entries from server <{self.leaderID}> '
                                f'to <{self.id}>, because of ChaosMonkey')
        else:
            # Todo: if election timeout elapse without receiving AppendEntries RPC from current leader or granting vote
            # to candidate: convert to candidate
            self.lastUpdate = time.time()
            success = False
            try:
                # Todo: If candidates receive AppendEntries RPC from a leader, convert to follower
                # mcip: request term should be useless coz of last log term??????
                if self.role == KVServer.candidate:
                    self.save(current_term=max(self.lastLogTerm, request.term), voted_for=-1)
                    self.role = KVServer.follower
                    with self.candidateCond:
                        self.candidateCond.notify_all()
                else:
                    # Todo: All servers: If RPC request or response contains term T> currentTerm,
                    # set current term = T, convert to follower
                    self.convToFollowerIfHigherTerm(request.term, voted_for=-1)
                # convert protobuf entries to plain [term, key, val] rows
                tmp_entries = []
                for row in request.entries:
                    r = [row.term, row.key, row.val]
                    tmp_entries.append(r)
                    # self.logger.info(f'row: <{r}>')
                # reply false if term < currentTerm,
                # or log doesn't log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm
                # Todo: if it doesn't match the term, it will decrement and resend, thus following will remove entries
                if request.term < self.currentTerm or request.prevLogIndex > len(self.log) \
                        or (request.prevLogIndex < len(self.log) and
                            self.log[request.prevLogIndex][0] != request.prevLogTerm):
                    self.logger.warning(f'[AP_En]: received on <{self.id}>, will return false to <{self.leaderID}>')
                    self.logger.warning(f'[AP_En]: <{request.term < self.currentTerm}>, '
                                        f'<{request.prevLogIndex > len(self.log)}>, '
                                        f'<{(request.prevLogIndex < len(self.log) and self.log[request.prevLogIndex][0] != request.prevLogTerm)}>')
                    self.logger.info(f'Parameters for false: req term: <{request.term}>, cur term: '
                                     f'<{self.currentTerm}>, req prevLogIdx: <{request.prevLogIndex}>, '
                                     f'length of server log <{len(self.log)}>')
                    if request.prevLogIndex < len(self.log):
                        self.logger.info(f'term of log on prev log index: <{self.log[request.prevLogIndex][0]}>, '
                                         f'request prev log term: <{request.prevLogTerm}>')
                    # existing entry conflicts with a new one, same idx different terms,
                    # delete the existing entry and all that follow it
                    # self.logger.info(f'RAFT: checking conflicting entries')
                    itr = 0
                    for a, b in zip(tmp_entries, self.log[request.prevLogIndex:]):
                        if a != b:
                            self.logger.warning(f'[Log]: Found conflict at index: '
                                                f'<{request.prevLogIndex + itr}>')
                            self.logModify(request.prevLogIndex + itr, LogMod.DELETION)
                        itr += 1
                else:
                    self.save(current_term=max(self.currentTerm, request.term), voted_for=-1)
                    # self.logger.info("RAFT: AppendEntries should succeed unless there is conflict entries")
                    success = True
                    # existing entry conflicts with a new one, same idx different terms,
                    # delete the existing entry and all that follow it
                    # self.logger.info(f'RAFT: checking conflicting entries')
                    itr = 0
                    if len(self.log) > 0:
                        for a, b in zip(tmp_entries, self.log[request.prevLogIndex:]):
                            if a != b:
                                self.logger.warning(f'[Log]: Found conflict at index: '
                                                    f'<{request.prevLogIndex + itr}>')
                                self.logModify(request.prevLogIndex + itr, LogMod.DELETION)
                            itr += 1
                    # Heartbeat
                    if len(tmp_entries) == 0:
                        self.logger.info("[Log]: received a heartbeat")
                    # Normal append entries
                    else:
                        self.logger.info(f"[Log]: append entries, leader commit <{request.leaderCommit}>")
                        # Append any new entries not already in the log
                        to_append_length = request.prevLogIndex + len(tmp_entries) - len(self.log)
                        # self.logger.debug(f'RAFT: length of log to append: <{to_append_length}>')
                        if to_append_length > 0:
                            self.logModify(tmp_entries[-to_append_length:], LogMod.ADDITION)
                    # If leaderCommit > commitIndex, set commitIndex = min(leaderCommit, index of last new entry)
                    # print("Received log from appendEntries is: ", tmp_entries)
                    # self.logger.debug(f'RAFT: Checking if we need to write to disk: <{request.leaderCommit}>,'
                    #                   f'<{self.commitIndex}>, <{self.lastLogIndex}>')
                    if request.leaderCommit > self.lastApplied:
                        self.logger.info(f"[Log]: apply to state machine, leader commit <{request.leaderCommit}> "
                                         f"last applied <{self.lastApplied}>")
                        self.commitIndex = min(request.leaderCommit, self.lastLogIndex)
                        app_state_mach_kth = KThread(target=self.applyToStateMachine, args=(self.lastApplied,))
                        app_state_mach_kth.start()
                # int32 term = 1;
                # bool success = 2;
                self.lastUpdate = time.time()
                return kvstore_pb2.AppendResponse(term=self.currentTerm, success=success)
            except Exception as e:
                self.logger.error("RAFT[Vote]: f(): appendEntries:")
                self.logger.error(e)
    # Todo: always use this to update log
    def logModify(self, para, operation: LogMod):
        """Single choke point for every log mutation.

        `para` depends on `operation`: a list of entries for ADDITION, one
        entry for APPEND, the whole new log for REPLACEMENT, or a truncation
        index for DELETION (keeps log[:para]).  After mutating, refreshes
        lastLogIndex/lastLogTerm, persists the log via pickle, updates the
        leader's own matchIndex, wakes lastCommittedTermCond waiters, and
        adopts the last log term if it exceeds currentTerm.
        """
        # self.logger.debug(f'RAFT[Log]: Log modify: <{para}>, '
        #                   f'operation: <{operation}>')
        if operation == LogMod.ADDITION:
            self.log += para
        elif operation == LogMod.APPEND:
            self.log.append(para)
        elif operation == LogMod.REPLACEMENT:
            self.log = para
        elif operation == LogMod.DELETION:
            self.log = self.log[:para]
        self.lastLogIndex = len(self.log) - 1
        # NOTE(review): with an empty log this reads log[-1] and raises IndexError
        self.lastLogTerm = self.log[self.lastLogIndex][0]
        with open(self.diskLog, 'wb') as f:
            pkl.dump(self.log, f)
        # Todo: not needed when it's the leader, what about follower?
        if self.id == self.leaderID:
            self.matchIndex[self.id] = self.lastLogIndex
        # Wait until last committed entry is from leader's term, notify all upon leader's log change
        with self.lastCommittedTermCond:
            self.lastCommittedTermCond.notify_all()
        if self.lastLogTerm > self.currentTerm:
            self.save(current_term=self.lastLogTerm, voted_for=self.votedFor)
        self.logger.info(f'[Log]: Log updated on disk of server <{self.id}> ,'
                         f'last log index now: <{self.lastLogIndex}>, '
                         f'log is: LOG!!! ')  # <{self.log}>')
def applyToStateMachine(self, last_applied):
# TODO: maybe we can append only? maybe we need synchronization
to_update = self.log[last_applied + 1:self.commitIndex + 1]
for row in to_update:
self.stateMachine[row[1]] = row[2]
with open(self.diskStateMachine, 'wb') as f:
pkl.dump(self.stateMachine, f)
self.lastApplied = self.commitIndex
# Apply command in log order, notify all upon completion
with self.appliedStateMachCond:
self.appliedStateMachCond.notify_all()
self.logger.info(f'[StateMach]: Last applied index: <{self.lastApplied}>, ')
# f'state machine updated to: <{self.stateMachine}>')
# def readWithKey(self, key):
# n = len(self.log)
# for i in range(n - 1, -1, -1):
# if self.log[i][1] == key: return self.log[i][2]
# return ""
def run(self):
# Create a thread to run as follower
leader_state = KThread(target=self.leader, args=())
leader_state.start()
candidate_state = KThread(target=self.candidate, args=())
candidate_state.start()
follower_state = KThread(target=self.follower, args=())
follower_state.start()
# Checkpoint 1 Get Put Methods
# Todo: no longer needed?
# def localGet(self, key):
# '''
# val = self.readWithKey(key)
# if val == "": return kvstore_pb2.GetResponse(ret = kvstore_pb2.FAILURE, value = val)
# else: return kvstore_pb2.GetResponse(ret = kvstore_pb2.SUCCESS, value = val)
# '''
# resp = kvstore_pb2.GetResponse()
# try:
# resp.value = self.stateMachine[key]
# resp.ret = kvstore_pb2.SUCCESS
# self.logger.info(f'RAFT[KVStore]: localGet <{key}, {resp.value}>')
# except KeyError:
# resp.ret = kvstore_pb2.FAILURE
# self.logger.warning(f'RAFT[KVStore]: localGet failed, no such key: [{key}]')
# return resp
# Todo: no longer needed?
# def localPut(self, key, val):
# resp = kvstore_pb2.PutResponse()
# self.stateMachine[key] = val # dictionary
# resp.ret = kvstore_pb2.SUCCESS
# self.logger.info(f'RAFT[KVStore]: localPut <{key}, {val}>')
# return resp
# Todo: add client ID and sequence number
    def Get(self, request, context):
        """gRPC handler: linearizable read of `request.key` (leader only).

        Non-leaders redirect via NOT_LEADER + leaderHint.  The leader waits
        until its latest committed entry is from its own term, snapshots
        commitIndex as the read index, triggers a heartbeat round, waits for
        the state machine to reach the read index, then answers from it.
        Returns a GetResponse(value, status, leaderHint).
        """
        try:
            # string key = 1;
            # Reply NOT_LEADER if not leader, providing hint when available
            if self.role != KVServer.leader:
                # string value = 1;
                # ClientRPCStatus status = 2;
                # int32 leaderHint = 3;
                self.logger.info(f'[KVStore]: Get redirect to leader <{self.leaderID}>')
                return kvstore_pb2.GetResponse(value="", status=kvstore_pb2.NOT_LEADER, leaderHint=self.leaderID)
            try:
                # Wait until last committed entry is from leader's term
                with self.lastCommittedTermCond:
                    self.lastCommittedTermCond.wait_for(lambda: self.log[self.commitIndex][0] == self.currentTerm)
                # Save commitIndex as local variable readIndex
                read_index = self.commitIndex
                # Todo: Is this done?
                # Send new round of heartbeats, and wait for reply from majority of servers
                with self.appendEntriesCond:
                    self.appendEntriesCond.notify_all()
                # Wait for state machine to advance at least the readIndex log entry
                with self.appliedStateMachCond:
                    self.appliedStateMachCond.wait_for(lambda: self.lastApplied >= read_index)
                # Process query
                # Reply OK with state machine output
                self.logger.info(f'[KVStore]: Get success: <{request.key}, {self.stateMachine[request.key]}>')
                context.set_code(grpc.StatusCode.OK)
                return kvstore_pb2.GetResponse(value=self.stateMachine[request.key], status=kvstore_pb2.OK2CLIENT,
                                               leaderHint=self.id)
            except KeyError:
                # key was never written: report an error to the client
                self.logger.warning(f'[KVStore]: Get failed, no such key: [{request.key}]')
                context.set_code(grpc.StatusCode.CANCELLED)
                return kvstore_pb2.GetResponse(value="", status=kvstore_pb2.ERROR2CLIENT, leaderHint=self.id)
        except Exception as e:
            self.logger.error("RAFT[KVStore]: f(): Get")
            self.logger.error(e)
    def Put(self, request, context):
        """gRPC handler: replicated write of key/value (leader only).

        Validates the client session and sequence number (for exactly-once
        semantics), appends the command to the log, wakes the replication
        loop, waits (bounded) for the entry to be applied, and caches the
        result per client.  Returns a PutResponse(status, response, leaderHint).
        """
        try:
            # string key = 1;
            # string value = 2;
            # int32 clientID = 3;
            # int32 sequenceNum = 4;
            # NOT_LEADER = 0;
            # SESSION_EXPIRED = 1;
            # OK2CLIENT = 2;
            # ERROR2CLIENT = 3;
            # if command received from client: append entry to local log, respond after entry applied to state machine
            # Reply NOT_LEADER if not leader, providing hint when available
            if self.role != KVServer.leader:
                return kvstore_pb2.PutResponse(status=kvstore_pb2.NOT_LEADER, response="", leaderHint=self.leaderID)
            # Reply SESSION_EXPIRED if not record of clientID or if the response for client's sequenceNum
            # already discarded
            if request.clientID not in self.registeredClients or \
                    self.clientReqResults[request.clientID][1] > request.sequenceNum:
                return kvstore_pb2.PutResponse(status=kvstore_pb2.SESSION_EXPIRED, response="", leaderHint=self.leaderID)
            # If sequenceNum already processed from client, reply OK with stored response
            if self.clientReqResults[request.clientID][1] == request.sequenceNum:
                return kvstore_pb2.PutResponse(status=kvstore_pb2.OK2CLIENT,
                                               response=self.clientReqResults[request.clientID][0],
                                               leaderHint=self.leaderID)
            # Todo: Following line has correct order?
            # Append command to log, replicate and commit it
            self.logModify([[self.currentTerm, request.key, request.value]], LogMod.ADDITION)
            put_log_ind = self.lastLogIndex
            # wake up threads to append entries
            with self.appendEntriesCond:
                self.appendEntriesCond.notify_all()
            # Apply command in log order (bounded wait — may time out)
            with self.appliedStateMachCond:
                self.appliedStateMachCond.wait_for(lambda: (self.lastApplied >= put_log_ind),
                                                   timeout=self.requestTimeout)
            # Save state machine output with sequenceNum for client, discard any prior sequenceNum for client
            self.clientReqResults[request.clientID] = [self.stateMachine[request.key], request.sequenceNum]
            # ClientRPCStatus status = 1;
            # string response = 2;
            # int32 leaderHint = 3;
            # Todo: no need for state machine output for put?
            # Reply OK with state machine output
            if self.lastApplied >= put_log_ind:
                self.logger.info(f'[KVStore]: Server put success on leader <{self.id}>')
                context.set_code(grpc.StatusCode.OK)  # Todo: why is this needed?
                return kvstore_pb2.PutResponse(status=kvstore_pb2.OK2CLIENT, response=self.stateMachine[request.key],
                                               leaderHint=self.id)
            else:
                # the wait above timed out before the entry was applied
                self.logger.warning(f'[KVStore]: Server put error (timeout?) on leader <{self.id}>')
                context.set_code(grpc.StatusCode.CANCELLED)
                return kvstore_pb2.PutResponse(status=kvstore_pb2.ERROR2CLIENT, response="", leaderHint=self.id)
        except Exception as e:
            self.logger.error("RAFT[KVStore]: f(): Put")
            self.logger.error(e)
    def registerClient(self, request, context):
        """RPC: allocate a session for a new client on the leader.

        Appends a register command to the Raft log, wakes the replicator
        threads, and blocks until the entry is applied; the log index of
        the command serves as the unique client ID.  Returns NOT_LEADER
        with a leader hint when this node is not the leader.
        """
        try:
            # ClientRPCStatus status = 1;
            # int32 clientID = 2;
            # int32 leaderHint = 3;
            # Reply NOT_LEADER if not leader, provide hint when available
            if self.role != KVServer.leader:
                return kvstore_pb2.RegisterResponse(status=kvstore_pb2.NOT_LEADER, clientID=-1, leaderHint=self.leaderID)
            else:
                # Append register command to log, replicate and commit it.
                # The command's key encodes the client ID (current log length).
                cur_last_log_ind = len(self.log)
                self.logModify([[self.currentTerm, "client-"+str(cur_last_log_ind), str(cur_last_log_ind)]],
                               LogMod.ADDITION)
                # wake up threads to register clients
                with self.appendEntriesCond:
                    self.appendEntriesCond.notify_all()
                self.registeredClients.append(cur_last_log_ind)  # Todo: faster if we put following 2 here?
                self.clientReqResults[cur_last_log_ind] = ["", -1]  # init client result dictionary
                # Apply command in log order, allocating session for new client
                with self.appliedStateMachCond:
                    self.logger.info(f'[Client]: Register client: lastApplied, <{self.lastApplied}>, '
                                     f'cur_last_log_ind, <{cur_last_log_ind}>, matchIndex, <{self.matchIndex}>')
                    # NOTE(review): unlike Put, this wait has no timeout, so the
                    # RPC can block indefinitely if the entry never commits.
                    self.appliedStateMachCond.wait_for(lambda: self.lastApplied >= cur_last_log_ind)
                # Todo: allocating new session?
                # Reply OK with unique client identifier (the log index of the register command could be used)
                return kvstore_pb2.RegisterResponse(status=kvstore_pb2.OK2CLIENT, clientID=cur_last_log_ind,
                                                    leaderHint=self.leaderID)
        except Exception as e:
            self.logger.error("RAFT[Register]: f(): registerClient")
            self.logger.error(e)
    def clientRequest(self, request, context):
        """RPC stub: not implemented; returns None (gRPC treats this as an error)."""
        pass

    def clientQuery(self, request, context):
        """RPC stub: not implemented; returns None (gRPC treats this as an error)."""
        pass
def updateConfigs(self, request, context):
# int32 requestTimeout = 1;
# int32 maxElectionTimeout = 2;
# int32 keySizeLimit = 3;
# int32 valSizeLimit = 4;
try:
self.requestTimeout = request.requestTimeout
self.maxElectionTimeout = request.maxElectionTimeout
self.keySizeLimit = request.keySizeLimit
self.valSizeLimit = request.valSizeLimit
# ReturnCode ret = 1;
return kvstore_pb2.UpdateConfigsResponse(ret=kvstore_pb2.SUCCESS)
except Exception as e:
self.logger.error("RAFT[ConfigChn]: f(): updateConfigs\n"+e)
return kvstore_pb2.UpdateConfigsResponse(ret=kvstore_pb2.FAILURE)
# Async IO implementation
# def Get(self, request, context):
# # Asyncio implementation
# # https://github.com/grpc/grpc/issues/16329
# def getProcessResponse(append_resp):
# if append_resp.ret == kvstore_pb2.SUCCESS:
# resp = kvstore_pb2.GetResponse(ret=kvstore_pb2.SUCCESS,
# value=append_resp.value)
# context.set_code(grpc.StatusCode.OK)
# return resp
# key = request.key
# resp = self.localGet(key)
# for idx, addr in enumerate(self.addresses):
# if idx == self.id:
# continue
# self.logger.info(f'RAFT: serverGet from {addr}')
# with grpc.insecure_channel(addr) as channel:
# # Asyncio implementation
# # stub = kvstore_pb2_grpc.KeyValueStoreStub(channel)
# # append_resp = stub.appendEntries.future(kvstore_pb2.AppendRequest(type=kvstore_pb2.GET, key=key))
# # append_resp.add_done_callback(getProcessResponse)
# stub = kvstore_pb2_grpc.KeyValueStoreStub(channel)
# append_resp = stub.appendEntries(kvstore_pb2.AppendRequest(
# type=kvstore_pb2.GET, key=key), timeout = self.requestTimeout) # timeout?
# if append_resp.ret == kvstore_pb2.SUCCESS:
# resp = kvstore_pb2.GetResponse(ret=kvstore_pb2.SUCCESS,
# value=append_resp.value)
# context.set_code(grpc.StatusCode.OK)
# return resp
# else: context.set_code(grpc.StatusCode.CANCELLED)
# return kvstore_pb2.GetRequest(ret=kvstore_pb2.FAILURE)
|
"""
Created on Nov 05, 2019
@author: Kuznetsov Maxim <lol8funny@gmail.com>
"""
import sys
"""
Default CGI response class.
You can set all response parameters in dict, passing in
constructor, or set each parameter by setter. All unsetted params
will be set by default.
"""
# TODO: create headers getters
class Response:
    """A CGI/WSGI-style response: body, status line and header list.

    Parameters may be supplied to the constructor or assigned later through
    the properties; anything left unset falls back to a default (empty
    bytes body, "200 OK", text/plain content type).
    """

    def __init__(self, body=None, content_type=None, status=None, headers=None):
        self._body = b"" if body is None else body
        self._status = "200 OK" if status is None else status
        self._headers = [] if headers is None else headers
        # A Content-Type header is always present.
        if content_type is None:
            self._headers.append(("Content-Type", "text/plain"))
        else:
            self._headers.append(("Content-Type", content_type))

    def _body__get(self):
        return self._body

    def _body__set(self, body):
        self._body = body

    body = property(_body__get, _body__set)

    def _status__get(self):
        return self._status

    def _status__set(self, status):
        self._status = status

    status = property(_status__get, _status__set)

    def _headers__get(self):
        # Reading `headers` yields a dict view of the (name, value) pairs.
        return self._headers_as_dict(self._headers)

    def _headers__set(self, header):
        # Assigning to `headers` APPENDS one (name, value) pair.
        self._headers.append(header)

    def _headers_as_dict(self, headerslist):
        def iterate():
            for s in headerslist:
                yield s[0], s[1]
        return {k: v for k, v in iterate()}

    headers = property(_headers__get, _headers__set)

    def __str__(self):
        parts = ["Status: " + self.status]
        parts += ("{!s}: {!r}".format(k, v) for (k, v) in self.headers.items())
        # Fix: the default body is bytes, and "\r\n".join raises TypeError
        # when mixing str and bytes — decode bytes bodies before joining.
        body = self.body.decode() if isinstance(self.body, bytes) else self.body
        parts += ["", body]
        return "\r\n".join(parts)
|
from sys import stdin
def main():
    """Read two integers (one per line) from stdin and print the maximum
    XOR value obtainable from any pair in the inclusive range they bound."""
    operands = []
    for line in stdin:
        operands.append(int(line.strip()))
    # Fix: the original used the Python 2 print statement, a SyntaxError on
    # Python 3; the parenthesized call behaves identically on both.
    print(get_max_xor_value(operands[0], operands[1]))
def get_max_xor_value(a, b):
    """Maximum of x ^ y over all pairs x, y in the inclusive range [a, b].

    If a != b, the highest bit at which a and b differ can be set in some
    pair's XOR along with every lower bit, so the answer is
    2**bit_length(a ^ b) - 1.  O(1) instead of the original O((b-a)^2)
    double loop; identical results, including 0 for an empty range.
    """
    if b < a:
        # The original loops were empty for b < a and returned 0.
        return 0
    return (1 << (a ^ b).bit_length()) - 1
if __name__ == '__main__':
main() |
from numpy import array
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from math import sqrt
from numpy import split
import pandas as pd
import numpy as np
from numpy import hstack
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
# demonstrate data normalization with sklearn
from sklearn.preprocessing import StandardScaler
# np.set_printoptions(threshold=np.nan)
# import os
# import tensorflow as tf
# import random as rn
# os.environ['PYTHONHASHSEED'] = '0'
# # Setting the seed for numpy-generated random numbers
# np.random.seed(37)
# # Setting the seed for python random numbers
# rn.seed(1254)
# # Setting the graph-level random seed.
# tf.set_random_seed(89)
# from keras import backend as K
# session_conf = tf.ConfigProto(
# intra_op_parallelism_threads=1,
# inter_op_parallelism_threads=1)
# #Force Tensorflow to use a single thread
# sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
# K.set_session(sess)
def split_train_test(dataset, split_index=662, trim=-3):
    """Split *dataset* rows into train/test and reshape each to 3-D.

    Generalized: the original hard-coded the 662-row split and the -3 tail
    trim; they are now parameters whose defaults preserve the old behavior.
    Each subset is reshaped to (rows, 1, features) — one sample per row.
    """
    train, test = dataset[:split_index], dataset[split_index:trim]
    print('length of training set', len(train))
    print('length of tests set', len(test))
    train = array(split(train, len(train)))
    test = array(split(test, len(test)))
    return train, test
def evaluate_forecasts(actual, predicted):
    """Return (overall RMSE, per-timestep RMSE list) for 2-D arrays.

    Vectorized: the original computed the overall score with a Python
    double loop over every cell; np.mean over the squared error array is
    equivalent (sum / (rows*cols)) and runs in C.
    """
    # per-timestep RMSE, one value per column
    scores = []
    for i in range(actual.shape[1]):
        mse = np.mean((actual[:, i] - predicted[:, i]) ** 2)
        scores.append(sqrt(mse))
    # overall RMSE across every forecasted value
    score = sqrt(np.mean((actual - predicted) ** 2))
    return score, scores
# summarize scores
def summarize_scores(name, score, scores):
    """Print a one-line summary: model name, overall score, per-step scores."""
    formatted = ', '.join('%.1f' % value for value in scores)
    print('%s: [%.3f] %s' % (name, score, formatted))
# convert history into inputs and outputs
def to_supervised(train, n_input, n_out=3):
    """Slide a window over the flattened series to build (X, y) samples.

    Each X sample is n_input consecutive timesteps of all features; each y
    is the following n_out values of feature 0.

    Fixes an off-by-one: the original tested `out_end < len(data)`, which
    discarded the final valid window (out_end == len(data) is a legal
    slice end in Python).
    """
    # flatten [blocks, timesteps, features] -> [rows, features]
    data = train.reshape((train.shape[0] * train.shape[1], train.shape[2]))
    X, y = list(), list()
    in_start = 0
    for _ in range(len(data)):
        in_end = in_start + n_input
        out_end = in_end + n_out
        if out_end <= len(data):
            X.append(data[in_start:in_end, :])
            y.append(data[in_end:out_end, 0])
        in_start += 1
    return array(X), array(y)
from keras import backend
def rmse(y_true, y_pred):
    """Keras metric: root-mean-squared error over the last axis."""
    return backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))
# train the model
def build_model(train, n_input):
    """Build, train and return an encoder-decoder LSTM forecaster.

    NOTE(review): this function reads the module-level global `test` for
    validation data even though only `train` is a parameter — confirm the
    implicit dependency is intended.
    Also plots loss and rmse curves after fitting (blocks on plt.show()).
    """
    train_x, train_y = to_supervised(train, n_input)
    test_x, test_y = to_supervised(test, n_input)
    print(train_x.shape, train_y.shape)
    print(test_x.shape, test_y.shape)
    verbose, epochs, batch_size = 2, 20, 15
    n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1]
    # reshape output into [samples, timesteps, features]
    train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
    test_y = test_y.reshape((test_y.shape[0], test_y.shape[1], 1))
    # train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
    # define model: LSTM encoder -> repeated context -> LSTM decoder -> dense head
    model = Sequential()
    model.add(LSTM(1000, activation='relu', input_shape=(n_timesteps, n_features)))
    model.add(RepeatVector(n_outputs))
    model.add(LSTM(1000, activation='relu', return_sequences=True))
    model.add(TimeDistributed(Dense(1000, activation='relu')))
    model.add(TimeDistributed(Dense(1)))
    # model.compile(optimizer='adam', loss='mse')
    model.compile(optimizer='adam', loss='mse',metrics=[rmse])
    # fit network
    history = model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose,
                        validation_data=(test_x, test_y))
    pyplot.plot(history.history['loss'], label='train')
    pyplot.plot(history.history['val_loss'], label='test')
    pyplot.title('loss', y=0, loc='center')
    pyplot.legend()
    pyplot.show()
    # plot rmse
    pyplot.plot(history.history['rmse'], label='train')
    pyplot.plot(history.history['val_rmse'], label='test')
    pyplot.title('rmse', y=0, loc='center')
    pyplot.legend()
    pyplot.show()
    return model
# make a forecast
def forecast(model, history, n_input):
    """Predict the next output sequence from the trailing observations.

    *history* is a list of equally-shaped 2-D observation blocks; the last
    n_input flattened rows become the single model input.
    """
    flat = array(history)
    flat = flat.reshape((flat.shape[0] * flat.shape[1], flat.shape[2]))
    # retrieve the most recent n_input observations
    recent = flat[-n_input:, :]
    # the model expects a batch axis: [1, n_input, n_features]
    batch = recent.reshape((1, recent.shape[0], recent.shape[1]))
    prediction = model.predict(batch, verbose=0)
    # drop the batch axis, keep only the forecast vector
    return prediction[0]
# evaluate a single model
def evaluate_model(train, test, n_input):
    """Train on *train*, then forecast each test block in order.

    After every forecast the true test block is appended to the history,
    so later predictions are conditioned on earlier actual values.
    Returns (model, overall RMSE, per-step RMSEs, raw predictions).
    """
    # fit model
    model = build_model(train, n_input)
    history = [x for x in train]
    predictions = list()
    for i in range(len(test)):
        yhat_sequence = forecast(model, history, n_input)
        predictions.append(yhat_sequence)
        history.append(test[i, :])
    predictions = array(predictions)
    # score against feature 0 only (the forecast target column)
    score, scores = evaluate_forecasts(test[:, :, 0], predictions)
    return model, score, scores, predictions
# NOTE(review): `df_disc_temp` is never defined in this file — this script
# raises NameError unless the DataFrame is loaded elsewhere (e.g. a
# notebook cell). Confirm where it is supposed to come from.
print('len of dataset', len(df_disc_temp))
# forward-fill missing values before splitting
df_disc_temp = df_disc_temp.fillna(method='ffill')
print(df_disc_temp.isnull().values.any())
print(df_disc_temp[662:-3].index.values)
# split into train and test
n_input = 7
train, test = split_train_test(df_disc_temp.values)
# # validate train data
print(train.shape)
# # print(train)
# print(train[0, 0, 0], train[-1, -1, 0])
# # validate test
print(test.shape)
# print(test[0, 0, 0], test[-1, -1, 0])
# evaluate model and get scores
model, score, scores, predictions = evaluate_model(train, test, n_input)
# # predictions=evaluate_model(train, test, n_input)
# print('prediction shape',predictions.shape)
print(predictions)
# np.savetxt("predicted.csv", predictions.reshape(-1,1), delimiter=",")
# # # # summarize scores
summarize_scores('lstm', score, scores)
# import pickle
# pkl_filename = "model.pkl"
# with open(pkl_filename, 'wb') as file:
#     pickle.dump(model, file)
|
# python list index() #
def main():
    """Demonstrate list.index(): it returns the position of the FIRST match."""
    sample = ["less", "roman", "Lashley", "sami", 1, 1]
    first_str = sample.index("less")
    first_one = sample.index(1)   # duplicates: only the first 1 is reported
    print(first_str, first_one)


if __name__ == "__main__":
    main()
|
import os
import sys
current_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_path)
import json
from tunner.utils import ROOT_DIR
import csv
output_dir = ROOT_DIR + '/output/'
# Runs are "finished" when they produced a result.json.
finished_list = list(filter(lambda x: os.path.exists(output_dir + x + '/result.json'), os.listdir(output_dir)))
keys = []
# Column order comes from the params of the LAST finished run (assumes all
# runs share the same parameter keys), matching the original behavior.
# Fix: the original also loaded each result.json here and never used it.
for finished_name in finished_list:
    with open(output_dir + finished_name + '/param.json') as f:
        params = json.load(f)
    keys = list(params.keys())
# Fix: open the output through a context manager so it is closed (and
# flushed) even when a run's JSON is malformed or a key is missing.
with open(output_dir + 'all.csv', 'w') as outer:
    writer = csv.writer(outer)
    writer.writerow(keys + ['Epoch', 'Dev', 'Test'])
    for finished_name in finished_list:
        with open(output_dir + finished_name + '/param.json') as f:
            params = json.load(f)
        with open(output_dir + finished_name + '/result.json') as f:
            result = json.load(f)
        to_write = [params[key] for key in keys]
        to_write.append(result['Epoch'])
        to_write.append(result['Dev'])
        to_write.append(result['Test'])
        writer.writerow(to_write)
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
# add current ToNgueLP location to python PATH to locate modules
#sys.path.append(os.path.abspath(os.path.join(os.getcwd(), '../../')))
# import PyQt4 QtCore and QtGui modules
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# import main window
from ui.TNLP_MW import TNLP_MW
# global app information
appName = "ToNgueLP"
appVersion = "0.4.x (----)"
if __name__ == '__main__':
    # create application
    app = QApplication(sys.argv)
    app.setApplicationName(appName)
    # create widget
    w = TNLP_MW(appName, appVersion)
    w.show()
    # connection: quit the app when its last window closes
    # NOTE(review): old-style signal/slot syntax — PyQt4 only.
    QObject.connect(app, SIGNAL('lastWindowClosed()'), app, SLOT('quit()'))
    # execute application; exec_() blocks until the event loop exits
    sys.exit(app.exec_())
|
# Read the two factors to multiply from the user.
n1=int(input('Enter first number'))
n2=int(input('Enter second number'))
def Karatsuba(x, y):
    """Multiply two non-negative integers with Karatsuba's algorithm.

    Fixes two correctness bugs in the original:
    * for an odd digit count it shifted the high product by 10**len_x
      instead of 10**(2*m) — e.g. Karatsuba(123, 123) returned 6129
      instead of 15129;
    * operands of different lengths were split at DIFFERENT positions,
      which invalidates the shared cross-term identity.
    Both operands are now split at the same position m.
    """
    # Base case: a single-digit factor multiplies directly.
    if x < 10 or y < 10:
        return x * y
    # Split both numbers at the same position: half of the longer one.
    m = max(len(str(x)), len(str(y))) // 2
    shift = 10 ** m
    a, b = divmod(x, shift)   # x == a*shift + b
    c, d = divmod(y, shift)   # y == c*shift + d
    low = Karatsuba(b, d)
    high = Karatsuba(a, c)
    # (a+b)(c+d) - ac - bd == ad + bc: the cross term in one recursive call.
    cross = Karatsuba(a + b, c + d) - high - low
    return high * shift * shift + cross * shift + low
# Show the product (Karatsuba already returns an int; int() is a no-op safeguard).
print('Product of ',n1,'and',n2,'is ', int(Karatsuba(n1,n2)))
|
from ch03.stack import Stack
import pytest
def test_iter():
    """Iterating a stack yields items most-recently-pushed first."""
    it = iter(Stack(10, 11, 12))
    assert next(it) == 12
    assert next(it) == 11
    assert next(it) == 10
    # an exhausted iterator keeps raising StopIteration
    for _ in range(2):
        with pytest.raises(StopIteration):
            next(it)


def test_to_list():
    """list() materializes the stack in LIFO order."""
    assert list(Stack(10, 11, 12)) == [12, 11, 10]


def test_to_str():
    """str() renders 'Stack(top, ..., bottom)'."""
    for stack, expected in [
        (Stack(), "Stack()"),
        (Stack(10), "Stack(10)"),
        (Stack(10, 11), "Stack(11, 10)"),
    ]:
        assert str(stack) == expected


def test_len():
    """len() counts the pushed items."""
    assert len(Stack(10, 11, 12, 13)) == 4
def test_eq():
    """Equality is element-wise; other lengths, contents or types differ."""
    assert Stack(10, 11, 12, 13) == Stack(10, 11, 12, 13)
    assert Stack() == Stack()
    assert Stack() != Stack(10)
    assert Stack(10, 11, 12, 13) != Stack(10, 11, 12)
    assert Stack(10, 11, 12, 13) != Stack(11, 12, 13)
    assert Stack() != []
    assert Stack() is not None


def test_is_empty():
    """is_empty is True only for a stack with no items."""
    for stack, expected in [
        (Stack(10, 11, 12), False),
        (Stack(10), False),
        (Stack(), True),
    ]:
        assert stack.is_empty() is expected


def test_push():
    """Pushing 10, 11, 12 one at a time equals Stack(10, 11, 12)."""
    stack = Stack[int]()
    for value in (10, 11, 12):
        stack.push(value)
    assert stack == Stack(10, 11, 12)


def test_pop():
    """pop returns items LIFO and raises IndexError once empty."""
    stack = Stack[int]()
    with pytest.raises(IndexError):
        stack.pop()
    for value in (10, 11, 12):
        stack.push(value)
    for expected in (12, 11, 10):
        assert stack.pop() == expected
    for _ in range(2):
        with pytest.raises(IndexError):
            stack.pop()
def test_peek():
    """peek returns the top without removing it; empty peek raises."""
    stack = Stack[int]()
    with pytest.raises(IndexError):
        stack.peek()
    for value in (10, 11, 12):
        stack.push(value)
    # repeated peeks see the same top element
    assert stack.peek() == 12
    assert stack.peek() == 12


def test_sorted():
    """sorted() returns a new stack in ascending order."""
    for original, expected in [
        (Stack(10, 1, 5, 7), Stack(1, 5, 7, 10)),
        (Stack(10, 1, 7, 5, 7), Stack(1, 5, 7, 7, 10)),
        (Stack(), Stack()),
        (Stack(5), Stack(5)),
        (Stack(5, 6), Stack(5, 6)),
        (Stack(6, 5), Stack(5, 6)),
    ]:
        assert original.sorted() == expected


def test_sorted_does_not_mutate():
    """sorted() leaves the receiver unchanged."""
    stack = Stack(10, 1, 5, 7)
    stack.sorted()
    assert stack == Stack(10, 1, 5, 7)
|
#! /usr/bin/env python
import os
import sys
import pandas as pd
import datetime
import pandas_datareader.data as web
from pandas_datareader.data import Options
from yahoo_finance import Share
from fredapi import Fred
from openpyxl import load_workbook
from optparse import OptionParser
# NOTE(review): this module-level `pretty` is shadowed by opts.pretty below
# and never read — confirm it can be removed.
pretty = False
# -v / --file toggles verbose progress output (read via opts.pretty).
parser = OptionParser()
parser.add_option("-v", "--file", action="store_true", dest='pretty', default=False)
(opts, args) = parser.parse_args()
#######################
# EQUITIES INPUTS
wb_filename = 'pandastry.xlsx'
# index symbols mapped to their data source
sIndex_large =[{'SP500': 'fred'}, {'DJIA': 'fred'}, {'NASDAQCOM':"fred"},{ 'NASDAQ100':'fred'}]
sIndex_mid= ['RU2000PR', 'RUMIDCAPTR']
sIndex_small = ['WILLSMLCAP']
# starting spreadsheet column for each index group
sIndex_large_offset = 4
sIndex_mid_offset = 11
sIndex_small_offset = 19
volatility = ['WIV', 'STLFSI']
manufacturing = ['MNFCTRIRSA', 'MNFCTRSMSA', 'MNFCTRIMSA']
housing = ['ASPUS', 'MSPNHSUS', 'MSACSR', 'MNMFS', 'HSTFCM', 'HSTFC', 'HSTFFHAI', 'HSTFVAG', 'CSUSHPINSA']
# NOTE(review): hard-coded FRED API credential checked into source —
# should be moved to an environment variable or config file.
fredKey = 'eb701cc93a5ec9bc22987c681c138b12'
########################
def print_x(msg=''):
    """Verbose-mode print helper: emit *msg* only when -v was given.

    Fixes the original condition `(msg=='') | opts.pretty == False`, which
    parsed as `((msg == '') | opts.pretty) == False` because bitwise `|`
    binds tighter than `==` — so empty messages were printed in verbose
    mode and non-empty messages were printed in quiet mode.
    """
    if msg == '' or not opts.pretty:
        return
    print(msg)
def dict_to_datapoints(lists=[]):
    """Flatten a list of {symbol: source} dicts into datapoint objects."""
    dpoints = []
    for mapping in lists:
        for symbol, source in mapping.items():
            dpoints.append(datapoint(symbol, source))
    return dpoints
#########################
class wbook(object):
    """Placeholder for a workbook abstraction (not yet implemented)."""
    def __init__(self):
        pass
class sheet(object):
    """Placeholder for a worksheet abstraction (not yet implemented)."""
    def __init__(self):
        pass
class component(object):
    """Groups a list of datapoint objects and reports how many it holds."""
    def __init__(self, dp=None):
        # Fix: the original used a mutable default (dp=[]), which is shared
        # across all calls; None + fresh list keeps the same behavior for
        # callers that omitted the argument.
        self.dpoints = [] if dp is None else dp
        print(len(self.dpoints))
class datapoint(object):
    """A single data series identified by symbol and source name."""
    def __init__(self, symbol, source):
        self.sym = symbol
        self.source = source
        # Only FRED-backed series fetch their metadata eagerly.
        if self.source == 'fred':
            self.fred = Fred(fredKey)
            self.fred_info(symbol)
    def fred_info(self, symbol):
        """Cache title/frequency/units from the FRED series metadata."""
        meta = self.fred.get_series_info(symbol)
        self.title = meta.title
        self.frequency = meta.frequency_short.lower()
        self.units = meta.units_short.lower()
#########################
class bot(object):
    """Downloads FRED series and writes them into an existing workbook.

    NOTE(review): several names used below — equity_large, equity_mid,
    equity_small, equity_filename, large_offset, mid_offset, small_offset —
    are not defined anywhere in this file (the constants are named
    sIndex_* / wb_filename / sIndex_*_offset), so largeCap/midCap/smallCap
    and __init__'s print_x calls raise NameError as written.
    """
    def __init__(self):
        self.fred = Fred(api_key=fredKey)
        print("DailyUpdate\nv1.0\n...")
        print_x("\nStock indicies:\nLarge-cap: ")
        print_x(equity_large)
        print_x("Stock indicies:\nMid-cap: ")
        print_x(equity_mid)
        print_x("Stock indicies:\nSmall-cap:")
        print_x(equity_small)
        print_x("\n\nFilenames:")
        print_x("Stock indicies: " + equity_filename)
    def fredDownload(self, keyList=[]):
        """Fetch each series in keyList and return a date-descending DataFrame."""
        #init dictionary array to hold series
        df = {}
        #create numPy series for each key in the given key list, add to array df
        for key in keyList:
            df[key] = self.fred.get_series(key, observation_start='2017-01-01', observation_end='2017-06-27')
        #create a data frame from array df
        df = pd.DataFrame(df)
        #sort descending by date
        sorted = df.sort_index(axis=0, ascending=False)
        print(sorted)
        return(sorted)
    def addTitles(self, keyList=[]):
        # NOTE(review): unfinished — builds an empty dict and returns None.
        dicts = {}
    def dataReader(self, service, tickerlist=[]):
        """Download tickerlist from *service* for a fixed Jan-2017 window."""
        start = datetime.datetime(2017,1,1)
        end = datetime.datetime(2017,1,27)
        df = web.DataReader(tickerlist, service, start, end)
        return(df)
    def largeCap(self):
        """Download large-cap indices and append them to the workbook."""
        print_x("Downloading data for LARGE CAP STOCK INDICIES.....")
        df = self.dataReader('fred', equity_large)
        print_x("...SUCCESS")
        print_x("Downloading data for LARGE CAP STOCK INDICIES.....")
        print_x('...')
        print_x('Writing to excel.....')
        self.excel_write_exist(equity_filename,df, 22, large_offset)
        print_x("...SUCCESS")
        return(df)
    def midCap(self):
        """Download mid-cap indices and append them to the workbook."""
        print_x("Downloading data for MID CAP STOCK INDICIES......")
        df = self.dataReader('fred', equity_mid)
        print_x("...SUCCESS")
        print_x('Writing to excel.....')
        self.excel_write_exist(equity_filename,df, 22, mid_offset)
        print_x("...SUCCESS")
        return(df)
    def smallCap(self):
        """Download small-cap indices and append them to the workbook."""
        print_x("Downloading data for SMALL CAP STOCK INDICIES......")
        df = self.dataReader('fred', equity_small)
        print_x("...SUCCESS")
        print_x('Writing to excel.....')
        self.excel_write_exist(equity_filename,df, 22, small_offset)
        print_x("...SUCCESS")
        return(df)
    def runSheets(self):
        # NOTE(review): not implemented.
        pass
    def runComponent(self, datapoints=[]):
        # NOTE(review): not implemented.
        pass
    def indicies(self):
        """Interactively refresh all three index groups in the workbook."""
        input("Press ENTER to update indicies sheet")
        print("\n\n\nDownloading data from indicies in indicies_list and extracting to excel")
        self.largeCap()
        self.midCap()
        self.smallCap()
    def excel_write_exist(self, filename, dFrame, row, col):
        """Write dFrame into Sheet1 of an EXISTING workbook at (row, col)."""
        book = load_workbook(filename)
        writer = pd.ExcelWriter(filename, engine='openpyxl')
        writer.book = book
        writer.sheets = dict((ws.title,ws) for ws in book.worksheets)
        df = dFrame
        df.to_excel(writer, index=False, header=False, sheet_name='Sheet1', startrow=row, startcol=col)
        writer.save()
        return
    def excel_write_new(self, dFrame, sheetName,row, col):
        """Write dFrame to a NEW workbook 'pandas_simple.xlsx'."""
        df = dFrame
        writer = pd.ExcelWriter('pandas_simple.xlsx', engine= 'xlsxwriter')
        df.to_excel(writer, sheet_name=sheetName, startrow=row, startcol=col)
        writer.save()
def main():
    """Build datapoint objects for the large-cap indices and group them.

    The rest of the original workflow (bot download + Excel writing) is
    kept below as commented-out code / a no-op string literal.
    """
    #bot1= bot()
    dps = dict_to_datapoints(sIndex_large)
    comp = component(dps)
    #nm = 'SP500'
    #dp = datapoint('SP500', 'fred')
    #print(dp.title)
    #df = bot1.largeCap()
    #filename='pandastry.xlsx'
    #bot1.excel_write_exist(filename,df,22,4)
    #input('\n\n\nPress enter to begin...')
    #bot1.indicies()
    #print("\n\n\noperation complete.")
    '''
    amzn = web.get_quote_yahoo('AMZN')
    print(amzn['last'])
    print(gdp)
    book = load_workbook('pandastry.xlsx')
    writer = pd.ExcelWriter('pandastry.xlsx', engine='openpyxl')
    writer.book = book
    writer.sheets = dict((ws.title,ws) for ws in book.worksheets)
    gdp.to_excel(writer, index=False, header=False, sheet_name='Sheet1', startrow=22, startcol=4)
    writer.save()
    aapl = Options('aapl', 'yahoo')
    data = aapl.get_all_data()
    print(data.iloc[0:5, 0:5])
    '''
if __name__ == "__main__":
    main()
|
import sys
import numpy as np
from timeit import default_timer as timer
import matplotlib.pyplot as plt
from naive import Naive
from backtracking import Backtracking
from branch_and_bound import BranchAndBound
class Comparison:
    """Benchmark three TSP solvers on random symmetric cost matrices."""

    def __init__(self):
        self.MinIteration = 6
        self.MaxIteration = 12
        self.IterationOfSizes = list(range(self.MinIteration, self.MaxIteration))
        self.CurrentMatrixSize = 0
        slots = self.MaxIteration - self.MinIteration
        self.BranchAndBoundTime = np.zeros(slots)
        self.BacktrackingTime = np.zeros(slots)
        self.NaiveTime = np.zeros(slots)
        self.SymmetricalMatrix = []

    def matrix_init(self, maxsize):
        """Create a maxsize x maxsize symmetric int cost matrix, zero diagonal."""
        self.CurrentMatrixSize = maxsize
        raw = np.random.randint(low=1, high=101, size=(self.CurrentMatrixSize, self.CurrentMatrixSize))
        symmetric = (raw + raw.T) / 2
        np.fill_diagonal(symmetric, 0)
        self.SymmetricalMatrix = symmetric.astype(int)

    def bb_time(self, index, iter):
        """Time the branch-and-bound solver; store seconds at slot *iter*."""
        solver = BranchAndBound(index, self.SymmetricalMatrix)
        began = timer()
        solver.TSP()
        self.BranchAndBoundTime[iter] = timer() - began

    def backtrack_time(self, index, iter):
        """Time the backtracking solver; store seconds at slot *iter*."""
        solver = Backtracking(index, self.SymmetricalMatrix)
        began = timer()
        solver.tsp(0, 1, 0)
        self.BacktrackingTime[iter] = timer() - began

    def naive_time(self, index, iter):
        """Time the brute-force solver; store seconds at slot *iter*."""
        solver = Naive(index, self.SymmetricalMatrix)
        began = timer()
        solver.travellingSalesmanProblem()
        self.NaiveTime[iter] = timer() - began

    def create_plot(self):
        """Plot the three timing curves and save them to comparison_plt.png."""
        plt.title("TSP Algorithm's Comparison")
        plt.xlabel("Square Matrix Dimension")
        plt.ylabel("Time")
        plt.grid()
        plt.plot(self.IterationOfSizes, self.BranchAndBoundTime, label="Branch and Bound")
        plt.plot(self.IterationOfSizes, self.BacktrackingTime, label="BackTracking")
        plt.plot(self.IterationOfSizes, self.NaiveTime, label="Naive")
        plt.legend(loc='upper left')
        plt.savefig("comparison_plt.png")
        plt.show()
if __name__ == "__main__":
comparison = Comparison()
iteration = 0
for i in range(comparison.MinIteration, comparison.MaxIteration):
comparison.matrix_init(i)
comparison.bb_time(i, iteration)
comparison.backtrack_time(i, iteration)
comparison.naive_time(i, iteration)
iteration += 1
comparison.create_plot()
sys.exit()
|
def slices(series, length):
    """Return every contiguous run of *length* characters in *series*.

    Raises ValueError for an empty series, or for a slice length that is
    zero, negative, or longer than the series.
    """
    if not series:
        raise ValueError('series cannot be empty')
    if length == 0:
        raise ValueError('slice length cannot be zero')
    if length < 0:
        raise ValueError('slice length cannot be negative')
    if length > len(series):
        raise ValueError('slice length cannot be greater than series length')
    windows = []
    for start in range(len(series) - length + 1):
        windows.append(series[start:start + length])
    return windows
|
from picamera.array import PiYUVArray, PiRGBArray
from picamera import PiCamera
from scipy.signal import find_peaks, butter, filtfilt
from simple_pid import PID
from pwm import PWM
import ipywidgets as ipw
import time
import matplotlib.pyplot as plt
import skimage as ski
import numpy as np
import time
import threading
def setEngineSpeed(percent, pwm):
    """Set the drive-motor PWM duty cycle; *percent* is clamped to [0, 1].

    Duty cycle maps linearly from 1_000_000 ns (stopped) to 2_000_000 ns.
    """
    clamped = min(max(percent, 0), 1)
    pwm.duty_cycle = int(1000000 * clamped + 1000000)
def turn(percent, pwm):
    """Steer via the servo PWM; *percent* is clamped to [-1, 1].

    Duty cycle is 1_450_000 ns at center, offset by 500_000 ns per unit of
    *percent*.  Fix: the original if/else computed the exact same
    expression (1450000 + 500000*percent) in both branches, so the split
    on the sign of *percent* was redundant — collapsed to one formula
    with identical results.
    """
    clamped = min(max(percent, -1), 1)
    pwm.duty_cycle = int(1450000 + 500000 * clamped)
# Set the speed to be a discounted rate of the maxSpeed
def getNewSpeed(turnPercent):
speed = 0.25 - (turnPercent * 1.8*0.25)
if(speed < 0.17):
speed = 0.17
return speed
# Setup and export pwm0 and pwm1
pwm0 = PWM(0)  # steering servo channel
pwm1 = PWM(1)  # drive motor channel
pwm0.export()
pwm1.export()
# 20_000_000 ns = 20 ms PWM period (50 Hz)
pwm0.period = 20000000
pwm1.period = 20000000
# start stopped and centered before enabling output
setEngineSpeed(0, pwm1)
turn(0, pwm0)
pwm0.enable = True
pwm1.enable = True
# pause before driving — presumably to let the ESC arm; confirm
time.sleep(5)
# START OF COMPUTER VISION
res = (640,480)
# low-pass Butterworth coefficients used to smooth each camera scanline
b, a = butter(3, 0.007)
camera = PiCamera()
camera.sensor_mode = 6
camera.resolution = res
camera.framerate = 60
rawCapture = PiYUVArray(camera, size=res)
stream = camera.capture_continuous(rawCapture, format="yuv", use_video_port=True)
whiteCenterLine = 0 # Know where the center is
def func():
    """Line-following control loop: read scanlines, PID-steer, stop at finish.

    NOTE(review): `running` (used near the bottom of the loop) is not
    defined anywhere in this file — as written this raises NameError on
    the first frame; presumably a shared flag (e.g. multiprocessing.Value)
    was intended.
    """
    turning = 'n' # Indicate which direction it is turning
    output = 0 # result of the PID
    # NOTE(review): initialized as oldNumsPeaks but assigned below as
    # oldNumPeaks — typo; neither is ever read.
    oldNumsPeaks = 1
    finLineCounter = 0
    pid = PID(0.1, 0, 0.1, setpoint=0) # PID used to follow the line
    pid.sample_time = 0.016
    pid.output_limits = (-1, 1)
    pid.proportional_on_measurement = True
    start_time = time.time()
    for f in stream:
        # luminance of scanline row 440
        L = f.array[440, :, 0]
        rawCapture.truncate(0)
        Lf = filtfilt(b, a, L)
        # Find peaks which are higher than 0.5
        p = find_peaks(Lf, height=128)
        num_peaks = len(p[0])
        if num_peaks == 1:
            whiteCenterLine = p[0][0]
        elif num_peaks >= 2:
            # NOTE(review): p is the (peaks, properties) tuple, so len(p)
            # is always 2 and p[0][len(p)-1] is p[0][1] — the SECOND peak,
            # not the last; likely meant p[0][len(p[0])-1].
            whiteCenterLine = (p[0][0] + p[0][len(p)-1]) / 2
            finLineCounter += 1
        elif (num_peaks == 0):
            pass # REMEMBER LAST TURN
        if num_peaks == 0:
            # print("I AM NOT SEEING PEAKS")
            # no line visible: steer hard toward the last-seen direction
            if turning == 'l':
                output = 1
            elif turning == 'r':
                output = -1
        else:
            # horizontal error as a fraction of half the frame width
            percentOff = (whiteCenterLine - 320) / 240
            #print("percentOff")
            #print(percentOff)
            if percentOff >= 0.5:
                setEngineSpeed(0.16,pwm1)
            output = pid(percentOff) # Get the output of the PID as a percent
            #print("output")
            #print(output)
            if whiteCenterLine < 320:
                turning = 'l'
            else:
                turning = 'r'
        # slow down proportionally to how hard we are turning
        newSpeed = getNewSpeed(abs(output))
        if newSpeed <= 0.25:
            setEngineSpeed(newSpeed, pwm1)
        else:
            setEngineSpeed(0.25, pwm1)
        #print("Output: ", output)
        turn(output, pwm0) # Turn the car
        oldNumPeaks = num_peaks
        if not running.value:
            running.value = True
            break
        # finish-line handling: ignore the first 4 seconds entirely
        if time.time() - start_time < 4:
            pass
        elif finLineCounter >= 2:
            time.sleep(0.5) # Give time to cross the finish line (May need to be changed with testing)
            break
        if finLineCounter == 1:
            if time.time() - start_time > 5:
                time.sleep(0.70)
                break
            else:
                finLineCounter -= 1
    # Stop the motor
    setEngineSpeed(0, pwm1)
    # Reposition the servo to 0
    turn(0, pwm0)
    time.sleep(.1)
    stream.close()
    rawCapture.close()
    camera.close()
    pwm0.enable = False
    pwm1.enable = False
t = threading.Thread(target=func)
t.start() |
#!/usr/bin/env python
"""
test that everything works
This should be superseeded by an eventual unittesting
"""
from latgas import lattice
from latgas.lattice import Lattice, Potential, System
sz = 30
def inter(r):
    """Square-well pair potential: -4 for r <= 1, +1 for 1 < r <= 3, else None."""
    if r <= 1.0:
        return -4
    if r <= 3.0:
        return 1
# Build a sz x sz 2-D lattice-gas system with the square-well potential
# and run a short grand-canonical simulation.
lattice = Lattice(sz, dim = 2)
potential = Potential(inter, 3.0, 20)
sys = System(lattice, potential)
sys.lattice.random(1)
sys.set_mu(8.0)
sys.set_T(20.0)
sys.N = sys.tot_population()
sys.E = sys.tot_energy()
sys.run(3000)
# Fix: parenthesize print so the line is valid on Python 3 as well; with a
# single argument the output is identical on Python 2.
print("E = {0}, N = {1}".format(sys.E, sys.N))
|
'''
Author: Ben Joris
Created: October 24, 2019
Purpose: Extract country of origin info from file names and add it to tsv to import into anvio
'''
import glob # glob is a library to get lists of files on your system
countrydict = {}  # sample name -> "cohort<TAB>order" metadata string
######
# Each profile path encodes its cohort; the table below maps a path marker
# to the cohort label plus a coarse ordering number (tab separated).
# Markers are applied in order, so a path matching several keeps the last
# match — exactly like the original chain of independent `if` tests.
# The sample key is path segment 7 with its extension stripped, uppercased.
######
_COHORTS = [
    ("africa", "West African Adult" + "\t" + "1"),
    ("south_america", "South American Adult" + "\t" + "2"),
    ("asia", "South East Asian Adult" + "\t" + "3"),
    ("north_america_firstnations", "First Nations Adult" + "\t" + "4"),
    ("north_america/", "North American Adult" + "\t" + "5"),
    ("north_america_infants", "North American Infant" + "\t" + "6"),
    ("europe/", "Western European Adult" + "\t" + "7"),
    ("europe_infants", "Western European Infant" + "\t" + "8"),
]
for path in glob.glob("/Volumes/data/gutDB/conjugative_operons/population_mapping/*/*/PROFILE.db"):
    sample = path.split("/")[7].split(".")[0].upper()
    for marker, label in _COHORTS:
        if marker in path:
            countrydict[sample] = label
######
'''
- Opens original layer data file
- Takes original line, appends new data, and writes to new file in append mode
- This section depends on the file names (which are in your dictionary) being exact matches to what's in the layer data file
'''
######
with open("originalmisc.txt") as miscfh:
for line in miscfh:
if "layers" in line: # for the first line in the file, add the two new column names
with open("newmisc.txt","a") as newfh:
newfh.write(line.strip()+"\t"+"cohort"+ "\t"+ "order_vector""\n")
else: # adding the data to the samples
with open("newmisc.txt","a") as newfh:
newfh.write(line.strip()+"\t"+countrydict[line.split("\t")[0]]+"\n")
|
# -*- coding: utf-8 -*-
"""
Created on Thu May. 23. 2018
@author: MRChou
Code used for create submission file for models.
Used for the Avito Demand Prediction Challenge:
https://www.kaggle.com/c/avito-demand-prediction/
"""
import os
import pickle
import numpy
import pandas
import xlearn
import xgboost as xgb
from scipy.sparse import load_npz
from keras.models import load_model
from Avito_TrainScripts import feature_scaling
def ffm_submit(model, test, file_name='submission.txt'):
    """Predict with a trained xlearn FFM model file, writing ./file_name.

    *model* and *test* are file paths understood by xlearn.
    NOTE(review): script_ffm below reads 'av03_ffm_0607_output.txt' after
    calling this with the default file_name ('submission.txt') — confirm
    which output path xlearn actually produces.
    """
    ffm_model = xlearn.create_ffm()
    ffm_model.setTest(test)
    ffm_model.predict(model, './' + file_name)
def xgb_submit(model, df_test, file_name='submission.csv'):
    """Create the submission file of the model on test data."""
    features = df_test.drop(['item_id', 'description'], axis=1)
    dmatrix = xgb.DMatrix(data=features)
    df_test['deal_probability'] = model.predict(dmatrix)
    # clamp predictions into the valid probability range [0, 1]
    df_test.loc[df_test['deal_probability'] < 0, 'deal_probability'] = 0
    df_test.loc[df_test['deal_probability'] > 1, 'deal_probability'] = 1
    df_test[['item_id', 'deal_probability']].to_csv(file_name, index=False)
    return None
def lgbm_submit(model, df_test, file_name='submission.csv'):
    """Create the submission file of the model on test data."""
    features = df_test.drop(['item_id', 'description'], axis=1)
    df_test['deal_probability'] = model.predict(features)
    # Clamp predictions into the valid probability range [0, 1].
    df_test['deal_probability'] = df_test['deal_probability'].clip(lower=0, upper=1)
    df_test[['item_id', 'deal_probability']].to_csv(file_name, index=False)
    return None
def lgbm_submmit_sparse(model,
                        sparse_mat,
                        item_id,
                        file_name='submission.csv'):
    """lgbm_submit with data in scipy sparse matrix."""
    # Predict directly on the sparse features, then attach the ids.
    predictions = model.predict(sparse_mat)
    df_test = pandas.DataFrame({'deal_probability': predictions})
    df_test['item_id'] = item_id
    # Clamp predictions into the valid probability range [0, 1].
    df_test['deal_probability'] = df_test['deal_probability'].clip(lower=0, upper=1)
    df_test[['item_id', 'deal_probability']].to_csv(file_name, index=False)
    return None
def nn_submit(model, df_test, file_name='submission.csv'):
    """Load a Keras model from *model* (a file path), predict, and write the CSV."""
    net = load_model(model)
    # Inputs must be scaled the same way as during training (in-place).
    feature_scaling(df_test)
    features = df_test.drop(['item_id', 'description'], axis=1)
    df_test['deal_probability'] = net.predict(features)
    # if prediction is < 0, let it be 0; if is > 1, let it be 1
    df_test.loc[df_test['deal_probability'] < 0, 'deal_probability'] = 0
    df_test.loc[df_test['deal_probability'] > 1, 'deal_probability'] = 1
    df_test[['item_id', 'deal_probability']].to_csv(file_name, index=False)
    return None
def script_ffm():
    """Produce the FFM submission from the saved xlearn model and test set."""
    path = '/archive/Avito/data_preprocess/'
    test = '/archive/Avito/data_preprocess/FFM_test.txt'
    model = './av03_ffm_0607.txt'
    ffm_submit(model, test)
    # xlearn writes raw predictions to a side file; join them back onto ids.
    predict = numpy.loadtxt('./av03_ffm_0607_output.txt')
    with open(os.path.join(path, 'test.pickle'), 'rb') as f:
        df_test = pickle.load(f)
    df_test['deal_probability'] = predict
    df_test[['item_id', 'deal_probability']].to_csv(
        'av03_ffm_0607_submission.csv', index=False)
def script_xgb():
    """Load the pickled XGBoost model and test frame, then write its submission."""
    path = '/archive/Avito/data_preprocess/'
    with open('/archive/Avito/models/av06_xgb_0611.pickle', 'rb') as fin1, \
            open(os.path.join(path, 'F02_test.pickle'), 'rb') as fin2:
        model = pickle.load(fin1)
        print('Evaluation on vali: ', model.best_score)
        df_test = pickle.load(fin2)
    xgb_submit(model, df_test, file_name='av06_xgb_submission.csv')
def script_lgbm():
    """Load the pickled LightGBM model and sparse test features, write submission."""
    path = '/archive/Avito/data_preprocess/'
    with open('/archive/Avito/models/av08_lgb-gbdt_0627.pickle', 'rb') as fin1, \
            open(os.path.join(path, 'F02_test.pickle'), 'rb') as fin2:
        model = pickle.load(fin1)
        print('Evaluation on vali: ', model.best_score)
        # df_test = pickle.load(fin2)
        # lgbm_submit(model, df_test, file_name='av07_lgbm_submission.csv')
        # Only the ids are needed; features come from the sparse matrix below.
        item_ids = pickle.load(fin2)['item_id'].values
    mat_test = load_npz(os.path.join(path, 'F04_test.npz'))
    lgbm_submmit_sparse(model,
                        mat_test,
                        item_ids,
                        file_name='av08_lgb-gbdt_submission.csv')
def script_nn():
    """Run the saved Keras model over the pickled test frame, write submission."""
    path = '/archive/Avito/data_preprocess/'
    model = '/home/mrchou/code/KaggleWidget/av01_nn.h5'
    with open(os.path.join(path, 'F01_test.pickle'), 'rb') as f:
        df_test = pickle.load(f)
    nn_submit(model, df_test, file_name='av01_nn_submission.csv')
def blend_model():
    """Blend the public submissions into one weighted-average submission CSV.

    Weights are each model's public leaderboard score, normalized to sum to 1.
    The av07 submission is read only to supply the item_id column order.
    """
    path = '/archive/Avito/submissions/'
    file_name = 'ensemble_publics_submission.csv'
    model_av07 = pandas.read_csv(path + 'av07_lgb-gbdt_submission.csv')
    public_files = ['public0_02211.csv', 'public1_02212.csv',
                    'public2_02212.csv', 'public3_02237.csv',
                    'public4_02246.csv', 'public5_02211.csv']
    models = [pandas.read_csv(path + fname) for fname in public_files]
    weights = [0.2211, 0.2212, 0.2212, 0.2237, 0.2246, 0.2211]
    # normalize the weights
    total = sum(weights)
    weights = [w / total for w in weights]
    # initialize dataframe
    result = pandas.DataFrame()
    result['item_id'] = model_av07['item_id']
    result['deal_probability'] = 0.
    for model, weight in zip(models, weights):
        result['deal_probability'] += model['deal_probability']*weight
    # if prediction is < 0, let it be 0
    result.loc[result['deal_probability'] < 0, 'deal_probability'] = 0
    result.loc[result['deal_probability'] > 1, 'deal_probability'] = 1
    result[['item_id', 'deal_probability']].to_csv(file_name, index=False)
# Entry point: only the ensemble-blending step runs when executed directly.
if __name__ == '__main__':
    blend_model()
|
#!/usr/bin/env python3
# Read two summands from the user, convert them to floats, and print the sum.
erste_eingabe = input('Erster Summand: ')
zweite_eingabe = input('Zweiter Summand: ')
a, b = float(erste_eingabe), float(zweite_eingabe)
s = a + b
print('Die Summe ist: ' + str(s))
for i in range(10):
|
# coding: utf-8
import os
import time
import tensorflow as tf
import matplotlib
from ml_utils import *
# global setting
# Global run configuration for the concept-pair CNN experiments.
vector_type = "2vectors"  # 1vector
print("Processing {}".format(vector_type))
# True when each sample carries a single embedding vector instead of a pair.
one_vector_flag = vector_type == "1vector"
######################################################################################################
# set model parameters
batch_size = 4000  # Batch size : 4000 debug ****
seq_len = 512  # word embedding length
learning_rate = 0.0001
lambda_loss_amount = 0.001
epochs = 200  # 20000 debug ****
n_classes = 2
# Two channels per embedding vector: 2 in one-vector mode, 4 in pair mode.
n_channels = 2 if one_vector_flag else 4
iterations_list = [10]
# iterations_list = [10, 10, 10, 10, 10, 10, 10, 10, 10, 10] # debug ****
# , 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50
hier_list = ["procedure"]  # "clinical_finding,"
negative_flags = ["hier", "area", "parea"]  # []
# Cartesian product of every (iterations, hierarchy, negative-flag) combination.
c_list = [(x, y, z) for x in iterations_list for y in hier_list for z in negative_flags]
# global variables
# hier_name = "clinical_finding"
# hier_name = "procedure"
# directory_path = "/home/h/hl395/mlontology/SNO/"
directory_path = "SNO/"  # debug ****
img_path = directory_path + "img/dilated/"
#############################################################################################################
# NOTE(review): top-level experiment driver.  For each (iterations, hierarchy,
# negative-sampling flag) combination it builds a fresh 1-D CNN graph, trains
# it on concept-pair embeddings, evaluates on held-out pairs, and finally on
# new 2018 concepts.  Helper functions (read_label, read_pair, get_batches,
# remove_duplicates, ...) come from `ml_utils` via the star import above.
for iterations, hier_name, negative_flag in c_list:
    # for iterations in iterations_list:
    print("Run training with hierarchy: {} iteration: {}".format(hier_name, iterations))
    print("Testing with negative taxonomy {}".format(negative_flag))
    vector_model_path = directory_path + "vectorModel/" + hier_name + "/" + str(iterations) + "/"
    data_path = directory_path + "data/" + hier_name + "/"
    # negative_flag = "area" #
    # Checkpoint dir, negative-sample file and image-name prefix all depend on
    # where the negative pairs were drawn from (hier / area / parea).
    if negative_flag == "hier":
        cnn_model_path = directory_path + "cnnModel/hier/" + vector_type + "/" + str(iterations) + "/"
        notPair_file = data_path + "taxNotPairs_sno_" + hier_name + "_hier.txt"
        img_name = hier_name + "_hier_" + vector_type + "_iter_" + str(iterations) + "_"
    elif negative_flag == "area":
        cnn_model_path = directory_path + "cnnModel/area/" + vector_type + "/" + str(iterations) + "/"
        notPair_file = data_path + "taxNotPairs_sno_" + hier_name + "_area.txt"
        img_name = hier_name + "_area_" + vector_type + "_iter_" + str(iterations) + "_"
    else:
        cnn_model_path = directory_path + "cnnModel/parea/" + vector_type + "/" + str(iterations) + "/"
        notPair_file = data_path + "taxNotPairs_sno_" + hier_name + "_parea.txt"
        img_name = hier_name + "_parea_" + vector_type + "_iter_" + str(iterations) + "_"
    positive_flag = "hier" #
    if positive_flag == "hier":
        pair_file = data_path + "taxPairs_sno_" + hier_name + "_hier.txt"
    elif positive_flag == "area":
        pair_file = data_path + "taxPairs_sno_" + hier_name + "_area.txt"
    else:
        pair_file = data_path + "taxPairs_sno_" + hier_name + "_parea.txt"
    print("Positive training data from {}".format(positive_flag))
    print("Negative training data from {}".format(negative_flag))
    # Concept-id -> label dictionaries for the two ontology releases.
    label_file_2017 = directory_path + "data/ontClassLabels_july2017.txt"
    conceptLabelDict_2017, _ = read_label(label_file_2017)
    label_file_2018 = directory_path + "data/ontClassLabels_jan2018.txt"
    conceptLabelDict_2018, _ = read_label(label_file_2018)
    # read positive samples
    conceptPairList, _ = read_pair(pair_file)
    checkpairs = conceptPairList[10:15]
    print(checkpairs)
    print("number of pairs: ", len(conceptPairList))
    # read negative samples
    conceptNotPairList, _ = read_not_pair(notPair_file)
    checkNonpairs = conceptNotPairList[10:15]
    print(checkNonpairs)
    print("number of not pairs: ", len(conceptNotPairList))
    # remove duplicates
    print("remove duplicates")
    conceptPairList = remove_duplicates(conceptPairList)
    print("After remove duplicates in linked pairs: ")
    print(len(conceptPairList))
    conceptNotPairList = remove_duplicates(conceptNotPairList)
    print("After remove duplicates in not linked pairs: ")
    print(len(conceptNotPairList))
    # leave out 2000 positive and 2000 negative samples for testing
    conceptPairList, conceptNotPairList, testing_pair_lists = leave_for_testing(conceptPairList, conceptNotPairList,
                                                                                4000)
    conceptPairList, conceptNotPairList = sampling_data(conceptPairList, conceptNotPairList)
    # PV-DBOW
    vector_model_file_0 = vector_model_path + "model0"
    pvdbow_model = load_vector_model(vector_model_file_0)
    # PV-DM seems better??
    vector_model_file_1 = vector_model_path + "model1"
    pvdm_model = load_vector_model(vector_model_file_1)
    # read both negative and positive pairs into pairs list and label list
    idpairs_list, label_list = readFromPairList(conceptPairList, conceptNotPairList)
    print(label_list[:20])
    # split samples into training and validation set
    from sklearn.model_selection import train_test_split
    X_train, X_validation, y_train, y_validation = train_test_split(idpairs_list, label_list, test_size=0.3,
                                                                    shuffle=True)
    print(X_train[:20])
    print(X_validation[:20])
    print(y_train[:20])
    print(y_validation[:20])
    # build the model
    graph = tf.Graph()
    # Construct placeholders
    with graph.as_default():
        inputs_ = tf.placeholder(tf.float32, [None, seq_len, n_channels], name='inputs')
        labels_ = tf.placeholder(tf.float32, [None, n_classes], name='labels')
        keep_prob_ = tf.placeholder(tf.float32, name='keep')
        learning_rate_ = tf.placeholder(tf.float32, name='learning_rate')
    # Six conv+maxpool stages halve the sequence length each time: 512 -> 8.
    with graph.as_default():
        # (batch, 512, 4) --> (batch, 256, 18)
        conv1 = tf.layers.conv1d(inputs=inputs_, filters=32, kernel_size=15, strides=1,
                                 padding='same', activation=tf.nn.leaky_relu, dilation_rate=1)
        max_pool_1 = tf.layers.max_pooling1d(inputs=conv1, pool_size=2, strides=2, padding='same')
        # (batch, 256, 18) --> (batch, 128, 36)
        conv2 = tf.layers.conv1d(inputs=max_pool_1, filters=32, kernel_size=10, strides=1,
                                 padding='same', activation=tf.nn.leaky_relu, dilation_rate=2)
        max_pool_2 = tf.layers.max_pooling1d(inputs=conv2, pool_size=2, strides=2, padding='same')
        # (batch, 128, 36) --> (batch, 64, 72)
        conv3 = tf.layers.conv1d(inputs=max_pool_2, filters=64, kernel_size=10, strides=1,
                                 padding='same', activation=tf.nn.leaky_relu, dilation_rate=2)
        max_pool_3 = tf.layers.max_pooling1d(inputs=conv3, pool_size=2, strides=2, padding='same')
        # (batch, 64, 72) --> (batch, 32, 144)
        conv4 = tf.layers.conv1d(inputs=max_pool_3, filters=64, kernel_size=10, strides=1,
                                 padding='same', activation=tf.nn.leaky_relu, dilation_rate=2)
        max_pool_4 = tf.layers.max_pooling1d(inputs=conv4, pool_size=2, strides=2, padding='same')
        # (batch, 32, 144) --> (batch, 16, 144) # 288
        conv5 = tf.layers.conv1d(inputs=max_pool_4, filters=64, kernel_size=5, strides=1,
                                 padding='same', activation=tf.nn.leaky_relu, dilation_rate=3)
        max_pool_5 = tf.layers.max_pooling1d(inputs=conv5, pool_size=2, strides=2, padding='same')
        # (batch, 16, 144) --> (batch, 8, 144) #576
        conv6 = tf.layers.conv1d(inputs=max_pool_5, filters=64, kernel_size=5, strides=1,
                                 padding='same', activation=tf.nn.leaky_relu, dilation_rate=3)
        max_pool_6 = tf.layers.max_pooling1d(inputs=conv6, pool_size=2, strides=2, padding='same')
    with graph.as_default():
        # Flatten and add dropout
        flat = tf.reshape(max_pool_6, (-1, 8 * 64))
        flat = tf.layers.dense(flat, 200)
        flat = tf.nn.dropout(flat, keep_prob=keep_prob_)
        # Predictions
        logits = tf.layers.dense(flat, n_classes, name='logits')
        logits_identity = tf.identity(input=logits, name="logits_identity")
        predict = tf.argmax(logits, 1, name="predict")  # the predicted class
        predict_identity = tf.identity(input=predict, name="predict_identity")
        probability = tf.nn.softmax(logits, name="probability")
        probability_identity = tf.identity(input=probability, name="probability_identity")
        # L2 loss prevents this overkill neural network to overfit the data
        l2 = lambda_loss_amount * sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
        # Cost function and optimizer
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels_)) + l2
        optimizer = tf.train.AdamOptimizer(learning_rate_).minimize(cost)
        # Accuracy
        correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(labels_, 1), name='correct_pred')
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
    # In[17]:
    if (os.path.exists(cnn_model_path) == False):
        os.makedirs(cnn_model_path)
    # In[ ]:
    validation_acc = []
    validation_loss = []
    train_acc = []
    train_loss = []
    # Best validation accuracy seen so far.
    best_validation_accuracy = 0.0
    with graph.as_default():
        saver = tf.train.Saver()
    # Training: checkpoint is saved only when validation accuracy improves.
    with tf.Session(graph=graph) as sess:
        sess.run(tf.global_variables_initializer())
        iteration = 0
        training_start_time = time.time()
        # Loop over epochs
        for e in range(epochs):
            # Loop over batches
            for x, y in get_batches(X_train, y_train, one_vector_flag=one_vector_flag, conceptLabelDict=conceptLabelDict_2017, pvdm_model=pvdm_model, pvdbow_model=pvdbow_model, batch_size=batch_size):
                # Feed dictionary
                feed = {inputs_: x, labels_: y, keep_prob_: 0.5, learning_rate_: learning_rate}
                # Loss
                loss, _, acc = sess.run([cost, optimizer, accuracy], feed_dict=feed)
                train_acc.append(acc)
                train_loss.append(loss)
                # Print at each 50 iters
                if (iteration % 50 == 0):
                    print("Epoch: {}/{}".format(e, epochs),
                          "Iteration: {:d}".format(iteration),
                          "Train loss: {:6f}".format(loss),
                          "Train acc: {:.6f}".format(acc)
                          )
                # Compute validation loss at every 100 iterations
                if (iteration % 100 == 0):
                    val_acc_ = []
                    val_loss_ = []
                    for x_v, y_v in get_batches(X_validation, y_validation, one_vector_flag=one_vector_flag, conceptLabelDict= conceptLabelDict_2017, pvdbow_model=pvdbow_model, pvdm_model=pvdm_model, batch_size= batch_size):
                        # Feed
                        feed = {inputs_: x_v, labels_: y_v, keep_prob_: 1}
                        # Loss
                        loss_v, acc_v = sess.run([cost, accuracy], feed_dict=feed)
                        val_acc_.append(acc_v)
                        val_loss_.append(loss_v)
                    # If validation accuracy is an improvement over best-known.
                    acc_validation = np.mean(val_acc_)
                    if acc_validation > best_validation_accuracy:
                        # Update the best-known validation accuracy.
                        best_validation_accuracy = acc_validation
                        # Save all variables of the TensorFlow graph to file.
                        saver.save(sess=sess, save_path=cnn_model_path + "har.ckpt")
                        # A string to be printed below, shows improvement found.
                        improved_str = '*'
                    else:
                        # An empty string to be printed below.
                        # Shows that no improvement was found.
                        improved_str = ''
                    # Print info
                    print("Epoch: {}/{}".format(e, epochs),
                          "Iteration: {:d}".format(iteration),
                          "Validation loss: {:6f}".format(np.mean(val_loss_)),
                          "Validation acc: {:.6f}".format(np.mean(val_acc_)),
                          "{}".format(improved_str*3))
                    # Store
                    validation_acc.append(np.mean(val_acc_))
                    validation_loss.append(np.mean(val_loss_))
                # Iterate
                iteration += 1
        training_duration = time.time() - training_start_time
        print("Total training time: {}".format(training_duration))
        # saver.save(sess, cnn_model_path + "har.ckpt")
    # In[ ]:
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    # Plot training and test loss
    t = np.arange(iteration)
    plt.figure(figsize=(8, 6))
    plt.plot(t, np.array(train_loss), 'r-', t[t % 100 == 0][:len(validation_loss)], np.array(validation_loss), 'b*')
    plt.xlabel("Iteration")
    plt.ylabel("Loss")
    plt.legend(['train', 'validation'], loc='upper right')
    plt.savefig(img_path + img_name + 'loss.png')
    plt.show()
    # In[ ]:
    # Plot Accuracies
    plt.figure(figsize=(8, 6))
    plt.plot(t, np.array(train_acc), 'r-', t[t % 100 == 0][:len(validation_acc)], validation_acc, 'b*')
    plt.xlabel("Iteration")
    plt.ylabel("Accuracy")
    # plt.ylim(0.4, 1.0)
    plt.legend(['train', 'validation'], loc='lower right')
    plt.savefig(img_path + img_name + 'accuracy.png')
    plt.show()
    # In[ ]:
    print("result for testing leave-out samples: ")
    idpairs_list, label_list = readFromPairList([], testing_pair_lists)
    test_rest_acc = []
    batch_size = 4000
    n_classes = 2
    # Evaluate the best checkpoint on the leave-out pairs.
    with tf.Session(graph=graph) as sess:
        # Restore
        saver.restore(sess, tf.train.latest_checkpoint(cnn_model_path))
        test_iteration = 1
        for x_t, y_t in get_batches(idpairs_list, label_list, one_vector_flag=one_vector_flag,
                                    conceptLabelDict=conceptLabelDict_2017, random_flag=False, pvdm_model=pvdm_model,
                                    pvdbow_model=pvdbow_model, batch_size=batch_size):
            feed = {inputs_: x_t,
                    labels_: y_t,
                    keep_prob_: 1}
            batch_acc = sess.run(accuracy, feed_dict=feed)
            test_rest_acc.append(batch_acc)
            label_pred = sess.run(tf.argmax(logits, 1), feed_dict=feed)
            pred_prob = sess.run(tf.nn.softmax(logits), feed_dict=feed)
            test_iteration += 1
        print("Test accuracy: {:.6f}".format(np.mean(test_rest_acc)))
    # Now we're going to assess the quality of the neural net using ROC curve and AUC
    import matplotlib.pyplot as plt
    from sklearn.metrics import roc_curve, auc
    # send the actual dependent variable classifications for param 1,
    # and the confidences of the true classification for param 2.
    # NOTE(review): pred_prob / label_pred hold only the LAST batch from the
    # loop above, while label_list covers all leave-out samples — this only
    # lines up when everything fits in a single batch.  Verify.
    FPR, TPR, _ = roc_curve(label_list, pred_prob[:, 1])
    # Calculate the area under the confidence ROC curve.
    # This area is equated with the probability that the classifier will rank
    # a randomly selected defaulter higher than a randomly selected non-defaulter.
    AUC = auc(FPR, TPR)
    # What is "good" can dependm but an AUC of 0.7+ is generally regarded as good,
    # and 0.8+ is generally regarded as being excellent
    print("AUC is {}".format(AUC))
    # Now we'll plot the confidence ROC curve
    plt.figure()
    plt.plot(FPR, TPR, label='ROC curve (area = %0.2f)' % AUC)
    plt.plot([0, 1], [0, 1], 'r--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.02])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve')
    plt.legend(loc="lower right")
    plt.savefig(img_path + img_name + 'roc.png')
    plt.show()
    from sklearn.metrics import classification_report
    print(classification_report(label_list, label_pred))
    plot_confusion_matrix(label_list, label_pred, img_path=img_path, img_name=img_name)
    # test new data
    print('\n\nTesting with new data')
    import json
    from pprint import pprint
    jsonFile = data_path + "2018" + hier_name + "_newconcepts_2.json"
    test_data = json.load(open(jsonFile))
    parents_dict = processConceptParents(test_data)
    uncles_dict = processConceptUncles(test_data)
    existing_parents_dict = find_concepts_with_existing_parents(parents_dict, conceptLabelDict_2017)
    existing_uncles_dict = find_concepts_with_existing_uncles(uncles_dict, conceptLabelDict_2017)
    positive_lists, negative_lists = prepare_samples_for_concepts_with_multiple_existing_parents_and_uncles(existing_parents_dict, existing_uncles_dict)
    # remove duplicates
    positive_lists = remove_duplicates(positive_lists)
    print("remove duplicates in positive test data, the number is: {}".format(len(positive_lists)))
    # remove duplicates
    negative_lists = remove_duplicates(negative_lists)
    print("remove duplicates in negative test data, the number is: {}".format(len(negative_lists)))
    # sampling data otherwise the negative testing sample is too huge
    # positive_lists, negative_lists = sampling_data(positive_lists, negative_lists)
    data_list, label_list, borrowed_list = read_samples_into_data_label_borrowed(positive_lists, negative_lists)
    test_rest_acc = []
    true_label_list = []
    predicted_label_list = []
    predicted_prob = []
    batch_size = 4000
    # Evaluate the same checkpoint on the new 2018 concepts.
    with tf.Session(graph=graph) as sess:
        # Restore
        saver.restore(sess, tf.train.latest_checkpoint(cnn_model_path))
        iteration = 0
        for x_t, y_t in sno_get_batches_for_testing_new(data_list, label_list, one_vector_flag=one_vector_flag,
                                                        conceptLabelDict=conceptLabelDict_2018, pvdbow_model=pvdbow_model,
                                                        pvdm_model=pvdm_model, batch_size=batch_size,
                                                        random_flag=False, borrowed_list=borrowed_list):
            feed = {inputs_: x_t,
                    labels_: y_t,
                    keep_prob_: 1}
            # true_label_list.extend(label_list)
            batch_acc = sess.run(accuracy, feed_dict=feed)
            test_rest_acc.append(batch_acc)
            # label_pred = sess.run(tf.argmax(logits, 1), feed_dict=feed)
            label_pred = sess.run(predict, feed_dict=feed)
            predicted_label_list.extend(label_pred)
            # pred_prob = sess.run(tf.nn.softmax(logits), feed_dict=feed)
            pred_prob = sess.run(probability, feed_dict=feed)
            predicted_prob.extend(pred_prob[:, 1])
            # print("\t\t Predict: ", label_pred)
            # print("\t\t True label: ", label_list)
            # Print at each 50 iters
            if (iteration % 5 == 0):
                print("Iteration: {:d}".format(iteration),
                      "batch acc: {:.6f}".format(batch_acc)
                      )
            iteration += 1
        print("Test accuracy: {:.6f}".format(np.mean(test_rest_acc)))
    from sklearn.metrics import classification_report
    true_label_list = label_list
    print(classification_report(true_label_list, predicted_label_list))
    # Now we're going to assess the quality of the neural net using ROC curve and AUC
    import matplotlib.pyplot as plt
    from sklearn.metrics import roc_curve, auc
    # send the actual dependent variable classifications for param 1,
    # and the confidences of the true classification for param 2.
    FPR, TPR, _ = roc_curve(true_label_list, predicted_prob)
    # Calculate the area under the confidence ROC curve.
    # This area is equated with the probability that the classifier will rank
    # a randomly selected defaulter higher than a randomly selected non-defaulter.
    AUC = auc(FPR, TPR)
    # What is "good" can dependm but an AUC of 0.7+ is generally regarded as good,
    # and 0.8+ is generally regarded as being excellent
    print("AUC is {}".format(AUC))
    # Now we'll plot the confidence ROC curve
    plt.figure()
    plt.plot(FPR, TPR, label='ROC curve (area = %0.2f)' % AUC)
    plt.plot([0, 1], [0, 1], 'r--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.02])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve')
    plt.legend(loc="lower right")
    plt.savefig(img_path + img_name + 'roc2.png')
    plt.show()
    # Collect the false-positive samples (true label 0, predicted 1).
    negative_sample_error_list = []
    for i, x in enumerate(true_label_list):
        if x != predicted_label_list[i] and x == 0:
            negative_sample_error_list.append(data_list[i])
    print(len(negative_sample_error_list))
    # Helper for the (commented-out) error inspection below; falls back to the
    # 2018 dictionary for concepts that are new in that release.
    def get_label_from_id(id):
        if id in conceptLabelDict_2017:
            return conceptLabelDict_2017[id]
        elif id in conceptLabelDict_2018:
            return conceptLabelDict_2018[id]
        else:
            print("{} not exists in dictionary".format(id))
    # for batch_sample in negative_sample_error_list:
    #     print("{} : {} ".format(batch_sample[0], batch_sample[1]))
    #     print("{} -> {} ".format(get_label_from_id(batch_sample[0]), get_label_from_id(batch_sample[1])))
    # NOTE(review): the `with` block above already closed the session, so this
    # close() is redundant (but harmless).
    sess.close()
    tf.reset_default_graph()
    print("One single test done \n\n")
print("testing done")
# Script that checks a student's approval status using functions.
nota1 = 7.5
nota2 = 4.8
notas = [nota1, nota2]


def verificar_aprovacao():
    """Print whether the student passes (average grade >= 6)."""
    media = calcular_media(nota1, nota2)
    if media >= 6:
        print("O aluno foi aprovado!")
    else:
        print("O aluno foi reprovado!")


def calcular_media(nota1, nota2):
    """Return the arithmetic mean of the two grades passed as arguments.

    Bug fix: the original ignored its parameters and always averaged the
    module-level ``notas`` list; it now averages exactly the grades given.
    """
    grades = [nota1, nota2]
    return sum(grades) / len(grades)


# Run the approval check.
verificar_aprovacao()
|
# -*- coding:utf-8 -*-
# s = 10
# number = ()
# print(number == None)
# for n in number:
# print(s * n)
# a = ' '
# print(a == None)
# print(len(a))
#
# b = 'abcd'
# print(b[0:])
# for n, value in enumerate(b):
# print(n, value)
# print(5, value)
# def findMinAndMax(L):
# min = max = 0
# for x in L:
# max = max > x and max or x
# min = min < x and x or min
# return (min or None, max or None)
# print(findMinAndMax([0, 1]))
# https://www.zhihu.com/question/20152384
# 尽管方法错了,但是依旧值得学习其中的想法
# min = max = 0
# x = 10
# a = False or 10
# b = False or False
# print (a, b)
# c = 2 and 1 or 3 and 4
# print (c)
# d = 1 or 2
# print (d)
# L = [1, 2, 3]
# l = 'aaa'
# print(l + 'str')
# K = list(L)
# print(K)
# print(K == L)
# for i in range(5):
# print(i)
# from enum import Enum
# Month = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))
a = 1  # lone live statement among the commented-out experiments above; kept as-is
# def aaa():
# print(a)
#
# aaa()
|
#!/usr/bin/env python3
import os.path
import subprocess
from base64 import b64encode
def _write_secret(name, fpath):
with open(fpath, 'rt') as f:
secret_data = f.read()
secret_data_b64 = b64encode(secret_data.encode()).decode()
secret_yaml = '\n {}: {}'.format(
os.path.basename(fpath), secret_data_b64
)
with open('kubernetes/{}-secret.yaml'.format(name), 'w') as f:
f.write("""\
apiVersion: v1
kind: Secret
metadata:
name: {}
type: Opaque
data: {}
""".format(name, secret_yaml))
def _main():
    """Render the authority deployment manifests and write all Secret YAMLs."""
    # For each of the two authorities, source its .env file and expand the
    # shared template via envsubst.  shell=True is required here because the
    # command uses `source`, `&&` and redirection; all inputs are local
    # repository files, not untrusted data.
    for i in range(1, 3):
        command_str_tpl = r'source authority/.env.authority{0} && ' \
            'envsubst \$AUTHORITY_NAME,\$NFS_SERVER,\$NFS_PATH,\$NETWORK_ID,' \
            '\$BOOT_NODE_ID,\$AUTHORITY_ADDRESS < ' \
            'authority/authority.template.yaml > kubernetes/authority{0}.yaml'
        command_str = command_str_tpl.format(i)
        subprocess.check_call(command_str, shell=True)
    # Secrets: node passwords, genesis block, boot key, and the two keystores.
    _write_secret('authority1-password', 'authority1-password.txt')
    _write_secret('authority2-password', 'authority2-password.txt')
    _write_secret('genesis', 'genesis.json')
    _write_secret('boot', 'boot.key')
    _write_secret(
        'authority1-keystore',
        'authority1/keystore/UTC--2018-05-07T22-54-50.644182705Z--'
        'e236d5c0d5ea75d866c56b9966a9b9f35bdb3ad0'
    )
    _write_secret(
        'authority2-keystore',
        'authority2/keystore/UTC--2018-05-07T22-58-06.079818290Z--'
        '8084b0f2cc92c2672db34b6df9656d2889dfa85e'
    )


# Runs on import/execution: this module is a script, not a library.
_main()
|
from .zc_evaluator import ZeroCostPredictorEvaluator
from .full_evaluation import full_evaluate_predictor |
# -*- coding: utf-8 -*-
""" Polynomial Time SEMI Algorithm
This algorithm allows the computation of the central nodes of a network
with a polynomial time complexity, differently from the classic exponential version.
"""
import numpy as np
from ALGORITHM.TOOLS.mathematical_tools import fast_binomial
def shapley_beta_function(game):
    """Return the beta weighting used by the Shapley indices.

    Args:
        game (Game): the game characterized by a matrix, a characteristic
            function and the input parameters for the SEMI algorithm.

    Returns:
        float: the uniform beta weight, 1 over the number of matrix rows.
    """
    matrix_size = len(game.matrix)
    return 1 / matrix_size
def semi_algorithm(game, centrality_measure_choice):
    """ Algorithm Application
    Method that applies the algorithm with different important part:
    - Definition of the centrality measure within the characteristic function.
    - Calculation of the marginal contribution:
        - MC1: first marginal contribution part, based on positive and neutral relation
        - MC2: second marginal contribution part, based on positive and negative relation
        - MC3: first marginal contribution part, based on
          the sum of positive and neutral relation
    - Calculation of the weighted general marginal contribution
    - Update of the Shapley Values
    Args:
        game (Game): the game characterized by a matrix,
        a characteristic function and the input parameters for the SEMI algorithm.
        centrality_measure_choice (string): the centrality measure chosen by the user.
    Returns:
        no return is needed.
    """
    # NOTE(review): results are only accumulated in the local `shapley_value`
    # array and printed — nothing is returned to the caller.  The print
    # statements form a verbose debug trace of every computation step.
    # Initialization
    # Shapley Value vector has size equals to the number of nodes.
    shapley_value = np.zeros(game.length)
    # For each node considered in order to find its shapley value
    for evaluated_node in range(0, game.length):
        # For each possible coalition size, starting from the empty and going to |V| - 1
        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
        print("NODE EVALUATED: ", evaluated_node)
        for k in range(0, game.length):
            # Initialize each time the marginal contribution of that node to 0
            marginal_contribution = 0
            # For each l possible value that that the partition of set can assume
            print("\tK: ", k)
            for l_cardinality in range(1, max(game.item) + 1):
                print("\t\tL: ", l_cardinality)
                # If there is not an item belonging to the partition of size l,
                # just continue the cycle
                # avoiding all operations for that l and jumping to the l + 1 partition cardinality
                if game.item_class[l_cardinality - 1] == 0:
                    print("\t\t\tNO CORRESPONDENCE")
                    continue
                # Otherwise, continue with the marginal contribution computation
                # MC1 - FIRST PART COMPUTATION
                # ***************************************************
                # ***[R(v, teta) and N(C, teta)]***
                # |N#k^(-1)(Teta_l)| <- cN_G(teta in Teta_l, k);
                # MC[1] <- g(k+1) * f(Teta_l) * |N#k^(-1)(Teta_l)|
                # MCk <- MCk + |R_Teta_l({v})| * MC[1]
                # ***************************************************
                # Definition of f and g parameters
                (centrality_measure_f_parameter, centrality_measure_g_of_k_plus_1_parameter) = \
                    game.characteristic_function.centrality_measure(l_cardinality,
                                                                    k + 1,
                                                                    centrality_measure_choice)
                # Definition of the set of coalitions of size k
                # to which item ϑ is neutrally related
                neutral_contribution = game.neutral[game.item_class[l_cardinality - 1] - 1][k]
                print("\t\t\t#1 - SET OF COALITIONS OF SIZE K TO WHICH TETA IS NEUTRALLY RELATED: ",
                      neutral_contribution)
                print("\t\t\t#1 - f CONTRIBUTION: ", centrality_measure_f_parameter)
                print("\t\t\t#1 - g CONTRIBUTION: ", centrality_measure_g_of_k_plus_1_parameter)
                # Definition of the first type marginal contribution, by the product of
                # the f and g parameters and the neutral matrix contribution
                marginal_contribution_first_type = \
                    centrality_measure_f_parameter * \
                    centrality_measure_g_of_k_plus_1_parameter * \
                    neutral_contribution
                print("\t\t\t#1 - FIRST MARGINAL CONTRIBUTION: ", marginal_contribution_first_type)
                print("\t\t\t#1 - SET OF ITEMS IN GROUP TETA_L THAT IS POSITIVELY RELATED TO NODE v: ",
                      game.positive_relation[evaluated_node][l_cardinality - 1])
                print("\t\t\t#1 - OLD GENERAL MARGINAL CONTRIBUTION: ", marginal_contribution)
                # First type marginal contribution addition to the general marginal contribution,
                # weighted by the value that the set of items in group Teta_l
                # is positively related to C has
                marginal_contribution = \
                    marginal_contribution + \
                    game.positive_relation[evaluated_node][l_cardinality - 1] * \
                    marginal_contribution_first_type
                print("\t\t\t#1 - NEW GENERAL MARGINAL CONTRIBUTION: ", marginal_contribution)
                print("\t\t\t----------")
                # MC2 - SECOND COMPUTATION
                # ***************************************************
                # ***[~R(v, teta) and R(C, teta)]***
                # |R#k^(-1)(Teta_l)| ← cR_G(teta in Teta_l, k);
                # MC[2] ← g(k) * f(Teta_l) * |R#k^(-1)(Teta_l)|
                # MCk ← MCk + |-R_Teta_l({v})| * MC[2]
                # ***************************************************
                # NOTE: here g is evaluated at k (not k + 1 as in MC1).
                (centrality_measure_f_parameter, centrality_measure_g_of_k_parameter) = \
                    game.characteristic_function.centrality_measure(l_cardinality,
                                                                    k,
                                                                    centrality_measure_choice)
                # Definition of the set of coalitions of size k
                # to which item _teta is positively related
                positive_contribution = game.positive[game.item_class[l_cardinality - 1] - 1][k]
                print("\t\t\t#2 - SET OF COALITIONS OF SIZE K TO WHICH TETA IS POSITIVELY RELATED: ",
                      positive_contribution)
                print("\t\t\t#2 - f CONTRIBUTION: ", centrality_measure_f_parameter)
                print("\t\t\t#2 - g CONTRIBUTION: ", centrality_measure_g_of_k_parameter)
                # Definition of the second type marginal contribution, by the product of
                # the f and g parameters and the positive matrix contribution
                marginal_contribution_second_type = \
                    centrality_measure_f_parameter * \
                    centrality_measure_g_of_k_parameter * \
                    positive_contribution
                print("\t\t\t#2 - SECOND MARGINAL CONTRIBUTION: ", marginal_contribution_second_type)
                print("\t\t\t#2 - SET OF ITEMS IN GROUP TETA_L THAT IS NEGATIVELY RELATED TO NODE v: ",
                      game.negative_relation[evaluated_node][l_cardinality - 1])
                print("\t\t\t#2 - OLD GENERAL MARGINAL CONTRIBUTION: ", marginal_contribution)
                # Second type marginal contribution subtraction
                # to the general marginal contribution,
                # weighted by the value that the set of items in group Teta_l
                # is negatively related to C has
                marginal_contribution = \
                    marginal_contribution - \
                    game.negative_relation[evaluated_node][l_cardinality - 1] * \
                    marginal_contribution_second_type
                print("\t\t\t#2 - NEW GENERAL MARGINAL CONTRIBUTION: ", marginal_contribution)
                print("\t\t\t----------")
                # MC3 - THIRD COMPUTATION
                # ***************************************************
                # ***[R(v, teta) or N(v, teta), and R(C, teta)]***
                # MC[3] <- (g(k+1) - g(k)) * f(Teta_l) * |R#k^(-1)(Teta_l)|
                # MCk <- MCk + |R_Teta_l({v}) ∪ N_Teta_l({v})| * MC[3]
                # ***************************************************
                print("\t\t\t#3 - SET OF COALITIONS OF SIZE K TO WHICH TETA IS POSITIVELY RELATED: ",
                      positive_contribution)
                print("\t\t\t#3 - f CONTRIBUTION: ", centrality_measure_f_parameter)
                print("\t\t\t#3 - g CONTRIBUTION: ",
                      centrality_measure_g_of_k_plus_1_parameter - centrality_measure_g_of_k_parameter)
                # Definition of the third type marginal contribution, by the product of
                # the f parameter, the difference between the g parameter for k + 1 and k,
                # and the positive matrix contribution
                marginal_contribution_third_type = \
                    centrality_measure_f_parameter * \
                    (centrality_measure_g_of_k_plus_1_parameter -
                     centrality_measure_g_of_k_parameter) * \
                    positive_contribution
                print("\t\t\t#3 - THIRD MARGINAL CONTRIBUTION: ", marginal_contribution_third_type)
                print("\t\t\t#3 - SET OF ITEMS IN GROUP TETA_L THAT IS POSITIVELY AND NEUTRALLY RELATED TO NODE v: ",
                      game.positive_relation[evaluated_node][l_cardinality - 1] +
                      game.neutral_relation[evaluated_node][l_cardinality - 1])
                print("\t\t\t#3 - OLD GENERAL MARGINAL CONTRIBUTION: ", marginal_contribution)
                # Third type marginal contribution addiction to the general marginal contribution,
                # weighted by the value that the set of items in group Teta_l
                # is negatively related to C has summed to the value that the set of items
                # in group Teta_l is positively related to C
                marginal_contribution = \
                    marginal_contribution + \
                    (game.positive_relation[evaluated_node][l_cardinality - 1] +
                     game.neutral_relation[evaluated_node][l_cardinality - 1]) * \
                    marginal_contribution_third_type
                print("\t\t\t#3 - NEW GENERAL MARGINAL CONTRIBUTION: ", marginal_contribution)
            # MC - END STEP
            # ***************************************************
            # MCk <- (Beta(k) / (|V|−1 k)) * MCk
            # phi_v <- phi_v + MCk
            # ***************************************************
            # Final computation of the marginal contribution as the value previously calculated
            # weighted by the division between the Shapley Beta function and the Newton binomial
            # coefficient of |V| - 1 and k
            print("\t\tEND STEP")
            print("\t\tWEIGHT FUNCTION: ", (shapley_beta_function(game) /
                                            fast_binomial(game.length - 1, k)))
            marginal_contribution *= (shapley_beta_function(game) /
                                      fast_binomial(game.length - 1, k))
            print("\t\tWEIGHTED MARGINAL CONTRIBUTION: ", marginal_contribution)
            # Update of the Shapley Value of the node evaluated with the sum of the previous value
            # and the weighted marginal contribution
            shapley_value[evaluated_node] += marginal_contribution
            print("\t\tSHAPLEY VALUES:\n\t\t", shapley_value)
    print("SHAPLEY VALUES SUM: ", np.sum(shapley_value))
|
# @tokoroten-lab 氏のコードを参考にしています
# https://qiita.com/tokoroten-lab/items/bb27351b393f087650a9
import numpy as np
import matplotlib.pyplot as plt
import pickle
import time
import socket
import sys
import glob
import time
# plt.rcParams['xtick.direction'] = 'in'
# plt.rcParams['ytick.direction'] = 'in'
# Show grid lines on all matplotlib axes by default.
plt.rcParams['axes.grid'] = True
# Address this receiver binds to; the loopback alternative below is for
# local testing.
IP_ADDR = '192.168.1.2'
# IP_ADDR = '127.0.0.1'
PACKET_HEADER_SIZE = 8  # bytes; presumably the FEE packet header size — unused in this script, TODO confirm
# Date stamp (YYYYMMDD, local time) embedded in the output file name.
save_date = time.strftime("%Y%m%d", time.localtime())
SAVE_FILE_BASE_NAME = "./data/recv_buff_v3_"
SAVE_FILE_EXTENTION = ".bin"  # NOTE(review): "EXTENTION" is a typo for "EXTENSION"; name kept for compatibility
# SAVE_FIG_EXTENTION = ".pkl"
def data_recv(sock):
    """Receive all bytes from *sock* until the peer closes the connection.

    Returns a tuple ``(buff, start, end)`` where *buff* is the accumulated
    payload as ``bytes`` and *start*/*end* are ``time.perf_counter()``
    timestamps taken at the first received chunk and at connection close
    (or interrupt), for throughput calculation by the caller.
    """
    # Accumulate into a bytearray: amortized O(1) append instead of the
    # quadratic bytes-concatenation of the original.
    buff = bytearray()
    # Pre-initialize both timestamps so a KeyboardInterrupt before the
    # first chunk (or before close) cannot leave them unbound at the
    # return statement — the original raised UnboundLocalError there.
    start = end = time.perf_counter()
    while True:
        try:
            # Large recv limit (20 MB) to minimise syscall overhead; the
            # OS returns whatever is currently available, up to the limit.
            data = sock.recv(int(20E6))
            if len(buff) == 0:
                start = time.perf_counter()
                print("rx start")
            if not data:
                # Empty read means the client closed the connection.
                end = time.perf_counter()
                print("rx end. socket is closed by client")
                break
            buff += data
        except KeyboardInterrupt:
            # Stamp the end time so the caller's speed computation still
            # has a valid interval after a manual interrupt.
            end = time.perf_counter()
            print('interrupted!')
            print("{0:d} byte remains in buffer".format(len(buff)))
            break
    return bytes(buff), start, end
# Script entry point: accept one TCP connection from the "dummy FEE" data
# sender, receive its entire byte stream, report the transfer speed, and
# dump the buffer to a uniquely numbered binary file under ./data/.
if __name__ == "__main__":
    recv_buff = bytes()
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((IP_ADDR, 5001))
        # A backlog of one is enough: only a single FEE ever connects.
        s.listen(1)
        print("Waiting connection from dummy FEE")
        conn, addr = s.accept()
        with conn:
            try:
                print("Connection from ", addr)
                recv_buff, recv_start, recv_end = data_recv(conn)
            finally:
                print("socket close ", addr)
                # NOTE(review): redundant — the enclosing `with` already
                # closes the listening socket `s`.
                s.close()
    # Throughput = received bytes * 8 bits / elapsed seconds, in Mbps.
    print("Receive size:{0:.2f}MBytes, Speed:{1:.2f} Mbps".format(len(recv_buff)*1E-6, len(recv_buff)/(recv_end-recv_start)*8*1E-6))
    # Pick the first unused two-digit file number (00-99) for today's date
    # so earlier captures from the same day are not overwritten.
    file_number = 0
    while file_number < 100:
        save_file_name = SAVE_FILE_BASE_NAME+save_date+"_{0:02d}".format(file_number)+SAVE_FILE_EXTENTION
        if (glob.glob(save_file_name)==[]):
            break
        else:
            file_number += 1
    # NOTE(review): if all 100 numbers are taken, number 99's name is reused
    # and that capture is overwritten — confirm this is acceptable.
    with open(save_file_name, "wb") as fout:
        fout.write(recv_buff)
    # Commented-out post-processing: per-loop transfer statistics and a
    # matplotlib plot of buffer growth / smoothed receive speed over time.
    # x = np.array([])
    # delta_arry = np.array([])
    # y = np.array([])
    # for i, ele in enumerate(recv_info):
    #     rela_stime = ele[0]-start_time
    #     rela_etime = ele[1]-start_time
    #     delta = ele[1]-ele[0]
    #     byte_size = ele[2]
    #     x = np.append(x, [rela_etime])
    #     delta_arry = np.append(delta_arry, [delta])
    #     y = np.append(y, [byte_size])
    #     if (delta>0):
    #         Mbps_speed = (ele[2]*8/rela_etime)*1E-6
    #     else:
    #         Mbps_speed = -1
    #     print("Loop No.:{0:4d} | Start:{1:10.5f} sec | End:{2:10.5f} sec | Delta:{3:15.5f} msec | Size:{4:3.2f} MByte | Speed:{5:5.2f} Mbps".format(i, rela_stime, rela_etime, delta*1E3, byte_size*1E-6, Mbps_speed))
    # valid_data_index = np.where((delta_arry*1E3)<5000)
    # valid_x = x[valid_data_index]
    # valid_y = y[valid_data_index]
    # MEAN_NUM = 10
    # smooth_ary = np.ones(MEAN_NUM)/MEAN_NUM
    # smoothed_valid_y = np.convolve(valid_y, smooth_ary, mode='valid')
    # smoothed_valid_x = np.convolve(valid_x, smooth_ary, mode='valid')
    # fig, ax1 = plt.subplots()
    # ax2 = ax1.twinx()
    # ax1.set_title("Receiving speed time variation")
    # ax1.set_xlabel("Time [msec]")
    # ax1.set_ylabel("Buffer size(Accumulation) [MByte]")
    # ax2.set_ylabel("Speed [Mbps]")
    # ax2.plot(smoothed_valid_x*1E3, np.gradient(smoothed_valid_y, smoothed_valid_x)*8*1E-6, label="Speed", color='darkorange', marker='.', markersize=8, zorder=0)
    # ax1.plot(valid_x*1E3, valid_y*1E-6, label="Buffer size", color='steelblue', marker='.', markersize=8, zorder=10)
    # handler1, label1 = ax1.get_legend_handles_labels()
    # handler2, label2 = ax2.get_legend_handles_labels()
    # ax1.legend(handler1 + handler2, label1 + label2, loc='lower right')
    # save_fig_name = SAVE_FILE_BASE_NAME+save_date+"_{0:02d}".format(file_number)+SAVE_FIG_EXTENTION
    # with open(save_fig_name, mode='wb') as figout:
    #     pickle.dump(fig, figout)
    # No figures are created above, so this is currently a no-op.
    plt.show()
def myfunc(firstname, lastname):
    """Print a friendly greeting for the given first and last name."""
    greeting = 'hello "%s %s", how are you?' % (firstname, lastname)
    print(greeting)


myfunc('Tony', 'Starks')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.