| id | content |
|---|---|
88564
|
import os
import wandb
import torch
import torchvision
from config import set_params
from kws.utils import set_random_seed, transforms
from kws.utils.data import SpeechCommandsDataset, load_data, split_data
from kws.model import treasure_net
from kws.train import train
def main():
# set parameters and random seed
params = set_params()
set_random_seed(params['random_seed'])
params['device'] = torch.device("cuda:0" if (torch.cuda.is_available()) else "cpu")
if params['verbose']:
print('Using device', params['device'])
# load and split data
data = load_data(params['data_root'])
train_data, valid_data = split_data(data, params['valid_ratio'])
if params['verbose']:
print('Data loaded and split')
# create dataloaders
train_transform = torchvision.transforms.Compose([
transforms.RandomVolume(gain_db=params['gain_db']),
transforms.RandomPitchShift(sample_rate=params['sample_rate'],
pitch_shift=params['pitch_shift']),
torchvision.transforms.RandomChoice([
transforms.GaussianNoise(scale=params['noise_scale']),
transforms.AudioNoise(scale=params['audio_scale'],
sample_rate=params['sample_rate']),
]),
])
train_dataset = SpeechCommandsDataset(root=params['data_root'], labels=train_data,
keywords=params['keywords'], audio_seconds=params['audio_seconds'],
sample_rate=params['sample_rate'], transform=train_transform)
valid_dataset = SpeechCommandsDataset(root=params['data_root'], labels=valid_data,
keywords=params['keywords'], audio_seconds=params['audio_seconds'],
sample_rate=params['sample_rate'])
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=params['batch_size'],
num_workers=params['num_workers'], shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=params['batch_size'],
num_workers=params['num_workers'], shuffle=True)
if params['verbose']:
print('Data loaders prepared')
# initialize model and optimizer
model = treasure_net(params).to(params['device'])
optimizer = torch.optim.Adam(model.parameters(), lr=params['lr'], weight_decay=params['weight_decay'])
if params['load_model']:
checkpoint = torch.load(params['model_checkpoint'])
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optim_state_dict'])
if params['verbose']:
print('Model and optimizer initialized')
# create checkpoints folder
if not os.path.isdir(params['checkpoint_dir']):
os.mkdir(params['checkpoint_dir'])
# initialize wandb
if params['use_wandb']:
wandb.init(project=params['wandb_project'])
wandb.watch(model)
# train
train(model, optimizer, train_loader, valid_loader, params)
if __name__ == '__main__':
main()
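# A minimal sketch (not the real config module) of the dictionary set_params()
# is assumed to return; only keys referenced in this script are listed, and all
# values below are illustrative:
# params = {
#     'random_seed': 42, 'verbose': True, 'data_root': './speech_commands',
#     'valid_ratio': 0.1, 'gain_db': 10, 'pitch_shift': 2,
#     'noise_scale': 0.05, 'audio_scale': 0.5, 'sample_rate': 16000,
#     'keywords': ['yes', 'no'], 'audio_seconds': 1, 'batch_size': 128,
#     'num_workers': 4, 'lr': 3e-4, 'weight_decay': 1e-5,
#     'load_model': False, 'model_checkpoint': 'checkpoints/last.pt',
#     'checkpoint_dir': 'checkpoints', 'use_wandb': False, 'wandb_project': 'kws',
# }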
|
88595
|
import flux
import pytest
import requests
from flask_app import models
from .utils import model_for
def test_start_session_with_subjects(client, subjects):
session = client.report_session_start(
subjects=subjects
)
assert session.refresh().subjects == [dict(s) for s in subjects]
def test_add_subject(started_session, subjects, flask_app):
started_session.add_subject(**subjects[0])
with flask_app.app_context():
session = model_for(started_session)
assert len(session.subject_instances) == 1
assert session.subject_instances[0].subject.name == subjects[0]['name']
assert session.subject_instances[
0].revision.product_version.product.name == subjects[0]['product']
def test_product_rendered_field(started_session, subjects):
started_session.add_subject(**subjects[0])
started_session.refresh()
assert started_session.subjects == [subjects[0]]
def test_subject_activity(client, subjects, flask_app):
current_time = flux.current_timeline.time() # pylint: disable=no-member
client.report_session_start(subjects=subjects)
with flask_app.app_context():
for subject in subjects:
s = models.Subject.query.filter_by(name=subject.name).one()
assert s.last_activity == current_time
@pytest.mark.parametrize('field_name', ['product', 'version', 'revision'])
def test_add_subject_deduplication(started_session, flask_app, field_name):
first_subject = {'name': 'some_subject',
'product': 'car', 'version': '1', 'revision': 'a'}
started_session.add_subject(**first_subject)
second_subject = first_subject.copy()
second_subject[field_name] = 'new_field_value'
started_session.add_subject(**second_subject)
with flask_app.app_context():
session = model_for(started_session)
assert len(session.subject_instances) == 2
prod1, prod2 = [x.revision.product_version.product.id for x in session.subject_instances]
ver1, ver2 = [x.revision.product_version.id for x in session.subject_instances]
rev1, rev2 = [x.revision.id for x in session.subject_instances]
if field_name == 'product':
assert prod1 != prod2
assert ver1 != ver2
assert rev1 != rev2
elif field_name == 'version':
assert prod1 == prod2
assert ver1 != ver2
assert rev1 != rev2
elif field_name == 'revision':
assert prod1 == prod2
assert ver1 == ver2
assert rev1 != rev2
else:
raise NotImplementedError() # pragma: no cover
def test_query_sessions_by_subjects(client, subjects):
session1 = client.report_session_start(
subjects=[subjects[0]]
)
session2 = client.report_session_start(
subjects=[subjects[1]]
)
assert subjects[0].name != subjects[1].name
_get = lambda subject: requests.get(client.api.url.add_path('rest/sessions').add_query_param('subject_name', subject.name)).json()['sessions']
[s1] = _get(subjects[0])
assert s1['id'] == session1.id
[s2] = _get(subjects[1])
assert s2['id'] == session2.id
|
88605
|
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import types_axi_slave_readwrite_lite_simultaneous
expected_verilog = """
module test;
reg CLK;
reg RST;
wire [32-1:0] sum;
reg [32-1:0] myaxi_awaddr;
reg [4-1:0] myaxi_awcache;
reg [3-1:0] myaxi_awprot;
reg myaxi_awvalid;
wire myaxi_awready;
reg [32-1:0] myaxi_wdata;
reg [4-1:0] myaxi_wstrb;
reg myaxi_wvalid;
wire myaxi_wready;
wire [2-1:0] myaxi_bresp;
wire myaxi_bvalid;
reg myaxi_bready;
reg [32-1:0] myaxi_araddr;
reg [4-1:0] myaxi_arcache;
reg [3-1:0] myaxi_arprot;
reg myaxi_arvalid;
wire myaxi_arready;
wire [32-1:0] myaxi_rdata;
wire [2-1:0] myaxi_rresp;
wire myaxi_rvalid;
reg myaxi_rready;
reg [32-1:0] _axi_awaddr;
wire [4-1:0] _axi_awcache;
wire [3-1:0] _axi_awprot;
reg _axi_awvalid;
wire _axi_awready;
reg [32-1:0] _axi_wdata;
reg [4-1:0] _axi_wstrb;
reg _axi_wvalid;
wire _axi_wready;
wire [2-1:0] _axi_bresp;
wire _axi_bvalid;
wire _axi_bready;
reg [32-1:0] _axi_araddr;
wire [4-1:0] _axi_arcache;
wire [3-1:0] _axi_arprot;
reg _axi_arvalid;
wire _axi_arready;
wire [32-1:0] _axi_rdata;
wire [2-1:0] _axi_rresp;
wire _axi_rvalid;
wire _axi_rready;
assign _axi_awcache = 3;
assign _axi_awprot = 0;
assign _axi_bready = 1;
assign _axi_arcache = 3;
assign _axi_arprot = 0;
reg [3-1:0] outstanding_wcount_0;
wire [32-1:0] _tmp_1;
assign _tmp_1 = _axi_awaddr;
always @(*) begin
myaxi_awaddr = _tmp_1;
end
wire [4-1:0] _tmp_2;
assign _tmp_2 = _axi_awcache;
always @(*) begin
myaxi_awcache = _tmp_2;
end
wire [3-1:0] _tmp_3;
assign _tmp_3 = _axi_awprot;
always @(*) begin
myaxi_awprot = _tmp_3;
end
wire _tmp_4;
assign _tmp_4 = _axi_awvalid;
always @(*) begin
myaxi_awvalid = _tmp_4;
end
assign _axi_awready = myaxi_awready;
wire [32-1:0] _tmp_5;
assign _tmp_5 = _axi_wdata;
always @(*) begin
myaxi_wdata = _tmp_5;
end
wire [4-1:0] _tmp_6;
assign _tmp_6 = _axi_wstrb;
always @(*) begin
myaxi_wstrb = _tmp_6;
end
wire _tmp_7;
assign _tmp_7 = _axi_wvalid;
always @(*) begin
myaxi_wvalid = _tmp_7;
end
assign _axi_wready = myaxi_wready;
assign _axi_bresp = myaxi_bresp;
assign _axi_bvalid = myaxi_bvalid;
wire _tmp_8;
assign _tmp_8 = _axi_bready;
always @(*) begin
myaxi_bready = _tmp_8;
end
wire [32-1:0] _tmp_9;
assign _tmp_9 = _axi_araddr;
always @(*) begin
myaxi_araddr = _tmp_9;
end
wire [4-1:0] _tmp_10;
assign _tmp_10 = _axi_arcache;
always @(*) begin
myaxi_arcache = _tmp_10;
end
wire [3-1:0] _tmp_11;
assign _tmp_11 = _axi_arprot;
always @(*) begin
myaxi_arprot = _tmp_11;
end
wire _tmp_12;
assign _tmp_12 = _axi_arvalid;
always @(*) begin
myaxi_arvalid = _tmp_12;
end
assign _axi_arready = myaxi_arready;
assign _axi_rdata = myaxi_rdata;
assign _axi_rresp = myaxi_rresp;
assign _axi_rvalid = myaxi_rvalid;
wire _tmp_13;
assign _tmp_13 = _axi_rready;
always @(*) begin
myaxi_rready = _tmp_13;
end
reg [32-1:0] read_fsm;
localparam read_fsm_init = 0;
reg [32-1:0] rsum;
reg __axi_cond_0_1;
reg __axi_cond_1_1;
assign _axi_rready = (read_fsm == 1) || (read_fsm == 3);
reg [32-1:0] write_fsm;
localparam write_fsm_init = 0;
reg __axi_cond_2_1;
reg [32-1:0] wdata;
reg __axi_cond_3_1;
reg __axi_cond_4_1;
reg __axi_cond_5_1;
main
uut
(
.CLK(CLK),
.RST(RST),
.sum(sum),
.myaxi_awaddr(myaxi_awaddr),
.myaxi_awcache(myaxi_awcache),
.myaxi_awprot(myaxi_awprot),
.myaxi_awvalid(myaxi_awvalid),
.myaxi_awready(myaxi_awready),
.myaxi_wdata(myaxi_wdata),
.myaxi_wstrb(myaxi_wstrb),
.myaxi_wvalid(myaxi_wvalid),
.myaxi_wready(myaxi_wready),
.myaxi_bresp(myaxi_bresp),
.myaxi_bvalid(myaxi_bvalid),
.myaxi_bready(myaxi_bready),
.myaxi_araddr(myaxi_araddr),
.myaxi_arcache(myaxi_arcache),
.myaxi_arprot(myaxi_arprot),
.myaxi_arvalid(myaxi_arvalid),
.myaxi_arready(myaxi_arready),
.myaxi_rdata(myaxi_rdata),
.myaxi_rresp(myaxi_rresp),
.myaxi_rvalid(myaxi_rvalid),
.myaxi_rready(myaxi_rready)
);
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
_axi_awaddr = 0;
_axi_awvalid = 0;
_axi_wdata = 0;
_axi_wstrb = 0;
_axi_wvalid = 0;
_axi_araddr = 0;
_axi_arvalid = 0;
outstanding_wcount_0 = 0;
read_fsm = read_fsm_init;
rsum = 0;
__axi_cond_0_1 = 0;
__axi_cond_1_1 = 0;
write_fsm = write_fsm_init;
__axi_cond_2_1 = 0;
wdata = 100;
__axi_cond_3_1 = 0;
__axi_cond_4_1 = 0;
__axi_cond_5_1 = 0;
#100;
RST = 1;
#100;
RST = 0;
#100000;
$finish;
end
always @(posedge CLK) begin
if(RST) begin
outstanding_wcount_0 <= 0;
_axi_araddr <= 0;
_axi_arvalid <= 0;
__axi_cond_0_1 <= 0;
__axi_cond_1_1 <= 0;
_axi_awaddr <= 0;
_axi_awvalid <= 0;
__axi_cond_2_1 <= 0;
_axi_wdata <= 0;
_axi_wvalid <= 0;
_axi_wstrb <= 0;
__axi_cond_3_1 <= 0;
__axi_cond_4_1 <= 0;
__axi_cond_5_1 <= 0;
end else begin
if(__axi_cond_0_1) begin
_axi_arvalid <= 0;
end
if(__axi_cond_1_1) begin
_axi_arvalid <= 0;
end
if(__axi_cond_2_1) begin
_axi_awvalid <= 0;
end
if(__axi_cond_3_1) begin
_axi_wvalid <= 0;
end
if(__axi_cond_4_1) begin
_axi_awvalid <= 0;
end
if(__axi_cond_5_1) begin
_axi_wvalid <= 0;
end
if(_axi_wvalid && _axi_wready && !(_axi_bvalid && _axi_bready) && (outstanding_wcount_0 < 7)) begin
outstanding_wcount_0 <= outstanding_wcount_0 + 1;
end
if(!(_axi_wvalid && _axi_wready) && (_axi_bvalid && _axi_bready) && (outstanding_wcount_0 > 0)) begin
outstanding_wcount_0 <= outstanding_wcount_0 - 1;
end
if((read_fsm == 0) && (_axi_arready || !_axi_arvalid)) begin
_axi_araddr <= 1024;
_axi_arvalid <= 1;
end
__axi_cond_0_1 <= 1;
if(_axi_arvalid && !_axi_arready) begin
_axi_arvalid <= _axi_arvalid;
end
if((read_fsm == 2) && (_axi_arready || !_axi_arvalid)) begin
_axi_araddr <= 2048;
_axi_arvalid <= 1;
end
__axi_cond_1_1 <= 1;
if(_axi_arvalid && !_axi_arready) begin
_axi_arvalid <= _axi_arvalid;
end
if((write_fsm == 0) && (_axi_awready || !_axi_awvalid)) begin
_axi_awaddr <= 1024;
_axi_awvalid <= 1;
end
__axi_cond_2_1 <= 1;
if(_axi_awvalid && !_axi_awready) begin
_axi_awvalid <= _axi_awvalid;
end
if((write_fsm == 1) && ((outstanding_wcount_0 < 6) && (_axi_wready || !_axi_wvalid))) begin
_axi_wdata <= wdata;
_axi_wvalid <= 1;
_axi_wstrb <= { 4{ 1'd1 } };
end
__axi_cond_3_1 <= 1;
if(_axi_wvalid && !_axi_wready) begin
_axi_wvalid <= _axi_wvalid;
end
if((write_fsm == 2) && (_axi_awready || !_axi_awvalid)) begin
_axi_awaddr <= 1024;
_axi_awvalid <= 1;
end
__axi_cond_4_1 <= 1;
if(_axi_awvalid && !_axi_awready) begin
_axi_awvalid <= _axi_awvalid;
end
if((write_fsm == 3) && ((outstanding_wcount_0 < 6) && (_axi_wready || !_axi_wvalid))) begin
_axi_wdata <= wdata;
_axi_wvalid <= 1;
_axi_wstrb <= { 4{ 1'd1 } };
end
__axi_cond_5_1 <= 1;
if(_axi_wvalid && !_axi_wready) begin
_axi_wvalid <= _axi_wvalid;
end
end
end
localparam read_fsm_1 = 1;
localparam read_fsm_2 = 2;
localparam read_fsm_3 = 3;
localparam read_fsm_4 = 4;
localparam read_fsm_5 = 5;
always @(posedge CLK) begin
if(RST) begin
read_fsm <= read_fsm_init;
rsum <= 0;
end else begin
case(read_fsm)
read_fsm_init: begin
if(_axi_arready || !_axi_arvalid) begin
read_fsm <= read_fsm_1;
end
end
read_fsm_1: begin
if(_axi_rready && _axi_rvalid) begin
rsum <= rsum + _axi_rdata;
end
if(_axi_rready && _axi_rvalid) begin
read_fsm <= read_fsm_2;
end
end
read_fsm_2: begin
if(_axi_arready || !_axi_arvalid) begin
read_fsm <= read_fsm_3;
end
end
read_fsm_3: begin
if(_axi_rready && _axi_rvalid) begin
rsum <= rsum + _axi_rdata;
end
if(_axi_rready && _axi_rvalid) begin
read_fsm <= read_fsm_4;
end
end
read_fsm_4: begin
$display("rsum=%d expected_rsum=%d", rsum, 768);
read_fsm <= read_fsm_5;
end
endcase
end
end
localparam write_fsm_1 = 1;
localparam write_fsm_2 = 2;
localparam write_fsm_3 = 3;
localparam write_fsm_4 = 4;
localparam write_fsm_5 = 5;
localparam write_fsm_6 = 6;
localparam write_fsm_7 = 7;
localparam write_fsm_8 = 8;
localparam write_fsm_9 = 9;
localparam write_fsm_10 = 10;
localparam write_fsm_11 = 11;
localparam write_fsm_12 = 12;
localparam write_fsm_13 = 13;
localparam write_fsm_14 = 14;
localparam write_fsm_15 = 15;
always @(posedge CLK) begin
if(RST) begin
write_fsm <= write_fsm_init;
wdata <= 100;
end else begin
case(write_fsm)
write_fsm_init: begin
wdata <= 100;
if(_axi_awready || !_axi_awvalid) begin
write_fsm <= write_fsm_1;
end
end
write_fsm_1: begin
if((outstanding_wcount_0 < 6) && (_axi_wready || !_axi_wvalid)) begin
write_fsm <= write_fsm_2;
end
end
write_fsm_2: begin
wdata <= 200;
if(_axi_awready || !_axi_awvalid) begin
write_fsm <= write_fsm_3;
end
end
write_fsm_3: begin
if((outstanding_wcount_0 < 6) && (_axi_wready || !_axi_wvalid)) begin
write_fsm <= write_fsm_4;
end
end
write_fsm_4: begin
write_fsm <= write_fsm_5;
end
write_fsm_5: begin
write_fsm <= write_fsm_6;
end
write_fsm_6: begin
write_fsm <= write_fsm_7;
end
write_fsm_7: begin
write_fsm <= write_fsm_8;
end
write_fsm_8: begin
write_fsm <= write_fsm_9;
end
write_fsm_9: begin
write_fsm <= write_fsm_10;
end
write_fsm_10: begin
write_fsm <= write_fsm_11;
end
write_fsm_11: begin
write_fsm <= write_fsm_12;
end
write_fsm_12: begin
write_fsm <= write_fsm_13;
end
write_fsm_13: begin
write_fsm <= write_fsm_14;
end
write_fsm_14: begin
$display("sum=%d expected_sum=%d", sum, 300);
write_fsm <= write_fsm_15;
end
endcase
end
end
endmodule
module main
(
input CLK,
input RST,
output reg [32-1:0] sum,
input [32-1:0] myaxi_awaddr,
input [4-1:0] myaxi_awcache,
input [3-1:0] myaxi_awprot,
input myaxi_awvalid,
output myaxi_awready,
input [32-1:0] myaxi_wdata,
input [4-1:0] myaxi_wstrb,
input myaxi_wvalid,
output myaxi_wready,
output [2-1:0] myaxi_bresp,
output reg myaxi_bvalid,
input myaxi_bready,
input [32-1:0] myaxi_araddr,
input [4-1:0] myaxi_arcache,
input [3-1:0] myaxi_arprot,
input myaxi_arvalid,
output myaxi_arready,
output reg [32-1:0] myaxi_rdata,
output [2-1:0] myaxi_rresp,
output reg myaxi_rvalid,
input myaxi_rready
);
assign myaxi_bresp = 0;
assign myaxi_rresp = 0;
reg [32-1:0] fsm;
localparam fsm_init = 0;
reg [32-1:0] addr_0;
reg writevalid_1;
reg readvalid_2;
reg prev_awvalid_3;
reg prev_arvalid_4;
assign myaxi_awready = (fsm == 0) && (!writevalid_1 && !readvalid_2 && !myaxi_bvalid && prev_awvalid_3);
assign myaxi_arready = (fsm == 0) && (!readvalid_2 && !writevalid_1 && prev_arvalid_4 && !prev_awvalid_3);
reg [32-1:0] rdata;
reg _myaxi_cond_0_1;
assign myaxi_wready = fsm == 100;
always @(posedge CLK) begin
if(RST) begin
myaxi_bvalid <= 0;
prev_awvalid_3 <= 0;
prev_arvalid_4 <= 0;
writevalid_1 <= 0;
readvalid_2 <= 0;
addr_0 <= 0;
myaxi_rdata <= 0;
myaxi_rvalid <= 0;
_myaxi_cond_0_1 <= 0;
end else begin
if(_myaxi_cond_0_1) begin
myaxi_rvalid <= 0;
end
if(myaxi_bvalid && myaxi_bready) begin
myaxi_bvalid <= 0;
end
if(myaxi_wvalid && myaxi_wready) begin
myaxi_bvalid <= 1;
end
prev_awvalid_3 <= myaxi_awvalid;
prev_arvalid_4 <= myaxi_arvalid;
writevalid_1 <= 0;
readvalid_2 <= 0;
if(myaxi_awready && myaxi_awvalid && !myaxi_bvalid) begin
addr_0 <= myaxi_awaddr;
writevalid_1 <= 1;
end else if(myaxi_arready && myaxi_arvalid) begin
addr_0 <= myaxi_araddr;
readvalid_2 <= 1;
end
if((fsm == 1) && (myaxi_rready || !myaxi_rvalid)) begin
myaxi_rdata <= rdata;
myaxi_rvalid <= 1;
end
_myaxi_cond_0_1 <= 1;
if(myaxi_rvalid && !myaxi_rready) begin
myaxi_rvalid <= myaxi_rvalid;
end
end
end
localparam fsm_1 = 1;
localparam fsm_2 = 2;
localparam fsm_100 = 100;
localparam fsm_101 = 101;
always @(posedge CLK) begin
if(RST) begin
fsm <= fsm_init;
rdata <= 0;
sum <= 0;
end else begin
case(fsm)
fsm_init: begin
if(readvalid_2) begin
rdata <= addr_0 >> 2;
end
if(writevalid_1) begin
fsm <= fsm_100;
end
if(readvalid_2) begin
fsm <= fsm_1;
end
end
fsm_1: begin
if(myaxi_rready || !myaxi_rvalid) begin
rdata <= rdata + 1;
end
if(myaxi_rready || !myaxi_rvalid) begin
fsm <= fsm_2;
end
end
fsm_2: begin
fsm <= fsm_init;
end
fsm_100: begin
if(myaxi_wready && myaxi_wvalid) begin
sum <= sum + myaxi_wdata;
end
if(myaxi_wready && myaxi_wvalid) begin
fsm <= fsm_101;
end
end
fsm_101: begin
fsm <= fsm_init;
end
endcase
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = types_axi_slave_readwrite_lite_simultaneous.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
|
88622
|
import unittest
import datetime
from unittest.mock import MagicMock
from osgar.drivers.winsen_gas_detector import WinsenCO2
class WinsenCO2Test(unittest.TestCase):
def test_parse_packet(self):
ref_packet = bytes.fromhex('ff86063b3d000000fc')
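# Reply frame layout implied by the assertions below: 0xFF start byte, 0x86
# command echo, the CO2 value in the next two bytes (0x06 0x3B == 1595 ppm),
# remaining payload bytes, and a trailing checksum byte (0xFC here).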
sensor = WinsenCO2(config={}, bus=MagicMock())
sensor._buf += ref_packet
packet = sensor.get_packet()
self.assertEqual(packet, ref_packet)
co2_value = sensor.parse_CO2_packet(packet)
self.assertEqual(co2_value, 1595)
def test_clear_buffer(self):
sensor = WinsenCO2(config={}, bus=MagicMock())
sensor._buf = bytes([0] * 1000)
packet = sensor.get_packet()
self.assertIsNone(packet)
self.assertEqual(sensor._buf, b'')
def test_keep_ff_in_buffer(self):
sensor = WinsenCO2(config={}, bus=MagicMock())
sensor._buf = bytes([0] * 1000) + b'\xFF'
packet = sensor.get_packet()
self.assertIsNone(packet)
self.assertEqual(sensor._buf, bytes([0xFF]))
def test_checksum_error(self):
sensor = WinsenCO2(config={}, bus=MagicMock())
sensor._buf = bytes.fromhex('ff86063b3d000000fe') # valid should be 0xFC
packet = sensor.get_packet()
self.assertIsNone(packet)
self.assertEqual(sensor.errors, 1)
# vim: expandtab sw=4 ts=4
|
88643
|
from boa3.builtin.nativecontract.stdlib import StdLib
def main() -> int:
return StdLib.atoi('100', 10, 'extra')
|
88666
|
import numpy
import matplotlib.pylab as plt
class LinearHeadHead(object):
"""
Solves the system:
\nabla \cdot ( \frac{\rho k}{\mu} \nabla (p + \rho g z) ) = 0 on the domain [x_0, x_1] \times [z_0, z_1]
Boundary conditions are given by:
h(x_0,z,t) = h_0 [m] => p(x_0,z,t) = p_atm + (h_0-z) \rho g
h(x_1,z,t) = h_1 [m] => p(x_1,z,t) = p_atm + (h_1-z) \rho g
Parameters are in units of:
\rho : density, [kg/m^3]
\mu : viscosity, [kg/(m s)]
K : absolute permeability, [ m^2 ]
g : gravity, used in converting head to pressure, [ m / s^2 ]
"""
def __init__(self, params=None):
if params is None:
params = dict()
params.setdefault("x_0",0)
params.setdefault("x_1",100)
params.setdefault("z_0",0)
params.setdefault("z_1",10)
params.setdefault("k",1.1847e-12)
params.setdefault("rho",998.2)
params.setdefault("mu",1.002e-3)
params.setdefault("h_0",20.0)
params.setdefault("h_1",19.0)
params.setdefault("g",9.80665)
params.setdefault("p_atm",101325.0)
self.__dict__.update(params)
def head(self, coords):
"""
Compute the head at the x-values given by coords[:]
h(x) = h_0 + (x/L)*(h_1-h_0)
"""
head = numpy.zeros(len(coords))
head[:] = self.h_0 + ((self.h_1-self.h_0 )/(self.x_1-self.x_0))*coords[:,0]
return head
def pressure(self, coords):
"""
Compute the pressure at (x,z)-coordinates in the coords[:,:] array.
Note: coords has dimension len(coords) x 2.
"""
pressure = numpy.zeros((len(coords),),'d')
head = self.head(coords)
pressure[:]=self.p_atm+(head[:]-coords[:,1])*self.rho*self.g
return pressure
def createFromXML(filename):
# grab params from input file
params = dict()
import amanzi_xml.utils.io
xml = amanzi_xml.utils.io.fromFile(filename)
import amanzi_xml.utils.search as search
#
# Domain Size
#
xyz = search.find_tag_path(xml, ["amanzi_input","mesh","generate","box",]).get("low_coordinates")
params["x_0"] = float(xyz.split(',')[0])
params["z_0"] = float(xyz.split(',')[2])
xyz = search.find_tag_path(xml, ["amanzi_input","mesh","generate","box",]).get("high_coordinates")
params["x_1"] = float(xyz.split(',')[0])
params["z_1"] = float(xyz.split(',')[2])
#
# Material Properties
#
strK = search.find_tag_path(xml, ["amanzi_input","materials","material","permeability",]).get('x')
params["k"] = float(strK)
strMu = search.find_tag_path(xml, ["amanzi_input","phases","liquid_phase","viscosity",]).text
params["mu"] = float(strMu)
strRho = search.find_tag_path(xml, ["amanzi_input","phases","liquid_phase","density",]).text
params["rho"] = float(strRho)
#
# Boundary Conditions
#
strh0 = search.find_tag_path(xml, ["amanzi_input","boundary_conditions","boundary_condition,LeftBC","liquid_phase","liquid_component","hydrostatic",]).get("value")
params["h_0"] = float(strh0)
strhL = search.find_tag_path(xml, ["amanzi_input","boundary_conditions","boundary_condition,RightBC","liquid_phase","liquid_component","hydrostatic",]).get("value")
params["h_L"] = float(strhL)
#
# Standard Gravity
#
params.setdefault("g",9.80665)
# instantiate the class
return LinearHeadHead(params)
if __name__ == "__main__":
# Instantiate the class
lhh = LinearHeadHead()
# Get 11 equally spaced points: dx=(x_1-x_0)/10
x = numpy.linspace(lhh.x_0,lhh.x_1,11)
# Create space for a set of (x,z) points
coords = numpy.zeros((11,2))
# set x
coords[:,0]=x
# set z
coords[:,1]=3
# compute heads and pressures
h1 = lhh.head(coords)
p1 = lhh.pressure(coords)
# reset z
coords[:,1]=7
# compute heads and pressures
h2 = lhh.head(coords)
p2 = lhh.pressure(coords)
# plot
plt.plot(x,p1)
plt.plot(x,p2)
plt.xlabel('x-coordinate [m]')
plt.ylabel('Pressure [Pa]')
# show the plot
# plt.show()
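# Quick analytic check, assuming the default parameters above: the head drops
# linearly from h_0 = 20 m at x = 0 to h_1 = 19 m at x = 100 m, so
# lhh.head(numpy.array([[50.0, 0.0]]))[0] should equal 19.5.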
|
88712
|
import requests
from bs4 import BeautifulSoup
# WHY
# The SearchRequest module contains all the internal logic for the library.
#
# This encapsulates the logic,
# ensuring users can work at a higher level of abstraction.
# USAGE
# req = search_request.SearchRequest("[QUERY]", search_type="[title]")
class SearchRequest:
col_names = [
"ID",
"Author",
"Title",
"Publisher",
"Year",
"Pages",
"Language",
"Size",
"Extension",
"Mirror_1",
"Mirror_2",
"Mirror_3",
"Mirror_4",
"Mirror_5",
"Edit",
]
def __init__(self, query, search_type="title"):
self.query = query
self.search_type = search_type
def strip_i_tag_from_soup(self, soup):
subheadings = soup.find_all("i")
for subheading in subheadings:
subheading.decompose()
def get_search_page(self):
query_parsed = "%20".join(self.query.split(" "))
if self.search_type.lower() == "title":
search_url = (
f"http://gen.lib.rus.ec/search.php?req={query_parsed}&column=title"
)
elif self.search_type.lower() == "author":
search_url = (
f"http://gen.lib.rus.ec/search.php?req={query_parsed}&column=author"
)
else:
raise ValueError(f"Unsupported search_type: {self.search_type}")
search_page = requests.get(search_url)
return search_page
def aggregate_request_data(self):
search_page = self.get_search_page()
soup = BeautifulSoup(search_page.text, "lxml")
self.strip_i_tag_from_soup(soup)
# Libgen results contain 3 tables
# Table2: Table of data to scrape.
information_table = soup.find_all("table")[2]
# Determines whether the link url (for the mirror)
# or link text (for the title) should be preserved.
# Both the book title and mirror links have a "title" attribute,
# but only the mirror links have it filled (title="" vs title="libgen.io").
raw_data = [
[
td.a["href"]
if td.find("a")
and td.find("a").has_attr("title")
and td.find("a")["title"] != ""
else "".join(td.stripped_strings)
for td in row.find_all("td")
]
for row in information_table.find_all("tr")[
1:
] # Skip row 0 as it is the headings row
]
output_data = [dict(zip(self.col_names, row)) for row in raw_data]
return output_data
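# Usage sketch mirroring the USAGE note at the top of this module; the query
# string is illustrative and a reachable libgen mirror is assumed:
# req = SearchRequest("automate the boring stuff", search_type="title")
# for book in req.aggregate_request_data():
#     print(book["ID"], book["Title"], book["Mirror_1"])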
|
88732
|
import re
def get_query():
query = {
"_source": [
"text",
"full_text",
"extended_tweet.full_text",
"quoted_status.text",
"quoted_status.full_text",
"quoted_status.extended_tweet.full_text"
],
"query": {
"bool": {
"filter": [
{
"bool": {
"must_not": {
"exists": {
"field": "sentiment.vader.primary"
}
}
}
},
{
"bool": {
"must_not": {
"exists": {
"field": "retweeted_status.id"
}
}
}
}
]
}
}
}
return query
def get_tweet_text(hit):
text = (hit["extended_tweet"]["full_text"] if "extended_tweet" in hit
else hit["full_text"] if "full_text" in hit
else hit["text"])
quoted_text = None
if "quoted_status" in hit:
quoted_status = hit["quoted_status"]
quoted_text = (quoted_status["extended_tweet"]["full_text"] if "extended_tweet" in quoted_status
else quoted_status["full_text"] if "full_text" in quoted_status
else quoted_status["text"])
return text, quoted_text
def clean_text_for_vader(text):
text = re.sub(r"[\s]+", " ", text)
text = re.sub(r"http\S+", "", text)
text = re.sub(r" +", " ", text)
text = text.strip()
return text
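# Small usage sketch: the hit dict below is hypothetical, but shaped like the
# _source fields requested by get_query() above.
if __name__ == "__main__":
    sample_hit = {"full_text": "Longer   text with a link http://t.co/abc"}
    text, quoted = get_tweet_text(sample_hit)
    print(clean_text_for_vader(text))  # -> "Longer text with a link"
    print(quoted)                      # -> None (no quoted_status in the hit)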
|
88733
|
import hashlib
import pickle
import os
import sys
import subprocess
import time
class Colors:
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
cache = {}
hashes = []
def get_cache():
try:
with open('tests/results.dat', 'rb') as afile:
if afile:
global cache
cache = pickle.load(afile)
except FileNotFoundError:
os.makedirs('tests', exist_ok=True)
def save_cache():
ks = [k for k in cache.keys() if not k in hashes]
for k in ks:
del cache[k]
afile = open('tests/results.dat', 'wb')
pickle.dump(cache, afile)
afile.close()
def get_tests_results(question):
global cache
main_folder = question
script = '{0}.py'.format(question)
hasher = hashlib.md5()
with open(script, 'rb') as f:
hasher.update(f.read())
hsh = hasher.digest()
hashes.append(hsh)
if hsh in cache:
return cache[hsh]
tests = os.listdir(main_folder)
right_answers = 0
total_answers = 0
for test_folder in tests:
test_folder_name = '{0}/{1}'.format(main_folder, test_folder)
tests_cases = [case for case in os.listdir(test_folder_name)]
for index in range(int(len(tests_cases) / 2)):
case_input_name = '{0}/{1}{2}'.format(
test_folder_name, 'in', index + 1)
case_output_name = '{0}/{1}{2}'.format(
test_folder_name, 'out', index + 1)
input_file = open(case_input_name)
proc = subprocess.Popen(
['python', script], stdin=input_file,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
result = proc.communicate()[0].decode('utf-8').strip()
output_file = open(case_output_name).read().strip()
if result == output_file:
right_answers += 1
sys.stdout.write('.')
else:
sys.stdout.write('F')
total_answers += 1
sys.stdout.flush()
cache[hsh] = (right_answers, total_answers)
return cache[hsh]
if __name__ == '__main__':
is_successful = True
obis = [f for f in os.listdir('.') if os.path.isdir(f) and ('obi' in f)]
obis.sort()
get_cache()
for obi in obis:
print(obi.upper())
levels = os.listdir(obi)
for level in levels:
print(' {0}'.format(level))
level = '{0}/{1}'.format(obi, level)
stages = os.listdir(level)
for stage in stages:
print(' {0}'.format(stage))
stage = '{0}/{1}'.format(level, stage)
questions = [f for f in os.listdir(stage) if f.endswith('.py')]
for question in questions:
s = time.time()
name = question
print(' {0} '.format(name), end='')
question = '{0}/{1}'.format(stage, question)
folder = '{0}/{1}'.format(stage, os.path.splitext(name)[0])
if os.path.isdir(folder):
right_answers, total_answers = get_tests_results(folder)
if right_answers < total_answers:
is_successful = False
color = Colors.GREEN if right_answers == total_answers else Colors.FAIL
print(' {0}{1}/{2}{3}'.format(color, right_answers, total_answers, Colors.ENDC), end='')
print(' [{:.2f}s]'.format(time.time() - s))
save_cache()
if not is_successful:
exit(1)
exit(0)
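# Directory layout assumed by the walk above:
#   ./obi*/<level>/<stage>/<question>.py                                  (solution script)
#   ./obi*/<level>/<stage>/<question>/<group>/in1, out1, in2, out2, ...   (test cases)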
|
88737
|
import pandas as pd
import os
from tqdm import tqdm
from model.bert_things.pytorch_pretrained_bert.tokenization import BertTokenizer
from model.bert_things.pytorch_pretrained_bert import BertConfig, BertModel, BertPreTrainedModel
from model.bert_text_model import BertTextModel
from data_loader.utils.vocab import Vocab
import pickle
import sys
import logging
import numpy as np
import argparse
import torch
# ignore warnings from tokenization (sequence length is very long)
logging.getLogger("model.bert_things.pytorch_pretrained_bert.tokenization").setLevel(logging.CRITICAL)
def embed_text(text_codes, device, bert, bert_seq_length=512, max_seq_len_text=30):
"""
Embed text using the pretrained BERT model. This is done once up front to speed up training later
(the BERT model is kept fixed; fine-tuning it for downstream tasks is left as an extension).
Args:
text_codes: tokenized text
device: device to run model on
bert: the bert model
bert_seq_length: maximum bert sequence length
max_seq_len_text: maximum text sequence length occurring in the corpus, in units of bert_seq_length chunks
Output:
pooled_output: embedding
"""
x_text = torch.zeros((bert_seq_length, max_seq_len_text), dtype=torch.long)
x_mask = torch.zeros((bert_seq_length, max_seq_len_text,))
n = len(text_codes) // bert_seq_length - 1
for i in range(len(text_codes) // bert_seq_length):
x_text[:, i] = torch.Tensor(text_codes[i * bert_seq_length: (1 + i) * bert_seq_length])
x_mask[:, i] = 1
if (n * bert_seq_length <= len(text_codes)):
x_mask[len(text_codes) - bert_seq_length * (n + 1), n] = 1
x_text[:len(text_codes) - bert_seq_length * (n + 1), n] = torch.Tensor(text_codes[(n + 1) * bert_seq_length:])
x_text = x_text.to(device)
x_mask = x_mask.to(device)
with torch.no_grad():
_, pooled_output = bert(x_text.t(), attention_mask=x_mask.t())
return pooled_output
def compute_max_seq_len_text(df, col, tokenizer):
"""
Compute the maximum occurring sequence length in the dataset
Args:
df: dataframe containing the complete dataset
col: column name containing the text
tokenizer: map used to convert tokens to ids (assumes the tokenizer has a convert_tokens_to_ids function)
Output:
max_seq_len_text: (int)
"""
max_seq_len_text = 0
for i, r in df.iterrows():
text = r[col]
ttok = tokenizer.tokenize(text)
ttok = tokenizer.convert_tokens_to_ids(ttok)
if (len(ttok) > max_seq_len_text):
max_seq_len_text = len(ttok)
return max_seq_len_text
def _prepare_device(n_gpu_use):
"""
setup GPU device if available, move model into configured device
if n_gpu_use = 0, use cpu
"""
n_gpu = torch.cuda.device_count()
if n_gpu_use > 0 and n_gpu == 0:
logging.warning("Warning: There\'s no GPU available on this machine, training will be performed on CPU.")
n_gpu_use = -1
if n_gpu_use > n_gpu:
logging.warning("Warning: The number of GPU\'s configured to use is {}, but only {} are available on this machine.".format(n_gpu_use, n_gpu))
n_gpu_use = n_gpu
device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')
list_ids = list(range(n_gpu_use))
return device, list_ids
def main():
"""
Will generate a dictionary as follows:
<key> patientid : <value> list of dicts, where each dict contains admission data
[
{<key> feature/label name : <value> feature/label value}
]
"""
parser = argparse.ArgumentParser(description='Generate Text+Code dataset')
parser.add_argument('-p', '--path', default=None, type=str, help='path to pandas dataframe where rows are admissions')
parser.add_argument('-vp', '--vocab_path', default='', type=str, help='path to where code vocabulary are stored assumes diagnoses vocab file named as diag.vocab and cpt vocab as cpt.vocab')
parser.add_argument('-s', '--save', default='./', type=str, help='path to save pkl files')
parser.add_argument('-et', '--embed_text', default=False, action='store_true', help='flag whether to embed text or not')
parser.add_argument('-cpb', '--bert_config_path', default=None, type=str, help='path to bert config')
parser.add_argument('-vpb', '--bert_vocab_path', default=None, type=str, help='path to bert vocab ')
parser.add_argument('-sdp', '--state_dict_path', default=None, type=str, help='path to bert state dict')
parser.add_argument('-gpu', '--gpu', default=0, type=int)
parser.add_argument('-bsl', '--max_bert_seq_len', default=512, type=int, help='maximum sequence length of bert model')
parser.add_argument('-tsld', '--text_seq_length_discharge', default=0, type=int, help='pass this if maximum text sequence length is known for discharge text to avoid long processing time')
parser.add_argument('-tslr', '--text_seq_length_rest', default=0, type=int, help='pass this if maximum text sequence length is known for rest of text (other than discharge) to avoid longer processing time')
parser.add_argument('-sc', '--short_code', default=False, action='store_true', help='flag for using short codes ')
parser.add_argument('-diag', '--diagnoses', default=False, action='store_true', help='flag for including diagnoses codes')
parser.add_argument('-proc', '--procedures', default=False, action='store_true', help='flag for including procedures codes')
parser.add_argument('-med', '--medications', default=False, action='store_true', help='flag for including medication codes')
parser.add_argument('-cpt', '--cpts', default=False, action='store_true', help='flag for including cpt codes')
parser.add_argument('-ma', '--min_adm', default=0, type=int)
args = parser.parse_args()
df = pd.read_pickle(args.path)
df_orig = df
# remove organ donor admissions
if ('DIAGNOSIS' in df.columns):
REMOVE_DIAGNOSIS = ~((df['DIAGNOSIS'] == 'ORGAN DONOR ACCOUNT') | (df['DIAGNOSIS'] == 'ORGAN DONOR') | \
(df['DIAGNOSIS'] == 'DONOR ACCOUNT'))
df = df[REMOVE_DIAGNOSIS]
df = df[~df['ICD9_CODE'].isna()] # drop patients with no icd9 code?
df = df[~(df['TEXT_DISCHARGE'].isna() | df['TEXT_REST'].isna())]
if ('TIMEDELTA' in df.columns):
df['TIMEDELTA'] = df['TIMEDELTA'].fillna(pd.to_timedelta("0"))
df['TIMEDELTA'] = pd.to_timedelta(df['TIMEDELTA'])
df['TIMEDELTA'] = df['TIMEDELTA'].apply(lambda x: x.seconds)
pids = list(set(df['SUBJECT_ID'].tolist()))
# lambda
demographic_cols = {'AGE': [], 'GENDER': [], 'LAST_CAREUNIT': [],
'MARITAL_STATUS': [], 'ETHNICITY': [],
'DISCHARGE_LOCATION': []}
df.loc[:, 'MARITAL_STATUS'], demographic_cols['MARITAL_STATUS'] = pd.factorize(df['MARITAL_STATUS'])
df.loc[:, 'ETHNICITY'], demographic_cols['ETHNICITY'] = pd.factorize(df['ETHNICITY'])
df.loc[:, 'DISCHARGE_LOCATION'], demographic_cols['DISCHARGE_LOCATION'] = pd.factorize(df['DISCHARGE_LOCATION'])
df.loc[:, 'LAST_CAREUNIT'], demographic_cols['LAST_CAREUNIT'] = pd.factorize(df['LAST_CAREUNIT'])
df.loc[:, 'GENDER'], demographic_cols['GENDER'] = pd.factorize(df['GENDER'])
df.loc[:, 'AGE'] = df['AGE'].astype(int)
los_bins = [1, 2, 3, 4, 5, 6, 7, 8, 14, float('inf')]
los_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9]
df.loc[:, 'LOS'] = pd.cut(df['LOS'], bins=los_bins, labels=los_labels)
temp_data = []
data = {}
diag_vocab = Vocab()
cpt_vocab = Vocab()
med_vocab = Vocab()
proc_vocab = Vocab()
if (args.vocab_path != ''):
#to use below checkout https://github.com/sajaddarabi/HCUP-US-EHR
if (args.diagnoses):
diag_vocab._build_from_file(os.path.join(args.vocab_path, 'diag.vocab'))
if (args.cpts):
cpt_vocab._build_from_file(os.path.join(args.vocab_path, 'cpt.vocab'))
#if (args.procedures):
# proc_vocab._build_from_file(os.path.join(args.vocab_path, 'proc.vocab'))
#if (args.med):
#med_vocab._build_from_file(os.path.join(args.vocab_path, 'med.vocab'))
if (os.path.exists(os.path.join(args.save, 'data.pkl'))):
temp_data = pickle.load(open(os.path.join(args.save, 'data.pkl'), 'rb'))
temp_data = temp_data['data']
t = list(temp_data.keys())
t = t[0]
d = 'text_embedding_discharge' in temp_data[t][0]
if (not d):
temp_data = []
else:
model = None
bert_config = None
torch.cuda.empty_cache()
if args.embed_text:
tokenizer = BertTokenizer(args.bert_vocab_path)
if args.embed_text and (len(temp_data) == 0):
bert_config = BertConfig(args.bert_config_path)
model = BertTextModel(bert_config)
state_dict = torch.load(args.state_dict_path)
model.init_bert_weights(state_dict)
device, _ = _prepare_device(args.gpu)
model = model.to(device)
max_seq_len_text_d = args.text_seq_length_discharge
max_seq_len_text_r = args.text_seq_length_rest
if max_seq_len_text_d == 0:
max_seq_len_text = compute_max_seq_len_text(df, 'TEXT_DISCHARGE', tokenizer)
max_seq_len_text = max_seq_len_text // args.max_bert_seq_len + 1
max_seq_len_text_d = max_seq_len_text
print("text sequence discharge length: {}".format(max_seq_len_text_d))
if max_seq_len_text_r == 0:
max_seq_len_text = compute_max_seq_len_text(df, 'TEXT_REST', tokenizer)
max_seq_len_text = max_seq_len_text // args.max_bert_seq_len + 1
max_seq_len_text_r = max_seq_len_text
print("text sequence rest length: {}".format(max_seq_len_text_r))
try:
for pid in tqdm(pids):
pid_df = df[df['SUBJECT_ID'] == pid]
pid_df = pid_df.sort_values('ADMITTIME').reset_index()
if (len(pid_df) < 1): # skip patients with no admission records
continue
data[pid] = []
t = 0
hadm_ids = set(df['HADM_ID'])
for i, r in pid_df.iterrows():
# filter notes prior to n days and concatenate them
# leave discharge summary separate
admit_data = {}
demographics = [r['AGE'], r['GENDER'], r['MARITAL_STATUS']]
icu_unit = np.zeros((demographic_cols['LAST_CAREUNIT'].size, ), dtype=int)
icu_unit[r['LAST_CAREUNIT']] = 1
demographics += list(icu_unit)
ethnicity = np.zeros((demographic_cols['ETHNICITY'].size, ), dtype=int)
ethnicity[r['ETHNICITY']] = 1
demographics += list(ethnicity)
admit_data['demographics'] = demographics
dtok, ptok, mtok, ctok = [], [], [], []
diagnosis_codes, proc_codes, med_codes, cpt_codes = np.nan, np.nan, np.nan, np.nan
if args.diagnoses:
diagnosis_codes = r['ICD9_CODE']
if (diagnosis_codes == diagnosis_codes):
dtok = diag_vocab.convert_to_ids(diagnosis_codes , 'D', args.short_code)
if (args.procedures):
proc_codes = r['ICD9_CODE_PROCEDURE']
if (proc_codes == proc_codes):
ptok = proc_vocab.convert_to_ids(proc_codes, 'P', args.short_code)
if args.medications:
med_codes = r['NDC'] # issue with NDC what mapping version is being used..?
if (med_codes == med_codes):
mtok = med_vocab.convert_to_ids(med_codes, 'M')
if args.cpts:
cpt_codes = r['CPT_CD']
if (cpt_codes == cpt_codes):
ctok = cpt_vocab.convert_to_ids(cpt_codes, 'C')
admit_data['diagnoses'] = dtok
admit_data['procedures'] = ptok
admit_data['medications'] = mtok
admit_data['cptproc'] = ctok
if (r['TIMEDELTA'] == r['TIMEDELTA']):
t += r['TIMEDELTA']
admit_data['timedelta'] = t
text_discharge = r['TEXT_DISCHARGE']
text_rest = r['TEXT_REST']
ttokd = tokenizer.tokenize(text_discharge)
ttokd = tokenizer.convert_tokens_to_ids(ttokd)
ttokr = tokenizer.tokenize(text_rest)
ttokr = tokenizer.convert_tokens_to_ids(ttokr)
admit_data['text_discharge_raw'] = text_discharge
admit_data['text_rest_raw'] = text_rest
admit_data['text_discharge_len'] = len(ttokd)
admit_data['text_rest_len'] = len(ttokr)
admit_data['text_discharge_token'] = ttokd
admit_data['text_rest_token'] = ttokr
if len(temp_data) == 0:
if (args.embed_text):
ttokd = embed_text(ttokd, device, model, args.max_bert_seq_len, max_seq_len_text_d)
ttokd = ttokd.cpu().numpy()
ttokr = embed_text(ttokr, device, model, args.max_bert_seq_len, max_seq_len_text_r)
ttokr = ttokr.cpu().numpy()
else:
ttokd = temp_data[pid][i]['text_embedding_discharge']
ttokr = temp_data[pid][i]['text_embedding_rest']
admit_data['text_embedding_discharge'] = ttokd
admit_data['text_embedding_rest'] = ttokr
admit_data['los'] = r['LOS']
admit_data['readmission'] = r['readmission_label']
admit_data['mortality'] = r['DEATHTIME'] == r['DEATHTIME']
data[pid].append(admit_data)
except Exception as error:
print(error)
import pdb; pdb.set_trace()
if (not os.path.exists(args.save)):
os.makedirs(args.save)
# temporarily save data in case something goes wrong ...
try:
with open(os.path.join(args.save, 'data.pkl'), 'wb') as handle:
data_dict = {}
data_dict['data'] = data
pickle.dump(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
except:
import pdb; pdb.set_trace()
pids = list(data.keys())
flatten = lambda x: [item for sublist in x for item in sublist]
data_info = {}
num_icd9_codes, num_proc_codes, num_med_codes = 0, 0, 0
data_info['num_patients'] = len(pids)
data_info['max_seq_len_text_d'] = max_seq_len_text_d
data_info['max_seq_len_text_r'] = max_seq_len_text_r
data_info['num_icd9_codes'] = 0
data_info['num_proc_codes'] = 0
data_info['num_med_codes'] = 0
if (args.diagnoses):
num_icd9_codes = len(set(flatten(df_orig['ICD9_CODE'].dropna())))
data_info['num_icd9_codes'] = num_icd9_codes
if (args.procedures):
num_proc_codes = len(set(flatten(df_orig['ICD9_CODE_PROCEDURE'].dropna())))
data_info['num_proc_codes'] = num_proc_codes
if (args.medications):
num_med_codes = len(set(flatten(df_orig['NDC'].dropna())))
data_info['num_med_codes'] = num_med_codes
data_info['demographics_shape'] = len(data[pids[0]][0]['demographics'])
data_info['demographic_cols'] = demographic_cols
data_info['total_codes'] = data_info['num_icd9_codes'] + data_info['num_proc_codes'] + data_info['num_med_codes']
if (not os.path.exists(args.save)):
os.makedirs(args.save)
with open(os.path.join(args.save, 'data.pkl'), 'wb') as handle:
data_dict = {}
data_dict['info'] = data_info
data_dict['data'] = data
pickle.dump(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(args.save, 'cpt_vocab.pkl'), 'wb') as handle:
pickle.dump(cpt_vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(args.save, 'diag_vocab.pkl'), 'wb') as handle:
pickle.dump(diag_vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(args.save, 'med_vocab.pkl'), 'wb') as handle:
pickle.dump(med_vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(args.save, 'proc_vocab.pkl'), 'wb') as handle:
pickle.dump(proc_vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
main()
|
88742
|
from .generic_wrappers import * # NOQA
from .lambda_wrappers import action_lambda_v1, observation_lambda_v0, reward_lambda_v0 # NOQA
from .multiagent_wrappers import agent_indicator_v0, black_death_v2, \
pad_action_space_v0, pad_observations_v0 # NOQA
from supersuit.generic_wrappers import frame_skip_v0, color_reduction_v0, resize_v0, dtype_v0, \
flatten_v0, reshape_v0, normalize_obs_v0, clip_actions_v0, clip_reward_v0, \
delay_observations_v0, frame_stack_v1, max_observation_v0, \
sticky_actions_v0 # NOQA
from .vector.vector_constructors import gym_vec_env_v0, stable_baselines_vec_env_v0, \
stable_baselines3_vec_env_v0, concat_vec_envs_v1, pettingzoo_env_to_vec_env_v1 # NOQA
from .aec_vector import vectorize_aec_env_v0 # NOQA
class DeprecatedWrapper(ImportError):
pass
def __getattr__(wrapper_name):
"""
Gives error that looks like this when trying to import old version of wrapper:
File "./supersuit/__init__.py", line 38, in __getattr__
raise DeprecatedWrapper(f"{base}{version_num} is now deprecated, use {base}{act_version_num} instead")
supersuit.DeprecatedWrapper: concat_vec_envs_v0 is now deprecated, use concat_vec_envs_v1 instead
"""
start_v = wrapper_name.rfind("_v") + 2
version = wrapper_name[start_v:]
base = wrapper_name[:start_v]
try:
version_num = int(version)
is_valid_version = True
except ValueError:
is_valid_version = False
globs = globals()
if is_valid_version:
for act_version_num in range(1000):
if f"{base}{act_version_num}" in globs:
if version_num < act_version_num:
raise DeprecatedWrapper(f"{base}{version_num} is now deprecated, use {base}{act_version_num} instead")
raise ImportError(f"cannot import name '{wrapper_name}' from 'supersuit'")
__version__ = "3.3.2"
|
88774
|
import os
try:
basestring
except NameError:
basestring = str
class ListenImports:
ROBOT_LISTENER_API_VERSION = 2
def __init__(self, imports):
self.imports = open(imports, 'w')
def library_import(self, name, attrs):
self._imported("Library", name, attrs)
def resource_import(self, name, attrs):
self._imported("Resource", name, attrs)
def variables_import(self, name, attrs):
self._imported("Variables", name, attrs)
def _imported(self, import_type, name, attrs):
self.imports.write("Imported %s\n\tname: %s\n" % (import_type, name))
for name in sorted(attrs):
self.imports.write("\t%s: %s\n" % (name, self._pretty(attrs[name])))
def _pretty(self, entry):
if isinstance(entry, list):
return '[%s]' % ', '.join(entry)
if isinstance(entry, basestring) and os.path.isabs(entry):
entry = entry.replace('$py.class', '.py').replace('.pyc', '.py')
tokens = entry.split(os.sep)
index = -1 if tokens[-1] != '__init__.py' else -2
return '//' + '/'.join(tokens[index:])
return entry
def close(self):
self.imports.close()
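# Typically attached as a Robot Framework listener from the command line, e.g.
#   robot --listener /path/to/ListenImports.py:imports.txt tests/
# (the listener file path and output file name here are illustrative).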
|
88791
|
import collections
from typing import List
# NestedInteger is assumed to be provided by the surrounding (LeetCode-style) environment.
class Solution:
def depthSumInverse(self, nestedList: List[NestedInteger]) -> int:
dic = {}
level = 0
q = collections.deque()
q.append(nestedList)
res = 0
while q:
size = len(q)
level += 1
sums = 0
for _ in range(size):
nl = q.popleft()
for n in nl:
if n.isInteger():
sums += n.getInteger()
else:
q.append(n.getList())
dic[level] = sums
for k, v in dic.items():
res += (level+1-k)*v
return res
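# Worked example: for nestedList = [[1,1],2,[1,1]] the maximum depth is 2, so
# the 2 at depth 1 gets weight 2 and the four 1s at depth 2 get weight 1,
# giving depthSumInverse == 2*2 + 4*1 == 8.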
|
88826
|
import os
import csv
import pkg_resources
import re
import traceback
import nbformat as nf
import nbconvert as nc
from urllib.parse import quote as urlquote
def isdir(path):
'''
Checks whether given path is a directory
'''
if not os.path.isdir(path):
raise OSError('"' + path + '"' + ' is not a directory')
else:
return path
def try_config(configdir, filename):
'''
Tries to read specified config, else uses global config
returns file handle to requested file
'''
resource_package = 'nbfancy'
config_path = '/config' # Do not use os.path.join()
if not os.path.isdir(configdir):
configdir = pkg_resources.resource_filename(resource_package, config_path)
try:
filepath = os.path.join(configdir, filename)
filehandle = open(filepath, 'r')
except FileNotFoundError:
configdir = pkg_resources.resource_filename(resource_package, config_path)
filepath = os.path.join(configdir, filename)
filehandle = open(filepath, 'r')
return filehandle
def read_header(configdir):
'''
Reads header from config directory
'''
# Open file and extract text in second cell
with try_config(configdir, 'header.ipynb') as fh:
notebook = nf.read(fh, nf.NO_CONVERT)
template = notebook['cells'][1]
return template
def read_footer(configdir):
'''
Reads footer from config directory
'''
# Open file and extract text in second cell
with try_config(configdir, 'footer.ipynb') as fh:
notebook = nf.read(fh, nf.NO_CONVERT)
template = notebook['cells'][1]
return template
def read_box_template(configdir):
'''
Reads box template from given file handle
'''
filehandle = try_config(configdir, 'box.ipynb')
# File is already open
# Open file and extract text in second cell
notebook = nf.read(filehandle, nf.NO_CONVERT)
box = notebook['cells'][1]
template = box['source']
# Replace known values with placeholders
template = template.replace('pale-green', '{bg-colour}')
template = template.replace('green', '{fg-colour}')
template = template.replace('fa-star', '{symbol}')
template = template.replace('TITLE', '{title}')
template = template.replace('BODY', '{body}')
return template
def colour2fgbg(colour):
'''
Pairs foreground colour with background colour
'''
colour = colour.lower()
colour_list = ['red', 'orange', 'yellow', 'green', 'blue', 'purple']
colour_list += ['brown', 'black', 'grey', 'gray', 'white']
assert colour in colour_list
fg = colour
if fg == 'red':
bg = 'pale-red'
elif fg == 'orange':
bg = 'sand'
elif fg == 'yellow':
bg = 'pale-yellow'
elif fg == 'green':
bg = 'pale-green'
elif fg == 'blue':
bg = 'pale-blue'
elif fg == 'purple':
bg = 'pale-red'
elif fg == 'brown':
bg = 'khaki'
elif fg == 'black':
bg = 'gray'
elif (fg == 'gray') or (fg == 'grey'):
fg = 'gray'
bg = 'light-gray'
elif fg == 'white':
bg = 'white'
return fg, bg
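# For example, colour2fgbg('green') returns ('green', 'pale-green'), and
# colour2fgbg('grey') normalises to ('gray', 'light-gray').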
def read_box_colour_config(configdir):
'''
Create a dict of configurations for each keyword in filename
Lines starting with # are ignored as are blank lines
'''
config = dict()
def isTF(val):
'''
Return true or false if val is boolean
'''
true_words = ['true', 't', '1']
false_words = ['false', 'f', '0']
test_val = val.strip().lower()
if test_val in true_words:
test_val = True
elif test_val in false_words:
test_val = False
return test_val
with try_config(configdir, 'keywords.cfg') as fh:
no_comments = filter(lambda line: len(line)>3 and line.lstrip()[0]!='#' , fh)
reader = csv.DictReader(no_comments)
for row in reader:
key = row.pop('Keyword')
row_dict = {key.strip().lower() : isTF(row[key]) for key in row}
row_dict['fg-colour'], row_dict['bg-colour'] = colour2fgbg(row_dict['colour'])
config[key.strip().lower()] = row_dict
return config
def box_title(line, config):
'''
Creates title for box.
Returns the HTML-formatted title, index and which keyword was found
'''
keywords = config.keys()
# Search for keyword (lowercase) in first line and set that as the key
for word in keywords:
if word in line.lower().split(':')[0]:
key = word
# Recover parameters from keyword
keep_keyword = config[key]['keep_keyword']
hidden = config[key]['hide']
# Whether to print keyword in title
if keep_keyword:
title = line.lstrip('#')
else:
subtitle = line.split(':')
title = ':'.join(subtitle[1:])
# Safe version of title for links
safetitle = title.replace(' ', '-')
safetitle = safetitle.replace('`', '')
index = urlquote(safetitle, safe='?!$\\') + '%0A'
# Mark up title, in case markdown syntax is used
htmltitle = nc.filters.markdown2html(title)
htmltitle = htmltitle.replace('<p>', '')
htmltitle = htmltitle.replace('</p>', '')
#link = './' + solnfilename.split('/')[-1] + '#' + index
return htmltitle, index, key
def recursion_detector(f):
'''
Detects whether a given function is calling itself
'''
def decorated_f(*args, **kwargs):
stack = traceback.extract_stack()
if len([1 for line in stack if line[2] == f.__name__]) > 0:
print('Warning: Nested environments detected, this is actively discouraged!')
return f(*args, **kwargs)
return decorated_f
@recursion_detector
def box_body(body, config, template, solnfilename, link=None, multicell=None):
'''
Creates body of the box
'''
# If an empty link to a solution is found, populate it with link
# that was generated by the title (for single cell)
if len(body) > 0 and '[solution]()' in body[-1].lower():
k = body[-1].lower().find('[solution]()')
solution_phrase = body[-1][k:k+13]
new_solution_phrase = '\n\n' + solution_phrase.replace('()','({link})')
new_solution_phrase = new_solution_phrase.format(link=link)
body[-1] = body[-1].replace(solution_phrase, new_solution_phrase)
body = '\n'.join(body)
# Apply markup
htmlbody = nc.filters.markdown2html(body)
if multicell is not None:
# Bit of recursion
#print('Warning nested cell environments')
rendered, soln = notebook2rendered(multicell, config, template, solnfilename)
# Export to html to include in cell
html_exp = nc.HTMLExporter()
html_exp.template_file = 'basic'
temphtml, resources = html_exp.from_notebook_node(rendered)
# Remove multiple newlines
temphtml = re.sub(r'(\n\s*)+\n', '\n', temphtml)
# Add boxy thing
temphtml = temphtml.replace('class="input_area"',
'class="output_area" style="background-color:#F7F7F7;border:1px solid #CFCFCF"')
# If an empty link to a solution is found, populate it with link
# that was generated by the title (for multicell)
if '<a href="">solution</a>' in temphtml.lower():
k = temphtml.lower().find('<a href="">solution</a>')
solution_phrase = temphtml[k:k+24]
new_solution_phrase = solution_phrase.replace('href=""','href="{link}"')
new_solution_phrase = new_solution_phrase.format(link=link)
temphtml = temphtml.replace(solution_phrase, new_solution_phrase)
htmlbody += temphtml
# Escape symbols
htmlbody = htmlbody.replace('*', '&#42;')
#htmlbody = htmlbody.replace('_', '&#95;')
# Format tables
htmlbody = htmlbody.replace('<table>', '<table class="w3-table w3-striped w3-hoverable">')
htmlbody = htmlbody.replace('<thead>', '<thead class="w3-black">')
# Be sure to remove final newline
if len(htmlbody) > 0 and htmlbody[-1] == '\n':
htmlbody = htmlbody[:-1]
return htmlbody
def notebook2rendered(plain, config, template, solnfilename):
'''
Converts notebook JSON to rendered notebook JSON for output
'''
# List all the markdown cells
celllist = plain['cells']
markdownlist = [c for c in celllist if c['cell_type']=='markdown']
solnb = None
# For each markdown cell check for keywords and format according to
# the cell template and config files
end = -1
for c in markdownlist:
line = c['source'].split('\n')
# Check for a colon in the first line
if line[0].find(':') < 0:
continue
# Check for a keyword if a colon is found
temp_line = line[0].split(':')
if any(keyword in temp_line[0].lower().strip('# ') for keyword in config.keys()):
htmltitle, index, key = box_title(line[0], config)
# Recover parameters from keyword
hidden = config[key]['hide']
# Multicell procedure
if key + '+' in temp_line[0].lower().strip('# '):
start = celllist.index(c) + 1
end = None
# Find end cell
for subcell in celllist[start:]:
if subcell['cell_type'] == 'markdown':
lastline = subcell['source'].split('\n')
temp_lastline = lastline[-1].split(':')
if key in temp_lastline[-1].lower().strip():
end = celllist.index(subcell) + 1
lastline[-1] = ':'.join(temp_lastline[:-1]).strip()
subcell['source'] = '\n'.join(lastline)
break
else:
# If no end cell found print warning
try:
print('Warning in file', infile, ':')
print('\tNo end tag found for', key + '+', 'environment in cell', start)
except NameError:
print('Warning in temporary file:')
print('\tNo end tag found for', key + '+', 'environment in cell', start)
print('\tCheck you haven\'t nested environments')
# Move multicells to new notebook for processing
multicell = celllist[start:end]
for subcell in multicell:
celllist.remove(subcell)
multicellnb = nf.v4.new_notebook()
multicellnb['metadata'] = plain['metadata']
multicellnb['cells'] = multicell
else:
# If we aren't in a multicell environment
# we don't need the additional notebook
multicellnb = None
# If hidden move cell to new notebook
if hidden:
# Make a new notebook if it doesn't exist already
if solnb is None:
solnb = nf.v4.new_notebook()
solnb['metadata'] = plain['metadata']
solnb['cells'].append(nf.v4.new_markdown_cell(source='# Solutions'))
solnb['cells'].append(nf.v4.new_markdown_cell(source=''))
# REDEFINE c
solnb['cells'][-1] = c.copy()
plain['cells'].remove(c)
c = solnb['cells'][-1]
htmlbody = box_body(line[1:], config, template, solnfilename, multicell=multicellnb)
else:
link = './' + solnfilename.split('/')[-1] + '#' + index
htmlbody = box_body(line[1:], config, template, solnfilename, link=link, multicell=multicellnb)
values = config[key].copy()
values['index'] = index
values['title'] = htmltitle
values['body'] = htmlbody
c['source'] = template.format_map(values)
return plain, solnb
def notebook2HTML(filename):
'''
Converts notebook file to a html string
'''
html_exp = nc.HTMLExporter()
html, resources = html_exp.from_filename(filename)
# SED rules:
# Replace '../folders' in links with './folders'
# for folders images, data, code
html = html.replace('../images', './images')
html = html.replace('../data', './data')
html = html.replace('../code', './code')
# Replace '.ipynb' in links with '.html'
# the '"' ensures this (hopefully) only happens in links
html = html.replace('.ipynb"', '.html"')
html = html.replace('.ipynb#', '.html#')
# Horrible hack because <code> environment doesn't seem to work with CSS sheet
# For plaintext blocks
html = html.replace('<pre><code>', '<pre><code style="">')
# For inline highlighting
html = html.replace('<code>', '<code style="background-color:#F7F7F7;border:1px solid #CFCFCF">')
# Another hack since \n is converted to [space] in links
html = html.replace('%0A"','%20"')
# Add the favicon
html = html.replace('<head><meta charset="utf-8" />',
'<head><meta charset="utf-8" />\n<link rel="icon" type="image/png" href="css/favicon.png"/>')
return html
def notebook2slides(filename):
'''
Converts notebook file to a slide show
'''
slides_exp = nc.SlidesExporter()
slides_exp.reveal_scroll = True # Doesn't work?
slides, resources = slides_exp.from_filename(filename)
# Custom CSS is in the directory above slides
slides = slides.replace('href="custom.css"', 'href="../custom.css"')
# Replace '.ipynb' in links with '.html'
# the '"' ensures this (hopefully) only happens in links
slides = slides.replace('.ipynb"', '.html"')
slides = slides.replace('.ipynb#', '.html#')
# Horrible hack because <code> environment doesn't seem to work with CSS sheet
# For plaintext blocks
slides = slides.replace('<pre><code>', '<pre><code style="">')
# For inline highlighting
slides = slides.replace('<code>', '<code style="background-color:#F7F7F7;border:1px solid #CFCFCF">')
# Another hack since \n is converted to [space] in links
slides = slides.replace('%0A"','%20"')
# Add the favicon
slides = slides.replace('<head><meta charset="utf-8" />',
'<head><meta charset="utf-8" />\n<link rel="icon" type="image/png" href="css/favicon.png"/>')
return slides
def directory_contents(directory):
'''
Returns directory notebook contents
split into lessons and solutions
'''
# Store contents of directory as list
contents = os.listdir(directory)
contents.sort()
try:
# Remove checkpoints folder from list
contents.remove('.ipynb_checkpoints')
except ValueError:
pass
# Removes everything that isn't a notebook ending with .ipynb
contents = [f for f in contents if '.ipynb' in f]
    # Remove solution files from contents and store them in a separate list
soln_contents = [f for f in contents if '-soln' in f]
contents = [f for f in contents if '-soln' not in f]
return contents, soln_contents
def navigation_triple(directory, inputfile):
'''
Given a directory and file determines which file is
- previous lesson
- schedule
- next lesson
and returns these files as a dict
'''
contents, _ = directory_contents(directory)
contents.append(contents[0])
current = inputfile.split('/')[-1]
# Exceptional case if you're making a new solution document
if '-soln' in current:
current = current.replace('-soln','')
index = contents.index(current)
outdir = './'
triple = { 'previous' : outdir+contents[index-1],
'index' : outdir+contents[0],
'next' : outdir+contents[index+1] }
return triple
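# Minimal usage sketch (hypothetical 'lessons' folder, added for illustration only):
# directory_contents() separates notebooks from their '-soln' counterparts, and
# navigation_triple() then resolves the previous/index/next links for one lesson.
def _navigation_example(directory='lessons'):
    lessons, solutions = directory_contents(directory)
    if lessons:
        print(navigation_triple(directory, lessons[0]))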
|
88832
|
import pytest
config = """
from universum.configuration_support import Configuration
def step(name, cmd=False):
return Configuration([dict(name=name, command=[] if not cmd else ["bash", "-c", '''echo "run step"'''])])
configs = step('parent 1 ') * (step('step 1', True) + step('step 2', True))
configs += step('parent 2 ') * (step('step 1', True) + step('step 2', True))
"""
@pytest.mark.parametrize("filters,expected_logs, unexpected_logs", (
["parent 1", ["parent 1", "step 1", "step 2"], ["parent 2"]],
["!parent 2", ["parent 1", "step 1", "step 2"], ["parent 2"]],
["parent 1:parent 2", ["parent 1", "step 1", "step 2", "parent 2"], []],
["parent 1:parent 2:!step 1", ["parent 1", "step 2", "parent 2"], ["step 1"]],
["step 2", ["parent 1", "parent 2", "step 2"], ["step 1"]],
["!step 2:parent 1", ["parent 1", "step 1"], ["parent 2", "step 2"]],
["step :!step 1", ["parent 1", "parent 2", "step 2"], ["step 1"]],
["", ["parent 1", "parent 2", "parent 1 step 1", "parent 2 step 1", "parent 1 step 2", "parent 2 step 2"], []],
["!", ["parent 1", "parent 2", "parent 1 step 1", "parent 2 step 1", "parent 1 step 2", "parent 2 step 2"],
[]],))
def test_steps_filter(docker_main_and_nonci, filters, expected_logs, unexpected_logs):
console_out_log = docker_main_and_nonci.run(config, additional_parameters=f"-o console -f='{filters}'")
for log_str in expected_logs:
assert log_str in console_out_log
for log_str in unexpected_logs:
assert log_str not in console_out_log
def test_steps_filter_few_flags(docker_main_and_nonci):
console_out_log = docker_main_and_nonci.run(config,
additional_parameters="-o console -f='parent 1:parent 2' -f='!step 1'")
for log_str in ["parent 1", "step 2", "parent 2"]:
assert log_str in console_out_log
assert "step 1" not in console_out_log
|
88839
|
def isExistingClassification(t):
pass
def getSrcNodeName(dst):
"""
Get the name of the node connected to the argument dst plug.
"""
pass
def getCollectionsRecursive(parent):
pass
def disconnect(src, dst):
pass
def findVolumeShader(shadingEngine, search=False):
"""
Returns the volume shader (as MObject) of the given shading engine (as MObject).
"""
pass
def transferPlug(src, dst):
"""
Transfer the connection or value set on plug 'src' on to the plug 'dst'.
"""
pass
def findSurfaceShader(shadingEngine, search=False):
"""
Returns the surface shader (as MObject) of the given shading engine (as MObject).
"""
pass
def findPlug(userNode, attr):
"""
Return plug corresponding to attr on argument userNode.
If the argument userNode is None, or the attribute is not found, None
is returned.
"""
pass
def disconnectSrc(src):
"""
Disconnect a source (readable) plug from all its destinations.
Note that a single plug can be both source and destination, so this
interface makes the disconnection intent explicit.
"""
pass
def isSurfaceShaderNode(obj):
pass
def getSrcUserNode(dst):
"""
Get the user node connected to the argument dst plug.
Note: Only applies to MPxNode derived nodes
If the dst plug is unconnected, None is returned.
"""
pass
def plugSrc(dstPlug):
"""
Return the source of a connected destination plug.
If the destination is unconnected, returns None.
"""
pass
def getOverridesRecursive(parent):
pass
def nameToUserNode(name):
pass
def isShadingType(typeName):
pass
def canOverrideNode(node):
pass
def createSrcMsgAttr(longName, shortName):
"""
Create a source (a.k.a. output, or readable) message attribute.
"""
pass
def deleteNode(node):
"""
Remove the argument node from the graph.
This function is undoable.
"""
pass
def connect(src, dst):
"""
Connect source plug to destination plug.
If the dst plug is None, the src plug will be disconnected from all its
destinations (if any). If the src plug is None, the dst plug will be
disconnected from its source (if any). If both are None, this function
does nothing. If the destination is already connected, it will be
disconnected.
"""
pass
def isExistingType(t):
pass
def getDstUserNodes(src):
"""
Get the user nodes connected to the argument src plug.
Note: Only applies to MPxNode derived nodes
If the src plug is unconnected, None is returned.
"""
pass
def plugDst(srcPlug):
"""
Return the destinations of a connected source plug.
If the source is unconnected, returns None.
"""
pass
def _recursiveSearch(colList):
"""
    Functions to compute the number of operations when layers are switched.
"""
pass
def getTotalNumberOperations(model):
pass
def _isDestination(plug):
"""
Returns True if the given plug is a destination plug, and False otherwise.
    If the plug is a compound attribute it returns True if any of its children is a
destination plug.
"""
pass
def isShadingNode(obj):
pass
def disconnectDst(dst):
"""
Disconnect a destination (writable) plug from its source.
Note that a single plug can be both source and destination, so this
interface makes the disconnection intent explicit.
"""
pass
def findDisplacementShader(shadingEngine, search=False):
"""
Returns the displacement shader (as MObject) of the given shading engine (as MObject).
"""
pass
def _findShader(shadingEngine, attribute, classification=None):
"""
Returns the shader connected to given attribute on given shading engine.
Optionally search for nodes from input connections to the shading engines
satisfying classification if plug to attribute is not a destination and
a classification string is specified.
"""
pass
def isInheritedType(parentTypeName, childTypeName):
pass
def getSrcNode(dst):
"""
Get the node connected to the argument dst plug.
"""
pass
def createGenericAttr(longName, shortName):
pass
def nameToExistingUserNode(name):
pass
def _transferConnectedPlug(src, dst):
pass
def connectMsgToDst(userNode, dst):
"""
Connect the argument userNode's message attribute to the
argument dst plug.
If the userNode is None the dst plug is disconnected
from its sources.
If the dst plug is None the userNode's message plug
is disconnected from its destinations
"""
pass
def isSurfaceShaderType(typeName):
pass
def notUndoRedoing(f):
"""
Decorator that will call the decorated method only if not currently in undoing or redoing.
Particularly useful to prevent callbacks from generating commands since that would clear the redo stack.
"""
pass
def createDstMsgAttr(longName, shortName):
"""
Create a destination (a.k.a. input, or writable) message attribute.
"""
pass
kNoSuchNode = []
kSupportedVectorTypes = set()
kSupportedSimpleTypes = set()
kPlugTypeMismatch = []
|
88920
|
import os

import apache_beam as beam
import tensorflow as tf
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from regnety.utils.image_utils import *
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
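# Minimal sketch (added for illustration, not used by the pipeline below): the three
# helpers above plug directly into tf.train.Example when building a toy record.
def _example_record_sketch():
    example = tf.train.Example(
        features=tf.train.Features(
            feature={
                "label": _int64_feature(3),
                "score": _float_feature(0.75),
                "name": _bytes_feature(b"sample"),
            }
        )
    )
    return example.SerializeToString()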
def create_collection(list1, list2, list3):
final_list = [(list1[i], list2[i], list3[i]) for i in range(len(list1))]
with beam.Pipeline() as pipeline:
coll = pipeline | beam.Create(final_list)
return coll
class MakeImageDoFn(beam.DoFn):
def process(self, batch):
ret_examples = []
for info_tuple in batch:
filepath, label, synset = info_tuple
image_str = tf.io.read_file(filepath)
if is_png(filepath):
image_str = png_to_jpeg(image_str)
if is_cmyk(filepath):
image_str = cmyk_to_rgb(image_str)
image_tensor = tf.io.decode_jpeg(image_str)
height, width = 512, 512
if not is_rgb(image_tensor):
image_tensor = tf.image.grayscale_to_rgb(image_tensor)
image_tensor = tf.cast(tf.image.resize(image_tensor, (512, 512)), tf.uint8)
image_str = tf.io.encode_jpeg(image_tensor)
assert len(image_tensor.shape) == 3
try:
ret_examples.append(
tf.train.Example(
features=tf.train.Features(
feature={
"image": _bytes_feature(image_str),
"height": _int64_feature(height),
"width": _int64_feature(width),
"filename": _bytes_feature(
bytes(os.path.basename(filepath)).encode("utf8")
),
"label": _int64_feature(label),
"synset": _bytes_feature(bytes(synset).encode("utf8")),
}
)
).SerializeToString()
)
            except TypeError:  # bytes(str) without an encoding raises TypeError; retry with explicit encoding
ret_examples.append(
tf.train.Example(
features=tf.train.Features(
feature={
"image": _bytes_feature(image_str),
"height": _int64_feature(height),
"width": _int64_feature(width),
"filename": _bytes_feature(
bytes(os.path.basename(filepath), encoding="utf8")
),
"label": _int64_feature(label),
"synset": _bytes_feature(
bytes(synset, encoding="utf8")
),
}
)
).SerializeToString()
)
return ret_examples
def __call__(self, *args):
        return self.process(*args)
class MakeExampleDoFn(beam.DoFn):
def process(self, batch):
examples = []
for example in batch:
            examples.append(example.SerializeToString())
return examples
def __call__(self, *args):
        return self.process(*args)
|
88921
|
from mock import AsyncMock
import pytest
from opentrons.drivers.smoothie_drivers import SmoothieDriver
from opentrons.tools import write_pipette_memory
@pytest.fixture
def mock_driver() -> AsyncMock:
return AsyncMock(spec=SmoothieDriver)
async def test_write_identifiers(mock_driver: AsyncMock) -> None:
"""It should call driver to write a new id and model."""
mount = "left"
new_id = "some id"
new_model = "some model"
mock_driver.read_pipette_id.return_value = new_id
mock_driver.read_pipette_model.return_value = new_model
await write_pipette_memory.write_identifiers(
mount=mount, new_id=new_id, new_model=new_model, driver=mock_driver
)
mock_driver.write_pipette_id.assert_called_once_with(mount, new_id)
mock_driver.read_pipette_id.assert_called_once_with(mount)
mock_driver.write_pipette_model.assert_called_once_with(mount, new_model)
mock_driver.read_pipette_model.assert_called_once_with(mount)
async def test_write_identifiers_id_mismatch(mock_driver: AsyncMock) -> None:
"""It should fail when written id doesn't match read id."""
mount = "left"
new_id = "some id"
new_model = "some model"
mock_driver.read_pipette_id.return_value = new_id + "_wrong"
with pytest.raises(Exception):
await write_pipette_memory.write_identifiers(
mount=mount, new_id=new_id, new_model=new_model, driver=mock_driver
)
async def test_write_identifiers_model_mismatch(mock_driver: AsyncMock) -> None:
"""It should fail when written model doesn't match read model."""
mount = "left"
new_id = "some id"
new_model = "some model"
mock_driver.read_pipette_id.return_value = new_id
mock_driver.read_pipette_model.return_value = new_model + "_wrong"
with pytest.raises(Exception):
await write_pipette_memory.write_identifiers(
mount=mount, new_id=new_id, new_model=new_model, driver=mock_driver
)
async def test_check_previous_data(mock_driver: AsyncMock) -> None:
"""It should read the pipette id and model"""
mount = "left"
await write_pipette_memory.check_previous_data(mount, mock_driver)
mock_driver.read_pipette_id.assert_called_once_with(mount)
mock_driver.read_pipette_model.assert_called_once_with(mount)
pipette_barcode_to_model = {
"P10S20180101A01": "p10_single_v1",
"P10M20180101A01": "p10_multi_v1",
"P50S180101A01": "p50_single_v1",
"P50M20180101B01": "p50_multi_v1",
"P300S20180101A01": "p300_single_v1",
"P300M20180101A01": "p300_multi_v1",
"P1000S20180101A01": "p1000_single_v1",
"P10SV1318010101": "p10_single_v1.3",
"P10MV1318010102": "p10_multi_v1.3",
"P50SV1318010103": "p50_single_v1.3",
"P50MV1318010104": "p50_multi_v1.3",
"P3HSV1318010105": "p300_single_v1.3",
"P3HMV1318010106": "p300_multi_v1.3",
"P1KSV1318010107": "p1000_single_v1.3",
"P10SV1418010101": "p10_single_v1.4",
"P10MV1418010102": "p10_multi_v1.4",
"P50SV1418010103": "p50_single_v1.4",
"P50MV1418010104": "p50_multi_v1.4",
"P3HSV1418010105": "p300_single_v1.4",
"P3HMV1418010106": "p300_multi_v1.4",
"P1KSV1418010107": "p1000_single_v1.4",
"P20MV2120120204": "p20_multi_v2.1",
"P1KSV2218010107": "p1000_single_v2.2",
"P20SV2220020501": "p20_single_v2.2",
}
def test_parse_model_from_barcode() -> None:
for barcode, model in pipette_barcode_to_model.items():
assert write_pipette_memory._parse_model_from_barcode(barcode) == model
with pytest.raises(Exception):
write_pipette_memory._parse_model_from_barcode("P1HSV1318010101")
with pytest.raises(Exception):
write_pipette_memory._parse_model_from_barcode("P1KSV1218010101")
with pytest.raises(Exception):
write_pipette_memory._parse_model_from_barcode("aP300S20180101A01")
|
88934
|
import os
from pyfakefs.fake_filesystem_unittest import TestCase
from starwhale.utils import config as sw_config
from starwhale.utils.error import NotFoundError
from starwhale.utils.config import (
SWCliConfigMixed,
load_swcli_config,
get_swcli_config_path,
)
from .. import get_predefined_config_yaml
_existed_config_contents = get_predefined_config_yaml()
class SWCliConfigTestCase(TestCase):
def setUp(self):
self.setUpPyfakefs()
sw_config._config = {}
def test_config_path(self):
path = get_swcli_config_path()
assert path.endswith(".config/starwhale/config.yaml")
def test_load_default_swcli_config(self):
_config = load_swcli_config()
path = get_swcli_config_path()
assert oct(os.stat(path).st_mode & 0o777) == "0o600"
assert "instances" in _config
assert _config["storage"]["root"].endswith(".cache/starwhale") is True
assert "local" == _config["current_instance"]
assert len(_config["instances"]) == 1
assert "local" == _config["instances"]["local"]["uri"]
assert "standalone" == _config["instances"]["local"]["type"]
assert "self" == _config["instances"]["local"]["current_project"]
def test_load_existed_swcli_config(self):
path = get_swcli_config_path()
self.assertFalse(os.path.exists(path))
self.fs.create_file(path, contents=_existed_config_contents)
_config = load_swcli_config()
assert len(_config["instances"]) == 2
assert "pre-bare" == _config["current_instance"]
assert "cloud" == _config["instances"]["pre-bare"]["type"]
assert "starwhale" == _config["instances"]["pre-bare"]["user_name"]
assert "http://1.1.1.1:8182" == _config["instances"]["pre-bare"]["uri"]
def test_swcli_config_mixed(self):
path = get_swcli_config_path()
self.fs.create_file(path, contents=_existed_config_contents)
sw = SWCliConfigMixed()
assert str(sw.rootdir).endswith(".cache/starwhale")
assert sw.user_name == "starwhale"
assert sw.user_role == "admin"
assert sw.sw_remote_addr == "http://1.1.1.1:8182"
sw.delete_instance("local")
sw.delete_instance("xxx")
sw.delete_instance("pre-bare")
_config = load_swcli_config()
assert "local" == sw.current_instance
assert len(_config["instances"]) == 1
sw.update_instance(
uri="console.pre.intra.starwhale.ai",
user_name="test",
alias="pre-k8s",
)
_config = load_swcli_config()
assert len(_config["instances"]) == 2
assert "pre-k8s" in _config["instances"]
assert (
"http://console.pre.intra.starwhale.ai"
== _config["instances"]["pre-k8s"]["uri"]
)
assert "test" == _config["instances"]["pre-k8s"]["user_name"]
def test_select(self):
path = get_swcli_config_path()
self.fs.create_file(path, contents=_existed_config_contents)
sw = SWCliConfigMixed()
assert sw.current_instance == "pre-bare"
assert sw.current_project == "self"
sw.select_current_default(instance="pre-bare", project="first")
assert sw.current_project == "first"
sw.select_current_default(instance="local", project="self")
self.assertRaises(NotFoundError, sw.select_current_default, instance="notfound")
self.assertRaises(
NotFoundError,
sw.select_current_default,
instance="local",
project="notfound",
)
|
88944
|
from xweb import App, Model, RESTController
class UserModel(Model):
schema = {
"type": "object",
"properties": {
"username": {"type": "string"},
"password": {"type": "string"},
},
"required": ['username']
}
class EventController(RESTController):
async def get(self):
        UserModel.validate(self.ctx.json)
self.ctx.body = {"Hello": "World"}
app = App()
app.routes = {
'/': EventController
}
app.listen()
|
88954
|
import math
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from apis.betterself.v1.constants import UNIQUE_KEY_CONSTANT
from apis.betterself.v1.signup.fixtures.builders import DemoHistoricalDataBuilder
from events.models import SupplementLog, SleepLog, DailyProductivityLog
from supplements.models import Supplement
User = get_user_model()
class BaseSupplementAnalyticsTests(TestCase):
@classmethod
def setUpAnalyticsData(cls):
cls.default_user, _ = User.objects.get_or_create(username='default')
builder = DemoHistoricalDataBuilder(cls.default_user)
builder.create_historical_fixtures()
supplement = Supplement.objects.filter(user=cls.default_user).first()
cls.supplement = supplement
def setUp(self):
self.client = APIClient()
self.client.force_login(self.default_user)
class BaseSupplementsAnalyticsTestCasesMixin(object):
def test_view_with_no_sleep_data(self):
SleepLog.objects.filter(user=self.default_user).delete()
response = self.client.get(self.url)
        # make sure no NaNs come across in the data; missing values should always be None
values_returned = [item['value'] for item in response.data]
for value in values_returned:
if isinstance(value, float):
self.assertFalse(math.isnan(value))
self.assertEqual(response.status_code, 200)
def test_view_with_no_productivity_data(self):
DailyProductivityLog.objects.filter(user=self.default_user).delete()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_view_with_no_supplement_data(self):
SupplementLog.objects.filter(user=self.default_user).delete()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
class SupplementAnalyticsSummaryTests(BaseSupplementAnalyticsTests, BaseSupplementsAnalyticsTestCasesMixin):
@classmethod
def setUpTestData(cls):
cls.setUpAnalyticsData()
cls.url = reverse('supplement-analytics-summary', args=[str(cls.supplement.uuid)])
super().setUpTestData()
def test_view(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
expected_keys = {'productivity_correlation',
'sleep_correlation',
'most_taken',
'most_taken_dates',
'creation_date'}
response_keys = set([item[UNIQUE_KEY_CONSTANT] for item in response.data])
self.assertEqual(expected_keys, response_keys)
first_event = SupplementLog.objects.filter(supplement=self.supplement).order_by('time').first()
for data in response.data:
if data[UNIQUE_KEY_CONSTANT] == 'creation_date':
self.assertEqual(first_event.time.isoformat(), data['value'])
class SupplementAnalyticsSleepTest(BaseSupplementAnalyticsTests, BaseSupplementsAnalyticsTestCasesMixin):
@classmethod
def setUpTestData(cls):
cls.setUpAnalyticsData()
cls.url = reverse('supplement-analytics-sleep', args=[str(cls.supplement.uuid)])
super().setUpTestData()
def test_view(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
class SupplementAnalyticsProductivityTest(BaseSupplementAnalyticsTests, BaseSupplementsAnalyticsTestCasesMixin):
@classmethod
def setUpTestData(cls):
cls.setUpAnalyticsData()
cls.url = reverse('supplement-analytics-productivity', args=[str(cls.supplement.uuid)])
super().setUpTestData()
def test_view(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
class SupplementDosagesAnalyticsTest(BaseSupplementAnalyticsTests, BaseSupplementsAnalyticsTestCasesMixin):
@classmethod
def setUpTestData(cls):
cls.setUpAnalyticsData()
cls.url = reverse('supplement-analytics-dosages', args=[str(cls.supplement.uuid)])
super().setUpTestData()
def test_view(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
|
88975
|
import datetime
import logging
from airflow import DAG
from airflow.models import Variable
from airflow.operators.python_operator import PythonOperator
from airflow.hooks.S3_hook import S3Hook
def list_keys():
hook = S3Hook(aws_conn_id='aws_credentials')
bucket = Variable.get('s3_bucket')
logging.info(f"Listing Keys from {bucket}")
keys = hook.list_keys(bucket)
for key in keys:
logging.info(f"- s3://{bucket}/{key}")
dag = DAG(
'lesson1.solution4',
start_date=datetime.datetime.now())
list_task = PythonOperator(
task_id="list_keys",
python_callable=list_keys,
dag=dag
)
|
88978
|
class ListView:
__slots__ = ['_list']
def __init__(self, list_object):
self._list = list_object
def __add__(self, other):
return self._list.__add__(other)
def __getitem__(self, other):
return self._list.__getitem__(other)
def __contains__(self, item):
return self._list.__contains__(item)
def __eq__(self, other):
return self._list.__eq__(other)
def __hash__(self):
return self._list.__hash__()
def __ge__(self, other):
if isinstance(other, ListView):
return self._list.__ge__(other._list)
return self._list.__ge__(other)
def __gt__(self, other):
if isinstance(other, ListView):
return self._list.__gt__(other._list)
return self._list.__gt__(other)
def __iter__(self):
return self._list.__iter__()
def __le__(self, other):
if isinstance(other, ListView):
return self._list.__le__(other._list)
return self._list.__le__(other)
def __len__(self):
return self._list.__len__()
def __lt__(self, other):
if isinstance(other, ListView):
return self._list.__lt__(other._list)
return self._list.__lt__(other)
def __ne__(self, other):
return self._list.__ne__(other)
def __mul__(self, other):
return self._list.__mul__(other)
def __rmul__(self, n):
return self._list.__rmul__(n)
def __reversed__(self):
return self._list.__reversed__()
def __repr__(self):
return self._list.__repr__()
def __str__(self):
return self._list.__str__()
def __radd__(self, other):
return other + self._list
def __iadd__(self, other):
raise TypeError("unsupported operator for type SetView")
def __imul__(self, other):
raise TypeError("unsupported operator for type SetView")
def copy(self):
return self._list.copy()
def count(self, object):
return self._list.count(object)
def index(self, *args, **kwargs):
return self._list.index(*args, **kwargs)
|
89007
|
import unittest
from asq.queryables import Queryable
__author__ = "<NAME>"
class TestToSet(unittest.TestCase):
def test_to_set(self):
a = [1, 2, 4, 8, 16, 32]
b = Queryable(a).to_set()
c = set([1, 2, 4, 8, 16, 32])
self.assertEqual(b, c)
def test_to_set_closed(self):
a = [1, 2, 4, 8, 16, 32]
b = Queryable(a)
b.close()
self.assertRaises(ValueError, lambda: b.to_set())
def test_to_set_duplicates(self):
a = [1, 2, 4, 8, 8, 16, 32]
b = Queryable(a)
self.assertRaises(ValueError, lambda: b.to_set())
|
89024
|
from pylanguagetool import converters
markdown = """# Heading
This is *a* Sentence.
"""
html = """<h1>Heading</h1>
<p>This is <em>a</em> Sentence.</p>
"""
plaintext = """Heading
This is a Sentence.
"""
ipython = """{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Heading\\n",
"some *text*"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Other Heading\\n",
"- text\\n",
"- more text"
]
}
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 0
}"""
ipython_markdown = """# Heading
some *text*
## Other Heading
- text
- more text
"""
ipython_html = """<h1>Heading</h1>
<p>some <em>text</em></p>
<h2>Other Heading</h2>
<ul>
<li>text</li>
<li>more text</li>
</ul>
"""
ipython_plaintext = """Heading
some text
Other Heading
text
more text
"""
def test_html2text():
assert converters.html2text(html) == plaintext
def test_markdown2html():
assert converters.markdown2html(markdown) == html
def test_ipynb2markdown():
assert converters.ipynb2markdown(ipython) == ipython_markdown
def test_ipython_markdown2html():
assert converters.markdown2html(ipython_markdown) == ipython_html
def test_ipython_html2text():
assert converters.html2text(ipython_html) == ipython_plaintext
|
89026
|
import time
import requests
from urllib.parse import urljoin
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
import pytest
pytest_plugins = ["docker_compose"]
@pytest.fixture(scope="module")
def wait_for_api(module_scoped_container_getter):
"""Wait for the api from my_api_service to become responsive"""
request_session = requests.Session()
retries = Retry(total=5,
backoff_factor=0.1,
status_forcelist=[500, 502, 503, 504])
request_session.mount('http://', HTTPAdapter(max_retries=retries))
service = module_scoped_container_getter.get("my_api_service").network_info[0]
api_url = "http://%s:%s/" % (service.hostname, service.host_port)
assert request_session.get(api_url)
start = time.time()
while 'Exit' not in module_scoped_container_getter.get("my_short_lived_service").human_readable_state:
if time.time() - start >= 5:
raise RuntimeError(
'my_short_lived_service should spin up, echo "Echoing" and '
                'then shut down; since it is still running, something went wrong'
)
time.sleep(.5)
return request_session, api_url
@pytest.fixture
def do_an_insert(wait_for_api):
"""Insert data to the database in the container my_db"""
request_session, api_url = wait_for_api
item_url = 'items/1'
data_string = 'some_data'
request_session.put('%s%s?data_string=%s' % (api_url, item_url, data_string))
yield item_url, data_string
request_session.delete(urljoin(api_url, item_url)).json()
def test_read_an_item(wait_for_api, do_an_insert):
request_session, api_url = wait_for_api
item_url, data_string = do_an_insert
item = request_session.get(api_url + item_url).json()
assert item['data'] == data_string
def test_read_and_write(wait_for_api):
request_session, api_url = wait_for_api
data_string = 'some_other_data'
request_session.put('%sitems/2?data_string=%s' % (api_url, data_string))
item = request_session.get(urljoin(api_url, 'items/2')).json()
assert item['data'] == data_string
request_session.delete(urljoin(api_url, 'items/2'))
def test_read_all(wait_for_api):
request_session, api_url = wait_for_api
assert len(request_session.get(urljoin(api_url, 'items/all')).json()) == 0
if __name__ == '__main__':
pytest.main(['--docker-compose', './my_network', '--docker-compose-no-build'])
|
89038
|
import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize("installed_packages", [
("httpd"),
("mod_ssl"),
])
def test_packages_installed(host, installed_packages):
rpackage = host.package(installed_packages)
assert rpackage.is_installed
@pytest.mark.parametrize("services", [
("httpd"),
])
def test_services_running_and_enabled(host, services):
service = host.service(services)
assert service.is_enabled
assert service.is_running
@pytest.mark.parametrize("files", [
("/etc/httpd/conf.d/welcome-vm.conf"),
])
def test_welcome(host, files):
welcome = host.file(files)
assert welcome.user == "root"
assert welcome.group == "root"
assert welcome.mode == 0o644
def test_http_ssl_conf(host):
http_ssl_conf = host.file("/etc/httpd/conf.d/ssl.conf")
assert not http_ssl_conf.contains('Listen 443')
@pytest.mark.parametrize("files", [
("/etc/httpd/conf.d/welcome.conf"),
("/etc/httpd/conf.d/userdir.conf"),
("/etc/httpd/conf.d/autoindex.conf"),
])
def test_empty_config(host, files):
test_empty_config = host.file(files)
assert test_empty_config.size == 0
def test_subject_ssl_key(host):
cmd = host.run("openssl x509 -in /etc/pki/tls/certs/backend.molecule.openconext.org.pem -noout -subject")
assert 'subject= /CN=molecule.openconext.org' in cmd.stdout
assert cmd.rc == 0
|
89043
|
import unittest
from katas.kyu_6.multiples_of_3_and_5 import solution
class MultiplesOfThreeAndFiveTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(solution(10), 23)
def test_equals_2(self):
self.assertEqual(solution(100), 2318)
def test_equals_3(self):
self.assertEqual(solution(997), 232169)
|
89051
|
from .misc.utils import sqlalchemy_to_pydantic
from .crud_router import crud_router_builder
from .misc.type import CrudMethods
|
89053
|
import gzip
import os
import os.path
import data_algebra
import data_algebra.data_ops
import data_algebra.db_model
_have_bigquery = False
try:
# noinspection PyUnresolvedReferences
import google.cloud.bigquery
_have_bigquery = True
except ImportError:
pass
def _bigquery_median_expr(dbmodel, expression):
return (
"PERCENTILE_CONT("
+ dbmodel.expr_to_sql(expression.args[0], want_inline_parens=False)
+ ", 0.5)"
)
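# For example, a median over a column named "x" would render roughly as:
#   PERCENTILE_CONT(`x`, 0.5)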
def _bigquery_is_bad_expr(dbmodel, expression):
subexpr = dbmodel.expr_to_sql(expression.args[0], want_inline_parens=True)
return (
"("
+ subexpr
+ " IS NULL OR "
+ "IS_INF("
+ subexpr
+ ")"
+ " OR ("
+ subexpr
+ " != 0 AND "
+ subexpr
+ " = -"
+ subexpr
+ "))"
)
BigQuery_formatters = {
"median": _bigquery_median_expr,
"is_bad": _bigquery_is_bad_expr,
}
class BigQueryModel(data_algebra.db_model.DBModel):
"""A model of how SQL should be generated for BigQuery
connection should be google.cloud.bigquery.client.Client"""
def __init__(self, *, table_prefix=None):
data_algebra.db_model.DBModel.__init__(
self,
identifier_quote="`",
string_quote='"',
sql_formatters=BigQuery_formatters,
on_start="(",
on_end=")",
on_joiner=" AND ",
string_type="STRING",
)
self.table_prefix = table_prefix
def quote_table_name(self, table_description):
if not isinstance(table_description, str):
try:
if table_description.node_name == "TableDescription":
table_description = table_description.table_name
else:
raise TypeError(
"Expected table_description to be a string or data_algebra.data_ops.TableDescription)"
)
            except (AttributeError, KeyError):
raise TypeError(
"Expected table_description to be a string or data_algebra.data_ops.TableDescription)"
)
if self.table_prefix is not None:
table_description = self.table_prefix + "." + table_description
return self.quote_identifier(table_description)
# noinspection PyMethodMayBeStatic
def execute(self, conn, q):
"""
:param conn: database connection
:param q: sql query
"""
assert _have_bigquery
assert isinstance(conn, google.cloud.bigquery.client.Client)
if isinstance(q, data_algebra.data_ops.ViewRepresentation):
q = q.to_sql(db_model=self)
else:
q = str(q)
assert isinstance(q, str)
conn.query(q).result()
def read_query(self, conn, q):
"""
:param conn: database connection
:param q: sql query
:return: query results as table
"""
assert _have_bigquery
assert isinstance(conn, google.cloud.bigquery.client.Client)
if isinstance(q, data_algebra.data_ops.ViewRepresentation):
q = q.to_sql(db_model=self)
else:
q = str(q)
assert isinstance(q, str)
r = self.local_data_model.pd.DataFrame(conn.query(q).result().to_dataframe())
r.reset_index(drop=True, inplace=True)
return r.copy() # fresh copy
def insert_table(
self, conn, d, table_name, *, qualifiers=None, allow_overwrite=False
):
prepped_table_name = table_name
if self.table_prefix is not None:
prepped_table_name = self.table_prefix + "." + table_name
if allow_overwrite:
self.drop_table(conn, table_name)
else:
table_exists = True
try:
self.read_query(
conn,
"SELECT * FROM " + self.quote_table_name(table_name) + " LIMIT 1",
)
table_exists = True
except Exception as e:
table_exists = False
if table_exists:
raise ValueError("table " + prepped_table_name + " already exists")
job = conn.load_table_from_dataframe(d, prepped_table_name)
job.result()
def db_handle(self, conn):
return BigQuery_DBHandle(db_model=self, conn=conn)
class BigQuery_DBHandle(data_algebra.db_model.DBHandle):
def __init__(self, *, db_model=BigQueryModel(), conn):
assert isinstance(db_model, BigQueryModel)
data_algebra.db_model.DBHandle.__init__(self, db_model=db_model, conn=conn)
def describe_bq_table(
self, *, table_catalog, table_schema, table_name, row_limit=7
):
full_name = f"{table_catalog}.{table_schema}.{table_name}"
head = self.db_model.read_query(
conn=self.conn,
q="SELECT * FROM "
+ self.db_model.quote_table_name(full_name)
+ " LIMIT "
+ str(row_limit),
)
cat_name = f"{table_catalog}.{table_schema}.INFORMATION_SCHEMA.COLUMNS"
sql_meta = self.db_model.read_query(
self.conn,
f"SELECT * FROM {self.db_model.quote_table_name(cat_name)} "
+ f"WHERE table_name={self.db_model.quote_string(table_name)}",
)
qualifiers = {
"table_catalog": table_catalog,
"table_schema": table_schema,
"table_name": table_name,
"full_name": full_name,
}
td = data_algebra.data_ops.describe_table(
head,
table_name=full_name,
row_limit=row_limit,
qualifiers=qualifiers,
sql_meta=sql_meta,
)
return td
def query_to_csv(self, q, *, res_name):
if isinstance(q, data_algebra.data_ops.ViewRepresentation):
q = q.to_sql(self.db_model)
else:
q = str(q)
op = lambda: open(res_name, "w")
if res_name.endswith(".gz"):
op = lambda: gzip.open(res_name, "w")
with op() as res:
res_iter = self.conn.query(q).result().to_dataframe_iterable()
is_first = True
for block in res_iter:
block.to_csv(res, index=False, header=is_first)
is_first = False
def example_handle():
"""
Return an example db handle for testing. Returns None if helper packages not present.
Note: binds in a data_catalog and data schema prefix. So this handle is specific
to one database.
"""
# TODO: parameterize this
assert _have_bigquery
credential_file = "/Users/johnmount/big_query/big_query_jm.json"
# assert os.path.isfile(credential_file)
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credential_file
# os.environ["GOOGLE_APPLICATION_CREDENTIALS"] # trigger key error if not present
try:
data_catalog = "data-algebra-test"
data_schema = "test_1"
db_handle = BigQueryModel(
table_prefix=f"{data_catalog}.{data_schema}"
).db_handle(google.cloud.bigquery.Client())
db_handle.db_model.prepare_connection(db_handle.conn)
return db_handle
except Exception as e:
return None
|
89070
|
import pytest
def test_import_vdjtools_beta_w_validation():
import pandas as pd
import numpy as np
import os
from tcrdist.paths import path_to_base
from tcrdist.vdjtools_funcs import import_vdjtools
from tcrdist.repertoire import TCRrep
# Reformat vdj_tools input format for tcrdist3
vdj_tools_file_beta = os.path.join(path_to_base, 'tcrdist','data','formats','vdj.M_15_CD8_beta.clonotypes.TRB.txt.gz')
df_beta = import_vdjtools( vdj_tools_file = vdj_tools_file_beta ,
chain = 'beta',
organism = 'human',
db_file = 'alphabeta_gammadelta_db.tsv',
validate = True)
assert np.all(df_beta.columns == ['count', 'freq', 'cdr3_b_aa', 'v_b_gene', 'j_b_gene', 'cdr3_b_nucseq','valid_v', 'valid_j', 'valid_cdr3'])
# Can be directly imported into a TCRrep instance.
tr = TCRrep(
cell_df = df_beta[['count', 'freq', 'cdr3_b_aa', 'v_b_gene', 'j_b_gene']],
chains = ['beta'],
organism = 'human',
compute_distances = False)
def test_import_vdjtools_beta_no_validation():
import pandas as pd
import numpy as np
import os
from tcrdist.paths import path_to_base
from tcrdist.vdjtools_funcs import import_vdjtools
vdj_tools_file_beta = os.path.join(path_to_base, 'tcrdist','data','formats','vdj.M_15_CD8_beta.clonotypes.TRB.txt.gz')
df_beta = import_vdjtools( vdj_tools_file = vdj_tools_file_beta ,
chain = 'beta',
organism = 'human',
db_file = 'alphabeta_gammadelta_db.tsv',
validate = False)
assert np.all(df_beta.columns == ['count', 'freq', 'cdr3_b_aa', 'v_b_gene', 'j_b_gene', 'cdr3_b_nucseq','valid_v', 'valid_j', 'valid_cdr3'])
assert False in df_beta.valid_cdr3
assert False in df_beta.valid_v
assert False in df_beta.valid_j
def test_import_vdjtools_alpha_w_validation():
import pandas as pd
import numpy as np
import os
from tcrdist.paths import path_to_base
from tcrdist.vdjtools_funcs import import_vdjtools
vdj_tools_file_alpha = os.path.join(path_to_base, 'tcrdist','data','formats','vdj.M_15_CD8_alpha.clonotypes.TRA.txt.gz')
df_alpha = import_vdjtools( vdj_tools_file = vdj_tools_file_alpha,
chain = 'alpha',
organism = 'human',
db_file = 'alphabeta_gammadelta_db.tsv',
validate = True)
assert np.all(df_alpha.columns == ['count', 'freq', 'cdr3_a_aa', 'v_a_gene', 'j_a_gene', 'cdr3_a_nucseq','valid_v', 'valid_j', 'valid_cdr3'])
def test_import_vdjtools_alpha_no_validation():
import pandas as pd
import numpy as np
import os
from tcrdist.paths import path_to_base
from tcrdist.vdjtools_funcs import import_vdjtools
vdj_tools_file_alpha = os.path.join(path_to_base, 'tcrdist','data','formats','vdj.M_15_CD8_alpha.clonotypes.TRA.txt.gz')
df_alpha = import_vdjtools( vdj_tools_file = vdj_tools_file_alpha,
chain = 'alpha',
organism = 'human',
db_file = 'alphabeta_gammadelta_db.tsv',
validate = False)
assert np.all(df_alpha.columns == ['count', 'freq', 'cdr3_a_aa', 'v_a_gene', 'j_a_gene', 'cdr3_a_nucseq','valid_v', 'valid_j', 'valid_cdr3'])
assert False in df_alpha.valid_cdr3
assert False in df_alpha.valid_v
assert False in df_alpha.valid_j
|
89077
|
import rdkit.Chem as Chem
import numpy as np
import os
'''
This script is meant to split the Tox21 train dataset into the
individual target datasets for training single-task models.
'''
if __name__ == '__main__':
# Read SDF
suppl = Chem.SDMolSupplier(
os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'data', 'tox21_10k_data_all.sdf'
),
sanitize = False
)
mols = []
smiles = []
ys = None
targets = [
'NR-AhR',
'NR-AR',
'NR-AR-LBD',
'NR-Aromatase',
'NR-ER',
'NR-ER-LBD',
'NR-PPAR-gamma',
'SR-ARE',
'SR-ATAD5',
'SR-HSE',
'SR-MMP',
'SR-p53'
]
j = 1
for mol in suppl:
mols.append(mol)
smiles.append(Chem.MolToSmiles(mol))
y = np.nan * np.ones((1, len(targets)))
for i, target in enumerate(targets):
try:
y[0, i] = bool(float(mol.GetProp(target)))
except Exception as e:
pass
        if ys is None:
ys = y
else:
ys = np.concatenate((ys, y))
if j % 500 == 0:
print('completed {} entries'.format(j))
j += 1
print(ys)
print(ys.shape)
for i, target in enumerate(targets):
print('Target {} has {} entries; {} active'.format(
target, sum(~np.isnan(ys[:, i])), np.sum(ys[~np.isnan(ys[:, i]), i])
))
with open(os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'data', 'tox21.smiles'
), 'w') as fid:
for j, smile in enumerate(smiles):
fid.write('{}\t{}\t{}\n'.format(smile, '??', '\t'.join([str(x) for x in ys[j, :]])))
|
89104
|
from __future__ import print_function
import sys
from subprocess import call
from unittest import TestCase
from testfixtures import OutputCapture, compare
from .test_compare import CompareHelper
class TestOutputCapture(CompareHelper, TestCase):
def test_compare_strips(self):
with OutputCapture() as o:
print(' Bar! ')
o.compare('Bar!')
def test_compare_doesnt_strip(self):
with OutputCapture(strip_whitespace=False) as o:
print(' Bar! ')
self.check_raises(
'\tBar!',
compare=o.compare,
message="'\\tBar!' (expected) != ' Bar! \\n' (actual)",
)
def test_stdout_and_stderr(self):
with OutputCapture() as o:
print('hello', file=sys.stdout)
print('out', file=sys.stderr)
print('there', file=sys.stdout)
print('now', file=sys.stderr)
o.compare("hello\nout\nthere\nnow\n")
def test_unicode(self):
with OutputCapture() as o:
print(u'\u65e5', file=sys.stdout)
o.compare(u'\u65e5\n')
def test_separate_capture(self):
with OutputCapture(separate=True) as o:
print('hello', file=sys.stdout)
print('out', file=sys.stderr)
print('there', file=sys.stdout)
print('now', file=sys.stderr)
o.compare(stdout="hello\nthere\n",
stderr="out\nnow\n")
def test_compare_both_at_once(self):
with OutputCapture(separate=True) as o:
print('hello', file=sys.stdout)
print('out', file=sys.stderr)
self.check_raises(
stdout="out\n",
stderr="hello\n",
compare=o.compare,
message=(
'dict not as expected:\n'
'\n'
'values differ:\n'
"'stderr': 'hello' (expected) != 'out' (actual)\n"
"'stdout': 'out' (expected) != 'hello' (actual)\n"
'\n'
"While comparing ['stderr']: 'hello' (expected) != 'out' (actual)\n"
'\n'
"While comparing ['stdout']: 'out' (expected) != 'hello' (actual)"
),
)
def test_original_restore(self):
o_out, o_err = sys.stdout, sys.stderr
with OutputCapture() as o:
self.assertFalse(sys.stdout is o_out)
self.assertFalse(sys.stderr is o_err)
self.assertTrue(sys.stdout is o_out)
self.assertTrue(sys.stderr is o_err)
def test_double_disable(self):
o_out, o_err = sys.stdout, sys.stderr
with OutputCapture() as o:
self.assertFalse(sys.stdout is o_out)
self.assertFalse(sys.stderr is o_err)
o.disable()
self.assertTrue(sys.stdout is o_out)
self.assertTrue(sys.stderr is o_err)
o.disable()
self.assertTrue(sys.stdout is o_out)
self.assertTrue(sys.stderr is o_err)
self.assertTrue(sys.stdout is o_out)
self.assertTrue(sys.stderr is o_err)
def test_double_enable(self):
o_out, o_err = sys.stdout, sys.stderr
with OutputCapture() as o:
o.disable()
self.assertTrue(sys.stdout is o_out)
self.assertTrue(sys.stderr is o_err)
o.enable()
self.assertFalse(sys.stdout is o_out)
self.assertFalse(sys.stderr is o_err)
o.enable()
self.assertFalse(sys.stdout is o_out)
self.assertFalse(sys.stderr is o_err)
self.assertTrue(sys.stdout is o_out)
self.assertTrue(sys.stderr is o_err)
class TestOutputCaptureWithDescriptors(object):
def test_fd(self, capfd):
with capfd.disabled(), OutputCapture(fd=True) as o:
call([sys.executable, '-c', "import sys; sys.stdout.write('out')"])
call([sys.executable, '-c', "import sys; sys.stderr.write('err')"])
compare(o.captured, expected=b'outerr')
o.compare(expected=b'outerr')
def test_fd_separate(self, capfd):
with capfd.disabled(), OutputCapture(fd=True, separate=True) as o:
call([sys.executable, '-c', "import sys; sys.stdout.write('out')"])
call([sys.executable, '-c', "import sys; sys.stderr.write('err')"])
compare(o.captured, expected=b'')
o.compare(stdout=b'out', stderr=b'err')
|
89122
|
import behave
import os
@behave.when(u'I upload item using data frame from "{upload_path}"')
def step_impl(context, upload_path):
import pandas
upload_path = os.path.join(os.environ['DATALOOP_TEST_ASSETS'], upload_path)
    # collect the image file paths
    filepaths = list()
    # walk the upload directory and gather supported image files
count = 0
for path, subdirs, files in os.walk(upload_path):
for filename in files:
striped, ext = os.path.splitext(filename)
if ext in ['.jpg', '.png']:
filepaths.append({'local_path': os.path.join(path, filename),
'item_metadata': {'user':
{'dummy': count}}})
count += 1
df = pandas.DataFrame(filepaths)
context.dataset.items.upload(local_path=df)
@behave.then(u'Items should have metadata')
def step_impl(context):
items = context.dataset.items.get_all_items()
for item in items:
try:
item.metadata['user']['dummy']
except Exception:
assert False
|
89136
|
import click
from esque.cli.autocomplete import list_brokers
from esque.cli.helpers import fallback_to_stdin
from esque.cli.options import State, default_options, output_format_option
from esque.cli.output import format_output
from esque.errors import ValidationException
from esque.resources.broker import Broker
@click.command("broker", short_help="Describe a broker.")
@click.argument("broker", metavar="BROKER", callback=fallback_to_stdin, autocompletion=list_brokers, required=False)
@output_format_option
@default_options
def describe_broker(state: State, broker: str, output_format: str):
"""Return configuration options for broker BROKER. BROKER can be given with broker id (integer),
the host name (if hostname is unique), or socket address ('hostname:port')"""
if broker.isdigit():
broker = Broker.from_id(state.cluster, broker).describe()
elif ":" not in broker:
broker = Broker.from_host(state.cluster, broker).describe()
else:
try:
host, port = broker.split(":")
broker = Broker.from_host_and_port(state.cluster, host, int(port)).describe()
except ValueError:
raise ValidationException("BROKER must either be the broker id, the hostname, or in the form 'host:port'")
click.echo(format_output(broker, output_format))
|
89148
|
import questionary
if __name__ == "__main__":
path = questionary.path("Path to the projects version file").ask()
if path:
print(f"Found version file at {path} 🦄")
else:
print("No version file it is then!")
|
89160
|
orders = ["daisies", "periwinkle"]
print(orders)
orders.append("tulips")
orders.append("roses")
print(orders)
|
89162
|
from ..utils import dataset_utils
class TestDatasetUtils(object):
"""Collects all unit tests for `utils.model_utils`.
"""
def test_load_wikihop(self, dataset):
"""Asserts that `data_utils.load_wikihop()` returns the expected value when `masked == True`.
"""
expected = {'train': [
{
"id": "WH_train_0",
"query": "participant_of <NAME>",
"answer": "1996 summer olympics",
"candidates": [
"1996 summer olympics",
"olympic games",
"sport"
],
"supports": [
"<NAME> ( born December 28 , 1969 in Jiguani , Granma ) is a beach volleyball player from Cuba , who won the gold medal in the men 's beach team competition at the 2003 Pan American Games in Santo Domingo , Dominican Republic , partnering Francisco Alvarez . He represented his native country at the 1996 and the 2004 Summer Olympics ."
]
},
{
"id": "WH_train_1",
"query": "languages_spoken_or_written john osteen",
"answer": "english",
"candidates": [
"english",
"greek",
"koine greek",
"nahuatl",
"spanish"
],
"supports": [
"Lakewood Church is a nondenominational charismatic Christian megachurch located in Houston, Texas. It is the largest congregation in the United States, averaging about 52,000 attendees per week. The 16,800-seat Lakewood Church Central Campus, home to four English-language services and two Spanish-language services per week, is located at the former Compaq Center. <NAME> is the senior pastor of Lakewood Church with his wife, Victoria, who serves as co-pastor. Lakewood Church is a part of the Word of Faith movement.",
"Mexico (, modern Nahuatl ), officially the United Mexican States, is a federal republic in the southern half of North America. It is bordered to the north by the United States; to the south and west by the Pacific Ocean; to the southeast by Guatemala, Belize, and the Caribbean Sea; and to the east by the Gulf of Mexico. Covering almost two million square kilometers (over 760,000\u00a0sq\u00a0mi), Mexico is the sixth largest country in the Americas by total area and the 13th largest independent nation in the world. With an estimated population of over 120 million, it is the eleventh most populous country and the most populous Spanish-speaking country in the world while being the second most populous country in Latin America. Mexico is a federation comprising 31 states and a federal district that is also its capital and most populous city. Other metropolises include Guadalajara, Monterrey, Puebla, Toluca, Tijuana and Le\u00f3n."
]
}
]}
actual = dataset
assert expected == actual
def test_load_wikihop_masked(self, masked_dataset):
"""Asserts that `data_utils.load_wikihop()` returns the expected value when `masked == False`.
"""
expected = {'train.masked': [
{
"id": "WH_train_0",
"query": "participant_of juan rossell",
"answer": "___MASK3___",
"candidates": [
"___MASK3___",
"___MASK63___",
"___MASK83___"
],
"supports": [
"<NAME> ( born December 28 , 1969 in Jiguani , Granma ) is a beach volleyball player from Cuba , who won the gold medal in the men ' s beach team competition at the 2003 Pan American Games in Santo Domingo , Dominican Republic , partnering Francisco Alvarez . He represented his native country at the 1996 and the 2004 Summer Olympics ."
]
},
{
"id": "WH_train_1",
"query": "languages_spoken_or_written john osteen",
"answer": "___MASK46___",
"candidates": [
"___MASK15___",
"___MASK25___",
"___MASK46___",
"___MASK67___",
"___MASK85___"
],
"supports": [
"Lakewood Church is a nondenominational charismatic Christian megachurch located in Houston , Texas . It is the largest congregation in the United States , averaging about 52 , 000 attendees per week . The 16 , 800 - seat Lakewood Church Central Campus , home to four ___MASK46___ - language services and two Spanish - language services per week , is located at the former Compaq Center . <NAME> is the senior pastor of Lakewood Church with his wife , Victoria , who serves as co - pastor . Lakewood Church is a part of the Word of Faith movement .",
"Mexico (, modern ___MASK67___ ), officially the United Mexican States , is a federal republic in the southern half of North America . It is bordered to the north by the United States ; to the south and west by the Pacific Ocean ; to the southeast by Guatemala , Belize , and the Caribbean Sea ; and to the east by the Gulf of Mexico . Covering almost two million square kilometers ( over 760 , 000 sq mi ), Mexico is the sixth largest country in the Americas by total area and the 13th largest independent nation in the world . With an estimated population of over 120 million , it is the eleventh most populous country and the most populous ___MASK25___ - speaking country in the world while being the second most populous country in Latin America . Mexico is a federation comprising 31 states and a federal district that is also its capital and most populous city . Other metropolises include Guadalajara , Monterrey , Puebla , Toluca , Tijuana and Le\u00f3n ."
]
}
]}
actual = masked_dataset
assert expected == actual
|
89184
|
import serial
"""
# VE.Direct parser inspired by https://github.com/karioja/vedirect/blob/master/vedirect.py
"""
class Vedirect:
# The error code of the device (relevant when the device is in the fault state).
#
# Error 19 can be ignored, this condition regularly occurs during start-up or shutdown of the MPPT charger.
# Since version 1.15 this error will no longer be reported.
#
# Error 21 can be ignored for 5 minutes, this condition regularly occurs during start-up or shutdown
# of the MPPT charger. Since version 1.16 this warning will no longer be reported when it is not persistent.
#
VICTRON_ERROR = {
'0': 'No error',
'2': 'Battery voltage too high',
'17': 'Charger temperature too high',
'18': 'Charger over current',
'19': 'Charger current reversed',
'20': 'Bulk time limit exceeded',
'21': 'Current sensor issue',
'26': 'Terminals overheated',
'28': 'Converter issue', # (dual converter models only)
'33': 'Input voltage too high (solar panel)',
'34': 'Input current too high (solar panel)',
'38': 'Input shutdown (excessive battery voltage)',
'39': 'Input shutdown (due to current flow during off mode)',
'65': 'Lost communication with one of devices',
'66': 'Synchronised charging device configuration issue',
'67': 'BMS connection lost',
'68': 'Network misconfigured',
'116': 'Factory calibration data lost',
'117': 'Invalid/incompatible firmware',
'119': 'User settings invalid'
}
# The state of operation
VICTRON_CS = {
'0': 'Off',
'2': 'Fault',
'3': 'Bulk',
'4': 'Absorption',
'5': 'Float',
'7': 'Equalize (manual)',
'245': 'Starting-up',
'247': 'Auto equalize / Recondition',
'252': 'External control'
}
# The possible values for the tracker operation
VICTRON_MTTP = {
'0': 'Off',
'1': 'Limited',
'2': 'Active'
}
# Off reason, this field described why a unit is switched off.
#
# Available on SmartSolar mppt chargers since firmware version v1.44 (VE.Direct models)
# and v1.03 (SmartSolar VE.Can models)
# FIXME: This might not work as a dictionary
VICTRON_OFF_REASON = {
"0x00000001": "No input power",
"0x00000002": "Switched off (power switch)",
"0x00000004": "Switched off (device mode register)",
"0x00000008": "Remote input",
"0x00000010": "Protection active",
"0x00000020": "Paygo",
"0x00000040": "BMS",
"0x00000080": "Engine shutdown detection",
"0x00000100": "Analysing input voltage"
}
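    # Minimal decode sketch (an assumption, not part of the original parser): the OR
    # field looks like a hex bitmask combining the flags above, so several off reasons
    # can be active at once; a helper along these lines would expand it to text.
    @staticmethod
    def decode_off_reason(hex_value):
        mask = int(hex_value, 16)
        return [text for bits, text in Vedirect.VICTRON_OFF_REASON.items()
                if mask & int(bits, 16)]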
def __init__(self, port='/dev/ttyAMA0', timeout=5):
"""
Initialise serial component of the Victron parser. Default value is the standard serial port on Raspberry pi
:param port:
:param timeout:
"""
self.ser = serial.Serial(port, 19200, timeout=timeout)
self.header1 = b'\r'
self.header2 = b'\n'
self.delimiter = b'\t'
self.hexmarker = b':'
self.key = bytearray()
self.value = bytearray()
self.bytes_sum = 0
self.state = self.wait_header
self.dict = {}
hex, wait_header, in_key, in_value, in_checksum = range(5)
def input(self, byte):
if byte == self.hexmarker and self.state != self.in_checksum:
self.state = self.hex
if self.state == self.wait_header:
self.bytes_sum += ord(byte)
if byte == self.header1:
self.state = self.wait_header
elif byte == self.header2:
self.state = self.in_key
return None
elif self.state == self.in_key:
self.bytes_sum += ord(byte)
if byte == self.delimiter:
if self.key.decode() == 'Checksum':
self.state = self.in_checksum
else:
self.state = self.in_value
else:
self.key += byte
return None
elif self.state == self.in_value:
self.bytes_sum += ord(byte)
if byte == self.header1:
self.state = self.wait_header
self.dict[self.key.decode()] = self.value.decode()
self.key = bytearray()
self.value = bytearray()
else:
self.value += byte
return None
elif self.state == self.in_checksum:
self.bytes_sum += ord(byte)
self.key = bytearray()
self.value = bytearray()
self.state = self.wait_header
if self.bytes_sum % 256 == 0:
self.bytes_sum = 0
return self.dict
else:
print('Malformed packet')
print('----------------')
for k, v in self.dict.items():
print("{} {}".format(k, v))
self.bytes_sum = 0
elif self.state == self.hex:
self.bytes_sum = 0
if byte == self.header2:
self.state = self.wait_header
else:
raise AssertionError()
def read_data_single(self):
while True:
byte = self.ser.read(1)
packet = self.input(byte)
if packet is not None:
return packet
def read_data_callback(self, callback):
while True:
byte = self.ser.read(1)
if byte:
packet = self.input(byte)
if packet is not None:
callback(packet)
else:
break
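# Minimal usage sketch (added for illustration only): read one complete VE.Direct
# frame from the default serial port and print the decoded key/value pairs.
if __name__ == '__main__':
    ve = Vedirect(port='/dev/ttyAMA0', timeout=5)
    frame = ve.read_data_single()
    for key, value in frame.items():
        print("{} {}".format(key, value))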
|
89199
|
from typing import Dict, Optional, Union
import numpy as np
from werkzeug import ImmutableMultiDict
from .endpoint import Endpoint
def predict(model, input_data: Union[Dict, ImmutableMultiDict], config: Endpoint):
# new model
if hasattr(model, "public_inputs"):
sample = {}
for k, v in dict(input_data).items():
try:
                # GET request arguments are strings; if they should in fact be numbers, we try to convert them here
sample[k] = float(v)
except ValueError:
                # Some arguments really are strings, so we leave them unchanged
sample[k] = v
res = model.predict(sample, "raw")
return res.to_dict("records")[0]
sample = config.process_input(input_data)
vec = np.array(sample).reshape(1, -1)
res = model.predict(vec)
return config.process_output(res)
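# Minimal sketch (hypothetical stand-in model, added for illustration only): shows how
# string query parameters such as "21" are coerced to floats when the model exposes
# `public_inputs`; the Endpoint config is not used on that code path.
def _coercion_example():
    import pandas as pd

    class _ToyModel:
        public_inputs = ["age", "name"]

        def predict(self, sample, mode):
            return pd.DataFrame([{"doubled_age": sample["age"] * 2}])

    # "21" becomes 21.0, "alice" stays a string
    return predict(_ToyModel(), {"age": "21", "name": "alice"}, config=None)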
|
89251
|
from ipywidgets import widgets
from pandas_profiling.report.presentation.core import Variable
class WidgetVariable(Variable):
def render(self) -> widgets.VBox:
items = [self.content["top"].render()]
if self.content["bottom"] is not None:
items.append(self.content["bottom"].render())
return widgets.VBox(items)
|
89277
|
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from pandas.testing import assert_frame_equal
import pandas as pd
import matplotlib
from pdpbox.pdp import pdp_isolate, pdp_plot
class TestPDPIsolateBinary(object):
def test_pdp_isolate_binary_feature(
self, titanic_model, titanic_data, titanic_features
):
# feature_type: binary
pdp_isolate_out = pdp_isolate(
model=titanic_model,
dataset=titanic_data,
model_features=titanic_features,
feature="Sex",
num_grid_points=10,
grid_type="percentile",
percentile_range=None,
grid_range=None,
cust_grid_points=None,
memory_limit=0.5,
n_jobs=1,
predict_kwds={},
data_transformer=None,
)
assert pdp_isolate_out._type == "PDPIsolate_instance"
assert pdp_isolate_out.n_classes == 2
assert pdp_isolate_out.which_class is None
assert pdp_isolate_out.feature == "Sex"
assert pdp_isolate_out.feature_type == "binary"
assert pdp_isolate_out.percentile_info == []
assert pdp_isolate_out.display_columns == ["Sex_0", "Sex_1"]
assert pdp_isolate_out.hist_data is None
def test_pdp_isolate_onehot_feature(
self, titanic_model, titanic_data, titanic_features
):
# feature_type: onehot
pdp_isolate_out = pdp_isolate(
model=titanic_model,
dataset=titanic_data,
model_features=titanic_features,
feature=["Embarked_C", "Embarked_S", "Embarked_Q"],
num_grid_points=10,
grid_type="percentile",
percentile_range=None,
grid_range=None,
cust_grid_points=None,
memory_limit=0.5,
n_jobs=1,
predict_kwds={},
data_transformer=None,
)
assert pdp_isolate_out._type == "PDPIsolate_instance"
assert pdp_isolate_out.n_classes == 2
assert pdp_isolate_out.which_class is None
assert pdp_isolate_out.feature == ["Embarked_C", "Embarked_S", "Embarked_Q"]
assert pdp_isolate_out.feature_type == "onehot"
assert pdp_isolate_out.percentile_info == []
assert pdp_isolate_out.display_columns == [
"Embarked_C",
"Embarked_S",
"Embarked_Q",
]
assert pdp_isolate_out.hist_data is None
def test_pdp_isolate_numeric_feature(
self, titanic_model, titanic_data, titanic_features
):
# feature_type: numeric
pdp_isolate_out = pdp_isolate(
model=titanic_model,
dataset=titanic_data,
model_features=titanic_features,
feature="Fare",
num_grid_points=10,
grid_type="percentile",
percentile_range=None,
grid_range=None,
cust_grid_points=None,
memory_limit=0.5,
n_jobs=1,
predict_kwds={},
data_transformer=None,
)
assert pdp_isolate_out._type == "PDPIsolate_instance"
assert pdp_isolate_out.n_classes == 2
assert pdp_isolate_out.which_class is None
assert pdp_isolate_out.feature == "Fare"
assert pdp_isolate_out.feature_type == "numeric"
assert len(pdp_isolate_out.hist_data) == titanic_data.shape[0]
def test_pdp_isolate_cust_grid_points(
self, titanic_model, titanic_data, titanic_features
):
# use cust_grid_points
pdp_isolate_out = pdp_isolate(
model=titanic_model,
dataset=titanic_data,
model_features=titanic_features,
feature="Fare",
num_grid_points=10,
grid_type="percentile",
percentile_range=None,
grid_range=None,
cust_grid_points=range(0, 100, 5),
memory_limit=0.5,
n_jobs=1,
predict_kwds={},
data_transformer=None,
)
assert pdp_isolate_out._type == "PDPIsolate_instance"
assert pdp_isolate_out.n_classes == 2
assert pdp_isolate_out.which_class is None
assert pdp_isolate_out.feature == "Fare"
assert pdp_isolate_out.feature_type == "numeric"
assert pdp_isolate_out.percentile_info == []
assert pdp_isolate_out.display_columns == [
"0",
"5",
"10",
"15",
"20",
"25",
"30",
"35",
"40",
"45",
"50",
"55",
"60",
"65",
"70",
"75",
"80",
"85",
"90",
"95",
]
assert len(pdp_isolate_out.hist_data) == titanic_data.shape[0]
class TestPDPIsolateRegression(object):
def test_pdp_isolate_regression(self, ross_model, ross_data, ross_features):
pdp_isolate_out = pdp_isolate(
model=ross_model,
dataset=ross_data,
model_features=ross_features,
feature="SchoolHoliday",
num_grid_points=10,
grid_type="percentile",
percentile_range=None,
grid_range=None,
cust_grid_points=None,
memory_limit=0.5,
n_jobs=1,
predict_kwds={},
data_transformer=None,
)
assert pdp_isolate_out._type == "PDPIsolate_instance"
assert pdp_isolate_out.n_classes == 0
assert pdp_isolate_out.which_class is None
assert pdp_isolate_out.feature == "SchoolHoliday"
assert pdp_isolate_out.feature_type == "binary"
assert pdp_isolate_out.percentile_info == []
assert pdp_isolate_out.display_columns == ["SchoolHoliday_0", "SchoolHoliday_1"]
assert pdp_isolate_out.hist_data is None
def test_pdp_isolate_n_jobs(self, ross_model, ross_data, ross_features):
# test n_jobs > 1
_ = pdp_isolate(
model=ross_model,
dataset=ross_data,
model_features=ross_features,
feature="SchoolHoliday",
num_grid_points=10,
grid_type="percentile",
percentile_range=None,
grid_range=None,
cust_grid_points=None,
memory_limit=0.5,
n_jobs=2,
predict_kwds={},
data_transformer=None,
)
def test_pdp_isolate_multiclass(otto_model, otto_data, otto_features):
pdp_isolate_out = pdp_isolate(
model=otto_model,
dataset=otto_data,
model_features=otto_features,
feature="feat_67",
num_grid_points=10,
grid_type="percentile",
percentile_range=None,
grid_range=None,
cust_grid_points=None,
memory_limit=0.5,
n_jobs=1,
predict_kwds={},
data_transformer=None,
)
assert len(pdp_isolate_out) == 9
assert pdp_isolate_out[0]._type == "PDPIsolate_instance"
assert pdp_isolate_out[0].n_classes == 9
for i in range(9):
assert pdp_isolate_out[i].which_class == i
assert pdp_isolate_out[0].feature == "feat_67"
assert pdp_isolate_out[0].feature_type == "numeric"
class TestPDPPlotSingle(object):
@pytest.fixture
def pdp_sex(self, titanic_data, titanic_model, titanic_features):
result = pdp_isolate(
model=titanic_model,
dataset=titanic_data,
model_features=titanic_features,
feature="Sex",
)
return result
def test_pdp_plot_single_default(self, pdp_sex):
# single chart without data dist plot
fig, axes = pdp_plot(pdp_sex, "sex")
assert type(fig) == matplotlib.figure.Figure
assert sorted(axes.keys()) == ["pdp_ax", "title_ax"]
assert type(axes["pdp_ax"]) == matplotlib.axes._subplots.Subplot
assert type(axes["title_ax"]) == matplotlib.axes._subplots.Subplot
def test_pdp_plot_single_distplot(self, pdp_sex):
# single chart with data dist plot
fig, axes = pdp_plot(pdp_sex, "sex", plot_pts_dist=True)
assert sorted(axes.keys()) == ["pdp_ax", "title_ax"]
assert sorted(axes["pdp_ax"].keys()) == ["_count_ax", "_pdp_ax"]
assert type(axes["pdp_ax"]["_pdp_ax"]) == matplotlib.axes._subplots.Subplot
assert type(axes["pdp_ax"]["_count_ax"]) == matplotlib.axes._subplots.Subplot
assert type(axes["title_ax"]) == matplotlib.axes._subplots.Subplot
class TestPDPPlotMulti(object):
@pytest.fixture
def pdp_feat_67_rf(self, otto_data, otto_model, otto_features):
result = pdp_isolate(
model=otto_model,
dataset=otto_data,
model_features=otto_features,
feature="feat_67",
)
return result
def test_pdp_plot_multi_default(self, pdp_feat_67_rf):
# multi charts without data dist plot
fig, axes = pdp_plot(
pdp_isolate_out=pdp_feat_67_rf,
feature_name="feat_67",
center=True,
x_quantile=True,
)
assert type(fig) == matplotlib.figure.Figure
assert sorted(axes.keys()) == ["pdp_ax", "title_ax"]
assert len(axes["pdp_ax"]) == 9
assert type(axes["title_ax"]) == matplotlib.axes._subplots.Subplot
assert type(axes["pdp_ax"][0]) == matplotlib.axes._subplots.Subplot
def test_pdp_plot_multi_which_classes(self, pdp_feat_67_rf):
# change which classes
fig, axes = pdp_plot(
pdp_feat_67_rf,
"feat_67",
center=True,
x_quantile=True,
ncols=2,
which_classes=[0, 3, 7],
)
assert len(axes["pdp_ax"]) == 3
def test_pdp_plot_multi_one_class(self, pdp_feat_67_rf):
# only keep 1 class
fig, axes = pdp_plot(
pdp_feat_67_rf,
"feat_67",
center=True,
x_quantile=True,
ncols=2,
which_classes=[5],
)
assert type(axes["pdp_ax"]) == matplotlib.axes._subplots.Subplot
def test_pdp_plot_multi_distplot(self, pdp_feat_67_rf):
# multi charts with data dist plot
fig, axes = pdp_plot(
pdp_isolate_out=pdp_feat_67_rf,
feature_name="feat_67",
center=True,
x_quantile=True,
plot_pts_dist=True,
)
assert sorted(axes.keys()) == ["pdp_ax", "title_ax"]
assert len(axes["pdp_ax"]) == 9
assert sorted(axes["pdp_ax"][0].keys()) == ["_count_ax", "_pdp_ax"]
assert type(axes["pdp_ax"][0]["_count_ax"]) == matplotlib.axes._subplots.Subplot
assert type(axes["pdp_ax"][0]["_pdp_ax"]) == matplotlib.axes._subplots.Subplot
|
89282
|
from collections import OrderedDict
from collections.abc import Iterable, Mapping
from functools import reduce
from devito.tools.utils import filter_sorted, flatten
__all__ = ['toposort']
def build_dependence_lists(elements):
"""
Given an iterable of dependences, return the dependence lists as a
mapper suitable for graph-like algorithms. A dependence is an iterable of
elements ``[a, b, c, ...]``, meaning that ``a`` precedes ``b`` and ``c``,
``b`` precedes ``c``, and so on.
"""
mapper = OrderedDict()
for element in elements:
for idx, i0 in enumerate(element):
v = mapper.setdefault(i0, set())
for i1 in element[idx + 1:]:
v.add(i1)
return mapper
def toposort(data):
"""
Given items that depend on other items, a topological sort arranges the items in
an order such that no item precedes an item it depends on.
``data`` captures the various dependencies. It may be:
* A dictionary whose keys are items and whose values are a set of
dependent items. The dictionary may contain self-dependencies
(which are ignored), and dependent items that are not also
dict keys.
* An iterable of dependences as expected by :func:`build_dependence_lists`.
Readapted from: ::
http://code.activestate.com/recipes/577413/
"""
if not isinstance(data, Mapping):
assert isinstance(data, Iterable)
data = build_dependence_lists(data)
processed = []
if not data:
return processed
# Do not transform `data` in place
mapper = OrderedDict([(k, set(v)) for k, v in data.items()])
# Ignore self dependencies
for k, v in mapper.items():
v.discard(k)
# Perform the topological sorting
extra_items_in_deps = reduce(set.union, mapper.values()) - set(mapper)
mapper.update(OrderedDict([(item, set()) for item in extra_items_in_deps]))
while True:
ordered = set(item for item, dep in mapper.items() if not dep)
if not ordered:
break
processed = filter_sorted(ordered) + processed
mapper = OrderedDict([(item, (dep - ordered)) for item, dep in mapper.items()
if item not in ordered])
if len(processed) != len(set(flatten(data) + flatten(data.values()))):
raise ValueError("A cyclic dependency exists amongst %r" % data)
return processed
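# Illustrative usage sketch (added for clarity, not part of the original module). It
# demonstrates the two `data` shapes accepted by `toposort` as described in its docstring;
# assumes devito is importable so that `filter_sorted` and `flatten` resolve.
if __name__ == '__main__':
    # Iterable-of-dependences form: 'a' precedes 'b' and 'c', and 'b' precedes 'c'.
    print(toposort([['a', 'b', 'c']]))              # ['a', 'b', 'c']
    # Mapping form: each key precedes every item in its value set.
    print(toposort({'a': {'b', 'c'}, 'b': {'c'}}))  # ['a', 'b', 'c']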
|
89326
|
from abc import ABC
import scipy.sparse
from rasa.nlu.featurizers.featurizer import Featurizer
class SparseFeaturizer(Featurizer[scipy.sparse.spmatrix], ABC):
"""Base class for all sparse featurizers."""
pass
|
89395
|
import pygame
from pygame.locals import *
from sys import exit
from random import *
pygame.init()
screen = pygame.display.set_mode((640, 480), 0, 32)
screen.lock()
for count in range(10):
random_color = (randint(0,255), randint(0,255), randint(0,255))
random_pos = (randint(0,639), randint(0,479))
random_size = (639-randint(random_pos[0],639), 479-randint(random_pos[1],479))
pygame.draw.rect(screen, random_color, Rect(random_pos, random_size))
screen.unlock()
pygame.display.update()
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
exit()
|
89425
|
from garuda_dir.garuda_pb2 import Void
class GarudaCustom(object):
def CustomCallDemo(self, context, void):
'''
rpc CustomCallDemo(Void) returns (Void);
'''
print("Just a dummy RPC call")
return Void()
|
89489
|
from typing import List, Union
import pytest
from mockito import mock, unstub, verifyStubbedInvocationsAreUsed, when
from ...config import config
from ...core.entities.mod import Mod
from ...core.entities.sites import Sites
from ...core.entities.version_info import Stabilities, VersionInfo
from .update import Update
from .update_repo import UpdateRepo
@pytest.fixture
def mock_repo():
return mock(UpdateRepo)
def test_use_all_installed_mods_when_no_mods_are_specified(mock_repo):
mods: List[Mod] = [Mod("1", "one"), Mod("2", "two")]
update = Update(mock_repo)
when(mock_repo).get_all_mods().thenReturn(mods)
when(update).find_download_and_install(...)
update.execute([])
verifyStubbedInvocationsAreUsed()
unstub()
def test_call_find_download_and_install(mock_repo):
when(mock_repo).get_all_mods().thenReturn([])
update = Update(mock_repo)
when(update).find_download_and_install(...)
update.execute([])
verifyStubbedInvocationsAreUsed()
unstub()
def version_info(filename: str) -> VersionInfo:
return VersionInfo(
stability=Stabilities.release,
mod_loaders=set(),
site=Sites.curse,
upload_time=1,
minecraft_versions=[],
download_url="",
filename=filename,
)
@pytest.mark.parametrize(
"name,old,new,pretend,expected",
[
(
"Remove file when new file has been downloaded",
"old",
"new",
False,
True,
),
(
"Keep file when no new file has been downloaded",
"old",
"old",
False,
False,
),
(
"Keep old file when new filename is empty",
"old",
"",
False,
False,
),
(
"Don't remove old file when it doesn't exist",
None,
"new",
False,
False,
),
(
"Don't remove old file when it's empty",
"",
"new",
False,
False,
),
(
"Don't remove old file when --pretend is on",
"old",
"new",
True,
False,
),
],
)
def test_on_version_found(
name: str, old: Union[str, None], new: Union[str, None], pretend: bool, expected: bool, mock_repo
):
print(name)
config.pretend = pretend
if expected:
when(mock_repo).remove_mod_file(old)
update = Update(mock_repo)
old_mod = Mod("", "", file=old)
new_mod = Mod("", "", file=new)
update.on_version_found(old_mod, new_mod)
config.pretend = False
verifyStubbedInvocationsAreUsed()
unstub()
|
89536
|
from pymodelica import compile_fmu
fmu_name = compile_fmu("{{model_name}}", "{{model_name}}.mo",
version="{{fmi_version}}", target="{{fmi_api}}",
compiler_options={'extra_lib_dirs':["{{sim_lib_path}}"]})
|
89538
|
from django.conf import settings
from portia_api.jsonapi import JSONResponse
from portia_api.jsonapi.renderers import JSONRenderer
from .models import (Job, Log, Schedule,JobItem)
import time, datetime
import requests
from storage import (get_storage_class,create_project_storage)
from portia_orm.models import Project
import inspect
import uuid
import re
from django.db.models import Max
import logging
logger = logging.getLogger('portia_dashboard')
def _request_get(url):
retryTime = 5
res = None
for i in range(retryTime):
try:
res = requests.get(url)#self.proxyUtil.getRandomProxy())
if res.status_code !=200:
continue
break
except:
continue
return res
def _request_post(url):
retryTime = 5
res = None
for i in range(retryTime):
try:
res = requests.post(url)#self.proxyUtil.getRandomProxy())
if res.status_code !=200:
continue
break
except:
continue
return res
def matchDate(line):
matchThis = ""
matched = re.match(r'\d\d\d\d-\d\d-\d\d\ \d\d:\d\d:\d\d',line)
if matched:
#matches a date and adds it to matchThis
matchThis = matched.group()
else:
matchThis = "NONE"
return matchThis
def parseLine(line):
return re.findall( r'(?P<date>\d\d\d\d-\d\d-\d\d\ \d\d:\d\d:\d\d) \[(?P<source>[^\]]+)\] (?P<level>INFO|DEBUG|ERROR|WARNING|CRITICAL): (?P<text>.*)', line ) [0]
def generateDicts(log):
currentDict = {}
index = 1
for line in log.splitlines():
if line.startswith(matchDate(line)):
if currentDict:
yield currentDict
date, source, level, text = parseLine(line)
currentDict = {
"index" : index,
"date": date,
"source":source,
"level":level,
"text":text
}
index = index + 1
else:
currentDict["text"] += line
yield currentDict
def _get_log_from_scrapyd(project_id, spider_id, job_id ) :
res = _request_get("%s/logs/%s/%s/%s.log" %(settings.SCRAPYD_URL,project_id, spider_id, job_id ))
return res.text if res.status_code == 200 else ''
def _get_log(project_id, spider_id, job_id, job_status ):
log = None
try:
log = Log.objects.get(id=job_id )
except Log.DoesNotExist:
content = _get_log_from_scrapyd(project_id, spider_id, job_id )
log = Log.objects.create(id=job_id , content=content )
return log
if job_status != 'finished' :
log.content = _get_log_from_scrapyd(project_id, spider_id, job_id )
log.save()
return log
def job_log(request):
result = []
project_id = request.GET.get('project')
spider_id = request.GET.get('spider')
job_id = request.GET.get('job')
job = Job.objects.get(id=job_id)
if job:
log = _get_log(project_id, job.spider, job.id, job.status )
if log :
result = list(generateDicts(log.content))
return JSONResponse({"project":project_id,"spider":spider_id, "job": job_id, "log":result})
def _get_log_count(project_id, spider_id, job_id, job_status ) :
warnings, errors , criticals = 0,0,0
log = _get_log(project_id, spider_id, job_id, job_status )
if log :
try:
result = list(generateDicts(log.content ))
for item in result :
if item['level'] == 'WARNING' :
warnings += 1
elif item['level'] == 'ERROR' :
errors += 1
elif item['level'] == 'CRITICAL' :
criticals += 1
except KeyError:
pass
return warnings, errors, criticals
def job_cancel(request):
project_id = request.GET.get('project')
job_id = request.GET.get('job')
res = _request_post("%s/cancel.json?project=%s&job=%s" %(settings.SCRAPYD_URL,project_id, job_id ))
if res:
result = res.json()
if result.get("status", '') == 'ok' :
return JSONResponse({'status':'ok'})
return JSONResponse({'status':'error'})
def job_delete(request):
id = request.GET.get('job')
if id:
Job.objects.get(id=id).delete()
Log.objects.get(id=id ).delete()
return JSONResponse({'status':'ok'})
else:
return JSONResponse({'status':'error'})
def _get_timestamp_from_string( timestring ) :
dt = datetime.datetime.strptime(timestring,"%Y-%m-%d %H:%M:%S.%f")
ts = time.mktime (dt.timetuple()) * 1000
return ts
def _get_stub_job ():
try:
job = Job.objects.get(id='ffffffffffffffff0000000000000000')
except Job.DoesNotExist:
job = Job.objects.create(id = 'ffffffffffffffff0000000000000000', spider='', start_time = 0 , index = 0 )
return job
def _get_last_start_time ():
job = _get_stub_job()
max_start_time = job.start_time
return max_start_time
def _set_last_start_time(last_start_time) :
job = _get_stub_job()
job.start_time = last_start_time
job.save()
def _get_last_index():
job = _get_stub_job()
last_index = job.index
return last_index
def _set_last_index ( last_index ) :
job = _get_stub_job()
job.index = last_index
job.save()
def _update_jobs_model(project_id) :
#last_start_time = _get_last_start_time()
updated_count = 0
created_count = 0
res = _request_get("%s/listjobs.json?project=%s" %(settings.SCRAPYD_URL,project_id))
if res:
for status in ['pending', 'running', 'finished']:
data = res.json().get(status,[])
jobs = []
for item in data:
created = False
try:
job = Job.objects.get(id=item['id'])
except Job.DoesNotExist:
if 'start_time' in item and _get_timestamp_from_string(item['start_time']) <= _get_last_start_time() :
# the job must be removed, so skip it
continue
job = Job.objects.create(id = item['id'], spider=item['spider'], index = ( _get_last_index() + 1 ))
_set_last_index(job.index)
created = True
created_count += 1
#job maybe changed if not in 'finished' status.
if job.status != 'finished' or job.start_time == 0 or job.end_time == 0 :
if 'start_time' in item :
job.start_time = _get_timestamp_from_string(item['start_time'])
if 'end_time' in item :
job.end_time = _get_timestamp_from_string(item['end_time'])
if status == 'finished' :
job.warning_count, job.error_count, job.critical_count = _get_log_count(project_id, job.spider, job.id, job.status )
job.status = status
job.save()
updated_count += 1
if created == True and job.start_time > _get_last_start_time() :
_set_last_start_time(job.start_time)
return created_count, updated_count
def _get_string_from_timestamp( timestamp) :
return datetime.datetime.fromtimestamp(timestamp / 1000 ).strftime("%Y-%m-%d %H:%M:%S")
def job_list(request) :
result = {}
project_id = request.GET.get('project')
spider = request.GET.get('spider', '')
_update_jobs_model(project_id )
for status in ['pending', 'running', 'finished']:
res_jobs = []
jobs = Job.objects.filter(status = status ).order_by('-start_time')
for job in jobs :
if (spider == '' or spider == job.spider ):
res_jobs.append({'id':job.id ,
'index' : job.index,
'project':project_id,
'spider':job.spider,
'start_time': _get_string_from_timestamp(job.start_time),
'end_time': _get_string_from_timestamp(job.end_time),
'error_count': job.error_count,
'warning_count': job.warning_count,
'critical_count': job.critical_count
})
result[status] = res_jobs
return JSONResponse(result)
def schedule_add(request):
project = request.GET.get('project')
spider = request.GET.get('spider')
interval = request.GET.get('interval')
times = request.GET.get('times')
if project and spider and interval:
schedule = Schedule(id = uuid.uuid1().hex,
project = project,
spider = spider,
start_time = int(time.time() * 1000),
interval = interval,
times = times,
date_update = int(time.time() * 1000)
)
schedule.save()
return JSONResponse({'status':'ok'})
else:
return JSONResponse({'status':'error'})
def schedule_list(request):
result =[]
schedules = Schedule.objects.all()
for schedule in schedules:
result.append({'id':schedule.id ,
'project':schedule.project,
'spider':schedule.spider,
'start_time': _get_string_from_timestamp(schedule.start_time),
'update_time': _get_string_from_timestamp(schedule.date_update),
'interval' : schedule.interval,
'times' : schedule.times
})
return JSONResponse(result)
def schedule_del(request):
id = request.GET.get('id')
if id:
Schedule.objects.get(id=id).delete()
return JSONResponse({'status':'ok'})
else:
return JSONResponse({'status':'error'})
def article_list(request):
result =[]
job = request.GET.get('job')
items = JobItem.objects(job=job)
for item in items:
res = { 'id': str(item.id) ,
'item-display-name' : 'item',
'job':item.job,
'spider':item.spider,
'url':item.url,
'time' : item.time.strftime("%Y-%m-%d %H:%M:%S")
}
result.append(res)
return JSONResponse(result)
def article_detail(request):
result = {}
job_item_id = request.GET.get('job_item')
job_items = JobItem.objects( id = job_item_id )
if job_items[0] :
for name, value in job_items[0].__dict__.items():
if not name.startswith('_') and not inspect.ismethod(value):
#value = getattr(item, name )
result[name] = value
return JSONResponse(result)
def article_del(request):
spider_id = request.GET.get('spider')
job_id = request.GET.get('job')
job_item_id = request.GET.get('job_item')
if spider_id :
jobItems = JobItem.objects.filter(spider=spider_id)
for item in jobItems :
item.delete()
return JSONResponse({'status':'ok'})
elif job_id :
jobItems = JobItem.objects.filter(job=job_id)
for item in jobItems :
item.delete()
return JSONResponse({'status':'ok'})
elif job_item_id:
JobItem.objects.get(id=job_item_id).delete()
return JSONResponse({'status':'ok'})
else:
return JSONResponse({'status':'error'})
|
89574
|
import pytest
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, models
from psqlextra.backend.schema import PostgresSchemaEditor
from psqlextra.types import PostgresPartitioningMethod
from . import db_introspection
from .fake_model import define_fake_partitioned_model
def test_schema_editor_create_delete_partitioned_model_range():
"""Tests whether creating a partitioned model and adding a list partition
to it using the :see:PostgresSchemaEditor works."""
method = PostgresPartitioningMethod.RANGE
key = ["timestamp"]
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"method": method, "key": key},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_range_partition(model, "pt1", "2019-01-01", "2019-02-01")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert table.name == model._meta.db_table
assert table.method == method
assert table.key == key
assert table.partitions[0].full_name == model._meta.db_table + "_pt1"
schema_editor.delete_partitioned_model(model)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert not table
partitions = db_introspection.get_partitions(model._meta.db_table)
assert len(partitions) == 0
def test_schema_editor_create_delete_partitioned_model_list():
"""Tests whether creating a partitioned model and adding a range partition
to it using the :see:PostgresSchemaEditor works."""
method = PostgresPartitioningMethod.LIST
key = ["category"]
model = define_fake_partitioned_model(
{"name": models.TextField(), "category": models.TextField()},
{"method": method, "key": key},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_list_partition(model, "pt1", ["car", "boat"])
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert table.name == model._meta.db_table
assert table.method == method
assert table.key == key
assert table.partitions[0].full_name == model._meta.db_table + "_pt1"
schema_editor.delete_partitioned_model(model)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert not table
partitions = db_introspection.get_partitions(model._meta.db_table)
assert len(partitions) == 0
def test_schema_editor_create_delete_partitioned_model_default():
"""Tests whether creating a partitioned model and adding a default
partition to it using the :see:PostgresSchemaEditor works."""
method = PostgresPartitioningMethod.LIST
key = ["category"]
model = define_fake_partitioned_model(
{"name": models.TextField(), "category": models.TextField()},
{"method": method, "key": key},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_default_partition(model, "default")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert table.name == model._meta.db_table
assert table.method == method
assert table.key == key
assert table.partitions[0].full_name == model._meta.db_table + "_default"
schema_editor.delete_partitioned_model(model)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert not table
partitions = db_introspection.get_partitions(model._meta.db_table)
assert len(partitions) == 0
def test_schema_editor_create_partitioned_model_no_method():
"""Tests whether its possible to create a partitioned model without
explicitly setting a partitioning method.
The default is "range", so setting one explicitly should not be
needed.
"""
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"key": ["timestamp"]},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
pt = db_introspection.get_partitioned_table(model._meta.db_table)
assert pt.method == PostgresPartitioningMethod.RANGE
assert len(pt.partitions) == 0
def test_schema_editor_create_partitioned_model_no_key():
"""Tests whether trying to create a partitioned model without a
partitioning key raises :see:ImproperlyConfigured as it's not possible to
create a partitioned model without one and we cannot have a sane
default."""
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"method": PostgresPartitioningMethod.RANGE},
)
schema_editor = PostgresSchemaEditor(connection)
with pytest.raises(ImproperlyConfigured):
schema_editor.create_partitioned_model(model)
def test_schema_editor_add_range_partition():
"""Tests whether adding a range partition works."""
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"key": ["timestamp"]},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_range_partition(
model,
name="mypartition",
from_values="2019-1-1",
to_values="2019-2-1",
comment="test",
)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 1
assert table.partitions[0].name == "mypartition"
assert (
table.partitions[0].full_name == f"{model._meta.db_table}_mypartition"
)
assert table.partitions[0].comment == "test"
schema_editor.delete_partition(model, "mypartition")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 0
def test_schema_editor_add_list_partition():
"""Tests whether adding a list partition works."""
model = define_fake_partitioned_model(
{"name": models.TextField()},
{"method": PostgresPartitioningMethod.LIST, "key": ["name"]},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_list_partition(
model, name="mypartition", values=["1"], comment="test"
)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 1
assert table.partitions[0].name == "mypartition"
assert (
table.partitions[0].full_name == f"{model._meta.db_table}_mypartition"
)
assert table.partitions[0].comment == "test"
schema_editor.delete_partition(model, "mypartition")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 0
@pytest.mark.parametrize(
"method,key",
[
(PostgresPartitioningMethod.RANGE, ["timestamp"]),
(PostgresPartitioningMethod.LIST, ["name"]),
],
)
def test_schema_editor_add_default_partition(method, key):
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"method": method, "key": key},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_default_partition(
model, name="mypartition", comment="test"
)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 1
assert table.partitions[0].name == "mypartition"
assert (
table.partitions[0].full_name == f"{model._meta.db_table}_mypartition"
)
assert table.partitions[0].comment == "test"
schema_editor.delete_partition(model, "mypartition")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 0
|
89581
|
import fiona
import numpy as np
import os
import pytest
import rasterio
import mapchete
from mapchete.index import zoom_index_gen
from mapchete.io import get_boto3_bucket
@pytest.mark.remote
def test_remote_indexes(mp_s3_tmpdir, gtiff_s3):
zoom = 7
gtiff_s3.dict.update(zoom_levels=zoom)
def gen_indexes_and_check():
# generate indexes
list(
zoom_index_gen(
mp=mp,
zoom=zoom,
out_dir=mp.config.output.path,
geojson=True,
txt=True,
vrt=True,
)
)
# assert GeoJSON exists
with fiona.open(
os.path.join(mp.config.output.path, "%s.geojson" % zoom)
) as src:
assert len(src) == 2
# assert TXT exists
txt_index = os.path.join(mp.config.output.path, "%s.txt" % zoom)
bucket = get_boto3_bucket(txt_index.split("/")[2])
key = "/".join(txt_index.split("/")[3:])
for obj in bucket.objects.filter(Prefix=key):
if obj.key == key:
content = obj.get()["Body"].read().decode()
assert len([l + "\n" for l in content.split("\n") if l]) == 2
# assert VRT exists
with rasterio.open(os.path.join(mp.config.output.path, "%s.vrt" % zoom)) as src:
assert src.read().any()
with mapchete.open(gtiff_s3.dict) as mp:
# write output data
mp.batch_process(zoom=zoom)
# generate indexes and check
gen_indexes_and_check()
# generate indexes again and assert nothing has changed
gen_indexes_and_check()
def test_vrt(mp_tmpdir, cleantopo_br):
zoom = 8
with mapchete.open(
dict(cleantopo_br.dict, zoom_levels=dict(min=0, max=zoom))
) as mp:
# generate output
mp.batch_process(zoom=zoom)
# generate index
list(
zoom_index_gen(
mp=mp,
zoom=zoom,
out_dir=mp.config.output.path,
vrt=True,
)
)
output_tiles = list(
mp.config.output_pyramid.tiles_from_bounds(
mp.config.bounds_at_zoom(zoom=zoom), zoom=zoom
)
)
bounds = (
min([t.left for t in output_tiles]),
min([t.bottom for t in output_tiles]),
max([t.right for t in output_tiles]),
max([t.top for t in output_tiles]),
)
# bounds = mp.config.effective_bounds
vrt_index = os.path.join(mp.config.output.path, "%s.vrt" % zoom)
with rasterio.open(vrt_index) as vrt:
assert vrt.driver == "VRT"
assert vrt.dtypes[0] == "uint16"
assert vrt.meta["dtype"] == "uint16"
assert vrt.count == 1
assert vrt.nodata == 0
assert vrt.bounds == bounds
vrt_data = vrt.read()
assert vrt_data.any()
# generate a VRT using GDAL and compare
out_dir = os.path.join(mp_tmpdir, "cleantopo_br")
temp_vrt = os.path.join(out_dir, str(zoom) + "_gdal.vrt")
gdalbuildvrt = "gdalbuildvrt %s %s/%s/*/*.tif > /dev/null" % (
temp_vrt,
out_dir,
zoom,
)
os.system(gdalbuildvrt)
with rasterio.open(temp_vrt, "r") as gdal_vrt:
assert gdal_vrt.dtypes[0] == "uint16"
assert gdal_vrt.meta["dtype"] == "uint16"
assert gdal_vrt.count == 1
assert gdal_vrt.nodata == 0
assert gdal_vrt.bounds == bounds
gdal_vrt_data = gdal_vrt.read()
assert np.array_equal(vrt_data, gdal_vrt_data)
# make sure handling an existing VRT works
with mapchete.open(
dict(cleantopo_br.dict, zoom_levels=dict(min=0, max=zoom))
) as mp:
# generate output
mp.batch_process(zoom=zoom)
# generate index
list(
zoom_index_gen(
mp=mp,
zoom=zoom,
out_dir=mp.config.output.path,
vrt=True,
)
)
def test_vrt_mercator(mp_tmpdir, cleantopo_br_mercator):
zoom = 8
with mapchete.open(
dict(cleantopo_br_mercator.dict, zoom_levels=dict(min=0, max=zoom))
) as mp:
# generate output
mp.batch_process(zoom=zoom)
# generate index
list(
zoom_index_gen(
mp=mp,
zoom=zoom,
out_dir=mp.config.output.path,
vrt=True,
)
)
output_tiles = list(
mp.config.output_pyramid.tiles_from_bounds(
mp.config.bounds_at_zoom(zoom=zoom), zoom=zoom
)
)
bounds = (
min([t.left for t in output_tiles]),
min([t.bottom for t in output_tiles]),
max([t.right for t in output_tiles]),
max([t.top for t in output_tiles]),
)
# bounds = mp.config.effective_bounds
vrt_index = os.path.join(mp.config.output.path, "%s.vrt" % zoom)
with rasterio.open(vrt_index) as vrt:
assert vrt.driver == "VRT"
assert vrt.dtypes[0] == "uint16"
assert vrt.meta["dtype"] == "uint16"
assert vrt.count == 1
assert vrt.nodata == 0
for vrt_b, b in zip(vrt.bounds, bounds):
assert round(vrt_b, 6) == round(b, 6)
vrt_data = vrt.read()
assert vrt_data.any()
# generate a VRT using GDAL and compare
out_dir = os.path.join(mp_tmpdir, "cleantopo_br_mercator")
temp_vrt = os.path.join(out_dir, str(zoom) + "_gdal.vrt")
gdalbuildvrt = "gdalbuildvrt %s %s/%s/*/*.tif > /dev/null" % (
temp_vrt,
out_dir,
zoom,
)
os.system(gdalbuildvrt)
with rasterio.open(temp_vrt, "r") as gdal_vrt:
assert gdal_vrt.dtypes[0] == "uint16"
assert gdal_vrt.meta["dtype"] == "uint16"
assert gdal_vrt.count == 1
assert gdal_vrt.nodata == 0
for vrt_b, b in zip(vrt.bounds, bounds):
assert round(vrt_b, 6) == round(b, 6)
gdal_vrt_data = gdal_vrt.read()
assert np.array_equal(vrt_data, gdal_vrt_data)
assert gdal_vrt_data.any()
# make sure handling an existing VRT works
with mapchete.open(
dict(cleantopo_br_mercator.dict, zoom_levels=dict(min=0, max=zoom))
) as mp:
# generate output
mp.batch_process(zoom=zoom)
# generate index
list(
zoom_index_gen(
mp=mp,
zoom=zoom,
out_dir=mp.config.output.path,
vrt=True,
)
)
|
89589
|
from math import sqrt, pi as PI
import tensorflow as tf
from .feature_extraction import feature_extraction
class FeaturesTest(tf.test.TestCase):
def test_features(self):
image = tf.constant([
[[255, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [255, 255, 255]],
[[255, 255, 255], [255, 255, 255], [255, 255, 255]],
])
segmentation = tf.constant([
[0, 0, 0],
[0, 0, 1],
[1, 1, 1],
], dtype=tf.int32)
M = [[5, 2, 2, 2],
[4, 1, 1, 1],
[6, 1, 1, 1],
[10, 1, 1, 1]]
bbox = [2, 3]
convex_area = 5
perimeter = 4 + (1 + sqrt(2)) / 2
Mw = [[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
mean = [51, 0, 0]
minimum = [0, 0, 0]
maximum = [255, 0, 0]
with self.test_session() as sess:
f = feature_extraction(segmentation, image).eval()[0]
self.assertEqual(len(f), 45)
self.assertAllEqual(f[0:4], M[0])
self.assertAllEqual(f[4:8], M[1])
self.assertAllEqual(f[8:12], M[2])
self.assertAllEqual(f[12:16], M[3])
self.assertAllEqual(f[16:18], bbox)
self.assertEqual(f[18], convex_area)
self.assertEqual(round(f[19]*100), round(perimeter*100))
self.assertAllEqual(f[20:24], Mw[0])
self.assertAllEqual(f[24:28], Mw[1])
self.assertAllEqual(f[28:32], Mw[2])
self.assertAllEqual(f[32:36], Mw[3])
self.assertAllEqual(f[36:39], mean[0:3])
self.assertAllEqual(f[39:42], minimum)
self.assertAllEqual(f[42:45], maximum)
|
89625
|
import json
import jinja2
def tojson(obj, **kwargs):
return jinja2.Markup(json.dumps(obj, **kwargs))
|
89642
|
version https://git-lfs.github.com/spec/v1
oid sha256:1eaa715583d925005051963b4060c72e79f474a068a74ee5a9831c55e6dff8ac
size 713
|
89686
|
import enum
import pathlib
from typing import DefaultDict, Dict, List, Optional, Sequence, Tuple
from pysen import ComponentBase
from pysen.command import CommandBase
from pysen.diagnostic import Diagnostic
from pysen.reporter import Reporter
from pysen.runner_options import PathContext, RunOptions
from pysen.setting import SettingFile
class Operation(enum.Enum):
ADD = "+"
MUL = "*"
class FakeCommand(CommandBase):
def __init__(
self, coef: int, op: Operation, ref: List[float], options: RunOptions
) -> None:
self.coef = coef
self.op = op
self.ref = ref
self.options = options
assert len(ref) == 1
@property
def name(self) -> str:
return f"{self.op.value} {self.coef}"
def __call__(self, reporter: Reporter) -> int:
value = self.ref[0]
coef = float(self.coef)
if self.op == Operation.ADD:
value += coef
elif self.op == Operation.MUL:
value *= coef
else:
raise AssertionError(f"invalid op: {self.op}")
self.ref[0] = value
if value >= 0.0:
return 0
else:
if self.options.require_diagnostics:
reporter.report_diagnostics(
[Diagnostic(pathlib.Path(".").resolve(), message="")]
)
return 1
class FakeComponent(ComponentBase):
def __init__(
self,
name: str,
ops: Dict[str, Tuple[int, Operation]],
expected_base_dir: Optional[pathlib.Path],
expected_settings_dir: Optional[pathlib.Path],
ref: List[float],
) -> None:
self._name = name
self._ops = ops
self._expected_base_dir = expected_base_dir
self._expected_settings_dir = expected_settings_dir
self._ref = ref
assert len(ref) == 1
@property
def name(self) -> str:
return self._name
def export_settings(
self,
paths: PathContext,
files: DefaultDict[str, SettingFile],
) -> None:
if self._expected_base_dir is not None:
assert paths.base_dir == self._expected_base_dir
if self._expected_settings_dir is not None:
assert paths.settings_dir == self._expected_settings_dir
for name, op in self._ops.items():
fname = f"{name}.yaml"
setting_file = files[fname]
setting_file.set_section((self.name,), {"coef": op[0], "op": op[1].value})
@property
def targets(self) -> Sequence[str]:
return list(self._ops.keys())
def create_command(
self, target: str, paths: PathContext, options: RunOptions
) -> CommandBase:
if self._expected_base_dir is not None:
assert paths.base_dir == self._expected_base_dir
if self._expected_settings_dir is not None:
assert paths.settings_dir == self._expected_settings_dir
op = self._ops[target]
return FakeCommand(op[0], op[1], self._ref, options)
|
89719
|
import os
import re
import torch
CHECKPOINTS_DIR = 'checkpoints'
def find_last_checkpoint_epoch(savedir, prefix = None):
root = os.path.join(savedir, CHECKPOINTS_DIR)
if not os.path.exists(root):
return -1
if prefix is None:
r = re.compile(r'(\d+)_.*')
else:
r = re.compile(r'(\d+)_' + re.escape(prefix) + '_.*')
last_epoch = -1
for fname in os.listdir(root):
m = r.match(fname)
if m:
epoch = int(m.groups()[0])
last_epoch = max(last_epoch, epoch)
return last_epoch
def get_save_path(savedir, name, epoch, mkdir = False):
if epoch is None:
fname = '%s.pth' % (name)
root = savedir
else:
fname = '%04d_%s.pth' % (epoch, name)
root = os.path.join(savedir, CHECKPOINTS_DIR)
result = os.path.join(root, fname)
if mkdir:
os.makedirs(root, exist_ok = True)
return result
def save(named_dict, savedir, prefix, epoch = None):
for (k,v) in named_dict.items():
save_path = get_save_path(
savedir, prefix + '_' + k, epoch, mkdir = True
)
if isinstance(v, torch.nn.DataParallel):
torch.save(v.module.state_dict(), save_path)
else:
torch.save(v.state_dict(), save_path)
def load(named_dict, savedir, prefix, epoch, device):
for (k,v) in named_dict.items():
load_path = get_save_path(
savedir, prefix + '_' + k, epoch, mkdir = False
)
if isinstance(v, torch.nn.DataParallel):
v.module.load_state_dict(
torch.load(load_path, map_location = device)
)
else:
v.load_state_dict(
torch.load(load_path, map_location = device)
)
|
89727
|
class Solution:
"""
@param A: an array
@return: divide the array into 3 non-empty parts
"""
def threeEqualParts(self, A):
total = A.count(1)
if total == 0:
return [0, 2]
if total % 3:
return [-1, -1]
k = total // 3
count = 0
for i, a in enumerate(A):
if a == 1:
count += 1
if count == 1:
start = i
elif count == k + 1:
mid = i
elif count == k * 2 + 1:
end = i
break
while end < len(A) and A[start] == A[mid] == A[end]:
start += 1
mid += 1
end += 1
if end == len(A):
return [start - 1, mid]
else:
return [-1, -1]
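# Illustrative example (added for clarity, not part of the original solution). For
# A = [1, 0, 1, 0, 1] the array splits into [1], [0, 1], [0, 1]; each part encodes the
# binary value 1, so the returned indices are [0, 3].
if __name__ == '__main__':
    print(Solution().threeEqualParts([1, 0, 1, 0, 1]))  # [0, 3]
    print(Solution().threeEqualParts([1, 1, 0, 1, 1]))  # [-1, -1] (four 1s cannot split into three equal parts)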
|
89745
|
from picamera.array import PiRGBArray
import picamera
from picamera import PiCamera
import time
import cv2
def CaptureImage(camera, rawCapture):
print('Capturing frames...')
print('Press Ctrl-C to end')
try:
# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# grab the raw NumPy array representing the image
image = frame.array
cv2.imwrite('driving_frames/'+str(time.time()) + '.jpg', image)
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
except (KeyboardInterrupt, picamera.exc.PiCameraValueError):
print('Stopped')
pass
if __name__ == "__main__":
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 320)
camera.framerate = 30
rawCapture = PiRGBArray(camera, size=(640, 320))
# Set ISO to the desired value
camera.iso = 800
# Wait for the automatic gain control to settle
time.sleep(2)
# Now fix the values
camera.shutter_speed = camera.exposure_speed
camera.exposure_mode = 'off'
g = camera.awb_gains
camera.awb_mode = 'off'
camera.awb_gains = g
# allow the camera to warmup
time.sleep(0.1)
CaptureImage(camera, rawCapture)
|
89769
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_transformers.modeling_bert import BertPreTrainedModel, BertModel
from configs.basic_config import config as basic_config
class BertFCForMultiLable(BertPreTrainedModel):
def __init__(self, config):
super(BertFCForMultiLable, self).__init__(config)
# bert = BertModel.from_pretrained(bert_model_path)
self.bert = BertModel(config)
for param in self.bert.parameters():
param.requires_grad = True
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.apply(self.init_weights)
def forward(self, input_ids,
attention_mask=None, token_type_ids=None, head_mask=None):
"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
of the semantic content of the input, you're often better with averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
return logits
def unfreeze(self, start_layer, end_layer):
def children(m):
return m if isinstance(m, (list, tuple)) else list(m.children())
def set_trainable_attr(m, b):
m.trainable = b
for p in m.parameters():
p.requires_grad = b
def apply_leaf(m, f):
c = children(m)
if isinstance(m, nn.Module):
f(m)
if len(c) > 0:
for l in c:
apply_leaf(l, f)
def set_trainable(l, b):
apply_leaf(l, lambda m: set_trainable_attr(m, b))
# You can unfreeze the last layer of bert
# by calling set_trainable(model.bert.encoder.layer[23], True)
set_trainable(self.bert, False)
for i in range(start_layer, end_layer+1):
set_trainable(self.bert.encoder.layer[i], True)
class BertCNNForMultiLabel(BertPreTrainedModel):
def __init__(self, config):
super(BertCNNForMultiLabel, self).__init__(config)
config.num_filters = basic_config.cnn.num_filters
config.filter_sizes = basic_config.cnn.filter_sizes
config.dropout = basic_config.dropout
self.bert = BertModel(config)
for param in self.bert.parameters():
param.requires_grad = True
self.convs = nn.ModuleList(
[nn.Conv2d(1, config.num_filters, (k, config.hidden_size))
for k in config.filter_sizes])
self.dropout = nn.Dropout(config.dropout)
self.fc_cnn = nn.Linear(config.num_filters *
len(config.filter_sizes), config.num_labels)
def conv_and_pool(self, x, conv):
x = F.relu(conv(x)).squeeze(3)
x = F.max_pool1d(x, x.size(2)).squeeze(2)
return x
def forward(self, input_ids,
attention_mask=None, token_type_ids=None, head_mask=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask)
encoder_out, text_cls = outputs
out = encoder_out.unsqueeze(1)
out = torch.cat([self.conv_and_pool(out, conv)
for conv in self.convs], 1)
out = self.dropout(out)
out = self.fc_cnn(out)
return out
class BertRCNNForMultiLabel(BertPreTrainedModel):
def __init__(self, config):
super(BertRCNNForMultiLabel, self).__init__(config)
config.rnn_hidden = basic_config.rcnn.rnn_hidden
config.num_layers = basic_config.rcnn.num_layers
config.kernel_size = basic_config.rcnn.kernel_size
config.lstm_dropout = basic_config.rcnn.dropout
self.bert = BertModel(config)
for param in self.bert.parameters():
param.requires_grad = True
self.lstm = nn.LSTM(config.hidden_size,
config.rnn_hidden,
config.num_layers,
bidirectional=True,
batch_first=True,
dropout=config.lstm_dropout)
self.maxpool = nn.MaxPool1d(config.kernel_size)
self.fc = nn.Linear(config.rnn_hidden * 2 +
config.hidden_size, config.num_labels)
def forward(self, input_ids,
attention_mask=None, token_type_ids=None, head_mask=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask)
encoder_out, text_cls = outputs
out, _ = self.lstm(encoder_out)
out = torch.cat((encoder_out, out), 2)
out = F.relu(out)
out = out.permute(0, 2, 1)
out = self.maxpool(out).squeeze()
out = self.fc(out)
return out
class BertDPCNNForMultiLabel(BertPreTrainedModel):
def __init__(self, config):
super(BertDPCNNForMultiLabel, self).__init__(config)
config.kernel_size = basic_config.dpcnn.kernel_size
config.num_filters = basic_config.dpcnn.num_filters
self.bert = BertModel(config)
for param in self.bert.parameters():
param.requires_grad = True
self.conv_region = nn.Conv2d(
1, config.num_filters, (3, config.hidden_size), stride=1)
self.conv = nn.Conv2d(config.num_filters,
config.num_filters, (3, 1), stride=1)
self.max_pool = nn.MaxPool2d(kernel_size=(3, 1), stride=2)
self.padding1 = nn.ZeroPad2d((0, 0, 1, 1)) # top bottom
self.padding2 = nn.ZeroPad2d((0, 0, 0, 1)) # bottom
self.relu = nn.ReLU()
self.fc = nn.Linear(config.num_filters, config.num_labels)
def forward(self, input_ids,
attention_mask=None, token_type_ids=None, head_mask=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask)
encoder_out, text_cls = outputs
x = encoder_out.unsqueeze(1) # [batch_size, 1, seq_len, embed]
x = self.conv_region(x) # [batch_size, num_filters, seq_len-3+1, 1]
x = self.padding1(x) # [batch_size, num_filters, seq_len, 1]
x = self.relu(x)
x = self.conv(x) # [batch_size, num_filters, seq_len-3+1, 1]
x = self.padding1(x) # [batch_size, num_filters, seq_len, 1]
x = self.relu(x)
x = self.conv(x) # [batch_size, num_filters, seq_len-3+1, 1]
while x.size()[2] > 2:
x = self._block(x)
x = x.squeeze() # [batch_size, num_filters]
x = self.fc(x)
return x
def _block(self, x):
x = self.padding2(x)
px = self.max_pool(x)
x = self.padding1(px)
x = F.relu(x)
x = self.conv(x)
x = self.padding1(x)
x = F.relu(x)
x = self.conv(x)
x = x + px # short cut
return x
|
89777
|
import os
from homeassistant.core import HomeAssistant
import pytest
from custom_components.hacs.websocket import (
acknowledge_critical_repository,
get_critical_repositories,
hacs_config,
hacs_removed,
hacs_repositories,
hacs_repository,
hacs_repository_data,
hacs_settings,
hacs_status,
)
@pytest.mark.asyncio
async def test_check_local_path(hacs, connection, tmpdir):
hacs.hass = HomeAssistant()
os.makedirs(tmpdir, exist_ok=True)
get_critical_repositories(hacs.hass, connection, {"id": 1})
hacs_config(hacs.hass, connection, {"id": 1})
hacs_removed(hacs.hass, connection, {"id": 1})
hacs_repositories(hacs.hass, connection, {"id": 1})
hacs_repository(hacs.hass, connection, {"id": 1})
hacs_repository_data(hacs.hass, connection, {"id": 1})
hacs_settings(hacs.hass, connection, {"id": 1})
hacs_status(hacs.hass, connection, {"id": 1})
acknowledge_critical_repository(hacs.hass, connection, {"repository": "test/test", "id": 1})
|
89785
|
from sqlalchemy import Column, DateTime, Enum, Integer, String
from sqlalchemy.sql.schema import UniqueConstraint
from virtool.pg.base import Base
from virtool.samples.models import ArtifactType
class SampleArtifactCache(Base):
"""
SQL model to store a cached sample artifact
"""
__tablename__ = "sample_artifacts_cache"
__table_args__ = (UniqueConstraint("key", "name", "sample"),)
id = Column(Integer, primary_key=True)
key = Column(String, nullable=False)
name = Column(String, nullable=False)
name_on_disk = Column(String)
sample = Column(String, nullable=False)
size = Column(Integer)
type = Column(Enum(ArtifactType), nullable=False)
uploaded_at = Column(DateTime)
class SampleReadsCache(Base):
"""
SQL model to store cached sample reads files
"""
__tablename__ = "sample_reads_cache"
__table_args__ = (UniqueConstraint("key", "name", "sample"),)
id = Column(Integer, primary_key=True)
key = Column(String, nullable=False)
name = Column(String(length=13), nullable=False)
name_on_disk = Column(String, nullable=False)
sample = Column(String, nullable=False)
size = Column(Integer)
uploaded_at = Column(DateTime)
|
89792
|
import re
from sklearn.utils.validation import check_is_fitted
from gensim.models.word2vec import Word2Vec
import sys
from owl2vec_star.rdf2vec.walkers.random import RandomWalker
import numpy as np
import multiprocessing
class RDF2VecTransformer():
"""Project random walks or subtrees in graphs into embeddings, suited
for classification.
Parameters
----------
vector_size: int (default: 500)
The dimension of the embeddings.
max_path_depth: int (default: 1)
The maximum number of hops to take in the knowledge graph. Due to the
fact that we transform s -(p)-> o to s -> p -> o, this will be
translated to `2 * max_path_depth` hops internally.
wl: bool (default: True)
Whether to use Weisfeiler-Lehman embeddings
wl_iterations: int (default: 4)
The number of Weisfeiler-Lehman iterations. Ignored if `wl` is False.
walks_per_graph: int (default: infinity)
The maximum number of walks to extract from the neighborhood of
each instance.
n_jobs: int (default: 1)
gensim.models.Word2Vec parameter.
window: int (default: 5)
gensim.models.Word2Vec parameter.
sg: int (default: 1)
gensim.models.Word2Vec parameter.
max_iter: int (default: 10)
gensim.models.Word2Vec parameter.
negative: int (default: 25)
gensim.models.Word2Vec parameter.
min_count: int (default: 1)
gensim.models.Word2Vec parameter.
Attributes
----------
model: gensim.models.Word2Vec
The fitted Word2Vec model. Embeddings can be accessed through
`self.model.wv.get_vector(str(instance))`.
"""
def __init__(self, vector_size=500, walkers=RandomWalker(2, float('inf')),
window=5, sg=1, max_iter=10, negative=25, min_count=1):
self.vector_size = vector_size
self.walkers = walkers
self.n_jobs = int(multiprocessing.cpu_count()/2) if int(multiprocessing.cpu_count()/2) > 1 else 1
self.window = window
self.sg = sg
self.max_iter = max_iter
self.negative = negative
self.min_count = min_count
def fit(self, graph, instances):
"""Fit the embedding network based on provided instances.
Parameters
----------
graphs: graph.KnowledgeGraph
The graph from which we will extract neighborhoods for the
provided instances. You can create a `graph.KnowledgeGraph` object
from an `rdflib.Graph` object by using a converter method.
instances: array-like
The instances for which an embedding will be created. It is important
to note that the test instances should be passed to the fit method
as well. Due to RDF2Vec being unsupervised, there is no
label leakage.
-------
"""
self.walks_ = []
for walker in self.walkers:
self.walks_ += list(walker.extract(graph, instances))
print('Extracted {} walks for {} instances!'.format(len(self.walks_), len(instances)))
sentences = [list(map(str, x)) for x in self.walks_]
self.model_ = Word2Vec(sentences, size=self.vector_size,
window=self.window, workers=self.n_jobs, sg=self.sg, iter=self.max_iter,
negative=self.negative, min_count=self.min_count, seed=42)
def transform(self, instances):
"""Construct a feature vector for the provided instances.
Parameters
----------
graphs: graph.KnowledgeGraph
The graph from which we will extract neighborhoods for the
provided instances. You can create a `graph.KnowledgeGraph` object
from an `rdflib.Graph` object by using a converter method.
instances: array-like
The instances for which an embedding will be created. These
instances must have been passed to the fit method as well,
or their embedding will not exist in the model vocabulary.
Returns
-------
embeddings: array-like
The embeddings of the provided instances.
"""
check_is_fitted(self, ['model_'])
feature_vectors = []
for instance in instances:
feature_vectors.append(self.model_.wv.get_vector(str(instance)))
return feature_vectors
def fit_transform(self, graph, instances):
"""First apply fit to create a Word2Vec model and then generate
embeddings for the provided instances.
Parameters
----------
graphs: graph.KnowledgeGraph
The graph from which we will extract neighborhoods for the
provided instances. You can create a `graph.KnowledgeGraph` object
from an `rdflib.Graph` object by using a converter method.
instances: array-like
The instances for which an embedding will be created.
Returns
-------
embeddings: array-like
The embeddings of the provided instances.
"""
self.fit(graph, instances)
return self.transform(instances)
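# Illustrative usage sketch (added for clarity, not part of the original module). It runs
# fit_transform with a minimal stand-in walker so no knowledge graph is required;
# `_ToyWalker` is hypothetical, and real usage would pass e.g. RandomWalker instances
# together with a graph.KnowledgeGraph. Assumes gensim 3.x, matching the Word2Vec
# keyword arguments used above (`size`, `iter`).
if __name__ == '__main__':
    class _ToyWalker:
        def extract(self, graph, instances):
            # Emit one short "walk" (subject, predicate, object) per instance.
            return [(inst, 'relatedTo', 'thing') for inst in instances]

    transformer = RDF2VecTransformer(vector_size=16, walkers=[_ToyWalker()])
    embeddings = transformer.fit_transform(graph=None, instances=['ent1', 'ent2'])
    print(len(embeddings), len(embeddings[0]))  # 2 instances, each a 16-dimensional vector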
|
89808
|
import tvm
import numpy as np
from tvm import relay
from tvm.relay.ir_pass import infer_type
from tvm.relay.ir_builder import IRBuilder, func_type
from tvm.relay.ir_builder import scalar_type, convert, tensor_type
from tvm.relay.env import Environment
def assert_has_type(expr, typ, env=Environment({})):
checked_expr = infer_type(env, expr)
checked_type = checked_expr.checked_type
if checked_type != typ:
raise RuntimeError("Type mismatch %s vs %s" % (
checked_type, typ))
def test_cmp_type():
for op in (relay.greater,
relay.greater_equal,
relay.less,
relay.less_equal,
relay.equal,
relay.not_equal):
ib = relay.ir_builder.IRBuilder()
x = ib.param("x", relay.TensorType((10, 4), "float32"))
y = ib.param("y", relay.TensorType((5, 10, 1), "float32"))
with ib.function(x, y) as func:
ib.ret(op(x.var, y.var))
ib.ret(func)
func = relay.ir_pass.infer_type(ib.env, func.to_func())
ftype = func.checked_type
assert ftype.ret_type == relay.TensorType((5, 10, 4), "uint1")
def test_binary_broadcast():
for op in [relay.right_shift,
relay.left_shift,
relay.maximum]:
ib = relay.ir_builder.IRBuilder()
x = ib.param("x", relay.TensorType((10, 4), "int32"))
y = ib.param("y", relay.TensorType((5, 10, 1), "int32"))
with ib.function(x, y) as func:
ib.ret(op(x.var, y.var))
ib.ret(func)
func = relay.ir_pass.infer_type(ib.env, func.to_func())
ftype = func.checked_type
assert ftype.ret_type == relay.TensorType((5, 10, 4), "int32")
def test_binary_op():
def check_binary_op(opfunc):
"""
Program:
fn (x, y) {
return x <op> y;
}
"""
b = IRBuilder()
x = b.param('x', tensor_type(5, 5, 5))
y = b.param('y', tensor_type(5, 5, 5))
with b.function(x, y) as func:
b.ret(opfunc(x.var, y.var))
b.ret(func)
prog, env = b.get()
ttype = tensor_type(5, 5, 5)
expected_ty = func_type([ttype, ttype], ttype)
assert_has_type(func.to_func(), expected_ty)
for opfunc in [relay.pow]:
check_binary_op(opfunc)
def test_binary_broadcast_op():
def check_binary_broadcast_op(opfunc):
"""
Program:
fn (x: Tensor[(10, 4), f32], y: Tensor[(5, 10, 1), f32]) -> Tensor[(5, 10, 4), f32] {
return x <op> y;
}
"""
b = IRBuilder()
x = b.param('x', tensor_type(10, 4))
y = b.param('y', tensor_type(5, 10, 1))
with b.function(x, y) as func:
b.ret(opfunc(x.var, y.var))
b.ret(func)
prog, env = b.get()
expected_ty = func_type([tensor_type(10, 4), tensor_type(5, 10, 1)],
tensor_type(5, 10, 4))
assert_has_type(func.to_func(), expected_ty)
for opfunc in [relay.pow]:
check_binary_broadcast_op(opfunc)
def test_cmp_type():
for op in (relay.greater,
relay.greater_equal,
relay.less,
relay.less_equal,
relay.equal,
relay.not_equal):
ib = relay.ir_builder.IRBuilder()
x = ib.param("x", relay.TensorType((10, 4), "float32"))
y = ib.param("y", relay.TensorType((5, 10, 1), "float32"))
with ib.function(x, y) as func:
ib.ret(op(x.var, y.var))
ib.ret(func)
func = relay.ir_pass.infer_type(ib.env, func.to_func())
ftype = func.checked_type
assert ftype.ret_type == relay.TensorType((5, 10, 4), "uint1")
def test_binary_broadcast():
for op in [relay.right_shift,
relay.left_shift,
relay.maximum,
relay.minimum]:
ib = relay.ir_builder.IRBuilder()
x = ib.param("x", relay.TensorType((10, 4), "int32"))
y = ib.param("y", relay.TensorType((5, 10, 1), "int32"))
with ib.function(x, y) as func:
ib.ret(op(x.var, y.var))
ib.ret(func)
func = relay.ir_pass.infer_type(ib.env, func.to_func())
ftype = func.checked_type
assert ftype.ret_type == relay.TensorType((5, 10, 4), "int32")
if __name__ == "__main__":
test_cmp_type()
test_binary_broadcast()
test_binary_op()
test_binary_broadcast_op()
|
89880
|
import tensorflow as tf
k = tf.constant([
[1, 0, 1],
[2, 1, 0],
[0, 0, 1]
], dtype=tf.float32, name='k')
i = tf.constant([
[4, 3, 1, 0],
[2, 1, 0, 1],
[1, 2, 4, 1],
[3, 1, 0, 2]
], dtype=tf.float32, name='i')
kernel = tf.reshape(k, [3, 3, 1, 1], name='kernel')
image = tf.reshape(i, [1, 4, 4, 1], name='image')
res = tf.squeeze(tf.nn.conv2d(image, kernel, [1, 1, 1, 1], "VALID"))
with tf.Session() as sess:
    print(sess.run(res))
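# Worked result for reference: with VALID padding and stride 1, tf.nn.conv2d
# cross-correlates the 4x4 image with the 3x3 kernel, so the printed output is the 2x2 matrix
#   [[14.  6.]
#    [ 6. 12.]]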
|
89932
|
from rest_framework import serializers
from django.contrib.auth.models import User
from oms_gallery.models import Photo, Gallery
from oms_cms.backend.pages.models import Pages, BlockPage
class PagesListSerializer(serializers.ModelSerializer):
"""Сериализация всех страниц"""
class Meta:
model = Pages
fields = '__all__'
class ImageBlockPageSerializer(serializers.HyperlinkedModelSerializer):
"""Сериализация изображения блока информации для старницы"""
class Meta:
model = Photo
fields = ('name', 'image', 'captions', 'create_date', 'slug')
class GalleryBlockPageSerializer(serializers.HyperlinkedModelSerializer):
"""Сериализация галлереи блока информации для старницы"""
images = ImageBlockPageSerializer(many=True, read_only=True)
class Meta:
model = Photo
fields = ('name', 'captions', 'create_date', 'slug', 'images')
class PageBlockPageSerializer(serializers.ModelSerializer):
"""Сериализация страницы блока информации для старницы"""
class Meta:
model = Pages
fields = ('id',)
class BlockPageSerializer(serializers.HyperlinkedModelSerializer):
"""Сериализация блока информации для старницы"""
page = PageBlockPageSerializer(read_only=True)
image = ImageBlockPageSerializer(read_only=True)
slider = GalleryBlockPageSerializer(read_only=True)
class Meta:
model = BlockPage
fields = ('page', 'id', 'title', 'sub_title', 'description',
'video_link', 'video_up', 'name', 'sort',
'image', 'slider')
class PagesSerializerRetrieve(serializers.ModelSerializer):
"""Сериализация отдельной страницы"""
page_blocks = BlockPageSerializer(many=True, read_only=True)
class Meta:
model = Pages
fields = ('id', 'lang', 'slug', 'title', 'sub_title',
'text', 'edit_date', 'published_date',
'published', 'template', 'registration_required',
'page_blocks')
class BlockPageCreateSerializer(serializers.ModelSerializer):
"""Сериализация блока информации для старницы (создание)"""
class Meta:
model = BlockPage
fields = '__all__'
|
89968
|
import os
# 'face_cluster' is the directory component joined to the current working directory.
FACE_DATA_PATH = os.path.join(os.getcwd(),'face_cluster')
ENCODINGS_PATH = os.path.join(os.getcwd(),'encodings.pickle')
CLUSTERING_RESULT_PATH = os.getcwd()
|
90011
|
def celciusToFahrenheit(celcius: float, ndigits: int = 2)->float:
"""
Convert a given value from Celsius to Fahrenheit and round it to 2 decimal places.
Wikipedia reference: https://en.wikipedia.org/wiki/Celsius
Wikipedia reference: https://en.wikipedia.org/wiki/Fahrenheit
"""
return round((float(celcius) *9 / 5) + 32 , ndigits)
def celciusToKelvin(celcius: float, ndigits: int = 2)->float:
"""
Convert a given value from Celsius to Kelvin and round it to 2 decimal places.
Wikipedia reference: https://en.wikipedia.org/wiki/Celsius
Wikipedia reference: https://en.wikipedia.org/wiki/Kelvin
"""
return round(float(celcius) + 273.15, ndigits)
def celciusToRankie(celcius:float, ndigits: int = 2)->float:
"""
Convert a given value from Celsius to Rankine and round it to 2 decimal places.
Wikipedia reference: https://en.wikipedia.org/wiki/Celsius
Wikipedia reference: https://en.wikipedia.org/wiki/Rankine_scale
"""
return round((float(celcius)* 9 / 5) + 491.67, ndigits)
def fahrenheitToCelcius(fahrenheit:float, ndigits = 2)->float:
"""
Convert a given value from Fahrenheit to Celsius and round it to 2 decimal places.
Wikipedia reference: https://en.wikipedia.org/wiki/Fahrenheit
Wikipedia reference: https://en.wikipedia.org/wiki/Celsius
"""
return round((float(fahrenheit) - 32) * 5 / 9, ndigits)
def fahrenheitToKelvin(fahrenheit:float, ndigits = 2)->float:
"""
Convert a given value from Fahrenheit to Kelvin and round it to 2 decimal places.
Wikipedia reference: https://en.wikipedia.org/wiki/Fahrenheit
Wikipedia reference: https://en.wikipedia.org/wiki/Kelvin
"""
    return round(((float(fahrenheit) - 32) * 5 / 9) + 273.15, ndigits)
def fahrenheitToRankie(fahrenheit:float, ndigits = 2)->float:
"""
Convert a given value from Fahrenheit to Rankine and round it to 2 decimal places.
Wikipedia reference: https://en.wikipedia.org/wiki/Fahrenheit
Wikipedia reference: https://en.wikipedia.org/wiki/Rankine_scale
"""
    return round(float(fahrenheit) + 459.67, ndigits)
def kelvinToCelcius(kelvin:float, ndigits = 2)->float:
"""
Convert a given value from Kelvin to Celsius and round it to 2 decimal places.
Wikipedia reference: https://en.wikipedia.org/wiki/Kelvin
Wikipedia reference: https://en.wikipedia.org/wiki/Celsius
"""
return round(float(kelvin) - 273.15, ndigits)
def kelvinToFahrenheit(kelvin:float, ndigits = 2)->float:
"""
Convert a given value from Kelvin to Fahrenheit and round it to 2 decimal places.
Wikipedia reference: https://en.wikipedia.org/wiki/Kelvin
Wikipedia reference: https://en.wikipedia.org/wiki/Fahrenheit
"""
return round(((float(kelvin) - 273.15)* 9 / 5)+32,ndigits)
def kelvinToRankie(kelvin:float, ndigits = 2)->float:
"""
Convert a given value from Kelvin to Rankine and round it to 2 decimal places.
Wikipedia reference: https://en.wikipedia.org/wiki/Kelvin
Wikipedia reference: https://en.wikipedia.org/wiki/Rankine_scale
"""
    return round(float(kelvin) * 9 / 5, ndigits)
def rankieToCelcius(rankie:float, ndigits = 2)->float:
"""
Convert a given value from Rankine to Celsius and round it to 2 decimal places.
Wikipedia reference: https://en.wikipedia.org/wiki/Rankine_scale
Wikipedia reference: https://en.wikipedia.org/wiki/Celsius
"""
return round((float(rankie) - 491.67)* 5 / 9, ndigits)
def rankieToFahrenheit(rankie:float, ndigits = 2) ->float:
"""
Convert a given value from Rankine to Fahrenheit and round it to 2 decimal places.
Wikipedia reference: https://en.wikipedia.org/wiki/Rankine_scale
Wikipedia reference: https://en.wikipedia.org/wiki/Fahrenheit
"""
return round(float(rankie) - 459.67, ndigits)
def rankieToKelvin(rankie:float, ndigits = 2)-> float:
"""
Convert a given value from Rankine to Kelvin and round it to 2 decimal places.
Wikipedia reference: https://en.wikipedia.org/wiki/Rankine_scale
Wikipedia reference: https://en.wikipedia.org/wiki/Kelvin
"""
return round((float(rankie) * 5 / 9), ndigits)
def reamurToKelvin(reamur:float, ndigits = 2) ->float:
return round((float(reamur) * 1.25 + 273.15), ndigits)
def reamurToFahrenheit(reamur: float, ndigits = 2)->float:
return round((float(reamur) * 2.25 + 32 ), ndigits)
def reamurToCelcius(reamur: float, ndigits = 2)-> float:
return round((float(reamur)* 1.25), ndigits)
def reamurToRankie(reamur: float, ndigits = 2)->float:
return round((float(reamur)* 2.25 + 32 + 459.67), ndigits)
if __name__ == '__main__':
import doctest
doctest.testmod()
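    # Quick sanity checks (a minimal sketch; the expected values follow directly from the formulas above)
    assert celciusToFahrenheit(100) == 212.0
    assert fahrenheitToCelcius(212) == 100.0
    assert celciusToKelvin(0) == 273.15
    assert rankieToKelvin(491.67) == 273.15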
|
90016
|
from __future__ import absolute_import
import json
import os
import pytest
import requests
import time
from rancher_gen.compat import b64encode
@pytest.fixture(scope='session')
def stack_service(request):
host = os.getenv('RANCHER_HOST')
port = int(os.getenv('RANCHER_PORT', 80))
access_key = os.getenv('RANCHER_ACCESS_KEY')
secret_key = os.getenv('RANCHER_SECRET_KEY')
api_token = b64encode("{0}:{1}".format(access_key, secret_key))
with open(os.path.join(os.path.dirname(__file__), 'compose.yaml')) as fh:
compose_string = fh.read()
# Create the stack and service
headers = {
'Authorization': 'Basic {0}'.format(api_token)
}
url = 'http://{0}:{1}/v1/environments'.format(host, port)
stack_name = 'teststack'
res = requests.post(url, headers=headers, data={
'name': stack_name,
"dockerCompose": compose_string,
"startOnCreate": True
})
stack = res.json()
# Wait for stack to be active
state = stack['state']
url = '{0}?name={1}'.format(url, stack_name)
while state != 'active':
res = requests.get(url, headers=headers)
stack = res.json()['data'][0]
state = stack['state']
time.sleep(1)
# Wait for services to be active
services = []
for i in [1, 2]:
        url = '{0}?name={1}'.format(stack['links']['services'], 'hello%d' % i)
        state = ''
        service = None
        while state != 'active':
            res = requests.get(url, headers=headers)
            service = res.json()['data'][0]
            state = service['state']
            time.sleep(1)
services.append(service)
    def teardown():
        stack_url = 'http://{0}:{1}/v1/environments/{2}'.format(host, port, stack['id'])
        requests.delete(stack_url, headers=headers)
request.addfinalizer(teardown)
return stack, services
@pytest.fixture(scope='function')
def mock_message(request, stack_service):
stack, services = stack_service
service_id = services[0]['id']
with open(os.path.join(os.path.dirname(__file__), 'mock_msg.json')) as fh:
mock_message = json.loads(fh.read())
instances_link =\
mock_message['data']['resource']['services'][0]['links']['instances']
mock_message['data']['resource']['services'][0]['links']['instances'] =\
instances_link.format(stack['accountId'], services[0]['id'])
return mock_message
|
90029
|
import base64
import time
import requests
import yaml
from sawtooth_sdk.protobuf import batch_pb2
from sawtooth_signing import ParseError, CryptoFactory, create_context
from sawtooth_signing.secp256k1 import Secp256k1PrivateKey
from cli.common.protobuf import payload_pb2
from cli.common import transaction, helper
from cli.common.exceptions import HealthCareException
class HealthCareClient:
def __init__(self, base_url, keyfile=None):
self._base_url = base_url
if keyfile is None:
self._signer = None
return
try:
with open(keyfile) as fd:
private_key_str = fd.read().strip()
except OSError as err:
raise HealthCareException(
'Failed to read private key {}: {}'.format(
keyfile, str(err)))
try:
private_key = Secp256k1PrivateKey.from_hex(private_key_str)
except ParseError as e:
raise HealthCareException(
'Unable to load private key: {}'.format(str(e)))
self._signer = CryptoFactory(create_context('secp256k1')) \
.new_signer(private_key)
def create_clinic(self, name, wait=None):
batch, batch_id = transaction.create_clinic(
txn_signer=self._signer,
batch_signer=self._signer,
name=name)
batch_list = batch_pb2.BatchList(batches=[batch])
# inputs = outputs = helper.make_clinic_address(clinic_pkey=txn_key)
#
# clinic = payload_pb2.CreateClinic(
# public_key=txn_key,
# name=name)
#
# payload = payload_pb2.TransactionPayload(
# payload_type=payload_pb2.TransactionPayload.CREATE_CLINIC,
# create_clinic=clinic)
#
# return self._send_healthcare_txn(txn_key, batch_key, [inputs], [outputs], payload,
# wait=wait,
# auth_user=auth_user,
# auth_password=<PASSWORD>)
return self._send_batches(batch_list=batch_list,
batch_id=batch_id,
wait=wait)
# def create_clinic(self, name, wait=None, auth_user=None, auth_password=<PASSWORD>):
# batch_key = txn_key = self._signer.get_public_key().as_hex()
#
# address = helper.make_clinic_address(clinic_pkey=txn_key)
#
# clinic = payload_pb2.CreateClinic(
# public_key=txn_key,
# name=name)
#
# payload = payload_pb2.TransactionPayload(
# payload_type=payload_pb2.TransactionPayload.CREATE_CLINIC,
# create_clinic=clinic)
#
# return self._send_healthcare_txn(txn_key, batch_key, [address], [address], payload,
# wait=wait,
# auth_user=auth_user,
# auth_password=<PASSWORD>)
def create_doctor(self, name, surname, wait=None):
# batch_key = txn_key = self._signer.get_public_key().as_hex()
#
# address = helper.make_doctor_address(doctor_pkey=txn_key)
#
# doctor = payload_pb2.CreateDoctor(
# public_key=txn_key,
# name=name,
# surname=surname)
#
# payload = payload_pb2.TransactionPayload(
# payload_type=payload_pb2.TransactionPayload.CREATE_DOCTOR,
# create_doctor=doctor)
#
# return self._send_healthcare_txn(txn_key, batch_key, [address], [address], payload,
# wait=wait,
# auth_user=auth_user,
# auth_password=<PASSWORD>)
batch, batch_id = transaction.create_doctor(
txn_signer=self._signer,
batch_signer=self._signer,
name=name,
surname=surname)
batch_list = batch_pb2.BatchList(batches=[batch])
return self._send_batches(batch_list=batch_list,
batch_id=batch_id,
wait=wait)
def create_patient(self, name, surname, wait=None):
# batch_key = txn_key = self._signer.get_public_key().as_hex()
#
# address = helper.make_patient_address(patient_pkey=txn_key)
#
# patient = payload_pb2.CreatePatient(
# public_key=txn_key,
# name=name,
# surname=surname)
#
# payload = payload_pb2.TransactionPayload(
# payload_type=payload_pb2.TransactionPayload.CREATE_PATIENT,
# create_patient=patient)
#
# return self._send_healthcare_txn(txn_key, batch_key, [address], [address], payload,
# wait=wait,
# auth_user=auth_user,
# auth_password=<PASSWORD>)
batch, batch_id = transaction.create_patient(
txn_signer=self._signer,
batch_signer=self._signer,
name=name,
surname=surname)
batch_list = batch_pb2.BatchList(batches=[batch])
return self._send_batches(batch_list=batch_list,
batch_id=batch_id,
wait=wait)
def add_lab_test(self, height, weight, gender, a_g_ratio, albumin, alkaline_phosphatase, appearance,
bilirubin, casts, color, wait=None):
batch, batch_id = transaction.add_lab_test(
txn_signer=self._signer,
batch_signer=self._signer,
height=height,
weight=weight,
gender=gender,
a_g_ratio=a_g_ratio,
albumin=albumin,
alkaline_phosphatase=alkaline_phosphatase,
appearance=appearance,
bilirubin=bilirubin,
casts=casts,
color=color)
batch_list = batch_pb2.BatchList(batches=[batch])
return self._send_batches(batch_list=batch_list,
batch_id=batch_id,
wait=wait)
def add_pulse(self, pulse, timestamp, wait=None):
batch, batch_id = transaction.add_pulse(
txn_signer=self._signer,
batch_signer=self._signer,
pulse=pulse,
timestamp=timestamp)
batch_list = batch_pb2.BatchList(batches=[batch])
return self._send_batches(batch_list=batch_list,
batch_id=batch_id,
wait=wait)
# def add_claim(self, claim_id, patient_pkey, wait=None, auth_user=None,
# auth_password=<PASSWORD>):
# batch, batch_id = transaction.register_claim(
# txn_signer=self._signer,
# batch_signer=self._signer,
# claim_id=claim_id,
# patient_pkey=patient_pkey)
#
# batch_list = batch_pb2.BatchList(batches=[batch])
#
# return self._send_batches(batch_list=batch_list,
# batch_id=batch_id,
# wait=wait)
#
# def assign_doctor(self, claim_id, doctor_pkey, wait=None, auth_user=None,
# auth_password=<PASSWORD>):
# current_times_str = str(time.time())
#
# batch, batch_id = transaction.assign_doctor(
# txn_signer=self._signer,
# batch_signer=self._signer,
# claim_id=claim_id,
# description="Doctor pkey: {}, assigned to claim: {}".format(doctor_pkey, claim_id),
# event_time=current_times_str)
#
# batch_list = batch_pb2.BatchList(batches=[batch])
#
# return self._send_batches(batch_list=batch_list,
# batch_id=batch_id,
# wait=wait)
#
# def first_visit(self, claim_id, description, doctor_pkey, wait=None, auth_user=None,
# auth_password=<PASSWORD>):
#
# current_times_str = str(time.time())
#
# batch, batch_id = transaction.first_visit(
# txn_signer=self._signer,
# batch_signer=self._signer,
# claim_id=claim_id,
# description="Doctor pkey: {}, claim hex: {}, description: {}".format(doctor_pkey, claim_id, description),
# event_time=current_times_str)
#
# batch_list = batch_pb2.BatchList(batches=[batch])
#
# return self._send_batches(batch_list=batch_list,
# batch_id=batch_id,
# wait=wait)
#
# def pass_tests(self, claim_id, description, wait=None, auth_user=None,
# auth_password=<PASSWORD>):
#
# current_times_str = str(time.time())
#
# batch, batch_id = transaction.pass_tests(
# txn_signer=self._signer,
# batch_signer=self._signer,
# claim_id=claim_id,
# description=description,
# event_time=current_times_str)
#
# batch_list = batch_pb2.BatchList(batches=[batch])
#
# return self._send_batches(batch_list=batch_list,
# batch_id=batch_id,
# wait=wait)
#
# def attend_procedures(self, claim_id, description, wait=None, auth_user=None,
# auth_password=<PASSWORD>):
#
# current_times_str = str(time.time())
#
# batch, batch_id = transaction.attend_procedures(
# txn_signer=self._signer,
# batch_signer=self._signer,
# claim_id=claim_id,
# description=description,
# event_time=current_times_str)
#
# batch_list = batch_pb2.BatchList(batches=[batch])
#
# return self._send_batches(batch_list=batch_list,
# batch_id=batch_id,
# wait=wait)
#
# def eat_pills(self, claim_id, description, wait=None, auth_user=None,
# auth_password=<PASSWORD>):
#
# current_times_str = str(time.time())
#
# batch, batch_id = transaction.eat_pills(
# txn_signer=self._signer,
# batch_signer=self._signer,
# claim_id=claim_id,
# description=description,
# event_time=current_times_str)
#
# batch_list = batch_pb2.BatchList(batches=[batch])
#
# return self._send_batches(batch_list=batch_list,
# batch_id=batch_id,
# wait=wait)
#
# def next_visit(self, claim_id, description, doctor_pkey, wait=None, auth_user=None,
# auth_password=<PASSWORD>):
#
# current_times_str = str(time.time())
#
# batch, batch_id = transaction.next_visit(
# txn_signer=self._signer,
# batch_signer=self._signer,
# claim_id=claim_id,
# description="Doctor pkey: {}, claim hex: {}, description: {}".format(doctor_pkey, claim_id, description),
# event_time=current_times_str)
#
# batch_list = batch_pb2.BatchList(batches=[batch])
#
# return self._send_batches(batch_list=batch_list,
# batch_id=batch_id,
# wait=wait)
#
# def list_claims(self, auth_user=None, auth_password=<PASSWORD>):
# claim_list_prefix = helper.make_claim_list_address()
#
# result = self._send_request(
# "state?address={}".format(claim_list_prefix))
# orders = {}
#
# try:
# data = yaml.safe_load(result)["data"]
# if data is not None:
# for value in data:
# dec_ord = base64.b64decode(value["data"])
# o = payload_pb2.CreateClaim()
# o.ParseFromString(dec_ord)
# orders[value["address"]] = o
#
# except BaseException:
# pass
#
# return orders
#
def list_patients(self):
patient_list_prefix = helper.make_patient_list_address()
result = self._send_request(
"state?address={}".format(patient_list_prefix))
patients = {}
try:
data = yaml.safe_load(result)["data"]
if data is not None:
for value in data:
dec_pt = base64.b64decode(value["data"])
pt = payload_pb2.CreatePatient()
pt.ParseFromString(dec_pt)
patients[value["address"]] = pt
except BaseException:
pass
return patients
def list_clinics(self):
operator_list_prefix = helper.make_clinic_list_address()
result = self._send_request(
"state?address={}".format(operator_list_prefix))
clinics = {}
try:
data = yaml.safe_load(result)["data"]
if data is not None:
for value in data:
dec_cl = base64.b64decode(value["data"])
cl = payload_pb2.CreateClinic()
cl.ParseFromString(dec_cl)
clinics[value["address"]] = cl
except BaseException:
pass
return clinics
def list_doctors(self):
doctor_list_prefix = helper.make_doctor_list_address()
result = self._send_request(
"state?address={}".format(doctor_list_prefix))
doctors = []
try:
data = yaml.safe_load(result)["data"]
if data is not None:
for value in data:
dec_dc = base64.b64decode(value["data"])
dc = payload_pb2.CreateDoctor()
dc.ParseFromString(dec_dc)
doctors.append(dc)
except BaseException:
pass
return doctors
def list_lab_test(self):
lab_test_list_prefix = helper.make_lab_test_list_address()
result = self._send_request(
"state?address={}".format(lab_test_list_prefix))
lab_tests = {}
try:
data = yaml.safe_load(result)["data"]
if data is not None:
for value in data:
dec_lt = base64.b64decode(value["data"])
lt = payload_pb2.AddLabTest()
lt.ParseFromString(dec_lt)
lab_tests[value["address"]] = lt
except BaseException:
pass
return lab_tests
def list_pulse(self):
pulse_list_prefix = helper.make_pulse_list_address()
result = self._send_request(
"state?address={}".format(pulse_list_prefix))
pulse_list = {}
try:
data = yaml.safe_load(result)["data"]
if data is not None:
for value in data:
dec_p = base64.b64decode(value["data"])
p = payload_pb2.AddPulse()
p.ParseFromString(dec_p)
pulse_list[value["address"]] = p
except BaseException:
pass
return pulse_list
#
# def list_claim_details(self, claim_id, clinic_hex, auth_user=None, auth_password=<PASSWORD>):
# claim_details_prefix = helper.make_event_list_address(claim_id=claim_id, clinic_pkey=clinic_hex)
#
# result = self._send_request(
# "state?address={}".format(claim_details_prefix))
# orders = {}
#
# try:
# data = yaml.safe_load(result)["data"]
# if data is not None:
# for value in data:
# dec_ord = base64.b64decode(value["data"])
# o = payload_pb2.ActionOnClaim()
# o.ParseFromString(dec_ord)
# orders[value["address"]] = o
#
# except BaseException:
# pass
#
# return orders
def _send_request(self,
suffix,
data=None,
content_type=None,
name=None):
if self._base_url.startswith("http://"):
url = "{}/{}".format(self._base_url, suffix)
else:
url = "http://{}/{}".format(self._base_url, suffix)
headers = {}
# if auth_user is not None:
# auth_string = "{}:{}".format(auth_user, auth_password)
# b64_string = base64.b64encode(auth_string.encode()).decode()
# auth_header = 'Basic {}'.format(b64_string)
# headers['Authorization'] = auth_header
if content_type is not None:
headers['Content-Type'] = content_type
try:
if data is not None:
result = requests.post(url, headers=headers, data=data)
else:
result = requests.get(url, headers=headers)
if result.status_code == 404:
raise HealthCareException("No such operator: {}".format(name))
elif not result.ok:
raise HealthCareException("Error {}: {}".format(
result.status_code, result.reason))
except requests.ConnectionError as err:
raise HealthCareException(
'Failed to connect to {}: {}'.format(url, str(err)))
except BaseException as err:
raise HealthCareException(err)
return result.text
def _send_batches(self, batch_list, batch_id, wait):
if wait and wait > 0:
wait_time = 0
start_time = time.time()
response = self._send_request(
"batches", batch_list.SerializeToString(),
'application/octet-stream')
while wait_time < wait:
status = self._get_status(
batch_id,
wait - int(wait_time))
wait_time = time.time() - start_time
if status != 'PENDING':
return response
return response
return self._send_request(
"batches", batch_list.SerializeToString(),
'application/octet-stream')
# def _send_healthcare_txn(self, txn_key, batch_key, inputs, outputs, payload, wait,
# auth_user, auth_password):
#
# txn_header_bytes, signature = self._transaction_header(txn_key, inputs, outputs, payload)
#
# txn = Transaction(
# header=txn_header_bytes,
# header_signature=signature,
# payload=payload.SerializeToString()
# )
#
# transactions = [txn]
#
# batch_header_bytes, signature = self._batch_header(batch_key, transactions)
#
# batch = Batch(
# header=batch_header_bytes,
# header_signature=signature,
# transactions=transactions
# )
#
# batch_list = BatchList(batches=[batch])
# batch_id = batch_list.batches[0].header_signature
#
# if wait and wait > 0:
# wait_time = 0
# start_time = time.time()
# response = self._send_request(
# "batches", batch_list.SerializeToString(),
# 'application/octet-stream',
# auth_user=auth_user,
# auth_password=<PASSWORD>)
# while wait_time < wait:
# status = self._get_status(
# batch_id,
# wait - int(wait_time),
# auth_user=auth_user,
# auth_password=<PASSWORD>)
# wait_time = time.time() - start_time
#
# if status != 'PENDING':
# return response
#
# return response
#
# return self._send_request(
# "batches", batch_list.SerializeToString(),
# 'application/octet-stream',
# auth_user=auth_user,
# auth_password=<PASSWORD>)
def _get_status(self, batch_id, wait):
try:
result = self._send_request(
'batch_statuses?id={}&wait={}'.format(batch_id, wait))
return yaml.safe_load(result)['data'][0]['status']
except BaseException as err:
raise HealthCareException(err)
# def _transaction_header(self, txn_key, inputs, outputs, payload): txn_header_bytes = TransactionHeader(
# family_name=helper.TP_FAMILYNAME, family_version=helper.TP_VERSION, inputs=inputs, outputs=outputs,
# signer_public_key=txn_key, # signer.get_public_key().as_hex(), # In this example, we're signing the batch with
# the same private key, # but the batch can be signed by another party, in which case, the # public key will need
# to be associated with that key. batcher_public_key=txn_key, # signer.get_public_key().as_hex(),
# In this example, there are no dependencies. This list should include # an previous transaction header
# signatures that must be applied for # this transaction to successfully commit. # For example, # dependencies=[
# '540a6803971d1880ec73a96cb97815a95d374cbad5d865925e5aa0432fcf1931539afe10310c122c5eaae15df61236079abbf4f258889359
# c4d175516934484a'], dependencies=[], nonce=random.random().hex().encode(), payload_sha512=hashlib.sha512(payload.
# SerializeToString()).hexdigest() ).SerializeToString()
#
# signature = self._signer.sign(txn_header_bytes)
# return txn_header_bytes, signature
#
# def _batch_header(self, batch_key, transactions):
# batch_header_bytes = BatchHeader(
# signer_public_key=batch_key,
# transaction_ids=[txn.header_signature for txn in transactions],
# ).SerializeToString()
#
# signature = self._signer.sign(batch_header_bytes)
#
# return batch_header_bytes, signature
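# Minimal usage sketch (the REST endpoint and key file path below are illustrative
# assumptions, not values taken from this module):
#   client = HealthCareClient(base_url='localhost:8008',
#                             keyfile='/root/.sawtooth/keys/client.priv')
#   client.create_clinic(name='clinic-1', wait=10)
#   print(client.list_clinics())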
|
90037
|
import gzip
import struct
import os
import logging
import logging.handlers
import sys
import json
def getBootstraps(quantDir):
logging.basicConfig(level=logging.INFO)
bootstrapFile = os.path.sep.join([quantDir, "aux_info", "bootstrap", "bootstraps.gz"])
nameFile = os.path.sep.join([quantDir, "aux_info", "bootstrap", "names.tsv.gz"])
if not os.path.isfile(bootstrapFile):
logging.error("The required bootstrap file {} doesn't appear to exist".format(bootstrapFile))
sys.exit(1)
if not os.path.isfile(nameFile):
logging.error("The required transcript name file {} doesn't appear to exist".format(nameFile))
sys.exit(1)
with gzip.open(nameFile) as nf:
txpNames = nf.read().strip().split('\t')
ntxp = len(txpNames)
logging.info("Expecting bootstrap info for {} transcripts".format(ntxp))
with open(os.path.sep.join([quantDir, "aux_info", "meta_info.json"])) as fh:
meta_info = json.load(fh)
if meta_info['samp_type'] == 'gibbs':
s = struct.Struct('<' + 'i' * ntxp)
elif meta_info['samp_type'] == 'bootstrap':
s = struct.Struct('@' + 'd' * ntxp)
else:
logging.error("Unknown sampling method: {}".format(meta_info['samp_type']))
sys.exit(1)
numBoot = 0
listBootstrap = []
# Now, iterate over the bootstrap samples and write each
with gzip.open(bootstrapFile) as bf:
while True:
try:
x = s.unpack_from(bf.read(s.size))
listBootstrap.append(x)
numBoot += 1
except:
logging.info("read all bootstrap values")
break
logging.info("read bootstraps successfully.")
return listBootstrap
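# Example call (the path is illustrative; the directory is expected to contain the
# aux_info/bootstrap files and meta_info.json described above):
#   bootstraps = getBootstraps('/path/to/quant_dir')
#   print('{} bootstrap samples read'.format(len(bootstraps)))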
|
90052
|
from enn import *
import numpy as np
from grid_LSTM import netLSTM, netLSTM_full
from grid_data_v2 import TextDataset
import grid_data_v2 as grid_data
from grid_configuration import config
from util import Record, save_var, get_file_list, Regeneralize, list_to_csv
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch
import time
import matplotlib.pyplot as plt
import pickle
import pandas as pd
import os
plt.ion()
seed = config.seed
torch.random.manual_seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.set_device(config.deviceID)
print(config.test_ID)
PATH = config.path
if not os.path.exists(PATH):
os.mkdir(PATH)
'''Parameters used in the net.'''
ERROR_PER = config.ERROR_PER
NE = config.ne  # number of ensemble members
GAMMA = config.GAMMA
T = config.T
''' load data and initialize enn net'''
text = TextDataset(config)
textLoader = DataLoader(text, batch_size=config.batch_size, shuffle=True,
num_workers=config.num_workers, drop_last=config.drop_last)
criterion = torch.nn.MSELoss()
def train(net_enn, input_, target):
dstb_y = lamuda.Lamuda(target, NE, ERROR_PER)
train_losses = Record()
losses = Record()
lamuda_history = Record()
std_history = Record()
pred_history = Record()
initial_parameters = net_enn.initial_parameters
initial_pred = net_enn.output(input_)
train_losses.update(criterion(initial_pred.mean(0), target).tolist())
losses.update(criterion(initial_pred.mean(0), target).tolist())
std_history.update(dstb_y.std(initial_pred))
pred_history.update(initial_pred)
lamuda_history.update(dstb_y.lamuda(initial_pred))
for j in range(T):
torch.cuda.empty_cache()
params = net_enn.get_parameter()
dstb_y.update()
time_ = time.strftime('%Y%m%d_%H_%M_%S')
delta = enrml.EnRML(pred_history.get_latest(mean=False), params, initial_parameters,
lamuda_history.get_latest(mean=False), dstb_y.dstb, ERROR_PER)
params_raw = net_enn.update_parameter(delta)
torch.cuda.empty_cache()
pred = net_enn.output(input_)
loss_new = criterion(pred.mean(0), target).tolist()
bigger = train_losses.check(loss_new)
record_while = 0
while bigger:
record_while += 1
lamuda_history.update(lamuda_history.get_latest(mean=False) * GAMMA)
if lamuda_history.get_latest(mean=False) > GAMMA ** 5:
lamuda_history.update(lamuda_history.data[0])
print('abandon current iteration')
net_enn.set_parameter(params)
loss_new = train_losses.get_latest()
dstb_y.update()
params_raw = params
break
dstb_y.update()
net_enn.set_parameter(params)
delta = enrml.EnRML(pred_history.get_latest(mean=False), params, initial_parameters,
lamuda_history.get_latest(mean=False), dstb_y.dstb, ERROR_PER)
params_raw = net_enn.update_parameter(delta)
torch.cuda.empty_cache()
pred = net_enn.output(input_)
loss_new = criterion(pred.mean(0), target).tolist()
print('update losses, new loss:{}'.format(loss_new))
bigger = train_losses.check(loss_new)
train_losses.update(loss_new)
save_var(params_raw, '{}/{}_params'.format(PATH, time_))
print("iteration:{} \t current train losses:{}".format(j, train_losses.get_latest(mean=True)))
with open('{}/loss.txt'.format(PATH), 'a') as f:
f.write(time.strftime('%Y%m%d_%H_%M_%S')+','+str(train_losses.get_latest(mean=True))+',\n')
f.close()
pred_history.update(pred)
std_history.update(dstb_y.std(pred))
if std_history.bigger():
lamuda_history.update(lamuda_history.get_latest(mean=False))
else:
lamuda_tmp = lamuda_history.get_latest(mean=False) / GAMMA
if lamuda_tmp < 0.005:
lamuda_tmp = 0.005
lamuda_history.update(lamuda_tmp)
return net_enn, train_losses.get_latest(mean=True), pred_history.get_latest(mean=False)
# def predict(data, params=None, model_predict=None):  # predict 24 h at a time, then concatenate the pieces into the full prediction
# result = []
# input_ = torch.tensor(data)
# input_ = Variable(input_.view(1, len(data), config.input_dim).float()).cuda()
# if params is not None:
# model_predict.set_parameter(params)
# i = 0
# while i <= len(data) - config.train_len:
# pred = model_predict.output(input_[:, i:i+config.train_len, :])
# result.append(pred[:, -24:, :])
# print('predicting: {} to {}'.format(i, i + config.train_len))
# i += 24
# #save_var(result, 'result')
# return torch.cat(result, dim=1)
def predict_full(data, params=None, model_predict=None):  # predict the whole target sequence in one pass, without stitching
input_ = torch.tensor(data)
input_ = Variable(input_.view(1, len(data), config.input_dim).float()).cuda()
if params is not None:
model_predict.set_parameter(params)
pred = model_predict.output(input_)
return pred
def draw_result(enn_net):
param_list = get_file_list('params', config.path)
params = pickle.load(open(param_list[-1], 'rb'))
print("use parameter file: {}".format(param_list[-1]))
enn_net.set_parameter(params)
for i, k in enumerate(config.test_ID):
input_ = text.test_data_input_list[i]
target = text.test_data_output_list[i]
raw_data = pd.read_csv("data/{}.csv".format(config.data_list[k-1]))
real_std = raw_data.LOAD.std()
real_mean = raw_data.LOAD.mean()
raw = np.array(raw_data.LOAD)[config.predict_len:-config.predict_len]
pred = predict_full(input_, params=params, model_predict=enn_net)
        # shifted baselines (kept for reference)
        #pred = np.zeros((config.ne, np.shape(raw)[0], 1))  # trend-shift baseline
        #pred = np.ones((config.ne, np.shape(raw)[0], 1))  # plain-shift baseline
# save the result right from the enn net
np.savetxt('result/e{}-p{}-pred_w{}.csv'.format(config.experiment_ID, PATH, k),
np.array(pred)[:, :, 0].T, delimiter=',')
        # add the mean trend back to obtain the true ratio
if grid_data.use_mean_ratio == True:
if grid_data.use_different_mean_ratio == True:
mean_ratio = text.mean_ratio_all_pred[k-1,:]
elif grid_data.use_CV_ratio == True:
mean_ratio = text.mean_ratio_group_pred[config.test_set_ID-1,:]
else:
mean_ratio = text.mean_ratio_all_ave_pred
print('mean_ratio:', mean_ratio)
            pred = np.array(pred) + mean_ratio.reshape(-1,1)  # add back the mean trend ("ave" denotes the trend averaged over all districts)
#test1 = mean_ratio.reshape(-1,1)
#pred=np.array([list(test1) for i in range(100)])
np.savetxt('result/e{}-p{}-pred_mean_ratio_w{}.csv'.format(config.experiment_ID, PATH, k),
np.array(pred)[:, :, 0].T, delimiter=',')
print('test_pred:', np.shape(pred))
target = target + mean_ratio.reshape(-1,1)
loss = criterion(torch.tensor(pred.mean(0)[:, 0]).float().cpu(), torch.tensor(target[:, 0]).float())
else:
loss = criterion(torch.tensor(pred.mean(0)[:, 0]).float().cpu(), torch.tensor(target[:, 0]).float())
print("ID{}\t test loss: {}".format(k, loss))
mean = grid_data.load_mean[k - 1][0]
std = grid_data.load_std[k - 1][0]
pred_ratio = Regeneralize(np.array(pred[:, :, 0]), mean, std)
pred_real = pred_ratio * raw
#pred_real = pred_ratio
target_ratio = Regeneralize(target, mean, std).reshape(-1,1)
target_real = target_ratio * raw.reshape(-1,1)
#target_real = target_ratio
loss_ratio = criterion(torch.tensor(pred_ratio.mean(0)).float(), torch.tensor(target_ratio[:, 0]).float()) #new
print("ID{}\t ratio loss: {}".format(k, loss_ratio))
#target_real = np.array(raw_data.LOAD)[config.predict_len*2:]
# make a normalization of real load value:
loss_relative = np.mean(np.abs(pred_real.mean(0) - target_real.reshape(-1))/target_real.reshape(-1))
std = 1 * pred_real.std(0)
pred_normalized = (pred_real.mean(0) - real_mean) / real_std
target_normalized = (target_real.reshape(-1) - real_mean) / real_std
print('pred_normalized shape:', np.shape(pred_normalized))
print('target_normalized shape:', np.shape(target_normalized))
loss_real = criterion(Variable(torch.tensor(pred_normalized).float()),
Variable(torch.tensor(target_normalized).float()))
print("ID{}\t relative loss: {}".format(k, loss_relative))
print("ID{}\t real loss: {}".format(k, loss_real))
with open('{}/test_loss_{}.csv'.format(PATH, config.experiment_ID), 'a') as f:
f.write('{},{},{},{},{},{}\n'.format(k, loss, loss_ratio, loss_real, loss_relative, std.mean()))
f.close()
print('std:', std.mean())
x = np.arange(len(target))
np.savetxt('result/e{}-p{}-pred_w{}_real.csv'.format(config.experiment_ID, PATH, k),
np.array(pred_real).T, delimiter=',')
print('flag1')
np.savetxt('result/e{}-p{}-target_w{}_real.csv'.format(config.experiment_ID, PATH, k),
target, delimiter=',')
print('Plotting')
plt.figure(figsize=(100, 5))
plt.plot(target_real, label='target', color='black', alpha=0.4)
plt.errorbar(x, pred_real.mean(0), yerr=std, color='red', alpha=0.7)
plt.title(str(k) + '-' + config.info)
plt.legend()
plt.savefig('{}/ID{}.png'.format(PATH, k))
plt.show()
print('flag2')
def evaluate(enn_net, epoch):
#params = os.path.join(config.path, 'parameters_epoch%d' % epoch)
param_list = get_file_list('params', config.path)
#param_index = int(len(param_list)*epoch/config.epoch)-1
param_index = int(len(param_list)*(epoch+1)/config.epoch)-1
print('total number of saved parameters: %d, using no %d' % (len(param_list), param_index))
params = param_list[param_index]
print("use parameter file: {}".format(params))
params = pickle.load(open(params, 'rb'))
enn_net.set_parameter(params)
for i, k in enumerate(config.test_ID):
input_ = text.test_data_input_list[i] # ->array(data_len * in_dim)
target = text.test_data_output_list[i]# ->array(data_len * 1)
raw_data = pd.read_csv("data/{}.csv".format(config.data_list[k-1]))
real_std = raw_data.LOAD.std()
real_mean = raw_data.LOAD.mean()
raw = np.array(raw_data.LOAD)[config.predict_len:-config.predict_len]
pred = predict_full(input_, params=params, model_predict=enn_net)# ->tensor(ensemble_size*data_len*1)
        # shifted baselines (kept for reference)
        #pred = np.zeros((config.ne, np.shape(raw)[0], 1))  # trend-shift baseline
        #pred = np.ones((config.ne, np.shape(raw)[0], 1))  # plain-shift baseline
# save the result right from the enn net
np.savetxt('result/e{}-epoch{}-pred_w{}.csv'.format(config.experiment_ID, epoch, k),
np.array(pred)[:, :, 0].T, delimiter=',')
        # add the mean trend back to obtain the true ratio
if grid_data.use_mean_ratio == True:
if grid_data.use_different_mean_ratio == True:
mean_ratio = text.mean_ratio_all_pred[k-1,:]
elif grid_data.use_CV_ratio == True:
mean_ratio = text.mean_ratio_group_pred[config.test_set_ID-1,:]
else:
mean_ratio = text.mean_ratio_all_ave_pred
print('mean_ratio:', mean_ratio)
            pred = np.array(pred) + mean_ratio.reshape(-1,1)  # add back the mean trend ("ave" denotes the trend averaged over all districts)
#test1 = mean_ratio.reshape(-1,1)
#pred=np.array([list(test1) for i in range(100)])
np.savetxt('result/e{}-epoch{}-pred_mean_ratio_w{}.csv'.format(config.experiment_ID, epoch, k),
np.array(pred)[:, :, 0].T, delimiter=',')
print('test_pred:', np.shape(pred))
target = target + mean_ratio.reshape(-1,1)
loss = criterion(torch.tensor(pred.mean(0)[:, 0]).float().cpu(), torch.tensor(target[:, 0]).float())
else:
loss = criterion(torch.tensor(pred.mean(0)[:, 0]).float().cpu(), torch.tensor(target[:, 0]).float())
print("ID{}\t test loss: {}".format(k, loss))
mean = grid_data.load_mean[k - 1][0]
std = grid_data.load_std[k - 1][0]
pred_ratio = Regeneralize(np.array(pred[:, :, 0]), mean, std)
pred_real = pred_ratio * raw
#pred_real = pred_ratio
target_ratio = Regeneralize(target, mean, std).reshape(-1,1)
target_real = target_ratio * raw.reshape(-1,1)
#target_real = target_ratio
loss_ratio = criterion(torch.tensor(pred_ratio.mean(0)).float(), torch.tensor(target_ratio[:, 0]).float()) #new
print("ID{}\t ratio loss: {}".format(k, loss_ratio))
#target_real = np.array(raw_data.LOAD)[config.predict_len*2:]
# make a normalization of real load value:
loss_relative = np.mean(np.abs(pred_real.mean(0) - target_real.reshape(-1))/target_real.reshape(-1))
std = 1 * pred_real.std(0)
pred_normalized = (pred_real.mean(0) - real_mean) / real_std
target_normalized = (target_real.reshape(-1) - real_mean) / real_std
print('pred_normalized shape:', np.shape(pred_normalized))
print('target_normalized shape:', np.shape(target_normalized))
loss_real = criterion(Variable(torch.tensor(pred_normalized).float()),
Variable(torch.tensor(target_normalized).float()))
print("ID{}\t relative loss: {}".format(k, loss_relative))
print("ID{}\t real loss: {}".format(k, loss_real))
f = open(r'{}/epoch{}_test_loss_{}.csv'.format(PATH, epoch, config.experiment_ID), 'a')
f.write('{},{},{},{},{},{}\n'.format(k, loss, loss_ratio, loss_real, loss_relative, std.mean()))
f.close()
print('std:', std.mean())
x = np.arange(len(target))
np.savetxt('result/e{}-epoch{}-pred_w{}_real.csv'.format(config.experiment_ID, epoch, k),
np.array(pred_real).T, delimiter=',')
print('flag1')
np.savetxt('result/e{}-epoch{}-target_w{}_real.csv'.format(config.experiment_ID, epoch, k),
target, delimiter=',')
print('Plotting')
plt.figure(figsize=(100, 5))
plt.plot(target_real, label='target', color='black', alpha=0.4)
plt.errorbar(x, pred_real.mean(0), yerr=std, color='red', alpha=0.7)
plt.title(str(k) + '-' + config.info)
plt.legend()
plt.savefig('{}/ID{}_epoch{}.png'.format(PATH, k, epoch))
plt.show()
print('flag2')
def save_result(enn_net):
test_loss = []
for i, k in enumerate(config.test_ID):
input_ = text.test_data_input_list[i]
target = text.test_data_output_list[i]
        pred = predict_full(input_, params=None, model_predict=enn_net)
loss = criterion(pred.cpu(), torch.tensor(target).float())
test_loss.append(loss)
with open(PATH + '/test_loss.txt', 'a') as f:
f.write(time.strftime('%Y%m%d_%H_%M_%S') + ',' + str(test_loss) + ',\n')
f.close()
def run():
with open('{}/time.txt'.format(PATH), 'a') as f:
f.write('{},\n'.format(time.strftime('%Y%m%d_%H_%M_%S')))
f.close()
model = netLSTM()
with torch.no_grad():
model = model.cuda()
net_enn_train = enn.ENN(model, NE)
for epoch in range(config.epoch):
for i, data in enumerate(textLoader):
print('#'*30)
print("{}: batch{}".format(time.strftime('%Y%m%d_%H_%M_%S'), i))
input_, target = data
#input_ = torch.from_numpy(np.stack(list(shrink(input_, 5)), axis=1))
#target = torch.from_numpy(np.stack(list(shrink(target, 5)), axis=1))
with torch.no_grad():
input_, target = map(Variable, (input_.float(), target.float()))
target = target[:, -config.predict_len:, :]
print(target.shape)
target = target.reshape(-1, config.output_dim)
input_ = input_.cuda()
target = target.cuda()
net_enn_train, loss, pred_data = train(net_enn_train, input_, target)
# save pred and target while training
save_dir = os.path.join(PATH, 'predict_history')
if not os.path.exists(save_dir):
os.mkdir(save_dir)
save_data = {}
save_data['pred'] = np.array(pred_data.mean(0)[:, 0])
save_data['target'] = np.array(np.array(target[:, 0]))
save_data = pd.DataFrame.from_dict(save_data)
save_filename = '{}_{}.csv'.format(epoch, i)
save_data.to_csv(os.path.join(save_dir, save_filename))
"""
with open('predict_history'+'/pred.txt', 'a') as f:
f.write(list_to_csv(np.array(pred_data.mean(0)[:, 0])) + '\n')
f.close()
with open('predict_history'+'/target.txt', 'a') as f:
f.write(list_to_csv(np.array(target[:, 0])) + '\n')
f.close()
"""
with open(PATH+'/time.txt', 'a') as f:
f.write(time.strftime('%Y%m%d_%H_%M_%S') + ',' + str(loss) + ',\n')
f.close()
with torch.no_grad():
params = net_enn_train.get_parameter()
filename = PATH+"/parameters_epoch{}".format(epoch)
save_var(params, filename)
del params
if __name__ == '__main__':
run() # only include the training process based on the netLSTM model.
model = netLSTM_full() # netLSTM and netLSTM_full share the weights and bias. The only difference between these models is the output length.
with torch.no_grad():
model = model.cuda()
net_enn = enn.ENN(model, NE)
#draw_result(net_enn)#
evaluate(net_enn, 4)# count from 0
print(config.test_ID)
|
90077
|
def remove_smallest(n, lst):
if n <= 0:
return lst
elif n > len(lst):
return []
dex = [b[1] for b in sorted((a, i) for i, a in enumerate(lst))[:n]]
return [c for i, c in enumerate(lst) if i not in dex]
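# Example: removing the two smallest values keeps the order of the rest:
#   remove_smallest(2, [5, 3, 2, 1, 4])  ->  [5, 3, 4]
# Ties are broken by position, so the earliest occurrences are removed first.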
|
90102
|
import tensorflow as tf
def complex_to_real(tensor):
"""Returns tensor converted from a complex dtype with shape
(...,) to a real dtype with shape (..., 2), where last index
marks real [0] and imag [1] parts of a complex valued tensor.
Args:
tensor: complex valued tensor of shape (...,).
Returns:
real valued tensor of shape (..., 2)."""
return tf.concat([tf.math.real(tensor)[..., tf.newaxis],
tf.math.imag(tensor)[..., tf.newaxis]], axis=-1)
def real_to_complex(tensor):
"""Returns tensor converted from a real dtype with shape
(..., 2) to complex dtype with shape (...,), where last index
of a real tensor marks real [0] and imag [1]
parts of a complex valued tensor.
Args:
tensor: real valued tensor of shape (..., 2).
Returns:
complex valued tensor of shape (...,)."""
return tf.complex(tensor[..., 0], tensor[..., 1])
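# Round-trip sketch (assumes eager execution, i.e. TF 2.x):
#   z = tf.constant([1.0 + 2.0j, 3.0 - 4.0j], dtype=tf.complex64)
#   r = complex_to_real(z)         # shape (2, 2): [[1., 2.], [3., -4.]]
#   z_back = real_to_complex(r)    # shape (2,):   [1.+2.j, 3.-4.j]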
|
90104
|
import requests
import re
from urllib import parse
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36",
"referer": "https://tb.alicdn.com/snapshot/index.html",
'cookie': 't=884491259d4aed9aac3cd83e5798c433; cna=UU81Fxb46woCAWUv7c0BLoMd; sgcookie=ERElHyZEXq%2FBxbIAKkMLf; tracknick=%5Cu53F6%5Cu95EE%5Cu8C01%5Cu662F%5Cu8FB0%5Cu5357; _cc_=V32FPkk%2Fhw%3D%3D; enc=UvoaKN2E%2F5qKScgssIA7s34lg2c%2B7mFKY6bD58vrwGvLTZKDyYj7UQ0p3hGnXJK11f8JrZT5ky54YNi0i73Few%3D%3D; tfstk=cIOdBdvB3cmha_TF3QHGFR3VyY-dafFd2ys4w4-E6MTnQmN8NsxviIpfnv_Yv13O.; thw=cn; hng=CN%7Czh-CN%7CCNY%7C156; cookie2=1165897f57a1ed424d42db9d3a99ff7d; v=0; _tb_token_=<PASSWORD>; alitrackid=tb.alicdn.com; lastalitrackid=tb.alicdn.com; JSESSIONID=42FB5C5D5D65C270436BAF43224830CB; isg=BPb2H7f2tUx9pkBnqiw8IaAaRyz4FzpR25dtfWDcO1mro5U9yaZ-YfUau3_PPzJp; l=eBTUSTCcQZnRM5Q_BO5alurza77TaQdf1nVzaNbMiInca6TFta8TVNQqOBKvSdtjgt5j2eKrb3kJjRhM8W4LRjkDBeYBRs5mpfpp8e1..',
}
keyword = input("请输入你要搜索的信息:")
def get_parse(url):
html = requests.get(url,headers= headers)
    if html.status_code == 200:
        print('Page OK')
get_html(html)
else:
print(html.status_code)
def get_html(html):
    # use regular expressions to extract the product name, price, shop name and shop location
content = html.text
    # locate the product names
names = re.compile('"raw_title":"(.*?)"', re.I | re.S)
name = names.findall(content)
    # locate the prices
prices = re.compile('"view_price":"(.*?)"',re.I|re.S)
price = prices.findall(content)
    # locate the shop names
nicks = re.compile('"nick":"(.*?)"',re.I|re.S)
nick = nicks.findall(content)
    # locate the shop locations
item_locs = re.compile('"item_loc":"(.*?)"', re.I | re.S)
item_loc= item_locs.findall(content)
    # loop over the number of regex matches and print each product's name, price, shop name and location in order
for j in range(len(name)):
        print('Product name: {}\nPrice: {}\nShop name: {}\nShop location: {}\n'.format(name[j], price[j], nick[j], item_loc[j]))
if __name__ == '__main__':
for i in range(0,45,44):
url = 'https://s.taobao.com/search?q={}&imgfile=&commend=all&ssid=s5-e&' \
'search_type=item&sourceId=tb.index&spm=a21bo.2017.201856-taobao-item.1&' \
'ie=utf8&initiative_id=tbindexz_20170306&bcoffset=1&ntoffset=1&p4ppushleft=2%2C48&s={}'.format(parse.quote(keyword),i)
get_parse(url)
|
90139
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import torch.nn.functional as F
class GMMLogLoss(nn.Module):
''' compute the GMM loss between model output and the groundtruth data.
Args:
ncenter: numbers of gaussian distribution
ndim: dimension of each gaussian distribution
sigma_bias:
        sigma_min: currently unused.
'''
def __init__(self, ncenter, ndim, sigma_min=0.03):
super(GMMLogLoss,self).__init__()
self.ncenter = ncenter
self.ndim = ndim
self.sigma_min = sigma_min
def forward(self, output, target):
'''
Args:
output: [b, T, ncenter + ncenter * ndim * 2]:
[:, :, : ncenter] shows each gaussian probability
[:, :, ncenter : ncenter + ndim * ncenter] shows the average values of each dimension of each gaussian
[: ,:, ncenter + ndim * ncenter : ncenter + ndim * 2 * ncenter] show the negative log sigma of each dimension of each gaussian
target: [b, T, ndim], the ground truth target landmark data is shown here
        Maximizing the log-likelihood is equivalent to minimizing the negative log-likelihood.
        NOTE: It is numerically unstable to predict sigma directly and take its log, since the raw prediction
        would have to be clipped to stay positive (e.g. ln(-0.1) is undefined). Hence we predict the
        negative log sigma to avoid this instability, which means:
`` sigma = 1/exp(predict), predict = -ln(sigma) ``
Also, it will be just the 'B' term below!
Currently we only implement single gaussian distribution, hence the first values of pred are meaningless.
For single gaussian distribution:
L(mu, sigma) = -n/2 * ln(2pi * sigma^2) - 1 / (2 x sigma^2) * sum^n (x_i - mu)^2 (n for prediction times, n=1 for one frame, x_i for gt)
= -1/2 * ln(2pi) - 1/2 * ln(sigma^2) - 1/(2 x sigma^2) * (x - mu)^2
== min -L(mu, sgima) = 0.5 x ln(2pi) + 0.5 x ln(sigma^2) + 1/(2 x sigma^2) * (x - mu)^2
= 0.5 x ln_2PI + ln(sigma) + 0.5 x (MU_DIFF/sigma)^2
= A - B + C
In batch and Time sample, b and T are summed and averaged.
'''
b, T, _ = target.shape
# read prediction paras
mus = output[:, :, self.ncenter : (self.ncenter + self.ncenter * self.ndim)].view(b, T, self.ncenter, self.ndim) # [b, T, ncenter, ndim]
# apply min sigma
neg_log_sigmas_out = output[:, :, (self.ncenter + self.ncenter * self.ndim):].view(b, T, self.ncenter, self.ndim) # [b, T, ncenter, ndim]
inv_sigmas_min = torch.ones(neg_log_sigmas_out.size()).cuda() * (1. / self.sigma_min)
inv_sigmas_min_log = torch.log(inv_sigmas_min)
neg_log_sigmas = torch.min(neg_log_sigmas_out, inv_sigmas_min_log)
inv_sigmas = torch.exp(neg_log_sigmas)
# replicate the target of ncenter to minus mu
target_rep = target.unsqueeze(2).expand(b, T, self.ncenter, self.ndim) # [b, T, ncenter, ndim]
MU_DIFF = target_rep - mus # [b, T, ncenter, ndim]
# sigma process
A = 0.5 * math.log(2 * math.pi) # 0.9189385332046727
B = neg_log_sigmas # [b, T, ncenter, ndim]
C = 0.5 * (MU_DIFF * inv_sigmas)**2 # [b, T, ncenter, ndim]
negative_loglikelihood = A - B + C # [b, T, ncenter, ndim]
return negative_loglikelihood.mean()
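# Shape sketch for GMMLogLoss (a worked instance of the layout described in the docstring;
# note that forward() builds CUDA tensors internally, so a GPU is assumed):
# with ncenter=1 and ndim=2 the network output has last dimension 1 + 1*2 + 1*2 = 5:
#   output[..., :1]  -> mixture weights, output[..., 1:3] -> means, output[..., 3:5] -> -log(sigma)
#   criterion = GMMLogLoss(ncenter=1, ndim=2)
#   loss = criterion(output, target)   # output: [b, T, 5], target: [b, T, 2]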
def Sample_GMM(gmm_params, ncenter, ndim, weight_smooth = 0.0, sigma_scale = 0.0):
''' Sample values from a given a GMM distribution.
Args:
gmm_params: [b, target_length, (2 * ndim + 1) * ncenter], including the
distribution weights, average and sigma
ncenter: numbers of gaussian distribution
ndim: dimension of each gaussian distribution
weight_smooth: float, smooth the gaussian distribution weights
sigma_scale: float, adjust the gaussian scale, larger for sharper prediction,
0 for zero sigma which always return average values
Returns:
        current_sample: sampled tensor of shape [b, T, ndim] on the GPU.
'''
# reshape as [b*T, (2 * ndim + 1) * ncenter]
b, T, _ = gmm_params.shape
gmm_params_cpu = gmm_params.cpu().view(-1, (2 * ndim + 1) * ncenter)
# compute each distrubution probability
prob = nn.functional.softmax(gmm_params_cpu[:, : ncenter] * (1 + weight_smooth), dim=1)
# select the gaussian distribution according to their weights
selected_idx = torch.multinomial(prob, num_samples=1, replacement=True)
mu = gmm_params_cpu[:, ncenter : ncenter + ncenter * ndim]
# please note that we use -logsigma as output, hence here we need to take the negative
sigma = torch.exp(-gmm_params_cpu[:, ncenter + ncenter * ndim:]) * sigma_scale
# print('sigma average:', sigma.mean())
selected_sigma = torch.empty(b*T, ndim).float()
selected_mu = torch.empty(b*T, ndim).float()
current_sample = torch.randn(b*T, ndim).float()
# current_sample = test_sample
for i in range(b*T):
idx = selected_idx[i, 0]
selected_sigma[i, :] = sigma[i, idx * ndim:(idx + 1) * ndim]
selected_mu[i, :] = mu[i, idx * ndim:(idx + 1) * ndim]
# sample with sel sigma and sel mean
current_sample = current_sample * selected_sigma + selected_mu
# cur_sample = sel_mu
# return current_sample.unsqueeze(1).cuda()
return current_sample.reshape(b, T, -1).cuda()
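# Sampling sketch (comment-only; `model` is a hypothetical network whose output follows the
# layout above, and Sample_GMM returns a CUDA tensor, so a GPU is assumed):
#   params = model(inputs)                                           # [b, T, (2*ndim + 1) * ncenter]
#   sample = Sample_GMM(params, ncenter=1, ndim=2, sigma_scale=0.3)  # [b, T, ndim]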
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
target_tensor = None
gpu_id = input.get_device()
if target_is_real:
create_label = ((self.real_label_var is None) or
(self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).cuda(gpu_id).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or
(self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).cuda(gpu_id).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
if isinstance(input[0], list):
loss = 0
for input_i in input:
pred = input_i[-1]
target_tensor = self.get_target_tensor(pred, target_is_real)
loss += self.loss(pred, target_tensor)
return loss
else:
target_tensor = self.get_target_tensor(input[-1], target_is_real)
return self.loss(input[-1], target_tensor)
class VGGLoss(nn.Module):
def __init__(self, model=None):
super(VGGLoss, self).__init__()
if model is None:
self.vgg = Vgg19()
else:
self.vgg = model
self.vgg.cuda()
# self.vgg.eval()
self.criterion = nn.L1Loss()
self.style_criterion = StyleLoss()
self.weights = [1.0, 1.0, 1.0, 1.0, 1.0]
self.style_weights = [1.0, 1.0, 1.0, 1.0, 1.0]
# self.weights = [5.0, 1.0, 0.5, 0.4, 0.8]
# self.style_weights = [10e4, 1000, 50, 15, 50]
def forward(self, x, y, style=False):
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
loss = 0
if style:
# return both perceptual loss and style loss.
style_loss = 0
for i in range(len(x_vgg)):
this_loss = (self.weights[i] *
self.criterion(x_vgg[i], y_vgg[i].detach()))
this_style_loss = (self.style_weights[i] *
self.style_criterion(x_vgg[i], y_vgg[i].detach()))
loss += this_loss
style_loss += this_style_loss
return loss, style_loss
for i in range(len(x_vgg)):
this_loss = (self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach()))
loss += this_loss
return loss
def gram_matrix(input):
a, b, c, d = input.size() # a=batch size(=1)
# b=number of feature maps
# (c,d)=dimensions of a f. map (N=c*d)
    features = input.view(a * b, c * d)  # resize F_XL into \hat F_XL
G = torch.mm(features, features.t()) # compute the gram product
# we 'normalize' the values of the gram matrix
# by dividing by the number of element in each feature maps.
return G.div(a * b * c * d)
class StyleLoss(nn.Module):
def __init__(self):
super(StyleLoss, self).__init__()
def forward(self, x, y):
Gx = gram_matrix(x)
Gy = gram_matrix(y)
return F.mse_loss(Gx, Gy) * 30000000
class MaskedL1Loss(nn.Module):
def __init__(self):
super(MaskedL1Loss, self).__init__()
self.criterion = nn.L1Loss()
def forward(self, input, target, mask):
mask = mask.expand(-1, input.size()[1], -1, -1)
loss = self.criterion(input * mask, target * mask)
return loss
from torchvision import models
class Vgg19(nn.Module):
def __init__(self, requires_grad=False):
super(Vgg19, self).__init__()
vgg_pretrained_features = models.vgg19(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h_relu1 = self.slice1(X)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
return out
|
90146
|
from tornado.web import StaticFileHandler
from cloudtunes import settings
from cloudtunes.base.handlers import BaseHandler
class MainHandler(BaseHandler):
def get(self):
webapp_dir = self.settings['static_path']
homepage_dir = settings.HOMEPAGE_SITE_DIR
if self.current_user:
app_dir = webapp_dir
else:
if self.request.path != '/':
return self.redirect('/')
app_dir = homepage_dir
with open(app_dir + '/index.html') as f:
self.write(f.read())
class NoCacheStaticFileHandler(StaticFileHandler):
def set_extra_headers(self, path):
self.set_header('Cache-control', 'no-cache')
|
90167
|
from .base import *
from .bitfinex import *
from .bitstamp import *
from .buda import *
from .kraken import *
|
90177
|
import logging
import os
import ray
import time
from enum import Enum
from ray.actor import ActorHandle
from ray.streaming.generated import remote_call_pb2
from ray.streaming.runtime.command\
import WorkerCommitReport, WorkerRollbackRequest
logger = logging.getLogger(__name__)
class CallResult:
"""
Call Result
"""
def __init__(self, success, result_code, result_msg, result_obj):
self.success = success
self.result_code = result_code
self.result_msg = result_msg
self.result_obj = result_obj
@staticmethod
def success(payload=None):
return CallResult(True, CallResultEnum.SUCCESS, None, payload)
@staticmethod
def fail(payload=None):
return CallResult(False, CallResultEnum.FAILED, None, payload)
@staticmethod
def skipped(msg=None):
return CallResult(True, CallResultEnum.SKIPPED, msg, None)
def is_success(self):
if self.result_code is CallResultEnum.SUCCESS:
return True
return False
class CallResultEnum(Enum):
"""
call result enum
"""
SUCCESS = 0
FAILED = 1
SKIPPED = 2
class RemoteCallMst:
"""
remote call job master
"""
@staticmethod
def request_job_worker_rollback(master: ActorHandle,
request: WorkerRollbackRequest):
logger.info("Remote call mst: request job worker rollback start.")
request_pb = remote_call_pb2.BaseWorkerCmd()
request_pb.actor_id = request.from_actor_id
request_pb.timestamp = int(time.time() * 1000.0)
rollback_request_pb = remote_call_pb2.WorkerRollbackRequest()
rollback_request_pb.exception_msg = request.exception_msg()
rollback_request_pb.worker_hostname = os.uname()[1]
rollback_request_pb.worker_pid = str(os.getpid())
request_pb.detail.Pack(rollback_request_pb)
return_ids = master.requestJobWorkerRollback\
.remote(request_pb.SerializeToString())
result = remote_call_pb2.BoolResult()
result.ParseFromString(ray.get(return_ids))
logger.info("Remote call mst: request job worker rollback finish.")
return result.boolRes
@staticmethod
def report_job_worker_commit(master: ActorHandle,
report: WorkerCommitReport):
logger.info("Remote call mst: report job worker commit start.")
report_pb = remote_call_pb2.BaseWorkerCmd()
report_pb.actor_id = report.from_actor_id
report_pb.timestamp = int(time.time() * 1000.0)
wk_commit = remote_call_pb2.WorkerCommitReport()
wk_commit.commit_checkpoint_id = report.commit_checkpoint_id
report_pb.detail.Pack(wk_commit)
return_id = master.reportJobWorkerCommit\
.remote(report_pb.SerializeToString())
result = remote_call_pb2.BoolResult()
result.ParseFromString(ray.get(return_id))
logger.info("Remote call mst: report job worker commit finish.")
return result.boolRes
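# --- Illustrative sketch (not part of the runtime) ---
# How the CallResult helpers above are meant to be consumed by callers;
# the payloads are made up for demonstration.
def _call_result_example():
    ok = CallResult.success({"checkpoint_id": 3})
    skipped = CallResult.skipped("nothing to commit")
    failed = CallResult.fail()
    # Only SUCCESS results report is_success() == True; a SKIPPED result keeps
    # success=True but carries a SKIPPED result code.
    return ok.is_success(), skipped.is_success(), failed.is_success()  # (True, False, False)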
|
90184
|
from setuptools import find_packages, setup
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
long_description = open('README.md').read()
setup(
use_scm_version=True,
setup_requires=['setuptools_scm'],
name='django-super-deduper',
description='Utilities for deduping Django model instances',
url='https://github.com/mighty-justice/django-super-deduper',
long_description=long_description,
classifiers=[
'Framework :: Django :: 1.11',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
packages=find_packages(exclude=['tests']),
include_package_data=True,
install_requires=['django>=1.11'],
test_suite='tests',
)
|
90217
|
import sys
import storage
import json
import texttable as tt
protocol = "https"
nbmaster = ""
username = ""
password = ""
domainname = ""
domaintype = ""
port = 1556
stsid = ""
def print_usage():
print("Example:")
print(
"python -W ignore get_storage_server_by_id.py -nbmaster <master_server> -username <username> -password <password> -stsid <storage server id>[-domainname <domain_name>] [-domaintype <domain_type>]\n\n\n")
def read_command_line_arguments():
if len(sys.argv) % 2 == 0:
print_usage()
exit()
global nbmaster
global username
global password
global domainname
global domaintype
global stsid
for i in range(1, len(sys.argv), 2):
if sys.argv[i] == "-nbmaster":
nbmaster = sys.argv[i + 1]
elif sys.argv[i] == "-username":
username = sys.argv[i + 1]
elif sys.argv[i] == "-password":
password = sys.argv[i + 1]
elif sys.argv[i] == "-stsid":
stsid = sys.argv[i + 1]
elif sys.argv[i] == "-domainname":
domainname = sys.argv[i + 1]
elif sys.argv[i] == "-domaintype":
domaintype = sys.argv[i + 1]
else:
print_usage()
exit()
if nbmaster == "":
print("Please provide the value for 'nbmaster'")
exit()
elif username == "":
print("Please provide the value for 'username'")
exit()
    elif password == "":
        print("Please provide the value for 'password'")
        exit()
    elif stsid == "":
        print("Please provide the value for 'stsid'")
        exit()
print_usage()
read_command_line_arguments()
base_url = protocol + "://" + nbmaster + ":" + str(port) + "/netbackup"
jwt = storage.perform_login(username, password, base_url, domainname, domaintype)
jobs = storage.get_storage_server_by_id(jwt, base_url, stsid)
print(jobs)
|
90285
|
import numpy as np
from gym import utils, spaces
from gym.envs.mujoco import mujoco_env
from gym.envs.robotics.rotations import quat2euler, euler2quat, mat2euler
import os
# import random
from random import uniform, randint, randrange
from mjremote import mjremote
import time
from doorenv2.envs.doorenv import DoorEnv
class DoorEnvBlueV1(DoorEnv, utils.EzPickle):
def __init__(self,
port=1050,
unity=False,visionnet_input=False,
world_path='/home/demo/DoorGym/world_generator/world/pull_floatinghook',
pos_control=False,
ik_control=False
):
super().__init__(
port=port,
unity=unity,
visionnet_input=visionnet_input,
world_path=world_path,
pos_control=pos_control,
)
utils.EzPickle.__init__(self)
def gripper_action_gen(self, a):
self.gripper_action = np.array([a[-1],-a[-1],a[-1],-a[-1]])
return np.concatenate((a,self.gripper_action))
def randomized_property(self):
self.model.body_mass[10:16] = self.sample_gaussiannormal(self.model_origin.body_mass[10:16], 0.2) # gaussiannormal x original_mass
self.model.dof_damping[0:10] = self.sample_gaussiannormal(self.model_origin.dof_damping[0:10], 0.2) # gaussiannormal x original_damping
self.model.actuator_gainprm[:,0] = self.sample_gaussiannormal(self.model_origin.actuator_gainprm[:,0], 0.1) # gaussiannormal x original_damping
def _reset_model(self, gg=2, hooked=False, untucked=False):
qpos = self.init_qpos
if self.xml_path.find("float")>-1:
qpos = self.np_random.uniform(low=-0.3, high=0.3, size=self.model.nq) + self.init_qpos
if self.xml_path.find("hook")>-1:
qpos[self.nn-1] = np.random.uniform(0.0,3.13)
if self.xml_path.find("gripper")>-1:
qpos[self.nn-2] = np.random.uniform(0.0,3.13)
elif self.xml_path.find("mobile")>-1:
qpos[0] = 0.0 + uniform(-0.0, 0.0) # x_slider
qpos[1] = 0.0 + uniform(-0.0, -0.0) # y_slider
qpos[2] = 0.0 + uniform(-2.3412, 3.3999) # base_roll_joint
qpos[3] = 0.0 + uniform(-2.2944, 0) # shoulder_lift_joint
qpos[4] = 0.0 + uniform(-2.6761, 2.6761) # shoulder_roll_joint
qpos[5] = 1.0 + uniform(-2.2944, 0) # elbow_lift_joint
qpos[6] = 0.0 + uniform(-2.6761, 2.6761) # elbow_roll_joint
qpos[7] = 1.0 + uniform(-2.2944, 0) # wrist_lift_joint
qpos[8] = 0.0 + uniform(-2.6761, 2.6761) # wrist_roll_joint
else:
qpos = self.init_qpos
qpos[0] = 0.0 + uniform(-0.1, 0.1) # base_roll_joint
qpos[1] = 0.0 + uniform(-0.1, 0.1) # shoulder_lift_joint
qpos[2] = 0.0 + uniform(-0.1, 0.1) # shoulder_roll_joint
qpos[3] = 0.0 + uniform(-0.1, 0.1) # elbow_lift_joint
qpos[4] = 0.0 + uniform(-0.1, 0.1) # elbow_roll_joint
qpos[5] = 0.0 + uniform(-0.1, 0.1) # wrist_lift_joint
qpos[6] = 0.0 + uniform(-0.1, 0.1) # wrist_roll_joint
if self.xml_path.find("pull")>-1:
self.goal = self.np_random.uniform(low=-.15, high=.15, size=gg)
if self.xml_path.find("lefthinge")>-1:
self.goal[0] = np.random.uniform(-0.15,0.05)
self.goal[1] = np.random.uniform(-0.15,0.15)
else:
self.goal[0] = np.random.uniform(-0.05,0.15)
self.goal[1] = np.random.uniform(-0.15,0.15)
else:
self.goal = np.zeros(gg)
self.goal[0] = np.random.uniform(-0.15,0.15)
qpos[self.nn:-gg] = 0
qpos[-gg:] = self.goal
# qvel = self.init_qvel
# self.set_state(qpos, qvel)
if hooked:
if self.xml_path.find("float")>-1:
robot_origin = np.array([1.0, 0, 1.2])
if self.xml_path.find("lever")>-1:
goal_in_xyz = self.sim.data.get_geom_xpos("door_knob_4") - robot_origin
offset_to_hook = np.array([0.13,0.0,0.0])
elif self.xml_path.find("round")>-1:
goal_in_xyz = self.sim.data.get_geom_xpos("door_knob_2") - robot_origin
offset_to_hook = np.array([0.0,0.0,0.0])
elif self.xml_path.find("pull")>-1:
goal_in_xyz = self.sim.data.get_geom_xpos("door_knob_7") - robot_origin
offset_to_hook = np.array([0.13,0.0,0.0])
else:
assert "not sure about the door knob type"
if self.xml_path.find("hook")>-1:
offset_to_hook_randomness = np.array([np.random.uniform(-0.01,0.01), np.random.uniform(-0.005,0.005), np.random.uniform(-0.06,0.06)])
hand_init_pos_3D = goal_in_xyz + offset_to_hook + offset_to_hook_randomness
hand_ori_random = self.np_random.uniform(low=-0.05, high=0.05, size=3)
wrist_dir_chance = np.random.randint(100)
if wrist_dir_chance>=50:
hand_ori_random[-1] = np.random.uniform(0.0,0.4)
else:
hand_ori_random[-1] = np.random.uniform(2.74,3.14)
qpos[:self.nn] = np.concatenate((hand_init_pos_3D,hand_ori_random))
if self.xml_path.find("gripper")>-1:
offset_to_hook_randomness = np.array([0.0, 0.0, np.random.uniform(-0.06,0.06)])
hand_init_pos_3D = goal_in_xyz + offset_to_hook + offset_to_hook_randomness
hand_ori_random = self.np_random.uniform(low=-0.01, high=0.01, size=3)
wrist_dir_chance = np.random.randint(100)
if wrist_dir_chance>=50:
hand_ori_random[-1] = np.random.uniform(0.0,0.01)
else:
hand_ori_random[-1] = np.random.uniform(3.13,3.14)
qpos[:self.nn-1] = np.concatenate((hand_init_pos_3D,hand_ori_random))
qpos[0] -= 0.02
qpos[self.nn: self.nn+4] = np.array([1.0,-1.0,1.0,-1.0])
qvel = self.init_qvel
self.set_state(qpos, qvel)
if self.unity:
self.remote.setqpos(self.sim.data.qpos)
return self._get_obs()
def get_robot_joints(self):
return np.concatenate([
self.sim.data.qpos.flat[:self.nn],
self.sim.data.qvel.flat[:self.nn]])
def get_finger_target(self):
if self.xml_path.find("hook")>-1:
return self.sim.data.get_geom_xpos("hookfinger_2")
elif self.xml_path.find("gripper")>-1:
return (self.sim.data.get_geom_xpos("fingerleft2") \
+ self.sim.data.get_geom_xpos("fingerright2"))/2.0
else:
assert "not sure about the end-effector type"
def get_finger_ori(self):
if self.xml_path.find("hook")>-1:
return quat2euler(self.sim.data.get_body_xquat("robotfinger_hook_target"))
elif self.xml_path.find("gripper")>-1:
return quat2euler(self.sim.data.get_body_xquat("robotwrist_rolllink"))
else:
assert "not sure about the end-effector type"
def get_finger_quat(self):
if self.xml_path.find("hook")>-1:
return self.sim.data.get_body_xquat("robotfinger_hook_target")
elif self.xml_path.find("gripper")>-1:
return self.sim.data.get_body_xquat("robotwrist_rolllink")
else:
assert "not sure about the end-effector type"
class DoorEnvBlueV2(DoorEnv, utils.EzPickle):
def __init__(self,
port=1050,
unity=False,
visionnet_input=False,
vision_obs=False,
world_path='/home/demo/DoorGym/world_generator/world/pull_floatinghook',
pos_control=False,
ik_control=False,
imgsize_h=640,
imgsize_w=640
):
# print("1st passed", imgsize_h)
super().__init__(
port=port,
unity=unity,
visionnet_input=visionnet_input,
vision_obs = vision_obs,
world_path=world_path,
pos_control=pos_control,
ik_control=ik_control,
imgsize_h=imgsize_h,
imgsize_w=imgsize_w
)
utils.EzPickle.__init__(self)
def gripper_action_gen(self, a):
self.gripper_action = np.array([a[-1],-a[-1],a[-1],-a[-1]])
return np.concatenate((a,self.gripper_action))
def physics_randomization(self):
self.model.body_mass[1:18] = self.sample_gaussiannormal(self.model_origin.body_mass[1:18], 0.2) # gaussiannormal x original_mass
self.model.dof_damping[0:12] = self.sample_gaussiannormal(self.model_origin.dof_damping[0:12], 0.2) # gaussiannormal x original_damping
self.model.actuator_gainprm[:,0] = self.sample_gaussiannormal(self.model_origin.actuator_gainprm[:,0], 0.1) # gaussiannormal x original_damping
def set_base_pos(self, pos_list=[0.6, 0.35, 0.7]):
for i,x in enumerate(pos_list):
self.model.body_pos[1,i] = x
# def color_randomization(self):
# import pprint as pp
# import sys
# pp.pprint(dir(self.model), width=1)
# print(">>>>>before>>>>>>>")
# pp.pprint(self.model.geom_rgba)
# geom_n = self.model.geom_rgba.shape[0]
# geom_rgba = []
# for i in range(geom_n):
# geom_rgba.append([randrange(1,100)/100.0, randrange(1,100)/100.0, randrange(1,100)/100.0, 1.0])
# self.model.geom_rgba[:,:] = np.array(geom_rgba)
# self.model.cam_quat[:,:] = np.array(euler2quat(cam_ori))
# self.model.cam_fovy[:] = np.array(cam_fovy)
# print(">>>>>after>>>>>>>")
# pp.pprint(self.model.geom_rgba)
# pp.pprint(self.model.cam_quat)
# pp.pprint(self.model.cam_fovy)
# sys.exit(1)
def _reset_model(self, gg=2, hooked=False, untucked=False):
def randomize():
qpos = self.init_qpos
# qpos[0] = uniform(-3.3999, 2.3412) # base_roll_joint
# qpos[1] = uniform(-2.2944, 0) # shoulder_lift_joint
# qpos[2] = uniform(-2.6761, 2.6761) # shoulder_roll_joint
# qpos[3] = uniform(-2.2944, 0) # elbow_lift_joint
# qpos[4] = uniform(-2.6761, 2.6761) # elbow_roll_joint
# qpos[5] = uniform(-2.2944, 0) # wrist_lift_joint
# qpos[6] = uniform(-2.6761, 2.676) # wrist_roll_joint
qpos[0] = 0.0 + uniform(-0.1, 0.1) # base_roll_joint
qpos[1] = -2.310 + uniform(-0.0, 0.1) # shoulder_lift_joint
qpos[2] = 1.571 + uniform(-0.1, 0.1) # shoulder_roll_joint
qpos[3] = -0.750 + uniform(-0.1, 0.1) # elbow_lift_joint
qpos[4] = -1.571 + uniform(-0.1, 0.1) # elbow_roll_joint
qpos[5] = 0.0 + uniform(-0.1, 0.1) # wrist_lift_joint
qpos[6] = 0.0 + uniform(-0.1, 0.1) # wrist_roll_joint
if self.xml_path.find("pull")>-1:
self.goal = self.np_random.uniform(low=-.15, high=.15, size=gg)
if self.xml_path.find("lefthinge")>-1:
self.goal[0] = np.random.uniform(-0.15,0.05)
self.goal[1] = np.random.uniform(-0.15,0.15)
else:
self.goal[0] = np.random.uniform(-0.05,0.15)
self.goal[1] = np.random.uniform(-0.15,0.15)
else:
self.goal = np.zeros(gg)
self.goal[0] = np.random.uniform(-0.15,0.15)
qpos[self.nn:-gg] = 0
qpos[-gg:] = self.goal
qvel = self.init_qvel
self.set_state(qpos, qvel)
collision = True
while collision:
# print("collision found! Count: ", self.sim.data.ncon)
randomize()
collision = self.sim.data.ncon > 0
# import pprint as pp
# pp.pprint(dir(env.env.sim.data))
# print("final collision count: ", self.sim.data.ncon)
# import sys
# sys.exit(1)
if self.unity:
self.remote.setqpos(self.sim.data.qpos)
return self._get_obs()
def get_robot_joints(self):
if self.ik_control:
return np.concatenate([
self.get_finger_target(),
self.get_finger_quat(),
self.get_gripper_pos(),
self.get_finger_vel(),
self.get_finger_angvel(),
])
else:
return np.concatenate([
self.sim.data.qpos.flat[:self.nn],
self.sim.data.qvel.flat[:self.nn]
])
def get_finger_target(self):
return (self.sim.data.get_geom_xpos("fingerleft2") \
+ self.sim.data.get_geom_xpos("fingerright2"))/2.0
def get_base_pos(self):
return self.sim.data.get_body_xpos("robotbase_link")
def get_finger_ori(self):
return quat2euler(self.sim.data.get_body_xquat("robotwrist_rolllink"))
def get_finger_quat(self):
return self.sim.data.get_body_xquat("robotwrist_rolllink")
def get_finger_vel(self):
return self.sim.data.get_body_xvelp("robotwrist_rolllink")
def get_finger_angvel(self):
return self.sim.data.get_body_xvelr("robotwrist_rolllink")
def get_gripper_pos(self):
return np.array([self.sim.data.get_joint_qpos("right_gripper_joint")])
|
90306
|
from decimal import Decimal
from typing import Iterable, Optional, TypeVar
from stock_indicators._cslib import CsIndicator
from stock_indicators._cstypes import List as CsList
from stock_indicators._cstypes import Decimal as CsDecimal
from stock_indicators._cstypes import to_pydecimal
from stock_indicators.indicators.common.helpers import RemoveWarmupMixin
from stock_indicators.indicators.common.results import IndicatorResults, ResultBase
from stock_indicators.indicators.common.quote import Quote
def get_fcb(quotes: Iterable[Quote], window_span: int = 2):
"""Get FCB calculated.
Fractal Chaos Bands (FCB) outline high and low price channels
to depict broad less-chaotic price movements.
FCB is a channelized depiction of Williams Fractals.
Parameters:
`quotes` : Iterable[Quote]
Historical price quotes.
`window_span` : int, defaults 2
Number of span periods in the evaluation window.
Returns:
`FCBResults[FCBResult]`
FCBResults is list of FCBResult with providing useful helper methods.
See more:
- [FCB Reference](https://daveskender.github.io/Stock.Indicators.Python/indicators/Fcb/#content)
- [Helper Methods](https://daveskender.github.io/Stock.Indicators.Python/utilities/#content)
"""
results = CsIndicator.GetFcb[Quote](CsList(Quote, quotes), window_span)
return FCBResults(results, FCBResult)
class FCBResult(ResultBase):
"""
A wrapper class for a single unit of Fractal Chaos Bands (FCB) results.
"""
@property
def upper_band(self) -> Optional[Decimal]:
return to_pydecimal(self._csdata.UpperBand)
@upper_band.setter
def upper_band(self, value):
self._csdata.UpperBand = CsDecimal(value)
@property
def lower_band(self) -> Optional[Decimal]:
return to_pydecimal(self._csdata.LowerBand)
@lower_band.setter
def lower_band(self, value):
self._csdata.LowerBand = CsDecimal(value)
_T = TypeVar("_T", bound=FCBResult)
class FCBResults(RemoveWarmupMixin, IndicatorResults[_T]):
"""
A wrapper class for the list of Fractal Chaos Bands (FCB) results.
It is exactly same with built-in `list` except for that it provides
some useful helper methods written in CSharp implementation.
"""
|
90353
|
from allennlp.common.util import JsonDict
from allennlp.data import DatasetReader
from allennlp.data.tokenizers import WordTokenizer
from allennlp.models import Model
from allennlp.service.predictors.predictor import Predictor
from overrides import overrides
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from propara.data.propara_dataset_reader import Action
from propara.trainer_decoder.action_scorer import ActionScorerDummy
@Predictor.register('prostruct_prediction')
class ProStructPredictor(Predictor):
"""
Wrapper for the :class:`processes.models.ProStructModel` model.
This is used at prediction time, including on the demo when invoking the following command:
# demo command:
python -m allennlp.service.server_simple
--archive-path /tmp/xtiny/model.tar.gz
--predictor prostruct_prediction
--include-package processes
--static-dir demo/propara_demo
"""
def __init__(self,
model: Model,
dataset_reader: DatasetReader) -> None:
super().__init__(model, dataset_reader)
self.tokenizer = WordTokenizer(word_splitter=SpacyWordSplitter(pos_tags=True))
@overrides
def predict_json(self, inputs: JsonDict, cuda_device: int = -1) -> JsonDict:
# read one json instance from prostruct
# sentence_texts: List[str]
# participants: List[str],
# states: List[List[str]], where states[i][j] is ith participant at time j
# Para id is useful for decoder trainer. As we won't call it at prediction time,
# we make this optional.
para_id = inputs.get("para_id", -1)
sentence_texts = inputs["sentence_texts"]
sentence_texts = sentence_texts if "\n" not in sentence_texts else [s for s in sentence_texts.split("\n")]
participants = inputs["participants"]
if not participants:
participants = [p for p in self.helper.participants_from_sentences(sentence_texts)]
# Participants can be separated in many different ways
# (A participant can contain comma and in those cases we separate by "\n" or "\t").
# Do this only when participants is not already a list (demo passes a string).
if isinstance(participants, str):
if "\n" in participants:
separator = "\n"
elif "\t" in participants:
separator = "\t"
else:
separator = ","
participants = [p.strip() for p in participants.split(separator)]
participants = participants if "," not in participants else [p.strip() for p in participants.split(",")]
states = inputs.get("states", None)
# Can be used in demo (eventually the demo would control more parameters such as which commonsense etc).
top_k_sequences = inputs.get("top_k_sequences", 2)
print(f"Predictor gets input: ", inputs)
print(f"Predictor formats inputs =\n{para_id},\n{sentence_texts}\n{participants}")
instance = self._dataset_reader.text_to_instance(para_id=para_id,
sentence_texts=sentence_texts,
participants=list(participants),
states=states,
filename="test"
# rules_activated="0,0,0,0"
) # convert from set
# Can we update instance based on self.proparaDecoderStep.update_rules()
old_action_scorer = self._model.decoder_step.get_action_scorer()
old_valid_action_gen = self._model.decoder_step.get_valid_action_generator()
rules_used_original = self._model.decoder_step.get_valid_action_generator().get_rules_used()
dont_use_kb = "dont_use_kb" in inputs and inputs["dont_use_kb"]
if dont_use_kb:
self._model.decoder_step.change_action_scorer(ActionScorerDummy())
rules_changed = "rules_used" in inputs and inputs["rules_used"] is not None
if rules_changed:
updated_rules = [True if int(rule_val.strip()) > 0 else False
for rule_val in inputs["rules_used"].split(",")]
self._model.decoder_step.get_valid_action_generator().set_rules_used(updated_rules)
outputs = self._model.forward_on_instance(instance)
# Reset to original settings.
if dont_use_kb:
self._model.decoder_step.change_action_scorer(old_action_scorer)
if rules_changed:
self._model.decoder_step.change_valid_action_generator(old_valid_action_gen)
json_outputs = ProStructPredictor.to_json(
outputs,
participants,
top_k_sequences
)
json_outputs["default_kb_used"] = self._model.decoder_step.get_action_scorer().name
json_outputs["default_rules_used"] = rules_used_original
json_outputs['predicted_locations'] = self.predict_locations(outputs, sentence_texts, participants)
settings_used = ""
if rules_changed or dont_use_kb:
settings_used = f"rules used: {inputs.get('rules_used', '')} and using {'no kb' if dont_use_kb else 'kb'}"
json_outputs['settings_used'] = settings_used
json_outputs["sentences"] = sentence_texts
return {**inputs, **json_outputs}
def predict_locations(self,
outputs,
sentence_texts, # sent
participants): # parti
loc_per_sent_per_parti = []
if 'location_span_after' not in outputs:
return loc_per_sent_per_parti
locs = outputs['location_span_after'][0] # Shape: (bs=1 always) sent x parti x span_start_end
best_action_seq = outputs['best_final_states'][0][0].action_history[0] # step x participant_labels
for sent_id, sent in enumerate(sentence_texts):
loc_per_parti = []
for parti_id, parti in enumerate(participants):
start = locs[sent_id][parti_id][0].data[0]
end = locs[sent_id][parti_id][1].data[0] # Inclusive
# if action = create/move then output loc
# if action = destroy then output '-'
# if action = none then output '?'
curr_action = best_action_seq[sent_id][parti_id]
loc_per_parti.append(
self.span_from_sent(sent, start, end) if (curr_action == Action.CREATE.value or curr_action == Action.MOVE.value)
else (
'-' if curr_action == Action.DESTROY.value else '?'
)
)
loc_per_sent_per_parti.append(loc_per_parti)
return loc_per_sent_per_parti
def span_from_sent(self, sent, start, end):
sent_tokens = self.tokenizer.tokenize(sent)
return ' '.join([s.text for s in sent_tokens[start:(end+1)]]) if 0 <= start <= end < len(sent_tokens) else "?"
@classmethod
def to_json(cls, outputs, participants, top_k_sequences):
"""
The predictor is expected to format outputs as json (simplifies rendering to UI server)
Example of an "outputs" supplied to this function:
For one instance with (4 participants 4 actions 10 steps),
output from predictor looks like:
{
0: [ # 0 indicates first instance in the batch, we have a fixed batch size of 1.
[
[(2, 1, 2, 0), (0, 1, 0, 1), ... (0, 0, 3, 0)] # top 1st; 10 = number of steps.
] - 21.939434051513672, [
[(2, 0, 2, 1), (0, 1, 0, 1), ... (0, 0, 3, 0)] # top 2nd
] - 21.940093994140625, [
[(2, 1, 2, 0), (0, 1, 0, 1), ... (0, 0, 3, 0)] # top 3rd
...
]
[(2, 0, 2, 1), (0, 1, 0, 1), ... (0, 0, 0, 0)] # top 10th
] - 21.94779396057129
]
}
"""
json = {}
if outputs is not None:
for rank, o in enumerate(outputs['best_final_states'][0][:top_k_sequences]):
curr_top_rank = "top" + str(rank + 1) # top1, top2...
o_json = o.to_json()
# o_json: {'action_history': [(0, 2, 0, 0, 2), ..., (0, 0, 0, 0, 0)], 'score': -33.67}
# step: (0, 2, 0, 0, 2)
# Replace index to Action (ignore zeros/NONE)
json[curr_top_rank + "_labels"] = [[str(Action(int(label)).name).replace("NONE", "")
for label in step]
for step in o_json['action_history']
]
json[curr_top_rank + "_original"] = o_json['action_history']
json[curr_top_rank + "_score"] = o_json['score']
else:
json["error_message"] = "No output predicted probably because " \
"input participants is not found in paragraph."
json["participants"] = participants
return json
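# --- Illustrative input sketch (hypothetical values) ---
# A minimal JSON payload for predict_json(), covering the fields read above.
# The paragraph, participants, and optional flags are made-up examples.
_EXAMPLE_INPUT = {
    "para_id": 1,
    "sentence_texts": ["Water evaporates from the ocean.",
                       "The vapor condenses into clouds."],
    "participants": "water\nvapor\nclouds",  # string form; a list is also accepted
    "top_k_sequences": 2,
    "dont_use_kb": False,                    # optional: disable the KB scorer
    "rules_used": None,                      # optional: e.g. "1,1,0,1"
}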
|
90365
|
from enum import IntEnum
class RedisStatus(IntEnum):
"""Connection status for the redis client."""
NONE = 0
CONNECTED = 1
AUTH_ERROR = 2
CONN_ERROR = 3
class RedisEvent(IntEnum):
"""Redis client events."""
CONNECT_BEGIN = 1
CONNECT_SUCCESS = 2
CONNECT_FAIL = 3
KEY_ADDED_TO_CACHE = 4
KEY_FOUND_IN_CACHE = 5
FAILED_TO_CACHE_KEY = 6
|
90377
|
from common_fixtures import * # NOQA
import websocket as ws
import base64
import pytest
VOLUME_DRIVER = "rancher-longhorn"
STACK_NAME_PREFIX = "volume-"
CONTROLLER = "controller"
REPLICA = "replica"
def test_container_with_volume_execute(client, test_name):
volume_name = 'vol' + test_name
cleanup_items = []
cleanup_vols = []
try:
c = client.create_container(
name=test_name,
imageUuid=TEST_IMAGE_UUID,
networkMode=MANAGED_NETWORK,
dataVolumes=[volume_name + ":/vol"],
volumeDriver=VOLUME_DRIVER,
attachStdin=True,
attachStdout=True,
tty=True,
command='/bin/bash')
cleanup_items.append(c)
container = client.wait_success(c, timeout=120)
vols = client.list_volume(name=volume_name)
assert len(vols) == 1
cleanup_vols.append(vols[0])
test_msg = 'EXEC_WORKS'
assert_execute(container, test_msg)
finally:
delete_all(client, cleanup_items)
for volume in cleanup_vols:
volume = client.wait_success(client.delete(volume))
assert volume.state == "removed"
volume = client.wait_success(volume.purge())
assert volume.state == "purged"
def test_container_migrate_volume(client, test_name):
volume_name = 'vol' + test_name
hosts = client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 2
test_msg = 'EXEC_WORKS'
cleanup_items = []
cleanup_vols = []
try:
c1 = client.create_container(
name=test_name,
imageUuid=TEST_IMAGE_UUID,
networkMode=MANAGED_NETWORK,
dataVolumes=[volume_name + ":/vol"],
volumeDriver=VOLUME_DRIVER,
requestedHostId=hosts[0].id,
attachStdin=True,
attachStdout=True,
tty=True,
command='/bin/bash')
cleanup_items.append(c1)
container = client.wait_success(c1, timeout=120)
vols = client.list_volume(name=volume_name)
assert len(vols) == 1
cleanup_vols.append(vols[0])
assert_execute(container, test_msg)
client.wait_success(client.delete(c1))
cleanup_items.remove(c1)
c2 = client.create_container(
name=test_name + "-2",
imageUuid=TEST_IMAGE_UUID,
networkMode=MANAGED_NETWORK,
dataVolumes=[volume_name + ":/vol"],
volumeDriver=VOLUME_DRIVER,
requestedHostId=hosts[1].id,
attachStdin=True,
attachStdout=True,
tty=True,
command='/bin/bash')
cleanup_items.append(c2)
container = client.wait_success(c2, timeout=180)
assert_read(container, test_msg)
finally:
delete_all(client, cleanup_items)
for volume in cleanup_vols:
volume = client.wait_success(client.delete(volume))
assert volume.state == "removed"
volume = client.wait_success(volume.purge())
assert volume.state == "purged"
def test_container_replica_down(admin_client, client, test_name):
volume_name = 'vol' + test_name
cleanup_items = []
cleanup_vols = []
try:
c = client.create_container(
name=test_name,
imageUuid=TEST_IMAGE_UUID,
networkMode=MANAGED_NETWORK,
dataVolumes=[volume_name + ":/vol"],
volumeDriver=VOLUME_DRIVER,
attachStdin=True,
attachStdout=True,
tty=True,
command='/bin/bash')
cleanup_items.append(c)
container = client.wait_success(c, timeout=120)
vols = client.list_volume(name=volume_name)
assert len(vols) == 1
cleanup_vols.append(vols[0])
test_msg = 'EXEC_WORKS'
assert_execute(container, test_msg)
replicas = get_replica_containers(admin_client, client, volume_name)
assert len(replicas) == 2
rep1 = client.wait_success(client.delete(replicas[0]))
assert rep1.state == 'removed'
# make sure data is intact
assert_read(container, test_msg)
test_msg = 'EXEC_WORKS_AFTER_REMOVE'
assert_execute(container, test_msg)
# TODO implement check of volume status, wait it to be UP
finally:
delete_all(client, cleanup_items)
for volume in cleanup_vols:
volume = client.wait_success(client.delete(volume))
assert volume.state == "removed"
volume = client.wait_success(volume.purge())
assert volume.state == "purged"
@pytest.mark.skip(reason="need a way to stop replica without HA it")
def test_container_both_replica_down_and_rebuild(
admin_client, client, test_name):
volume_name = 'vol' + test_name
cleanup_items = []
cleanup_vols = []
try:
c1 = client.create_container(
name=test_name,
imageUuid=TEST_IMAGE_UUID,
networkMode=MANAGED_NETWORK,
dataVolumes=[volume_name + ":/vol"],
volumeDriver=VOLUME_DRIVER,
attachStdin=True,
attachStdout=True,
tty=True,
command='/bin/bash')
cleanup_items.append(c1)
container = client.wait_success(c1, timeout=120)
vols = client.list_volume(name=volume_name)
assert len(vols) == 1
cleanup_vols.append(vols[0])
test_msg = 'EXEC_WORKS'
assert_execute(container, test_msg)
replicas = get_replica_containers(admin_client, client, volume_name)
assert len(replicas) == 2
rep1 = client.wait_success(replicas[0].stop())
assert rep1.state == 'stopped'
# make sure data is intact
assert_read(container, test_msg)
test_msg = 'EXEC_WORKS_AFTER_STOP'
assert_execute(container, test_msg)
rep2 = client.wait_success(replicas[1].stop())
assert rep2.state == 'stopped'
# now controller should be stopped, volume won't be available
controller = get_controller_container(
admin_client, client, volume_name)
con = client.wait_success(client.delete(controller))
assert con.state == 'removed'
print "wait_for container remove creation"
client.wait_success(client.delete(c1))
cleanup_items.remove(c1)
# now a new controller should be started, use recent replicas
# wait for volume to be in attached state again.
c2 = client.create_container(
name=test_name + "-2",
imageUuid=TEST_IMAGE_UUID,
networkMode=MANAGED_NETWORK,
dataVolumes=[volume_name + ":/vol"],
volumeDriver=VOLUME_DRIVER,
attachStdin=True,
attachStdout=True,
tty=True,
command='/bin/bash')
cleanup_items.append(c2)
print "wait_for new container creation"
container = client.wait_success(c2, timeout=180)
assert_read(container, test_msg)
# TODO implement check of volume status, wait it to be UP
finally:
delete_all(client, cleanup_items)
for volume in cleanup_vols:
volume = client.wait_success(client.delete(volume))
assert volume.state == "removed"
volume = client.wait_success(volume.purge())
assert volume.state == "purged"
def assert_execute(container, test_msg):
execute = container.execute(attachStdin=True,
attachStdout=True,
command=['/bin/bash', '-c',
'echo ' + test_msg +
' | tee /vol/test'],
tty=True)
conn = ws.create_connection(execute.url + '?token=' + execute.token,
timeout=10)
# Python is weird about closures
closure_wrapper = {
'result': ''
}
def exec_check():
msg = conn.recv()
closure_wrapper['result'] += base64.b64decode(msg)
return test_msg == closure_wrapper['result'].rstrip()
wait_for(exec_check,
'Timeout waiting for exec msg %s' % test_msg)
def assert_read(container, test_msg):
execute = container.execute(attachStdin=True,
attachStdout=True,
command=['/bin/bash', '-c',
'cat /vol/test'],
tty=True)
conn = ws.create_connection(execute.url + '?token=' + execute.token,
timeout=10)
# Python is weird about closures
closure_wrapper = {
'result': ''
}
def exec_check():
msg = conn.recv()
closure_wrapper['result'] += base64.b64decode(msg)
return test_msg == closure_wrapper['result'].rstrip()
wait_for(exec_check,
'Timeout waiting for exec msg %s' % test_msg)
def get_system_stack_name(volume_name):
return STACK_NAME_PREFIX + volume_name
def get_replica_containers(admin_client, client, volume_name):
stack_name = get_system_stack_name(volume_name)
stack, service = get_env_service_by_name(client, stack_name, REPLICA)
return get_service_containers(admin_client, service)
def get_controller_container(admin_client, client, volume_name):
stack_name = get_system_stack_name(volume_name)
stack, service = get_env_service_by_name(client, stack_name, CONTROLLER)
return get_service_containers(admin_client, service)[0]
|
90408
|
from boa_test.tests.boa_test import BoaTest
from boa.compiler import Compiler
from neo.Prompt.Commands.BuildNRun import TestBuild
class TestContract(BoaTest):
def test_binops(self):
output = Compiler.instance().load('%s/boa_test/example/BinopTest.py' % TestContract.dirname).default
out = output.write()
tx, results, total_ops, engine = TestBuild(out, ['&', 4, 4], self.GetWallet1(), '', '07')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 4)
tx, results, total_ops, engine = TestBuild(out, ['|', 4, 3], self.GetWallet1(), '', '07')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 7)
tx, results, total_ops, engine = TestBuild(out, ['|', 4, 8], self.GetWallet1(), '', '07')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 12)
tx, results, total_ops, engine = TestBuild(out, ['^', 4, 4], self.GetWallet1(), '', '07')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
tx, results, total_ops, engine = TestBuild(out, ['^', 4, 2], self.GetWallet1(), '', '07')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 6)
# tx, results, total_ops, engine = TestBuild(out, ['>>', 16, 2], self.GetWallet1(), '', '07')
# self.assertEqual(len(results), 1)
# self.assertEqual(results[0].GetBigInteger(), 4)
# tx, results, total_ops, engine = TestBuild(out, ['>>', 16, 0], self.GetWallet1(), '', '07')
# self.assertEqual(len(results), 1)
# self.assertEqual(results[0].GetBigInteger(), 16)
#
# tx, results, total_ops, engine = TestBuild(out, ['>>', 11, 1], self.GetWallet1(), '', '07')
# self.assertEqual(len(results), 1)
# self.assertEqual(results[0].GetBigInteger(), 5)
#
# tx, results, total_ops, engine = TestBuild(out, ['<<', 16, 2], self.GetWallet1(), '', '07')
# self.assertEqual(len(results), 1)
# self.assertEqual(results[0].GetBigInteger(), 64)
#
# tx, results, total_ops, engine = TestBuild(out, ['<<', 16, -2], self.GetWallet1(), '', '07')
# self.assertEqual(len(results), 0)
#
# tx, results, total_ops, engine = TestBuild(out, ['<<', 4, 5], self.GetWallet1(), '', '07')
# self.assertEqual(len(results), 1)
# self.assertEqual(results[0].GetBigInteger(), 128)
tx, results, total_ops, engine = TestBuild(out, ['%', 16, 2], self.GetWallet1(), '', '07')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
tx, results, total_ops, engine = TestBuild(out, ['%', 16, 11], self.GetWallet1(), '', '07')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 5)
tx, results, total_ops, engine = TestBuild(out, ['//', 16, 2], self.GetWallet1(), '', '07')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 8)
tx, results, total_ops, engine = TestBuild(out, ['//', 16, 7], self.GetWallet1(), '', '07')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 2)
tx, results, total_ops, engine = TestBuild(out, ['/', 16, 7], self.GetWallet1(), '', '07')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 2)
tx, results, total_ops, engine = TestBuild(out, ['~', 16, 0], self.GetWallet1(), '', '07')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), -17)
tx, results, total_ops, engine = TestBuild(out, ['~', -3, 0], self.GetWallet1(), '', '07')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 2)
|
90412
|
import pytest
from dagster import Any, String, usable_as_dagster_type
from dagster.check import CheckError
from dagster.core.types.dagster_type import resolve_dagster_type
from dagster.utils import safe_tempfile_path
from dagstermill.serialize import read_value, write_value
def test_scalar():
with safe_tempfile_path() as tempfile_path:
assert (
read_value(
resolve_dagster_type(String),
write_value(resolve_dagster_type(String), "foo", tempfile_path),
)
== "foo"
)
def test_scalar_any():
with safe_tempfile_path() as tempfile_path:
assert (
read_value(
resolve_dagster_type(Any),
write_value(resolve_dagster_type(Any), "foo", tempfile_path),
)
== "foo"
)
@usable_as_dagster_type
class EvenType:
def __init__(self, num):
        assert num % 2 == 0
self.num = num
def test_custom_dagster_type():
with safe_tempfile_path() as tempfile_path:
assert (
read_value(
resolve_dagster_type(EvenType),
write_value(resolve_dagster_type(EvenType), 4, tempfile_path),
)
== 4
)
def test_read_bad_value():
with pytest.raises(CheckError, match="Malformed value"):
read_value(resolve_dagster_type(Any), {"value": "foo", "file": "bar"})
with pytest.raises(CheckError, match="Malformed value"):
read_value(resolve_dagster_type(Any), {"quux": "buzz"})
|
90432
|
import argparse
import re
if not hasattr(re, '_pattern_type'):
re._pattern_type = re.Pattern
import os.path
import sys
from ygo import globals
from ygo.exceptions import LanguageError
from ygo.language_handler import LanguageHandler
from ygo.parsers.login_parser import LoginParser
from ygo.server import Server
from ygo.utils import parse_lflist
from ygo.websockets import start_websocket_server
def main():
globals.language_handler = LanguageHandler()
print("Adding languages...")
globals.language_handler.add('english', 'en')
globals.language_handler.add("german", "de")
globals.language_handler.add('japanese', 'ja')
globals.language_handler.add('spanish', 'es')
globals.language_handler.add('portuguese', 'pt')
globals.language_handler.add('italian', 'it')
globals.language_handler.add('french', 'fr')
print("{count} languages added successfully.".format(count = len(globals.language_handler.get_available_languages())))
try:
globals.language_handler.set_primary_language('english')
except LanguageError as e:
print("Error setting primary language: "+str(e))
sys.exit()
globals.banlists = parse_lflist('lflist.conf')
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', type=int, default=4000, help="Port to bind to")
parser.add_argument('-w', '--websocket-port', type=int)
parser.add_argument('--websocket-cert', '-c')
parser.add_argument('--websocket-key', '-k')
args = parser.parse_args()
server = Server(port = 4000, default_parser = LoginParser)
server.port = args.port
if args.websocket_port:
start_websocket_server(args.websocket_port, args.websocket_cert, args.websocket_key)
globals.server = server
server.run()
main()
|
90510
|
class View(dict):
"""A View contains the content displayed in the main window."""
def __init__(self, d=None):
"""
View constructor.
Keyword arguments:
d=None: Initial keys and values to initialize the view with.
Regardless of the value of d, keys 'songs', 'artists' and
'albums' are created with empty lists as default values.
"""
self['songs'], self['artists'], self['albums'] = [], [], []
if d is not None:
if isinstance(d, dict):
for k in d:
self[k] = d[k]
else:
raise TypeError('Initializing View with invalid argument')
def __setitem__(self, key, val):
"""Restrict values to lists only."""
if not isinstance(val, list):
raise TypeError('View can only hold lists as values.')
super().__setitem__(key, val)
def __len__(self):
"""Return the sum of each list's length."""
return sum(len(self[k]) for k in self)
    def replace(self, other):
        """Replace the view's contents with some other dict."""
        super().clear()
        self.__init__(other)
def clear(self):
"""Clear elements without removing keys."""
for k in self.keys():
del self[k][:]
def is_empty(self):
"""Returns whether or not the view is empty."""
return all(not self[k] for k in self)
def copy(self):
"""Return a deep copy of the view's contents."""
return {k: self[k][:] for k in self}
|
90544
|
def lcs(s1, s2, x, y):
    # Memoized longest-common-subsequence length of the prefixes s1[:x] and s2[:y].
    # The base case must be checked before the memo lookup; otherwise x == 0 or
    # y == 0 reads and writes arr[-1][-1], corrupting the last cell of the table.
    if x == 0 or y == 0:
        return 0
    if arr[x-1][y-1] != -1:
        return arr[x-1][y-1]
    if s1[x-1] == s2[y-1]:
        arr[x-1][y-1] = 1 + lcs(s1, s2, x-1, y-1)
    else:
        arr[x-1][y-1] = max(lcs(s1, s2, x-1, y), lcs(s1, s2, x, y-1))
    return arr[x-1][y-1]
input_string_1 = 'AGGTABPIXIL'
input_string_2 = 'GXTXAYBPXL'
arr = [[-1 for i in range(len(input_string_2))] for i in range(len(input_string_1))]
import time
init = time.time()
print(lcs(input_string_1, input_string_2, len(input_string_1), len(input_string_2)))
end = time.time()
print((end - init) * 1000)
|
90557
|
import numpy as np
import argparse
from run_search import region_search
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--image_dir', dest='im_filepath')
parser.add_argument('--results_dir', dest='res_filepath')
parser.add_argument('--results_suffix',
dest='results_suffix')
parser.add_argument('--time_file', dest='time_file')
args = parser.parse_args()
im_filepath = args.im_filepath
res_filepath = args.res_filepath
results_suffix = args.results_suffix
time_file = args.time_file
v_x = 214. # Pixels/day
v_y = 81.
v_arr = [v_x, v_y]
radius = 25
num_obs = 6
rs = region_search(v_arr, radius, num_obs)
rs.run_search(im_filepath, res_filepath, results_suffix,
time_file,likelihood_level=19.,mjd_lims=[57070.0, 57072.9])
|
90561
|
import torch
import torch.nn as nn
from .encoders import LBSNet, LEAPOccupancyDecoder, BaseModule
class INVLBS(LBSNet):
def __init__(self, num_joints, hidden_size, pn_dim, fwd_trans_cond_dim):
self.fwd_trans_cond_dim = fwd_trans_cond_dim
super().__init__(num_joints, hidden_size, pn_dim)
self.fc_fwd = nn.Sequential(
nn.Linear(self.num_joints * 12, 100), nn.ReLU(),
nn.Linear(100, 100), nn.ReLU(),
nn.Linear(100, self.fwd_trans_cond_dim),
)
def get_c_dim(self):
return self.pn_dim * 2 + self.fwd_trans_cond_dim
@classmethod
def load_from_file(cls, file_path):
state_dict = cls.parse_pytorch_file(file_path)
config = state_dict['inv_lbs_config']
model_state_dict = state_dict['inv_lbs_model']
return cls.load(config, model_state_dict)
@classmethod
def from_cfg(cls, config):
model = cls(
num_joints=config['num_joints'],
hidden_size=config['hidden_size'],
pn_dim=config['pn_dim'],
fwd_trans_cond_dim=config['fwd_trans_cond_dim'])
return model
def forward(self, points, can_vertices, posed_vertices, fwd_transformation, compute_can_points=True):
"""
Args:
points: B x T x 3
can_vertices: B x N x 3
posed_vertices: B x N x 3
fwd_transformation (torch.tensor): Forward transformation tensor. (B x K x 4 x 4)
compute_can_points (bool): Whether to return estimated canonical points.
Returns:
if compute_can_points is True tuple of:
skinning weights (torch.Tensor): B x T x K
canonical points (torch.Tensor): B x T x 3
otherwise:
skinning weights (torch.Tensor): B x T x K
"""
B, K = fwd_transformation.shape[:2]
can_code = self.point_encoder(can_vertices)
posed_code = self.point_encoder(posed_vertices)
fwd_trans_code = self.fc_fwd(fwd_transformation[..., :3, :].reshape(B, -1))
lbs_code = torch.cat((can_code, posed_code, fwd_trans_code), dim=-1)
point_weights = self._forward(points, lbs_code)
if compute_can_points:
can_points = self.posed2can_points(points, point_weights, fwd_transformation)
ret_interface = point_weights, can_points # B x T x K
else:
ret_interface = point_weights
return ret_interface
@staticmethod
def posed2can_points(points, point_weights, fwd_transformation):
"""
Args:
points: B x T x 3
point_weights: B x T x K
fwd_transformation: B x K x 4 x 4
Returns:
canonical points: B x T x 3
"""
B, T, K = point_weights.shape
point_weights = point_weights.view(B * T, 1, K) # B*T x 1 x K
fwd_transformation = fwd_transformation.unsqueeze(1).repeat(1, T, 1, 1, 1) # B X K x 4 x 4 -> B x T x K x 4 x 4
fwd_transformation = fwd_transformation.view(B * T, K, -1) # B*T x K x 16
back_trans = torch.bmm(point_weights, fwd_transformation).view(B * T, 4, 4)
back_trans = torch.inverse(back_trans)
points = torch.cat([points, torch.ones(B, T, 1, device=points.device)], dim=-1).view(B * T, 4, 1)
can_points = torch.bmm(back_trans, points)[:, :3, 0].view(B, T, 3)
return can_points
class FWDLBS(LBSNet):
def __init__(self, num_joints, hidden_size, pn_dim):
super().__init__(num_joints, hidden_size, pn_dim)
def get_c_dim(self):
return self.pn_dim
@classmethod
def load_from_file(cls, file_path):
state_dict = cls.parse_pytorch_file(file_path)
config = state_dict['fwd_lbs_config']
model_state_dict = state_dict['fwd_lbs_model']
return cls.load(config, model_state_dict)
@classmethod
def from_cfg(cls, config):
model = cls(
num_joints=config['num_joints'],
hidden_size=config['hidden_size'],
pn_dim=config['pn_dim'])
return model
def forward(self, points, can_vertices):
"""
Args:
points: B x T x 3
can_vertices: B x N x 3
Returns:
"""
vert_code = self.point_encoder(can_vertices) # B x pn_dim
point_weights = self._forward(points, vert_code)
return point_weights # B x T x K
class LEAPModel(BaseModule):
def __init__(self,
inv_lbs: INVLBS,
fwd_lbs: FWDLBS,
leap_occupancy_decoder: LEAPOccupancyDecoder):
super(LEAPModel, self).__init__()
# NN modules
self.inv_lbs = inv_lbs
self.fwd_lbs = fwd_lbs
self.leap_occupancy_decoder = leap_occupancy_decoder
@classmethod
def from_cfg(cls, config):
leap_model = cls(
inv_lbs=INVLBS.load_from_file(config['inv_lbs_model_path']),
fwd_lbs=FWDLBS.load_from_file(config['fwd_lbs_model_path']),
leap_occupancy_decoder=LEAPOccupancyDecoder.from_cfg(config))
return leap_model
@classmethod
def load_from_file(cls, file_path):
state_dict = cls.parse_pytorch_file(file_path)
config = state_dict['leap_model_config']
model_state_dict = state_dict['leap_model_model']
leap_model = cls(
inv_lbs=INVLBS.from_cfg(config['inv_lbs_model_config']),
fwd_lbs=FWDLBS.from_cfg(config['fwd_lbs_model_config']),
leap_occupancy_decoder=LEAPOccupancyDecoder.from_cfg(config))
leap_model.load_state_dict(model_state_dict)
return leap_model
def to(self, **kwargs):
self.inv_lbs = self.inv_lbs.to(**kwargs)
self.fwd_lbs = self.fwd_lbs.to(**kwargs)
self.leap_occupancy_decoder = self.leap_occupancy_decoder.to(**kwargs)
return self
def eval(self):
self.inv_lbs.eval()
self.fwd_lbs.eval()
self.leap_occupancy_decoder.eval()
|
90597
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
# import torch.nn.functional as F
import torch.optim as optim
import utils.utils as util
import utils.quantization as q
import numpy as np
import os, time, sys
import copy
import argparse
#########################
# supported model candidates
candidates = [
'binput-pg',
]
#########################
#----------------------------
# Argument parser.
#----------------------------
parser = argparse.ArgumentParser(description='PyTorch CIFAR-10 Training')
parser.add_argument('--model_id', '-id', type=int, default=0)
parser.add_argument('--gtarget', '-g', type=float, default=0.0)
parser.add_argument('--init_lr', '-lr', type=float, default=1e-3)
parser.add_argument('--batch_size', '-b', type=int, default=128)
parser.add_argument('--num_epoch', '-e', type=int, default=250)
parser.add_argument('--weight_decay', '-wd', type=float, default=1e-5)
parser.add_argument('--last_epoch', '-last', type=int, default=-1)
parser.add_argument('--finetune', '-f', action='store_true', help='finetune the model')
parser.add_argument('--save', '-s', action='store_true', help='save the model')
parser.add_argument('--test', '-t', action='store_true', help='test only')
parser.add_argument('--resume', '-r', type=str, default=None,
help='path of the model checkpoint for resuming training')
parser.add_argument('--data_dir', '-d', type=str, default='/tmp/cifar10_data',
help='path to the dataset directory')
parser.add_argument('--which_gpus', '-gpu', type=str, default='0', help='which gpus to use')
args = parser.parse_args()
_ARCH = candidates[args.model_id]
drop_last = True if 'binput' in _ARCH else False
#----------------------------
# Load the CIFAR-10 dataset.
#----------------------------
def load_cifar10():
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transform_train_list = [
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
]
transform_test_list = [transforms.ToTensor()]
if 'binput' not in _ARCH:
transform_train_list.append(normalize)
transform_test_list.append(normalize)
transform_train = transforms.Compose(transform_train_list)
transform_test = transforms.Compose(transform_test_list)
# pin_memory=True makes transfering data from host to GPU faster
trainset = torchvision.datasets.CIFAR10(root=args.data_dir, train=True,
download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,
shuffle=True, num_workers=2,
pin_memory=True, drop_last=drop_last)
testset = torchvision.datasets.CIFAR10(root=args.data_dir, train=False,
download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size,
shuffle=False, num_workers=2,
pin_memory=True, drop_last=drop_last)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
return trainloader, testloader, classes
#----------------------------
# Define the model.
#----------------------------
def generate_model(model_arch):
if 'binput-pg' in model_arch:
import model.fracbnn_cifar10 as m
return m.resnet20(batch_size=args.batch_size, num_gpus=torch.cuda.device_count())
else:
raise NotImplementedError("Model architecture is not supported.")
#----------------------------
# Train the network.
#----------------------------
def train_model(trainloader, testloader, net,
optimizer, scheduler, start_epoch, device):
# define the loss function
criterion = (nn.CrossEntropyLoss().cuda()
if torch.cuda.is_available() else nn.CrossEntropyLoss())
best_acc = 0.0
best_model = copy.deepcopy(net.state_dict())
for epoch in range(start_epoch, args.num_epoch): # loop over the dataset multiple times
# set printing functions
batch_time = util.AverageMeter('Time/batch', ':.2f')
losses = util.AverageMeter('Loss', ':6.2f')
top1 = util.AverageMeter('Acc', ':6.2f')
progress = util.ProgressMeter(
len(trainloader),
[losses, top1, batch_time],
prefix="Epoch: [{}]".format(epoch+1)
)
# switch the model to the training mode
net.train()
print('current learning rate = {}'.format(optimizer.param_groups[0]['lr']))
# each epoch
end = time.time()
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data[0].to(device), data[1].to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
if 'pg' in _ARCH:
for name, param in net.named_parameters():
if 'threshold' in name:
loss += (0.00001 * 0.5 *
torch.norm(param-args.gtarget) *
torch.norm(param-args.gtarget))
loss.backward()
optimizer.step()
# measure accuracy and record loss
_, batch_predicted = torch.max(outputs.data, 1)
batch_accu = 100.0 * (batch_predicted == labels).sum().item() / labels.size(0)
losses.update(loss.item(), labels.size(0))
top1.update(batch_accu, labels.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % 100 == 99:
# print statistics every 100 mini-batches each epoch
progress.display(i) # i = batch id in the epoch
# update the learning rate
scheduler.step()
# print test accuracy every few epochs
if epoch % 1 == 0:
print('epoch {}'.format(epoch+1))
epoch_acc = test_accu(testloader, net, device)
if 'pg' in _ARCH:
sparsity(testloader, net, device)
if epoch_acc >= best_acc:
best_acc = epoch_acc
best_model = copy.deepcopy(net.state_dict())
print("The best test accuracy so far: {:.1f}".format(best_acc))
# save the model if required
if args.save:
print("Saving the trained model and states.")
this_file_path = os.path.dirname(os.path.abspath(__file__))
save_folder = os.path.join(this_file_path, 'save_CIFAR10_model')
util.save_models(best_model, save_folder,
suffix=_ARCH+'-finetune' if args.finetune else _ARCH)
"""
states = {'epoch':epoch+1,
'optimizer':optimizer.state_dict(),
'scheduler':scheduler.state_dict()}
util.save_states(states, save_folder, suffix=_ARCH)
"""
print('Finished Training')
#----------------------------
# Test accuracy.
#----------------------------
def test_accu(testloader, net, device):
correct = 0
total = 0
# switch the model to the evaluation mode
net.eval()
with torch.no_grad():
for data in testloader:
images, labels = data[0].to(device), data[1].to(device)
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = 100.0 * correct / total
print('Accuracy of the network on the 10000 test images: %.1f %%' % accuracy)
return accuracy
#----------------------------
# Report sparsity in PG
#----------------------------
def sparsity(testloader, net, device):
num_out, num_high = [], []
def _report_sparsity(m):
classname = m.__class__.__name__
if isinstance(m, q.PGBinaryConv2d):
num_out.append(m.num_out)
num_high.append(m.num_high)
net.eval()
# initialize cnt_out, cnt_high
net.apply(_report_sparsity)
cnt_out = np.zeros(len(num_out))
cnt_high = np.zeros(len(num_high))
num_out, num_high = [], []
with torch.no_grad():
for data in testloader:
images, labels = data[0].to(device), data[1].to(device)
outputs = net(images)
""" calculate statistics per PG layer """
net.apply(_report_sparsity)
cnt_out += np.array(num_out)
cnt_high += np.array(num_high)
num_out = []
num_high = []
print('Sparsity of the update phase: %.1f %%' %
(100.0-np.sum(cnt_high)*1.0/np.sum(cnt_out)*100.0))
#----------------------------
# Remove the saved placeholder
#----------------------------
def remove_placeholder(state_dict):
from collections import OrderedDict
temp_state_dict = OrderedDict()
for key, value in state_dict.items():
if 'encoder.placeholder' in key:
pass
else:
temp_state_dict[key] = value
return temp_state_dict
#----------------------------
# Main function.
#----------------------------
def main():
os.environ["CUDA_VISIBLE_DEVICES"] = args.which_gpus
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Available GPUs: {}".format(torch.cuda.device_count()))
print("Create {} model.".format(_ARCH))
net = generate_model(_ARCH)
if torch.cuda.device_count() > 1:
# dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
print("Activate multi GPU support.")
net = nn.DataParallel(net)
net.to(device)
#------------------
# Load model params
#------------------
if args.resume is not None:
model_path = args.resume
if os.path.exists(model_path):
print("@ Load trained model from {}.".format(model_path))
state_dict = torch.load(model_path)
state_dict = remove_placeholder(state_dict)
net.load_state_dict(state_dict, strict=False)
else:
raise ValueError("Model not found.")
#-----------------
# Prepare Data
#-----------------
print("Loading the data.")
trainloader, testloader, classes = load_cifar10()
#-----------------
# Test
#-----------------
if args.test:
print("Mode: Test only.")
test_accu(testloader, net, device)
if 'pg' in _ARCH:
sparsity(testloader, net, device)
#-----------------
# Finetune
#-----------------
elif args.finetune:
print("num epochs = {}".format(args.num_epoch))
initial_lr = args.init_lr
print("init lr = {}".format(initial_lr))
optimizer = optim.Adam(net.parameters(),
lr = initial_lr,
weight_decay=0.)
lr_decay_milestones = [100, 150, 200]
print("milestones = {}".format(lr_decay_milestones))
scheduler = optim.lr_scheduler.MultiStepLR(
optimizer,
milestones=lr_decay_milestones,
gamma=0.1,
last_epoch=args.last_epoch)
start_epoch=0
print("Start finetuning.")
train_model(trainloader, testloader, net,
optimizer, scheduler, start_epoch, device)
test_accu(testloader, net, device)
#-----------------
# Train
#-----------------
else:
print("num epochs = {}".format(args.num_epoch))
#-----------
# Optimizer
#-----------
initial_lr = args.init_lr
optimizer = optim.Adam(net.parameters(),
lr = initial_lr,
weight_decay=args.weight_decay)
#-----------
# Scheduler
#-----------
print("Use linear learning rate decay.")
lambda1 = lambda epoch : (1.0-epoch/args.num_epoch) # linear decay
#lambda1 = lambda epoch : (0.7**epoch) # exponential decay
scheduler = optim.lr_scheduler.LambdaLR(
optimizer,
lr_lambda=lambda1,
last_epoch=args.last_epoch)
start_epoch = 0
print("Start training.")
train_model(trainloader, testloader, net,
optimizer, scheduler, start_epoch, device)
test_accu(testloader, net, device)
if __name__ == "__main__":
main()
|
90609
|
import kfp
from kfp import dsl
from kfp.components import func_to_container_op
@func_to_container_op
def show_results(decision_tree : float, logistic_regression : float) -> None:
# Given the outputs from decision_tree and logistic regression components
# the results are shown.
print(f"Decision tree (accuracy): {decision_tree}")
print(f"Logistic regression (accuracy): {logistic_regression}")
@dsl.pipeline(name='First Pipeline', description='Applies Decision Tree and Logistic Regression for classification problem.')
def first_pipeline():
# Loads the yaml manifest for each component
download = kfp.components.load_component_from_file('download_data/download_data.yaml')
decision_tree = kfp.components.load_component_from_file('decision_tree/decision_tree.yaml')
logistic_regression = kfp.components.load_component_from_file('logistic_regression/logistic_regression.yaml')
# Run download_data task
download_task = download()
# Run tasks "decison_tree" and "logistic_regression" given
# the output generated by "download_task".
decision_tree_task = decision_tree(download_task.output)
logistic_regression_task = logistic_regression(download_task.output)
# Given the outputs from "decision_tree" and "logistic_regression"
# the component "show_results" is called to print the results.
show_results(decision_tree_task.output, logistic_regression_task.output)
if __name__ == '__main__':
kfp.compiler.Compiler().compile(first_pipeline, 'FirstPipeline.yaml')
# kfp.Client().create_run_from_pipeline_func(basic_pipeline, arguments={})
|
90659
|
import numpy as np
def softmax(x):
    """Numerically stable softmax (leaves the input array unmodified)."""
    x = x - np.max(x, axis=0)
    e_x = np.exp(x)
    return e_x / np.sum(e_x, axis=0)
def get_idx_aug_baseline(LOO_influences):
"""Returns points randomly"""
idxs = np.random.choice(
len(LOO_influences),
len(LOO_influences),
p=None,
replace=False,
)
for idx in idxs:
yield [idx]
def get_idx_aug_influence(LOO_influences):
"""Returns points with probability proportional to magnitude of LOO"""
p = np.abs(LOO_influences, dtype=float)
p[p == 0] = min(np.min(p[p > 0]), 1e-20)
p /= np.sum(p)
idxs = np.random.choice(
len(LOO_influences),
len(LOO_influences),
p=p,
replace=False,
)
for idx in idxs:
yield [idx]
def get_idx_aug_k_dpp(LOO_influences, k):
"""Returns points with probability proportional to L matrix using DPP"""
import sample_dpp
L = LOO_influences.T.dot(LOO_influences)
assert len(L) == len(LOO_influences)
idxs = sample_dpp.oct_sample_k_dpp(
L,
k=k,
one_hot=False)
for idx in idxs:
yield [idx]
def get_idx_aug_influence_reverse(LOO_influences):
"""Returns points with probability proportional to magnitude of LOO"""
p = np.abs(LOO_influences)
p[p == 0] = min(np.min(p[p > 0]), 1e-20)
p = 1 / p
p /= np.sum(p)
p[p == 0] = 1e-20
p /= np.sum(p)
idxs = np.random.choice(
len(LOO_influences),
len(LOO_influences),
p=p,
replace=False,
)
for idx in idxs:
yield [idx]
def get_idx_aug_softmax_influence(LOO_influences):
"""Returns points with probability proportional to softmax of magnitude
of LOO"""
p = np.abs(LOO_influences)
p[p == 0] = min(np.min(p[p > 0]), 1e-20)
    p = softmax(p)
idxs = np.random.choice(
len(LOO_influences),
len(LOO_influences),
p=p,
replace=False,
)
for idx in idxs:
yield [idx]
def get_idx_aug_softmax_influence_reverse(LOO_influences):
"""Returns points with probability proportional to softmax of magnitude
of LOO"""
p = np.abs(LOO_influences)
p[p == 0] = min(np.min(p[p > 0]), 1e-20)
p = 1 / p
    p = softmax(p)
p[p == 0] = 1e-20
p /= np.sum(p)
idxs = np.random.choice(
len(LOO_influences),
len(LOO_influences),
p=p,
replace=False,
)
for idx in idxs:
yield [idx]
def get_idx_aug_deterministic_influence(LOO_influences):
"""Returns points in deterministic order ranked by LOO magnitude"""
idxs = np.argsort(-np.abs(LOO_influences))
for idx in idxs:
yield [idx]
def get_idx_aug_deterministic_influence_reverse(LOO_influences):
"""Returns points in deterministic order ranked by LOO magnitude"""
idxs = np.argsort(np.abs(LOO_influences))
for idx in idxs:
yield [idx]
name_to_policy = {
"baseline": get_idx_aug_baseline,
"random_proportional": get_idx_aug_influence,
"random_inverse_proportional": get_idx_aug_influence_reverse,
"random_softmax_proportional": get_idx_aug_softmax_influence,
"random_inverse_softmax_proportional":
get_idx_aug_softmax_influence_reverse,
"deterministic_proportional": get_idx_aug_deterministic_influence,
"deterministic_inverse_proportional":
get_idx_aug_deterministic_influence_reverse,
}
def get_policy_by_name(name):
return name_to_policy[name]
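# Minimal usage sketch (illustrative values): each policy returns a generator
# that yields one-element index lists, e.g. for ranking points to augment.
if __name__ == "__main__":
    loo = np.array([0.3, -0.1, 0.0, 0.7])  # hypothetical LOO influence scores
    policy = get_policy_by_name("deterministic_proportional")
    print([idx for idx in policy(loo)])  # [[3], [0], [1], [2]]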
|
90672
|
import errno
import os
import logging
import urllib2
import httplib
import urlparse
import m3u8
import shutil
import crypto
from futures import ThreadPoolExecutor, TimeoutError
import helpers
import atomic
from transcode import transcode_playlist
config = helpers.load_config()
NUM_THREAD_WORKERS = config.getint('hlsclient', 'num_thread_workers')
DOWNLOAD_TIMEOUT = config.getint('hlsclient', 'download_timeout')
CONSUME_TIMEOUT = config.getint('hlsclient', 'consume_timeout')
def consume_from_balancer(balancer, playlists, destination, encrypt=False):
'''
Consume all active playlist resources from ``balancer`` and
report status to it.
'''
def consume_resource(playlist_resource):
m3u8_uri = "{server}:{port}{path}".format(
server=playlist_resource.server.server,
port=playlist_resource.server.port,
path=playlists['streams'][playlist_resource.key]['input-path'])
try:
segments_modified = consume(m3u8_uri, destination, encrypt)
except (httplib.HTTPException, urllib2.HTTPError, IOError, OSError) as err:
logging.warning(u'Notifying error for resource %s: %s' % (m3u8_uri, err))
balancer.notify_error()
else:
if segments_modified:
logging.info('Notifying content modified: %s' % m3u8_uri)
balancer.notify_modified()
m3u8_path = os.path.join(build_full_path(destination, m3u8_uri), os.path.basename(m3u8_uri))
transcode_playlist(playlists, playlist_resource.key, segments_modified, m3u8_path)
else:
logging.debug('Content not modified: %s' % m3u8_uri)
try:
with ThreadPoolExecutor(max_workers=NUM_THREAD_WORKERS) as executor:
list(executor.map(consume_resource, balancer.actives, timeout=CONSUME_TIMEOUT))
except TimeoutError:
balancer.notify_error()
def consume(m3u8_uri, destination_path, encrypt=False):
'''
Given a ``m3u8_uri``, downloads all files to disk
The remote path structure is maintained under ``destination_path``
- encrypt:
If False, keeps existing encryption
If None, decrypts file
If True, a new key is created
'''
logging.debug('Consuming %s' % m3u8_uri)
playlist = m3u8.load(m3u8_uri)
if playlist.is_variant:
return consume_variant_playlist(playlist, m3u8_uri, destination_path, encrypt)
else:
return consume_single_playlist(playlist, m3u8_uri, destination_path, encrypt)
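# Minimal usage sketch (hypothetical URI and destination): mirror a playlist tree
# locally, keeping whatever encryption the source already uses.
# consume('http://example.com/live/stream.m3u8', '/var/hls', encrypt=False)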
def consume_variant_playlist(playlist, m3u8_uri, destination_path, encrypt=False):
changed = False
full_path = build_full_path(destination_path, m3u8_uri)
for p in playlist.playlists:
changed |= bool(consume(p.absolute_uri, destination_path, encrypt))
save_m3u8(playlist, m3u8_uri, full_path)
return changed
def consume_single_playlist(playlist, m3u8_uri, destination_path, encrypt=False):
full_path = build_full_path(destination_path, m3u8_uri)
if encrypt:
key_name = crypto.get_key_name(m3u8_uri)
new_key = crypto.get_key(key_name, full_path)
else:
new_key = encrypt
downloaded_key = download_key(playlist, full_path, new_key)
downloaded_segments = download_segments(playlist, full_path, new_key)
m3u8_has_changed = downloaded_key or any(downloaded_segments)
if m3u8_has_changed:
save_m3u8(playlist, m3u8_uri, full_path, new_key)
return filter(None, downloaded_segments)
return False
def build_intermediate_path(m3u8_uri):
'''
Returns the original m3u8 base path
'''
url_path = urlparse.urlparse(m3u8_uri).path
return os.path.dirname(url_path)
def build_full_path(destination_path, m3u8_uri):
'''
Returns the path where the m3u8, ts and bin will be saved.
'''
intermediate_path = build_intermediate_path(m3u8_uri)[1:] # ignore first "/"
full_path = os.path.join(destination_path, intermediate_path)
ensure_directory_exists(full_path)
return full_path
def ensure_directory_exists(directory):
try:
os.makedirs(directory)
except OSError as error:
if error.errno != errno.EEXIST:
raise
def download_segments(playlist, destination_path, new_key):
uris = [segment.absolute_uri for segment in playlist.segments]
def download(uri):
try:
return download_to_file(uri, destination_path, playlist.key, new_key)
except urllib2.HTTPError as err:
if err.code == 404:
logging.warning(u'Got 404 trying to download %s' % (uri,))
return None
raise
with ThreadPoolExecutor(max_workers=NUM_THREAD_WORKERS) as executor:
downloads = executor.map(download, uris, timeout=CONSUME_TIMEOUT)
return list(downloads)
def save_m3u8(playlist, m3u8_uri, full_path, new_key=False):
'''
Saves the m3u8, updating the key if needed
- new_key:
If False, keeps existing encryption
If None, decrypts file
If any other value, this value is set
'''
playlist.basepath = build_intermediate_path(m3u8_uri)
if new_key:
crypto.save_new_key(new_key, full_path)
playlist.version = "2"
playlist.key = new_key
elif new_key is None:
playlist.key = None
filename = os.path.join(full_path, os.path.basename(m3u8_uri))
atomic_dump(playlist, filename)
def atomic_dump(playlist, filename):
with atomic.AtomicWriteFile(filename) as tmp_filename:
playlist.dump(tmp_filename)
def download_key(playlist, destination_path, new_key):
if playlist.key:
filename = download_to_file(playlist.key.absolute_uri, destination_path)
with open(filename, 'rb') as f:
playlist.key.key_value = f.read()
return True
def download_to_file(uri, destination_path, current_key=None, new_key=False):
'''
    Retrieves the file if it does not exist locally and changes the encryption if needed.
'''
filename = os.path.join(destination_path, os.path.basename(uri))
if not os.path.exists(filename):
logging.debug("Downloading {url}".format(url=uri))
raw = urllib2.urlopen(url=uri, timeout=DOWNLOAD_TIMEOUT)
if new_key is not False:
plain = crypto.Decrypt(raw, current_key) if current_key else raw
raw = crypto.Encrypt(plain, new_key) if new_key else plain
atomic_write(raw, filename)
return filename
else:
# change modification time so the file is not removed by hlsclient.cleaner.clean
os.utime(filename, None)
return False
def atomic_write(content, filename):
with atomic.AtomicWriteFileObj(filename) as f:
shutil.copyfileobj(content, f)
|
90690
|
import os
import urllib
from wordpress_xmlrpc import Client, WordPressPost
from wordpress_xmlrpc.compat import xmlrpc_client
from wordpress_xmlrpc.methods import media, posts
########################### Read Me First ###############################
'''
------------------------------------------In DETAIL--------------------------------
Description
===========
Add new posts to WordPress remotely from Python using the XML-RPC library provided by WordPress.
Installation Requirement
************************
Verify you meet the following requirements
==========================================
Install Python 2.7 (don't download 3+, as most libraries don't yet support version 3).
Install from PyPI using easy_install python-wordpress-xmlrpc
Easy_Install Link: https://pypi.python.org/pypi/setuptools
==========================================
Windows Installation Guide
==========================
-Download and install Easy_Install from the link above.
-Extract the downloaded file, go to the extracted directory from CMD and run 'python setup.py install'. This installs easy_install.
-Go to %/python27/script and run the following command: easy_install python-wordpress-xmlrpc
Ubuntu Installation Guide
=========================
sudo apt-get install python-setuptools
sudo easy_install python-wordpress-xmlrpc
Note: the script ships with dummy data so it works out of the box; you can easily change it or integrate it with your own code to make it more dynamic.
****************************************
For Bugs/Suggestions
<EMAIL>
****************************************
------------------------------------------In DETAIL--------------------------------
'''
class Custom_WP_XMLRPC:
def post_article(self,wpUrl,wpUserName,wpPassword,articleTitle, articleCategories, articleContent, articleTags,PhotoUrl):
        self.path = os.path.join(os.getcwd(), "00000001.jpg")
self.articlePhotoUrl=PhotoUrl
self.wpUrl=wpUrl
self.wpUserName=wpUserName
        self.wpPassword = wpPassword
#Download File
f = open(self.path,'wb')
f.write(urllib.urlopen(self.articlePhotoUrl).read())
f.close()
#Upload to WordPress
client = Client(self.wpUrl,self.wpUserName,self.wpPassword)
filename = self.path
# prepare metadata
data = {'name': 'picture.jpg','type': 'image/jpg',}
# read the binary file and let the XMLRPC library encode it into base64
with open(filename, 'rb') as img:
data['bits'] = xmlrpc_client.Binary(img.read())
response = client.call(media.UploadFile(data))
attachment_id = response['id']
#Post
post = WordPressPost()
post.title = articleTitle
post.content = articleContent
post.terms_names = { 'post_tag': articleTags,'category': articleCategories}
post.post_status = 'publish'
post.thumbnail = attachment_id
post.id = client.call(posts.NewPost(post))
        print 'Post successfully published. Its id is: ', post.id
#########################################
# POST & Wp Credentials Detail #
#########################################
#Url of Image on the internet
articlePhotoUrl='http://i1.tribune.com.pk/wp-content/uploads/2013/07/584065-twitter-1375197036-960-640x480.jpg'
# Don't forget the /xmlrpc.php because that's your posting address for the XML-RPC server
wpUrl='http://YourWebSite.com/xmlrpc.php'
#WordPress Username
wpUserName='WordPressUsername'
#WordPress Password
wpPassword='<PASSWORD>'
#Post Title
articleTitle='Testing Python Script version 3'
#Post Body/Description
articleContent='Final .... Testing Fully Automated'
#list of tags
articleTags=['code','python']
#list of Categories
articleCategories=['language','art']
#########################################
# Creating Class object & calling the xml rpc custom post Function
#########################################
xmlrpc_object = Custom_WP_XMLRPC()
#On Post submission this function will print the post id
xmlrpc_object.post_article(wpUrl, wpUserName, wpPassword, articleTitle, articleCategories, articleContent, articleTags, articlePhotoUrl)
|
90692
|
from bpy_speckle.convert.to_native import convert_to_native
def get_speckle_subobjects(attr, scale, name):
subobjects = []
for key in attr.keys():
if isinstance(attr[key], dict):
subtype = attr[key].get("type", None)
if subtype:
                subobject_name = f"{name}.{key}"
                subobject = convert_to_native(attr[key], subobject_name)
                subobjects.append(subobject)
                props = attr[key].get("properties", None)
                if props:
                    subobjects.extend(get_speckle_subobjects(props, scale, subobject_name))
elif hasattr(attr[key], "type"):
subtype = attr[key].type
if subtype:
name = "{}.{}".format(name, key)
subobject = convert_to_native(attr[key], name)
subobjects.append(subobject)
props = attr[key].get("properties", None)
if props:
subobjects.extend(get_speckle_subobjects(props, scale, name))
return subobjects
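# Minimal usage sketch (hypothetical attribute dict): nested dicts carrying a
# "type" entry are converted to native Blender data and collected recursively.
# attrs = {"level_0": {"type": "Mesh", "properties": {}}}
# objects = get_speckle_subobjects(attrs, scale=0.001, name="speckle_commit")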
|
90715
|
from recon.core.module import BaseModule
from recon.utils.crypto import aes_decrypt
class Module(BaseModule):
meta = {
'name': 'PwnedList - Pwned Domain Credentials Fetcher',
'author': '<NAME> (@LaNMaSteR53)',
'description': 'Queries the PwnedList API to fetch all credentials for a domain. Updates the \'credentials\' table with the results.',
'required_keys': ['pwnedlist_api', 'pwnedlist_secret', 'pwnedlist_iv'],
'comments': (
'API Query Cost: 10,000 queries per request, 1 query for each account returned, and 1 query per unique leak.',
),
'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL',
}
def module_run(self, domains):
key = self.keys.get('pwnedlist_api')
secret = self.keys.get('pwnedlist_secret')
decrypt_key = secret[:16]
iv = self.keys.get('pwnedlist_iv')
# setup the API call
url = 'https://api.pwnedlist.com/api/1/domains/query'
for domain in domains:
self.heading(domain, level=0)
payload = {'domain_identifier': domain, 'daysAgo': 0}
while True:
# build the payload
pwnedlist_payload = self.build_pwnedlist_payload(payload, 'domains.query', key, secret)
# make the request
resp = self.request(url, payload=pwnedlist_payload)
                if resp.json:
                    jsonobj = resp.json
                else:
                    self.error('Invalid JSON response for \'%s\'.\n%s' % (domain, resp.text))
                    break
if len(jsonobj['accounts']) == 0:
self.output('No results returned for \'%s\'.' % (domain))
break
# extract the credentials
for cred in jsonobj['accounts']:
username = cred['plain']
password = aes_decrypt(cred['password'], decrypt_key, iv)
leak = cred['leak_id']
self.add_credentials(username=username, password=password, leak=leak)
self.add_leaks(mute=True, **self.get_pwnedlist_leak(leak))
# paginate
if jsonobj['token']:
payload['token'] = jsonobj['token']
continue
break
|
90725
|
from .LevenshteinMixin import LevenshteinMixin
from .SimpleSearchMixin import SimpleSearchMixin
from .YearsMixin import YearsMixin
|