| content (stringlengths 0–1.05M) | origin (stringclasses 2 values) | type (stringclasses 2 values) |
|---|---|---|
import json
from csv import DictReader
def parse_txt(fd, settings):
return fd.read().splitlines()
def parse_csv(fd, settings):
return [dict(x) for x in DictReader(fd)]
def parse_json(fd, settings):
return json.load(fd)
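# A hedged usage sketch (not part of the original snippet), using in-memory file
# objects so it runs standalone; the `settings` argument is unused by these parsers.
from io import StringIO
print(parse_csv(StringIO("a,b\n1,2\n3,4"), settings=None))  # [{'a': '1', 'b': '2'}, {'a': '3', 'b': '4'}]
print(parse_json(StringIO('{"a": 1}'), settings=None))      # {'a': 1}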
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import pytorchvideo
AVAILABLE_3D_BACKBONES = [
"i3d_r50",
"c2d_r50",
"csn_r101",
"r2plus1d_r50",
"slow_r50",
"slowfast_r50",
"slowfast_r101",
"slowfast_16x8_r101_50_50",
"x3d_xs",
"x3d_s",
"x3d_m",
"x3d_l",
]
class CNN3D(nn.Module):
"""
Initializes the 3D Convolution backbone.
**Supported Backbones**
- `i3d_r50`
- `c2d_r50`
- `csn_r101`
    - `r2plus1d_r50`
- `slow_r50`
- `slowfast_r50`
- `slowfast_r101`
- `slowfast_16x8_r101_50_50`
- `x3d_xs`
- `x3d_s`
- `x3d_m`
- `x3d_l`
Args:
in_channels (int): Number of input channels
backbone (string): Backbone to use
pretrained (bool, optional): Whether to use pretrained Backbone. Default: ``True``
**kwargs (optional): Will be passed to pytorchvideo.models.hub models;
"""
def __init__(self, in_channels, backbone, pretrained=True, **kwargs):
super().__init__()
self.backbone = self.get_3d_backbone(
backbone, in_channels, pretrained, **kwargs
)
self.n_out_features = 400 # list(self.backbone.modules())[-2].out_features
def forward(self, x):
"""
forward step
"""
x = self.backbone(x)
return x.transpose(0, 1) # Batch-first
def get_3d_backbone(
self,
name,
in_channels=3,
pretrained: bool = False,
progress: bool = True,
**kwargs
):
        assert name in AVAILABLE_3D_BACKBONES, "Please use a backbone from " + str(
AVAILABLE_3D_BACKBONES
)
import pytorchvideo.models.hub as ptv_hub
model = getattr(ptv_hub, name)(
pretrained=pretrained, progress=progress, **kwargs
)
if in_channels != 3:
reshape_conv_input_size(in_channels, model)
return model
def reshape_conv_input_size(in_channels, model):
"""
        Change the first convolution layer to adapt to a different number of input channels
"""
assert in_channels == 1 or in_channels >= 4
for module in model.modules():
if isinstance(module, nn.Conv3d):
break
module.in_channels = in_channels
weight = module.weight.detach()
if in_channels == 1:
module.weight = nn.parameter.Parameter(weight.sum(1, keepdim=True))
else:
curr_in_channels = module.weight.shape[1]
to_concat = torch.Tensor(
module.out_channels,
module.in_channels - curr_in_channels,
*module.kernel_size,
)
module.weight = nn.parameter.Parameter(
                torch.cat([module.weight, to_concat], dim=1)
)
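# A minimal usage sketch (an assumption, not part of the original module): pytorchvideo
# hub backbones take clips shaped (batch, channels, frames, height, width).
if __name__ == "__main__":
    model = CNN3D(in_channels=3, backbone="slow_r50", pretrained=False)
    dummy_clip = torch.randn(2, 3, 8, 224, 224)  # hypothetical batch of 2 short clips
    print(model(dummy_clip).shape)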
|
nilq/baby-python
|
python
|
# Copyright 2016 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from vmware_nsx.plugins.nsx_v3 import cert_utils
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils
from vmware_nsx.shell import resources as shell
from vmware_nsxlib.v3 import client_cert
from vmware_nsxlib.v3 import trust_management
from neutron_lib.callbacks import registry
from neutron_lib import context
from neutron_lib import exceptions
from oslo_config import cfg
LOG = logging.getLogger(__name__)
CERT_DEFAULTS = {'key-size': 2048,
'sig-alg': 'sha256',
'valid-days': 3650,
'country': 'US',
'state': 'California',
'org': 'default org',
'unit': 'default unit',
'host': 'defaulthost.org'}
def get_nsx_trust_management(**kwargs):
username, password = None, None
if kwargs.get('property'):
properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
username = properties.get('user')
password = properties.get('password')
nsx_client = utils.get_nsxv3_client(username, password, True)
nsx_trust = trust_management.NsxLibTrustManagement(nsx_client, {})
return nsx_trust
def get_certificate_manager(**kwargs):
storage_driver_type = cfg.CONF.nsx_v3.nsx_client_cert_storage.lower()
LOG.info("Certificate storage is %s", storage_driver_type)
if storage_driver_type == 'nsx-db':
storage_driver = cert_utils.DbCertificateStorageDriver(
context.get_admin_context())
elif storage_driver_type == 'none':
storage_driver = cert_utils.DummyCertificateStorageDriver()
# TODO(annak) - add support for barbican storage driver
return client_cert.ClientCertificateManager(
cert_utils.NSX_OPENSTACK_IDENTITY,
get_nsx_trust_management(**kwargs),
storage_driver)
def verify_client_cert_on():
if cfg.CONF.nsx_v3.nsx_use_client_auth:
return True
LOG.info("Operation not applicable since client authentication "
"is disabled")
return False
@admin_utils.output_header
def generate_cert(resource, event, trigger, **kwargs):
"""Generate self signed client certificate and private key
"""
if not verify_client_cert_on():
return
if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() == "none":
LOG.info("Generate operation is not supported "
"with storage type 'none'")
return
# update cert defaults based on user input
properties = CERT_DEFAULTS.copy()
if kwargs.get('property'):
properties.update(admin_utils.parse_multi_keyval_opt(
kwargs['property']))
try:
prop = 'key-size'
key_size = int(properties.get(prop))
prop = 'valid-days'
valid_for_days = int(properties.get(prop))
except ValueError:
LOG.info("%s property must be a number", prop)
return
signature_alg = properties.get('sig-alg')
# TODO(annak): use nsxlib constants when they land
subject = {}
subject['country'] = properties.get('country')
subject['state'] = properties.get('state')
subject['organization'] = properties.get('org')
    subject['unit'] = properties.get('unit')
subject['hostname'] = properties.get('host')
with get_certificate_manager(**kwargs) as cert:
if cert.exists():
LOG.info("Deleting existing certificate")
# Need to delete cert first
cert.delete()
try:
cert.generate(subject, key_size, valid_for_days, signature_alg)
except exceptions.InvalidInput as e:
LOG.info(e)
return
LOG.info("Client certificate generated succesfully")
@admin_utils.output_header
def delete_cert(resource, event, trigger, **kwargs):
"""Delete client certificate and private key """
if not verify_client_cert_on():
return
with get_certificate_manager(**kwargs) as cert:
if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() == "none":
filename = get_cert_filename(**kwargs)
if not filename:
LOG.info("Please specify file containing the certificate "
"using filename property")
return
cert.delete_pem(filename)
else:
if not cert.exists():
LOG.info("Nothing to clean")
return
cert.delete()
LOG.info("Client certificate deleted succesfully")
@admin_utils.output_header
def show_cert(resource, event, trigger, **kwargs):
"""Show client certificate details """
if not verify_client_cert_on():
return
with get_certificate_manager(**kwargs) as cert:
if cert.exists():
cert_pem, key_pem = cert.get_pem()
expires_on = cert.expires_on()
expires_in_days = cert.expires_in_days()
cert_data = cert.get_subject()
cert_data['alg'] = cert.get_signature_alg()
cert_data['key_size'] = cert.get_key_size()
if expires_in_days >= 0:
LOG.info("Client certificate is valid. "
"Expires on %(date)s UTC (in %(days)d days).",
{'date': expires_on,
'days': expires_in_days})
else:
LOG.info("Client certificate expired on %s.", expires_on)
LOG.info("Key Size %(key_size)s, "
"Signature Algorithm %(alg)s\n"
"Subject: Country %(country)s, State %(state)s, "
"Organization %(organization)s, Unit %(unit)s, "
"Common Name %(hostname)s", cert_data)
LOG.info(cert_pem)
else:
LOG.info("Client certificate is not registered "
"in storage")
def get_cert_filename(**kwargs):
filename = cfg.CONF.nsx_v3.nsx_client_cert_file
if kwargs.get('property'):
properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
filename = properties.get('filename', filename)
if not filename:
LOG.info("Please specify file containing the certificate "
"using filename property")
return filename
@admin_utils.output_header
def import_cert(resource, event, trigger, **kwargs):
"""Import client certificate that was generated externally"""
if not verify_client_cert_on():
return
if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() != "none":
LOG.info("Import operation is supported "
"with storage type 'none' only")
return
with get_certificate_manager(**kwargs) as cert:
if cert.exists():
LOG.info("Deleting existing certificate")
cert.delete()
filename = get_cert_filename(**kwargs)
if not filename:
return
cert.import_pem(filename)
LOG.info("Client certificate imported succesfully")
@admin_utils.output_header
def show_nsx_certs(resource, event, trigger, **kwargs):
"""Show client certificates associated with openstack identity in NSX"""
# Note - this operation is supported even if the feature is disabled
nsx_trust = get_nsx_trust_management(**kwargs)
ids = nsx_trust.get_identities(cert_utils.NSX_OPENSTACK_IDENTITY)
if not ids:
LOG.info("Principal identity %s not found",
cert_utils.NSX_OPENSTACK_IDENTITY)
return
LOG.info("Certificate(s) associated with principal identity %s\n",
cert_utils.NSX_OPENSTACK_IDENTITY)
cert = None
for identity in ids:
if 'certificate_id' in identity:
cert = nsx_trust.get_cert(identity['certificate_id'])
LOG.info(cert['pem_encoded'])
if not cert:
LOG.info("No certificates found")
registry.subscribe(generate_cert,
constants.CERTIFICATE,
shell.Operations.GENERATE.value)
registry.subscribe(show_cert,
constants.CERTIFICATE,
shell.Operations.SHOW.value)
registry.subscribe(delete_cert,
constants.CERTIFICATE,
shell.Operations.CLEAN.value)
registry.subscribe(import_cert,
constants.CERTIFICATE,
shell.Operations.IMPORT.value)
registry.subscribe(show_nsx_certs,
constants.CERTIFICATE,
shell.Operations.NSX_LIST.value)
|
nilq/baby-python
|
python
|
def distance(x, y):
return (x-y).norm(2,-1)
def invprod(x, y):
return 1/(((x*y).sigmoid()).sum(-1))
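# A small hedged example (assumes x and y are torch tensors; not part of the original file).
import torch
x = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
y = torch.tensor([[3.0, 4.0], [1.0, 1.0]])
print(distance(x, y))  # tensor([5., 0.]) -- L2 norm over the last dimension
print(invprod(x, y))   # 1 / sum(sigmoid(x * y)) over the last dimension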
|
nilq/baby-python
|
python
|
import os
import cv2
import numpy as np
if __name__ == '__main__':
    # Start a DICOM server to receive DICOM files from the X-ray machine
from pydicom.uid import ImplicitVRLittleEndian
from pynetdicom import AE, debug_logger, evt
from pynetdicom.sop_class import XRayAngiographicImageStorage
from pynetdicom.sop_class import _VERIFICATION_CLASSES as VC
debug_logger()
def handle_store(event, storage_dir):
"""Handle EVT_C_STORE events."""
try:
os.makedirs(storage_dir, exist_ok=True)
except:
return 0xC001
ds = event.dataset
if len(ds.PixelData) == 2097152:
img = np.frombuffer(ds.PixelData, dtype=np.uint16)
img = (img.reshape((ds.Rows, ds.Columns)) / 256).astype(np.uint8)
elif len(ds.PixelData) == 3145728:
img = np.frombuffer(ds.PixelData, dtype=np.uint8)
img = img.reshape((ds.Rows, ds.Columns, 3))
else:
            raise Exception('Unsupported pixel data format...')
        img = np.rot90(img, 1)  # TODO: -1 for the lab setup, 1 for the hospital
bmp = os.path.join(storage_dir, ds.SOPInstanceUID + '.bmp')
print(bmp, 'saved...')
cv2.imwrite(bmp, img)
return 0x0000
handlers = [(evt.EVT_C_STORE, handle_store, ['static/data'])]
ae = AE()
ae.add_supported_context(XRayAngiographicImageStorage, ImplicitVRLittleEndian)
for key in VC:
ae.add_supported_context(VC[key])
print('server starting...')
ae.start_server(('0.0.0.0', 5104), block=True, evt_handlers=handlers)
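    # A hedged companion sketch (not part of the original script): sending a study to this
    # server with pynetdicom's requestor API; the file name below is hypothetical.
    #
    #   from pydicom import dcmread
    #   from pynetdicom import AE as ClientAE
    #   client = ClientAE()
    #   client.add_requested_context(XRayAngiographicImageStorage)
    #   assoc = client.associate('127.0.0.1', 5104)
    #   if assoc.is_established:
    #       status = assoc.send_c_store(dcmread('sample_xa.dcm'))
    #       assoc.release()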
|
nilq/baby-python
|
python
|
from app.data_models.relationship_store import Relationship, RelationshipStore
relationships = [
{
"list_item_id": "123456",
"to_list_item_id": "789101",
"relationship": "Husband or Wife",
},
{
"list_item_id": "123456",
"to_list_item_id": "ghijkl",
"relationship": "Husband or Wife",
},
]
def test_serialisation():
relationship_store = RelationshipStore(relationships)
assert relationship_store.serialize() == relationships
def test_deserialisation():
relationship_store = RelationshipStore(relationships)
assert Relationship(**relationships[0]) in relationship_store
assert len(relationship_store) == 2
def test_clear(): # pylint: disable=redefined-outer-name
relationship_store = RelationshipStore(relationships)
relationship_store.clear()
assert not relationship_store
assert relationship_store.is_dirty
def test_add_relationship():
relationship = Relationship(**relationships[0])
relationship_store = RelationshipStore()
relationship_store.add_or_update(relationship)
assert (
relationship_store.get_relationship(
relationship.list_item_id, relationship.to_list_item_id
)
== relationship
)
assert len(relationship_store) == 1
assert relationship_store.is_dirty
def test_add_relationship_that_already_exists():
relationship = relationships[0]
relationship_store = RelationshipStore([relationship])
relationship_store.add_or_update(Relationship(**relationship))
assert len(relationship_store) == 1
assert not relationship_store.is_dirty
def test_get_relationship():
relationship_store = RelationshipStore(relationships)
relationship = relationship_store.get_relationship(
list_item_id="123456", to_list_item_id="789101"
)
assert relationship
def test_get_relationship_that_doesnt_exist():
relationship_store = RelationshipStore(relationships)
relationship = relationship_store.get_relationship(
list_item_id="123456", to_list_item_id="yyyyyy"
)
assert not relationship
def test_remove_relationship():
relationship_store = RelationshipStore(relationships)
relationship_store.remove_relationship(
list_item_id="123456", to_list_item_id="789101"
)
assert relationship_store.is_dirty
assert len(relationship_store) == 1
def test_remove_relationship_that_doesnt_exist():
relationship_store = RelationshipStore(relationships)
relationship_store.remove_relationship(
list_item_id="123456", to_list_item_id="yyyyyy"
)
assert not relationship_store.is_dirty
assert len(relationship_store) == 2
def test_remove_id_in_multiple_relationships():
relationship_store = RelationshipStore(relationships)
relationship_store.remove_all_relationships_for_list_item_id("123456")
assert not relationship_store
assert relationship_store.is_dirty
def test_remove_id_in_single_relationship():
relationship_store = RelationshipStore(relationships)
relationship_store.remove_all_relationships_for_list_item_id("789101")
remaining_relationship = Relationship(**relationships[1])
assert len(relationship_store) == 1
assert (
relationship_store.get_relationship(
remaining_relationship.list_item_id, remaining_relationship.to_list_item_id
)
== remaining_relationship
)
assert relationship_store.is_dirty
def test_update_existing_relationship():
relationship_store = RelationshipStore(relationships)
relationship = Relationship(**relationships[0])
relationship.relationship = "test"
relationship_store.add_or_update(relationship)
assert len(relationship_store) == 2
updated_relationship = relationship_store.get_relationship(
relationship.list_item_id, relationship.to_list_item_id
)
assert updated_relationship.relationship == "test"
assert relationship_store.is_dirty
|
nilq/baby-python
|
python
|
import os
import gc
import gym
import random
import numpy as np
from collections import deque
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
class Actor(nn.Module):
    def __init__(self, state_dim, action_size=2, action_limit=1., epochs=None):
        super(Actor, self).__init__()
        self.epochs = epochs
self.state_dim = state_dim
self.action_dim = action_size
self.action_lim = action_limit
''' softmax network '''
hidden_layers=[64, 32, 8]
modules = []
seq = [state_dim] + hidden_layers
for in_dim, out_dim in zip(seq[: -1], seq[1:]):
modules.append(nn.Linear(in_dim, out_dim))
modules.append(nn.ReLU())
        self.hidden = nn.Sequential(*modules)
self.out = nn.Linear(seq[-1], action_size)
self._init_weight()
def forward(self, state):
x = self.hidden(state)
x = self.out(x)
        action = torch.tanh(x)
action *= self.action_lim
return action
def _init_weight(self):
for m in self.hidden:
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0.01)
        nn.init.normal_(self.out.weight)
        nn.init.constant_(self.out.bias, 0.01)
class Critic(nn.Module):
def __init__(self, state_dim, action_dim):
super(Critic, self).__init__()
self.state_dim = state_dim
self.action_dim = action_dim
        s_layer = [64, 32, 8]
        modules = []
        seq = [state_dim] + s_layer
        for in_dim, out_dim in zip(seq[:-1], seq[1:]):
            modules.append(nn.Linear(in_dim, out_dim))
            modules.append(nn.ReLU())
        self.s_hidden = nn.Sequential(*modules)
        a_layer = [32, 8]
        modules = []
        seq = [action_dim] + a_layer
        for in_dim, out_dim in zip(seq[:-1], seq[1:]):
            modules.append(nn.Linear(in_dim, out_dim))
            modules.append(nn.ReLU())
        self.a_hidden = nn.Sequential(*modules)
        self.out = nn.Linear(a_layer[-1] + s_layer[-1], 1)
self._init_weight()
def _init_weight(self):
for m in self.s_hidden:
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0.01)
for m in self.a_hidden:
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0.01)
nn.init.normal_(self.out.weight)
nn.init.constant_(self.out.bias, 0.01)
def forward(self, state, action):
'''
Q(s, a)
'''
s = self.s_hidden(state)
a = self.a_hidden(action)
x = torch.cat((s, a), dim=1)
x = self.out(x)
return x
class Noise(object):
"""
implement ornstein-uhlenbeck noise
Example:
>>> no = Noise(1)
>>> states = []
>>> for i in range(1000):
... states.append(no.sample())
>>> import matplotlib.pyplot as plt
>>> plt.plot(states)
>>> plt.show()
"""
def __init__(self, action_dim, mu=0, theta=0.15, sigma=0.2):
self.action_dim = action_dim
self.mu = mu
self.theta = theta
self.sigma = sigma
self.X = mu * np.ones(action_dim)
def reset(self):
self.X = np.ones(self.action_dim) * self.mu
def sample(self):
dx = self.theta * (self.mu - self.X)
dx += self.sigma * np.random.randn(len(self.X))
self.X += dx
return self.X
class Trainer(object):
def __init__(self, buffer, state_dim, action_dim, action_limit, batch_size=128, lr=0.001, gamma=0.99, tau=0.001):
self.state_dim = state_dim
self.action_dim = action_dim
self.action_lim = action_limit
self.buffer = buffer
self.iter = 0
self.batch_size = batch_size
self.tau = tau
self.gamma = gamma
self.noise = Noise(action_dim)
self.actor = Actor(state_dim, action_dim, action_limit)
self.target_actor = Actor(state_dim, action_dim, action_limit)
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr)
self.critic = Critic(state_dim, action_dim)
self.target_critic = Critic(state_dim, action_dim)
self.critic_optimizer = optim.Adam(self.critic.parameters(), lr)
self._update(self.target_actor, self.actor)
self._update(self.target_critic, self.critic)
def _update(self, tar, src):
for tar_param, param in zip(tar.parameters(), src.parameters()):
tar_param.data.copy_(param.data)
def _soft_update(self, tar, src):
for target_param, param in zip(tar.parameters(), src.parameters()):
target_param.data.copy_(
target_param.data * (1 - self.tau) + param.data * self.tau
)
def get_exploitation_action(self, state):
state = torch.from_numpy(state)
action = self.target_actor.forward(state).detach()
return action.data.numpy()
def get_exploration_action(self, state):
state = torch.from_numpy(state)
action = self.actor.forward(state).detach()
new_action = action.data.numpy() + (self.noise.sample() * self.action_lim)
return new_action
def optimize(self):
s1, a1, r1, s2 = self.buffer.sample(self.batch_size)
s1 = torch.from_numpy(s1)
a1 = torch.from_numpy(a1)
r1 = torch.from_numpy(r1)
s2 = torch.from_numpy(s2)
''' optimize critic '''
a2 = self.target_actor.forward(s2).detach()
next_val = torch.squeeze(self.target_critic.forward(s2, a2).detach())
val_expected = r1 + self.gamma * next_val
val_predicted = torch.squeeze(self.critic.forward(s1, a1))
critic_loss = F.mse_loss(val_predicted, val_expected)
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
''' optimize actor '''
pred_a1 = self.actor.forward(s1)
actor_loss = -1 * torch.sum(self.critic.forward(s1, pred_a1))
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
self._soft_update(self.target_actor, self.actor)
self._soft_update(self.target_critic, self.critic)
if self.iter % 100 == 0:
print(f'Iteration :- {self.iter}, Loss_actor :- {actor_loss.data.numpy()}, Loss_critic :- {critic_loss.data.numpy()}')
self.iter += 1
def save(self, eps_cnt):
if not os.path.exists('./model/'):
os.makedirs('./model/')
torch.save(self.target_actor.state_dict(), f'./model/{eps_cnt}_actor.pt')
torch.save(self.target_critic.state_dict(), f'./model/{eps_cnt}_critic.pt')
print('Models saved successfully')
def load(self, eps_cnt):
self.actor.load_state_dict(torch.load(f'./model/{eps_cnt}_actor.pt'))
self.critic.load_state_dict(torch.load(f'./model/{eps_cnt}_critic.pt'))
self._update(self.target_actor, self.actor)
self._update(self.target_critic, self.critic)
print('Models loaded successfully')
class Buffer(object):
def __init__(self, size):
self.buffer = deque(maxlen=size)
self.max_size = size
self.len = 0
def sample(self, cnt):
"""
samples a random batch from the replay memory buffer
:param cnt: batch size
:return: batch (numpy array)
"""
        cnt = min(cnt, self.len)
        batch = random.sample(self.buffer, cnt)
s_arr = np.float32([arr[0] for arr in batch])
a_arr = np.float32([arr[1] for arr in batch])
r_arr = np.float32([arr[2] for arr in batch])
s1_arr = np.float32([arr[3] for arr in batch])
return s_arr, a_arr, r_arr, s1_arr
def add(self, s, a, r, s1):
"""
add a particular transaction in the memory buffer
:param s: current state
:param a: action taken
:param r: reward received
:param s1: next state
"""
transaction = (s, a, r, s1)
self.len += 1
if self.len > self.max_size:
self.len = self.max_size
self.buffer.append(transaction)
def length(self):
return self.len
if __name__ == '__main__':
max_episodes = 400
# state_dim = 10
# action_dim = 2
# action_max = 1
max_step = 1000
env = gym.make('BipedalWalker-v2')
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
action_max = env.action_space.high[0]
print(
f'State Dimension : {state_dim}',
f'action Dimension : {action_dim}',
f'action limitation : {action_max}',
sep='\n'
)
ram = Buffer(max_episodes)
trainer = Trainer(ram, state_dim, action_dim, action_max)
for eps in range(max_episodes):
observation = env.reset()
print(f'[EPISODE {eps}]')
for r in range(max_step):
state = np.float32(observation)
action = trainer.get_exploration_action(state)
new_observation, reward, done, info = env.step(action)
if done:
new_state = None
else:
new_state = np.float32(new_observation)
# push this experience in ram
ram.add(state, action, reward, new_state)
observation = new_observation
trainer.optimize()
if done:
break
gc.collect()
if eps % 100 == 0:
trainer.save(eps)
print('Complete!')
|
nilq/baby-python
|
python
|
#All MPOS
MPOS = {"Abilene": {"Jones": "253", "Taylor": "441"},
"Amarillo": {"Potter": "375", "Randall": "381"},
"Brownsville": {"Cameron": "061"},
"Bryan-College Station": {"Brazos": "041"},
"Capital Area": {"Bastrop": "021", "Burnet": "053", "Caldwell": "055", "Hays": "209", "Travis": "453", "Williamson": "491"},
"Corpus Christi": {"Aransas": "007", "Nueces": "355", "San Patricio": "409"},
"El Paso": {"Atascosa": "013", "El Paso": "141"},
"Harlingen-San Benito": {"Cameron": "061"},
"Hidalgo": {"Hidalgo": "215"},
"Killeen-Temple": {"Bell": "027", "Coryell": "099", "Lampasas": "281" },
"Laredo": {"Webb": "479"},
"Longview": {"Gregg": "183", "Harrison": "203", "Rusk": "401", "Upshur": "459"},
"LRGV": {"Cameron": "061", "Hidalgo": "215"},
"Lubbock": {"Lubbock": "303"},
"Midland-Odessa": {"Ector": "135", "Midland": "329"},
"San Angelo": {"Tom Green": "451"},
"Sherman-Denison": {"Grayson": "181"},
"South East Texas": {"Hardin": "199", "Jefferson": "245", "Orange": "361"},
"Texarkana": {"Bowie": "037", "Comal": "091"},
"Victoria": {"Victoria": "469"},
"Waco": {"McLennan": "309"},
"Witchita Falls": {"Archer": "009", "Wichita": "485"}
}
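# A hedged illustration (not part of the original file): the three-digit values look like
# county FIPS codes, so a full five-digit code can be built with the Texas state prefix
# "48" -- an assumption about how this table is meant to be used.
waco_fips = {county: "48" + code for county, code in MPOS["Waco"].items()}
print(waco_fips)  # {'McLennan': '48309'}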
|
nilq/baby-python
|
python
|
from drivers import *
print "Driver loaded"
from drivers.nidaq.asserv import Asserv
from PyDAQmx import *
import numpy as np
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import sys
default_fm_dev = 400 # Modulation depth (Hz for 5 V)
fs = E8254A(gpibAdress=19,name="freqSynth")
default_frequency = fs.frequency
sampling_rate = 1e6 # Hz
modulation_frequency = 271 # Hz
cycle_number = 50 # Number of cycles between fc correction
n_samples_per_cycle = int(sampling_rate/(modulation_frequency*2))*2 #Make sure that this is divisible by 2
modulation_frequency = sampling_rate/n_samples_per_cycle
discarded_samples = n_samples_per_cycle/4
gain = 100000
amplitude = 1 # V
waveform = np.hstack([-amplitude *np.ones(n_samples_per_cycle/2),
amplitude *np.ones(n_samples_per_cycle/2)])
# dds_frequency = default_frequency
asserv = Asserv(dds_frequency=default_frequency, gain = gain, device="Dev2",outChan="ao2",inChanList=["ai0"],numSamp=n_samples_per_cycle,nbSampCropped=discarded_samples,vpp=2*amplitude,freq=sampling_rate,inRange=(-5.,5.),outRange=(-10.,10.), waveform =waveform, cycle_number=cycle_number)
app = QtGui.QApplication([])
win = pg.GraphicsWindow()
win.resize(1000,600)
win.setWindowTitle('Pyqtgraph : Live NIDAQmx data')
pg.setConfigOptions(antialias=True)
p1 = win.addPlot(title="correction_DDS", col = 0, row = 0)
p1.addLegend()
p2 = win.addPlot(title="error signal", col = 0, row = 1)
p2.addLegend()
p3 = win.addPlot(title="laser power", col = 0, row = 2)
p3.addLegend()
p4 = win.addPlot(title="aux photodiode", col = 0, row = 3)
p4.addLegend()
p5 = win.addPlot(title="therminstance", col = 0, row = 4)
p5.addLegend()
curve = p1.plot(pen = 'm', name = 'DDS_freq')
curve2 = p2.plot(pen = 'c', name = 'error_signal')
curve3 = p3.plot(pen = 'r', name = 'transmitted_power')
curve4 = p4.plot(pen = 'g', name = 'aux photodiode')
curve5 = p5.plot(pen = 'y', name = 'thermistance')
def update() :
x, y1, y2, y3, y4, y5 = asserv.graph[0], asserv.graph[1], asserv.graph[2], asserv.graph[3], asserv.graph[4], asserv.graph[5]
curve.setData(x=x, y=y1)
curve2.setData(x=x, y=y2)
curve3.setData(x=x, y=y3)
curve4.setData(x=x, y=y4)
curve5.setData(x=x, y=y5)
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(50)
asserv.start()
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
ret = QtGui.QApplication.instance().exec_()
print "Closing"
asserv.stop()
sys.exit(ret)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# Copyright 2018 Brian T. Park <brian@xparks.net>
#
# MIT License
#
"""Monitor the output of the given serial port and echo the output to the
STDOUT. If nothing is seen on the serial output for more than 10 seconds, an
error message is printed.
If the --test flag is given, the output is assumed to come from an AUnit unit
test, and the script validates that the test ran successfully. The script exits
with a status 0 if the test is successful, otherwise exits with a status 1.
Usage:
  serial_monitor.py [--help] [--log_level] [--list | --test | --monitor]
[--port /dev/ttyPort] [--baud 115200] [--eof eof]
Flags:
--list List the known tty ports. (default)
--monitor Monitor the serial port and echo the lines to the STDOUT.
--test Verify an AUnit test suite.
--port {tty} Set the tty port.
--baud {baud} Set the baud rate.
--log_level (INFO|DEBUG|ERROR) Set the logging level.
  --eof eof The End-of-File string marker.
  --timeout {seconds} Idle timeout in seconds (default: 10).
"""
import argparse
import serial
import serial.tools.list_ports
import logging
import re
from time import sleep
# Logging message format.
LOG_FORMAT = '%(asctime)s %(levelname)s %(name)s: %(message)s'
# Logging date format.
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S%z'
# Time out after this many seconds if the serial port produces no output.
TIMEOUT_ON_IDLE = 10
# Starting point of the number of seconds to wait for the serial port.
# Actual wait time increases using exponential back off.
WAIT_TIME_BASE = 1
# Number attempts to try opening the serial port.
NUM_ATTEMPTS = 4
# Regular expressions that match the start and end of an AUnit test run.
TEST_START_RE = re.compile(r'TestRunner started')
TEST_END_RE = re.compile(r'TestRunner summary.*(\d+) failed.*(\d+) timed out')
# Constants for the test_mode finite state machine
TEST_MODE_UNKNOWN = 0
TEST_MODE_START_FOUND = 1
TEST_MODE_END_SUMMARY_FOUND = 2
def monitor(port, baud, eof, timeout):
"""Read the serial output and echo the lines to the STDOUT."""
logging.info('Reading the serial port %s at %s baud' % (port, baud))
ser = open_port(port, baud, timeout)
logging.info('Monitoring port %s...' % port)
try:
while True:
line = ser.readline()
line = line.decode('ascii')
if line == '':
logging.error(
f"No output detected after {timeout} seconds... exiting."
)
break
line = line.rstrip()
print(line)
if eof and eof in line:
# The line with eof is *included* in the output.
logging.info(f"Detected '{eof}' EOF string... exiting.")
break
finally:
ser.close()
def validate_test(port, baud, timeout):
"""Read and verify an AUnit test looking and matching specific lines from
the TestRunner of AUnit in the serial output.
"""
logging.info('Reading the AUnit test on serial port %s at %s baud' %
(port, baud))
ser = open_port(port, baud, timeout)
try:
summary_line = ''
test_mode = TEST_MODE_UNKNOWN
while True:
line = ser.readline()
line = line.decode('ascii')
if line == '': break
line = line.rstrip()
print(line)
if test_mode == TEST_MODE_UNKNOWN:
match = TEST_START_RE.match(line)
if match:
test_mode = TEST_MODE_START_FOUND
continue
match = TEST_END_RE.match(line)
if match:
logging.error("Found 'TestRunner summary' " +
"without 'TestRunner started'")
break
elif test_mode == TEST_MODE_START_FOUND:
match = TEST_START_RE.match(line)
if match:
logging.error("Unexpected 'TestRunner started'")
break
match = TEST_END_RE.match(line)
if match:
test_mode = TEST_MODE_END_SUMMARY_FOUND
summary_line = line
break
finally:
ser.close()
if test_mode != TEST_MODE_END_SUMMARY_FOUND:
        raise Exception('No output detected after %s seconds... exiting.' % timeout)
if summary_line:
match = TEST_END_RE.match(line)
if match:
num_failed = match.group(1)
num_expired = match.group(2)
if num_failed != '0' or num_expired != '0':
raise Exception('Found %s failed and/or %s timed out' %
(num_failed, num_expired))
else:
raise Exception('Unexpected TestRunner output')
# See https://stackoverflow.com/questions/12090503
def list_ports():
"""List the available serial ports."""
for comport in serial.tools.list_ports.comports():
print(comport)
def open_port(port, baud, timeout):
"""Open the given port. Boards like Teensy, Leonardo, and Micro do not
create a virtual serial port until the Arduino program runs, so we make
multiple attempts (NUM_ATTEMPTS) to open the port using an exponential back
off wait time.
"""
wait_time = WAIT_TIME_BASE
count = 1
ser = serial.Serial(port=None, baudrate=baud, timeout=timeout)
ser.port = port
while True:
try:
logging.info('Opening serial port %s' % port)
ser.open()
break
except:
if count >= NUM_ATTEMPTS:
break
logging.info('Failed... waiting %s seconds to retry...' %
wait_time)
sleep(wait_time)
count += 1
wait_time *= 1.5
if not ser.is_open:
raise Exception('Unable to open serial port %s after %s attempts' %
(port, NUM_ATTEMPTS))
return ser
def main():
parser = argparse.ArgumentParser(
description='Read the given Arduino serial port')
parser.add_argument(
'--log_level', action='store', default='DEBUG', help='Logging level')
parser.add_argument(
'--port', action='store', default='/dev/ttyUSB0', help='port')
parser.add_argument(
'--baud', action='store', default='115200', help='baud')
parser.add_argument(
'--list',
action='store_true',
help='List the available ports (default)')
parser.add_argument(
'--test', action='store_true', help='Verify an AUnit test')
parser.add_argument(
'--monitor', action='store_true', help='Monitor the serial port')
parser.add_argument(
'--eof', action='store', default='', help='End of File string')
    parser.add_argument(
        '--timeout',
        action='store',
        type=float,
        default=TIMEOUT_ON_IDLE,
        help='Idle timeout in seconds')
args = parser.parse_args()
# Configure logging.
logging.basicConfig(
level=args.log_level, format=LOG_FORMAT, datefmt=DATE_FORMAT)
if args.monitor:
monitor(args.port, args.baud, args.eof, args.timeout)
elif args.test:
validate_test(args.port, args.baud, args.timeout)
else:
list_ports()
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
# Lint as: python3
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for computations.py (and __init__.py)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow as tf
from tensorflow_federated.python.common_libs import test
from tensorflow_federated.python.core import api as tff
class ComputationsTest(test.TestCase):
def test_tf_comp_first_mode_of_usage_as_non_polymorphic_wrapper(self):
# Wrapping a lambda with a parameter.
foo = tff.tf_computation(lambda x: x > 10, tf.int32)
self.assertEqual(str(foo.type_signature), '(int32 -> bool)')
self.assertEqual(foo(9), False)
self.assertEqual(foo(11), True)
# Wrapping an existing Python function with a parameter.
bar = tff.tf_computation(tf.add, (tf.int32, tf.int32))
self.assertEqual(str(bar.type_signature), '(<int32,int32> -> int32)')
# Wrapping a no-parameter lambda.
baz = tff.tf_computation(lambda: tf.constant(10))
self.assertEqual(str(baz.type_signature), '( -> int32)')
self.assertEqual(baz(), 10)
# Wrapping a no-parameter Python function.
def bak_fn():
return tf.constant(10)
bak = tff.tf_computation(bak_fn)
self.assertEqual(str(bak.type_signature), '( -> int32)')
self.assertEqual(bak(), 10)
def test_tf_fn_with_variable(self):
@tff.tf_computation
def read_var():
v = tf.Variable(10, name='test_var')
return v
self.assertEqual(read_var(), 10)
def test_tf_comp_second_mode_of_usage_as_non_polymorphic_decorator(self):
# Decorating a Python function with a parameter.
@tff.tf_computation(tf.int32)
def foo(x):
return x > 10
self.assertEqual(str(foo.type_signature), '(int32 -> bool)')
self.assertEqual(foo(9), False)
self.assertEqual(foo(10), False)
self.assertEqual(foo(11), True)
# Decorating a no-parameter Python function.
@tff.tf_computation
def bar():
return tf.constant(10)
self.assertEqual(str(bar.type_signature), '( -> int32)')
self.assertEqual(bar(), 10)
def test_tf_comp_with_sequence_inputs_and_outputs_does_not_fail(self):
@tff.tf_computation(tff.SequenceType(tf.int32))
def _(x):
return x
def test_with_sequence_of_pairs(self):
pairs = tf.data.Dataset.from_tensor_slices(
(list(range(5)), list(range(5, 10))))
@tff.tf_computation
def process_pairs(ds):
return ds.reduce(0, lambda state, pair: state + pair[0] + pair[1])
self.assertEqual(process_pairs(pairs), 45)
def test_with_four_element_dataset_pipeline(self):
@tff.tf_computation
def comp1():
return tf.data.Dataset.range(5)
@tff.tf_computation(tff.SequenceType(tf.int64))
def comp2(ds):
return ds.map(lambda x: tf.cast(x + 1, tf.float32))
@tff.tf_computation(tff.SequenceType(tf.float32))
def comp3(ds):
return ds.repeat(5)
@tff.tf_computation(tff.SequenceType(tf.float32))
def comp4(ds):
return ds.reduce(0.0, lambda x, y: x + y)
@tff.tf_computation
def comp5():
return comp4(comp3(comp2(comp1())))
self.assertEqual(comp5(), 75.0)
def test_tf_comp_third_mode_of_usage_as_polymorphic_callable(self):
# Wrapping a lambda.
foo = tff.tf_computation(lambda x: x > 0)
self.assertEqual(foo(-1), False)
self.assertEqual(foo(0), False)
self.assertEqual(foo(1), True)
# Decorating a Python function.
@tff.tf_computation
def bar(x, y):
return x > y
self.assertEqual(bar(0, 1), False)
self.assertEqual(bar(1, 0), True)
self.assertEqual(bar(0, 0), False)
def test_fed_comp_typical_usage_as_decorator_with_unlabeled_type(self):
@tff.federated_computation((tff.FunctionType(tf.int32, tf.int32), tf.int32))
def foo(f, x):
assert isinstance(f, tff.Value)
assert isinstance(x, tff.Value)
assert str(f.type_signature) == '(int32 -> int32)'
assert str(x.type_signature) == 'int32'
result_value = f(f(x))
assert isinstance(result_value, tff.Value)
assert str(result_value.type_signature) == 'int32'
return result_value
self.assertEqual(
str(foo.type_signature), '(<(int32 -> int32),int32> -> int32)')
@tff.tf_computation(tf.int32)
def third_power(x):
return x**3
self.assertEqual(foo(third_power, 10), int(1e9))
self.assertEqual(foo(third_power, 1), 1)
def test_fed_comp_typical_usage_as_decorator_with_labeled_type(self):
@tff.federated_computation((
('f', tff.FunctionType(tf.int32, tf.int32)),
('x', tf.int32),
))
def foo(f, x):
return f(f(x))
@tff.tf_computation(tf.int32)
def square(x):
return x**2
@tff.tf_computation(tf.int32, tf.int32)
def square_drop_y(x, y): # pylint: disable=unused-argument
return x * x
self.assertEqual(
str(foo.type_signature), '(<f=(int32 -> int32),x=int32> -> int32)')
self.assertEqual(foo(square, 10), int(1e4))
self.assertEqual(square_drop_y(square_drop_y(10, 5), 100), int(1e4))
self.assertEqual(square_drop_y(square_drop_y(10, 100), 5), int(1e4))
with self.assertRaisesRegexp(TypeError,
'is not assignable from source type'):
self.assertEqual(foo(square_drop_y, 10), 100)
def test_with_tf_datasets(self):
@tff.tf_computation(tff.SequenceType(tf.int64))
def foo(ds):
return ds.reduce(np.int64(0), lambda x, y: x + y)
self.assertEqual(str(foo.type_signature), '(int64* -> int64)')
@tff.tf_computation
def bar():
return tf.data.Dataset.range(10)
self.assertEqual(str(bar.type_signature), '( -> int64*)')
self.assertEqual(foo(bar()), 45)
def test_no_argument_fed_comp(self):
@tff.federated_computation
def foo():
return 10
self.assertEqual(str(foo.type_signature), '( -> int32)')
self.assertEqual(foo(), 10)
if __name__ == '__main__':
test.main()
|
nilq/baby-python
|
python
|
import pandas as pd
pd.options.display.max_columns = None
from sklearn.preprocessing import OrdinalEncoder
from torchvision import datasets, transforms
import torch
import plotly.express as px
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from utils.dataset import NumpyDataset, TorchDataSet
class MNISTData(TorchDataSet):
def __init__(self, split=False, normalize=False, shuffle=True, seed=None):
X, y = self.get_X_y()
super().__init__(X=X, y=y, one_hot_target=False, normalize=normalize, split=split, dataloader_shuffle=shuffle, seed=seed, label_type='categoric')
# self.get_tensors()
def get_X_y(self):
mnist_train = datasets.MNIST(root="./mnist-model/datasets/mnist_train",
download=True, train=True,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
mnist_test = datasets.MNIST(root="./mnist-model/datasets/mnist_test",
download=True, train=False,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
X = mnist_train.data
X = torch.cat((X, mnist_test.data), dim=0).reshape(-1, 1, 28, 28)
y = mnist_train.targets
y = torch.cat((y, mnist_test.targets), dim=0)
return X.detach().numpy(), y.detach().numpy()
if __name__ == "__main__":
mnist = MNISTData(split=True, normalize=True)
X = mnist.X
print(X.shape)
print(X.dtype)
print(torch.unique(mnist.y))
print(mnist.y_sets[0].shape)
|
nilq/baby-python
|
python
|
import asyncio
import logging
from ottoengine import const, helpers
from ottoengine.model import dataobjects
_LOG = logging.getLogger(__name__)
# _LOG.setLevel(logging.DEBUG)
class RuleActionItem(object):
""" This is a single action step in an action sequence """
def get_dict_config(self) -> dict:
# This will be overridden by the subclasses
raise NotImplementedError("get_dict_config was not properly overridden")
def serialize(self) -> dict:
        # This MAY be overridden by the subclass to accommodate special handling
return self.get_dict_config()
async def async_execute(self, engine) -> bool:
'''Runs the action.
Returns True if action was successful.
Returns False if the action was unsuccessful.
'''
# This will be overridden by the subclasses
raise NotImplementedError("async_execute was not properly overridden")
class ServiceAction(RuleActionItem):
# domain: light
# service: turn_on
# data:
# entity_id: group.bedroom
# brightness: 100
def __init__(self, domain, service, entity_id=None, data_dict={}):
self._domain = domain
self._service = service # string
self._data_dict = data_dict # {} dictionary
if entity_id is not None:
self._data_dict["entity_id"] = entity_id
# Override
async def async_execute(self, engine):
_LOG.info("Service called - domain: {}, service: {}, data: {}".format(
self._domain, self._service, self._data_dict)
)
await engine.call_service(
dataobjects.ServiceCall(self._domain, self._service, self._data_dict)
)
return True
@staticmethod
def from_dict(dict_obj):
# j = json
# kwargs = {
# "domain": j['domain'],
# "service": j["service"]
# }
# if "data" in j:
# kwargs["data"] = j["data"]
# return ServiceAction(**kwargs)
domain = dict_obj.get(const.DOMAIN)
service = dict_obj.get(const.SERVICE)
data = dict_obj.get(const.DATA, [])
return ServiceAction(domain, service, data_dict=data)
# Override
def get_dict_config(self) -> dict:
d = {
"domain": self._domain,
"service": self._service,
}
if self._data_dict:
d["data"] = self._data_dict
return d
class ConditionAction(RuleActionItem):
# This is just a condition object
def __init__(self, condition_obj):
self._condition_obj = condition_obj
# No from_dict function since this is just a condition object
# We use the _condition_from_dict() function in persistence.py instead
# Override
async def async_execute(self, engine):
'''Tests the condition. Returns the result of the test'''
result = False
if self._condition_obj.evaluate(engine):
result = True
_LOG.info("Condition action is {}: {}".format(result, self._condition_obj.serialize()))
return result
# Override
def get_dict_config(self) -> dict:
return self._condition_obj.get_condition_config()
class DelayAction(RuleActionItem):
# delay: 00:01:30
def __init__(self, delay_delta):
self._delay_delta = delay_delta # datetime.timedelta
# Override
async def async_execute(self, engine):
delay_secs = self._delay_delta.total_seconds()
_LOG.info("Delay action for {} seconds".format(delay_secs))
await asyncio.sleep(delay_secs)
return True
@staticmethod
def from_dict(json):
return DelayAction(helpers.hms_string_to_timedelta(json["delay"]))
# Override
def get_dict_config(self) -> dict:
# To re-create: timedelta(days, seconds, microseconds)
return {
"delay": helpers.timedelta_to_hms_string(self._delay_delta)
}
class LogAction(RuleActionItem):
# log_message: message
def __init__(self, message):
self._message = message
@staticmethod
def from_dict(json):
return LogAction(json.get("log_message"))
# Overrides
async def async_execute(self, engine):
_LOG.info("LogAction: {}".format(self._message))
return True
def get_dict_config(self) -> dict:
return {"log_message": self._message}
|
nilq/baby-python
|
python
|
import tests2 as t
for method in ['KIR', 'McCormack', 'Beam-Warming', 'Lax-Wendroff', 'Fedorenko', 'Rusanov']:
    for initial in ['sin', 'peak', 'rectangle']:
        for velocity in ['const', 'x', 'func']:
            t.testing(method=method, initial=initial, velocity=velocity)
|
nilq/baby-python
|
python
|
import numpy as np
import tqdm
def add_iteration_column_np(df):
"""
Only used for numerical integral timings, but perhaps also useful for other timings with some
adaptations. Adds iteration information, which can be deduced from the order, ppid, num_cpu
and name (because u0_int is only done once, we have to add a special check for that).
"""
iteration = np.empty(len(df), dtype='int64')
it = 0
N_names = len(df.name.unique())
# local_N_num_int is the number of numerical integrals in the local (current) iteration
# it determines after how long the next iteration starts
local_N_num_int = df.num_cpu.iloc[0] * N_names
# the current iteration starts here:
current_iteration_start = 0
current_ppid = df.ppid.iloc[0]
for irow, row in tqdm.tqdm(enumerate(df.itertuples())):
# for irow in tqdm.tqdm(range(len(df))):
# if current_ppid != df.ppid.iloc[irow] or ((irow - current_iteration_start) == local_N_num_int):
if current_ppid != row.ppid or ((irow - current_iteration_start) == local_N_num_int):
# current_ppid = df.ppid.iloc[irow]
current_ppid = row.ppid
current_iteration_start = irow
it += 1
# num_cpu = df.num_cpu.iloc[irow]
num_cpu = row.num_cpu
local_N_names = len(df[irow:irow + N_names * num_cpu].name.unique())
local_N_num_int = num_cpu * local_N_names
iteration[irow] = it
# if (irow + 1) % local_N_num_int == 0:
# it += 1
df['iteration'] = iteration
# following stuff thanks to Carlos, Janneke, Atze, Berend and Lourens for discussion and suggestions on Slack:
class IterationGrouper:
"""
N.B.: the used df must have a reset index!
Use df = df.reset_index(drop=True) if necessary before grouping with this
class.
"""
def __init__(self, df):
self._group_id = 0
self._count = {}
self._max = {}
self._df = df
def __call__(self, index):
        row = self._df.iloc[index]
        # Note: row['name'] reads the 'name' column; Series.name would be the index label.
        if row['name'] not in self._count:
            self._max[row['name']] = row.num_cpu
            self._count[row['name']] = 1
        else:
            if self._count[row['name']] < self._max[row['name']]:
                self._count[row['name']] += 1
            else:
                self._group_id += 1
                self._count = {}
                self._count[row['name']] = 1
                self._max[row['name']] = row.num_cpu
return self._group_id
df_numints_selection0 = df_numints.iloc[:100000].copy()
df_numints_selection1 = df_numints.iloc[:100000].copy()
df_numints_selection2 = df_numints.iloc[:100000].copy().reset_index(drop=True)
load_timing.add_iteration_column(df_numints_selection0)
add_iteration_column_np(df_numints_selection1)
for it, (count, group) in enumerate(df_numints_selection2.groupby(IterationGrouper(df_numints_selection2))):
    df_numints_selection2.loc[group.index, 'iteration'] = it
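# A hedged sanity check (assumes all three approaches implement the same grouping rule):
# the derived iteration columns should agree on the common selection.
assert (df_numints_selection0['iteration'].values == df_numints_selection1['iteration'].values).all()
assert (df_numints_selection1['iteration'].values == df_numints_selection2['iteration'].values).all()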
|
nilq/baby-python
|
python
|
"""
0.92%
"""
import collections
class MinStack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = collections.deque()
self.minlist = []
def push(self, x):
"""
:type x: int
:rtype: void
"""
self.stack.append(x)
self.minlist.append(x)
self.minlist = sorted(self.minlist)
def pop(self):
"""
:rtype: void
"""
p = self.stack.pop()
self.minlist.remove(p)
return p
def top(self):
"""
:rtype: int
"""
top = self.stack.pop()
self.stack.append(top)
return top
def getMin(self):
"""
:rtype: int
"""
return self.minlist[0] if self.minlist else None
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
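# A quick hedged check of the interface (values are hypothetical):
# obj = MinStack()
# obj.push(3); obj.push(1); obj.push(2)
# assert obj.getMin() == 1
# assert obj.pop() == 2
# assert obj.top() == 1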
|
nilq/baby-python
|
python
|
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="kaggle_learn",
version="0.0.1",
author="Bangda Sun",
author_email="bangdasun94@gmail.com",
description=("Generic data science toolbox"),
license="MIT",
url="https://github.com/bangdasun/kaggle_learn",
# url="http://packages.python.org/an_example_pypi_project",
# packages=["an_example_pypi_project", "tests"],
long_description=read("README.md"),
install_requires=[
"numpy",
"pandas",
"scikit-learn",
"matplotlib",
"tensorflow",
"keras"
],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
)
|
nilq/baby-python
|
python
|
index = {'Halifax': 'Q2141',
'Los Angeles': 'Q65',
'Wilkesboro': 'Q1025995',
'New York': 'Q1384',
'Uvalde': 'Q868860',
'Saint James': 'Q7401398',
'Ottawa': 'Q1930',
'Newton': 'Q49196',
'Mahé':'Q277480',
'Milwaukee': 'Q37836',
'Pomona': 'Q486868',
'Pasco': 'Q844016',
'Triumph': 'Q7844478',
'United States': 'Q30',
'Canada': 'Q16',
'India': 'Q668',
'Trinidad and Tobago': 'Q754',
'acetaminophen': 'Q57055',
'aspirin': 'Q18216',
'ibuprofen': 'Q186969',
'naproxen': 'Q1215575',
'sertraline': 'Q407617'}
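# A hedged illustration (not part of the original file): the values look like Wikidata QIDs,
# so an entity URL can be built straight from a lookup.
url = "https://www.wikidata.org/wiki/" + index['Ottawa']  # -> https://www.wikidata.org/wiki/Q1930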
|
nilq/baby-python
|
python
|
num=input("enter any number")
if num > 0:
print("positive")
elif num < 0:
print("negative")
else:
print("it is a zero")
|
nilq/baby-python
|
python
|
import pyviz3d.visualizer as viz
import numpy as np
import math
def main():
v = viz.Visualizer()
v.add_arrow('Arrow_1', start=np.array([0, 0.2, 0]), end=np.array([1, 0.2, 0]))
v.add_arrow('Arrow_2', start=np.array([0, 0.5, 0.5]), end=np.array([0.5, 0, 0.5]), color=np.array([0, 0, 255]))
v.add_arrow('Arrow_3', start=np.array([0, 1, 0]), end=np.array([1, 1, 1]), color=np.array([30, 255, 50]),
alpha=0.5, stroke_width=0.04, head_width=0.1)
v.save('example_arrows')
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from sklearn.metrics import confusion_matrix
import sys
def main():
print(sys.argv)
BlockId = sys.argv[1]
data = pd.read_csv('./model/upload/data.csv')
# data = pd.read_csv('./test_data/data.csv')
del data['Unnamed: 32']
# data = data[:50]
X = data.iloc[:, 2:].values
y = data.iloc[:, 1].values
labelencoder_X1 = LabelEncoder()
y = labelencoder_X1.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
model = Sequential()
model.add(Dense(16, activation='relu', input_dim=30))
model.add(Dropout(0.1))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(1, activation='sigmoid'))
model.load_weights("./model/downloadedWeights/"+ BlockId +".h5")
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=100, epochs=5)
scores = model.evaluate(X_test, y_test)
print("Loss: ", scores[0]) #Loss
print("Accuracy: ", scores[1]) #Accuracy
#Saving Model
model.save("./output.h5")
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
# encoding=utf8
import jenkins_job_wrecker.modules.base
from jenkins_job_wrecker.helpers import get_bool, gen_raw
from jenkins_job_wrecker.modules.triggers import Triggers
PARAMETER_MAPPER = {
'stringparameterdefinition': 'string',
'booleanparameterdefinition': 'bool',
'choiceparameterdefinition': 'choice',
'textparameterdefinition': 'text',
'fileparameterdefinition': 'file',
}
class Properties(jenkins_job_wrecker.modules.base.Base):
component = 'properties'
def gen_yml(self, yml_parent, data):
parameters = []
properties = []
for child in data:
object_name = child.tag.split('.')[-1].lower()
object_name = object_name.replace('-', '').replace('_', '')
if object_name == 'parametersdefinitionproperty':
self.registry.dispatch(self.component, 'parameters', child, parameters)
continue
elif object_name == 'pipelinetriggersjobproperty':
# Pipeline scripts put triggers in properties section
trigger = Triggers(self.registry)
for grandchild in child:
# Find the triggers tag and then generate the yaml
if grandchild.tag == 'triggers':
trigger.gen_yml(yml_parent, grandchild)
continue
self.registry.dispatch(self.component, object_name, child, properties)
if len(properties) > 0:
yml_parent.append(['properties', properties])
if len(parameters) > 0:
yml_parent.append(['parameters', parameters])
def githubprojectproperty(top, parent):
github = {}
for child in top:
if child.tag == 'projectUrl':
github['url'] = child.text
elif child.tag == 'displayName':
pass
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
parent.append({'github': github})
def envinjectjobproperty(top, parent):
env_info = {}
for child in top:
if child.tag == 'info':
for grandchild in child:
if grandchild.tag == 'loadFilesFromMaster':
env_info['load-from-master'] = get_bool(grandchild.text)
elif grandchild.tag == 'groovyScriptContent':
if grandchild.text:
env_info['groovy-content'] = grandchild.text
elif grandchild.tag == 'secureGroovyScript':
for ggchild in grandchild:
if ggchild.tag == 'script':
if ggchild.text:
env_info['groovy-content'] = ggchild.text
elif ggchild.tag == 'sandbox':
# No support in jjb for this, fail quietly for
# this one
pass
else:
raise NotImplementedError("cannot handle XML %s" % ggchild.tag)
elif grandchild.tag == 'scriptContent':
if grandchild.text:
env_info['script-content'] = grandchild.text
elif grandchild.tag == 'scriptFilePath':
if grandchild.text:
env_info['script-file'] = grandchild.text
elif grandchild.tag == 'propertiesContent':
if grandchild.text:
env_info['properties-content'] = grandchild.text
elif grandchild.tag == 'propertiesFilePath':
if grandchild.text:
env_info['properties-file'] = grandchild.text
else:
raise NotImplementedError("cannot handle XML %s" % grandchild.tag)
elif child.tag == 'on':
env_info['enabled'] = get_bool(child.text)
elif child.tag == 'keepJenkinsSystemVariables':
env_info['keep-system-variables'] = get_bool(child.text)
elif child.tag == 'keepBuildVariables':
env_info['keep-build-variables'] = get_bool(child.text)
elif child.tag == 'overrideBuildParameters':
env_info['override-build-parameters'] = get_bool(child.text)
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
parent.append({'inject': env_info})
def parameters(top, parent):
for params in top:
if params.tag != 'parameterDefinitions':
raise NotImplementedError("cannot handle XML %s" % params.tag)
for param in params:
param_name = param.tag.split('.')[-1].lower()
if param_name not in PARAMETER_MAPPER:
gen_raw(param, parent)
continue
param_type = PARAMETER_MAPPER[param_name]
parameter = {}
for setting in param:
key = {'defaultValue': 'default'}.get(setting.tag, setting.tag)
if setting.text is None:
parameter[key] = ''
elif param_type == 'bool' and (setting.text == 'true' or setting.text == 'false'):
parameter[key] = (setting.text == 'true')
elif param_type == 'choice' and setting.tag == 'choices':
choices = []
for sub_setting in setting:
                        if sub_setting.attrib['class'] == 'string-array':
for item in sub_setting:
choices.append(item.text)
else:
raise NotImplementedError(sub_setting.attrib['class'])
parameter[key] = choices
else:
parameter[key] = setting.text
parent.append({param_type: parameter})
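# Illustration (hypothetical input, inferred from the mapping above, not taken from
# the upstream project): a Jenkins XML parameter such as
#   <hudson.model.StringParameterDefinition>
#     <name>TARGET</name><defaultValue>prod</defaultValue><description>env</description>
#   </hudson.model.StringParameterDefinition>
# would be appended by parameters() as
#   {'string': {'name': 'TARGET', 'default': 'prod', 'description': 'env'}}
# i.e. only the defaultValue tag is renamed; every other child tag becomes a key as-is.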
def throttlejobproperty(top, parent):
throttle = {}
for child in top:
if child.tag == 'maxConcurrentPerNode':
throttle['max-per-node'] = child.text
elif child.tag == 'maxConcurrentTotal':
throttle['max-total'] = child.text
elif child.tag == 'throttleOption':
throttle['option'] = child.text
elif child.tag == 'throttleEnabled':
throttle['enabled'] = get_bool(child.text)
elif child.tag == 'categories':
throttle['categories'] = []
elif child.tag == 'configVersion':
pass # assigned by jjb
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
parent.append({'throttle': throttle})
def slacknotifierslackjobproperty(top, parent):
slack = {}
notifications = {
"notifySuccess": "notify-success",
"notifyAborted": "notify-aborted",
"notifyNotBuilt": "notify-not-built",
"notifyUnstable": "notify-unstable",
"notifyFailure": "notify-failure",
"notifyBackToNormal": "notify-back-to-normal",
"notifyRepeatedFailure": "notify-repeated-failure"
}
for child in top:
if child.tag == 'teamDomain':
slack['team-domain'] = child.text
elif child.tag == 'token':
slack['token'] = child.text
elif child.tag == 'room':
slack['room'] = child.text
elif child.tag == 'includeTestSummary':
slack['include-test-summary'] = (child.text == 'true')
elif child.tag == 'showCommitList':
slack['show-commit-list'] = (child.text == 'true')
elif child.tag == 'includeCustomMessage':
slack['include-custom-message'] = (child.text == 'true')
elif child.tag == 'customMessage':
slack['custom-message'] = child.text
elif child.tag == 'startNotification':
slack['start-notification'] = (child.text == 'true')
elif child.tag in notifications:
slack[notifications[child.tag]] = (child.text == 'true')
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
parent.append({'slack': slack})
def builddiscarderproperty(top, parent):
discarder = {}
mapping = {'daysToKeep': 'days-to-keep',
'numToKeep': 'num-to-keep',
'artifactDaysToKeep': 'artifact-days-to-keep',
'artifactNumToKeep': 'artifact-num-to-keep'}
for child in top[0]:
discarder[mapping[child.tag]] = int(child.text)
parent.append({'build-discarder': discarder})
def disableconcurrentbuildsjobproperty(top, parent):
# Pipeline job specific tag.
# concurrent is false by default anyway, so just going to ignore it
# Check cli.py root_to_yaml func for more info
pass
def authorizationmatrixproperty(top, parent):
# mirror image of: https://opendev.org/jjb/jenkins-job-builder/src/commit/074985c7ff9360bb58be80ffab686746267f814f/jenkins_jobs/modules/properties.py#L530
credentials = 'com.cloudbees.plugins.credentials.CredentialsProvider.'
ownership = 'com.synopsys.arc.jenkins.plugins.ownership.OwnershipPlugin.'
permissions = {
''.join((credentials, 'Create')): 'credentials-create',
''.join((credentials, 'Delete')): 'credentials-delete',
''.join((credentials, 'ManageDomains')): 'credentials-manage-domains',
''.join((credentials, 'Update')): 'credentials-update',
''.join((credentials, 'View')): 'credentials-view',
'hudson.model.Item.Build': 'job-build',
'hudson.model.Item.Cancel': 'job-cancel',
'hudson.model.Item.Configure': 'job-configure',
'hudson.model.Item.Create': 'job-create',
'hudson.model.Item.Delete': 'job-delete',
'hudson.model.Item.Discover': 'job-discover',
'hudson.model.Item.ExtendedRead': 'job-extended-read',
'hudson.model.Item.Move': 'job-move',
'hudson.model.Item.Read': 'job-read',
'hudson.model.Item.ViewStatus': 'job-status',
'hudson.model.Item.Workspace': 'job-workspace',
''.join((ownership, 'Jobs')): 'ownership-jobs',
'hudson.model.Run.Delete': 'run-delete',
'hudson.model.Run.Replay': 'run-replay',
'hudson.model.Run.Update': 'run-update',
'hudson.scm.SCM.Tag': 'scm-tag'
}
authorization = {}
for child in top:
if child.tag == 'inheritanceStrategy':
class_ = child.get('class')
if class_ != 'org.jenkinsci.plugins.matrixauth.inheritance.InheritParentStrategy':
raise NotImplementedError('cannot handle inheritance strategy - not implemented in JJB')
elif child.tag == 'permission':
permission, name = child.text.split(':', 1)
if name not in authorization:
authorization[name] = []
authorization[name].append(permissions[permission])
else:
raise NotImplementedError('cannot handle XML {}'.format(child.tag))
parent.append({'authorization': authorization})
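# Illustration (hypothetical input): a matrix-auth entry
#   <permission>hudson.model.Item.Read:alice</permission>
# is split on the first ':' and, via the permissions table above, ends up as
#   {'authorization': {'alice': ['job-read']}}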
|
nilq/baby-python
|
python
|
import itertools
import pymel.core as pm
import flottitools.test as mayatest
import flottitools.utils.materialutils as matutils
import flottitools.utils.skeletonutils as skelutils
import flottitools.utils.skinutils as skinutils
class TestGetSkinCluster(mayatest.MayaTestCase):
def test_get_skin_cluster_from_cube(self):
cube = self.create_cube()
joint = self.create_joint()
skin_cluster = self.pm.skinCluster(joint, cube)
        result = skinutils.get_skincluster(cube)
self.assertEqual(result, skin_cluster)
def test_get_from_shape_node(self):
test_cube, test_joints, test_skincluster = self.create_skinned_cube()
shape = test_cube.getShape()
result = skinutils.get_skincluster(shape)
self.assertEqual(test_skincluster, result)
def test_returns_none_if_no_skincluster(self):
test_cube = self.create_cube()
self.assertIsNone(skinutils.get_skincluster(test_cube))
def test_returns_none_if_no_shape(self):
test_node = self.create_transform_node()
self.assertIsNone(skinutils.get_skincluster(test_node))
def test_get_skin_cluster_from_vert(self):
test_cube, test_joints, test_skincluster = self.create_skinned_cube()
test_vert = test_cube.vtx[0]
result = skinutils.get_skincluster(test_vert)
self.assertEqual(test_skincluster, result)
class TestBindMeshToJoints(mayatest.MayaTestCase):
def setUp(self):
super(TestBindMeshToJoints, self).setUp()
self.test_cube = self.create_cube()
self.test_joints = [self.create_joint() for _ in range(5)]
def test_returns_skincluster(self):
skincl = skinutils.bind_mesh_to_joints(self.test_cube, self.test_joints)
self.assertIsNotNone(skincl)
def test_raises_with_no_mesh_to_skin(self):
self.assertRaises(RuntimeError, lambda: skinutils.bind_mesh_to_joints(None, self.test_joints))
def test_raises_with_no_joint(self):
self.assertRaises(RuntimeError, lambda: skinutils.bind_mesh_to_joints(self.test_cube, None))
def test_maintains_max_influences_default_four(self):
skincl = skinutils.bind_mesh_to_joints(self.test_cube, self.test_joints)
inf_values = pm.skinPercent(skincl, self.test_cube.vtx[0], q=True, value=True)
inf_count = len([i for i in inf_values if i != 0.0])
self.assertEqual(4, inf_count)
def test_maintains_max_influences_five(self):
skincl = skinutils.bind_mesh_to_joints(self.test_cube, self.test_joints, maximumInfluences=5)
inf_values = pm.skinPercent(skincl, self.test_cube.vtx[0], q=True, value=True)
inf_count = len([i for i in inf_values if i != 0.0])
self.assertEqual(5, inf_count)
def test_extra_joints_in_skeleton(self):
skincl = skinutils.bind_mesh_to_joints(self.test_cube, self.test_joints[2:4])
result = skincl.influenceObjects()
self.assertListEqual(self.test_joints[2:4], result)
def test_voxel_method(self):
# the geodesic voxel bind method requires a GPU so the command cannot be run in Maya standalone.
# skincl = skinutils.bind_mesh_geodesic_voxel(self.test_cube, self.test_joints, maximumInfluences=1)
# self.assertIsNotNone(skincl)
pass
class TestGetVertsWithExceedingInfluences(mayatest.MayaTestCase):
def test_get_verts_with_more_than_four_infs(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=5)
flagged_vert_indexes = skinutils.get_vert_indexes_with_exceeding_influences(
test_cube, skin_cluster=skincl, max_influences=4)
flagged_verts = [test_cube.vtx[i] for i in flagged_vert_indexes.keys()]
flagged_verts.sort()
expected = list(test_cube.vtx)
expected.sort()
self.assertListEqual(expected, flagged_verts)
def test_no_bad_verts(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=4)
flagged_vert_indexes = skinutils.get_vert_indexes_with_exceeding_influences(
test_cube, skin_cluster=skincl, max_influences=4)
flagged_verts = [test_cube.vtx[i] for i in flagged_vert_indexes.keys()]
self.assertListEqual([], flagged_verts)
class TestGetNonZeroInfluencesFromVert(mayatest.MayaTestCase):
def test_get_non_zero_influences_from_vert(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=5)
non_zero_infs = skinutils.get_weighted_influences(test_cube.vtx[0], skincl)
self.assertEqual(5, len(non_zero_infs))
class TestGetSkinnedMeshesFromScene(mayatest.MayaTestCase):
def test_get_skinned_meshes_from_scene(self):
test_skinned_cubes = [self.create_cube() for x in range(3)]
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skinclusters = []
for each in test_skinned_cubes:
skincl = skinutils.bind_mesh_to_joints(each, test_joints, maximumInfluences=5)
skinclusters.append(skincl)
skinned_meshes_from_scene = skinutils.get_skinned_meshes_from_scene()
skinned_meshes_from_scene.sort()
test_skinned_cubes.sort()
self.assertListEqual(test_skinned_cubes, skinned_meshes_from_scene)
def test_skinned_curve_in_scene(self):
"""
Should only return skinned meshes in the scene. Not skinned curves.
"""
test_skinned_cubes = [self.create_cube() for x in range(3)]
test_curve = self.pm.curve(p=[(0, 0, 0), (3, 5, 6), (5, 6, 7), (9, 9, 9)])
test_joints = [self.create_joint() for _ in range(5)]
curve_skincl = skinutils.bind_mesh_to_joints(test_curve, test_joints)
skinclusters = []
for each in test_skinned_cubes:
skincl = skinutils.bind_mesh_to_joints(each, test_joints, maximumInfluences=5)
skinclusters.append(skincl)
skinned_meshes_from_scene = skinutils.get_skinned_meshes_from_scene()
skinned_meshes_from_scene.sort()
test_skinned_cubes.sort()
self.assertListEqual(test_skinned_cubes, skinned_meshes_from_scene)
def test_multiple_mats_assigned_to_skinned_mesh(self):
test_skinned_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_skinned_cube, test_joints, maximumInfluences=5)
mat1, _ = matutils.create_material('foo')
mat2, _ = matutils.create_material('bar')
matutils.assign_material(test_skinned_cube, mat1)
matutils.assign_material(test_skinned_cube.f[0], mat2)
skinned_meshes_from_scene = skinutils.get_skinned_meshes_from_scene()
self.assertListEqual([test_skinned_cube], skinned_meshes_from_scene)
class TestGetPrunedInfluencesToWeights(mayatest.MayaTestCase):
def test_no_op_with_four_infs(self):
influences_to_weights = {'foo': 0.5, 'bar': 0.1, 'spam': 0.1, 'eggs': 0.3}
result = skinutils.get_pruned_influences_to_weights(influences_to_weights)
self.assertDictEqual(influences_to_weights, result)
def test_max_3_influences(self):
influences_to_weights = {'foo': 0.5, 'bar': 0.2, 'spam': 0.2, 'eggs': 0.1}
result = skinutils.get_pruned_influences_to_weights(influences_to_weights, max_influences=3)
expected = {'foo': 0.5, 'bar': 0.2, 'spam': 0.2, 'eggs': 0.0}
self.assertDictEqual(expected, result)
def test_five_influences(self):
influences_to_weights = {'foo': 0.5, 'bar': 0.2, 'spam': 0.1, 'eggs': 0.1, 'ham': 0.05}
result = skinutils.get_pruned_influences_to_weights(influences_to_weights)
expected = {'foo': 0.5, 'bar': 0.2, 'spam': 0.1, 'eggs': 0.1, 'ham': 0.0}
self.assertDictEqual(expected, result)
def test_five_influences_with_equal_min_values(self):
influences_to_weights = {'foo': 0.5, 'bar': 0.2, 'spam': 0.2, 'eggs': 0.05, 'ham': 0.05}
result = skinutils.get_pruned_influences_to_weights(influences_to_weights)
expected = {'foo': 0.5, 'bar': 0.2, 'spam': 0.2, 'eggs': 0.0, 'ham': 0.0}
self.assertDictEqual(expected, result)
def test_divisor_is_2(self):
influences_to_weights = {'foo': 1.0, 'bar': 0.4, 'spam': 0.2, 'eggs': 0.2}
result = skinutils.get_pruned_influences_to_weights(influences_to_weights, divisor=2.0)
expected = {'foo': 0.5, 'bar': 0.2, 'spam': 0.1, 'eggs': 0.1}
self.assertDictEqual(expected, result)
def test_too_many_infs_all_equal(self):
influences_to_weights = {'foo': 0.2, 'bar': 0.2, 'spam': 0.2, 'eggs': 0.2, 'ham': 0.2}
result = skinutils.get_pruned_influences_to_weights(influences_to_weights)
expected = {'foo': 0.2, 'bar': 0.2, 'spam': 0.0, 'eggs': 0.2, 'ham': 0.2}
self.assertDictEqual(expected, result)
def test_far_too_many_infs_all_equal(self):
influences_to_weights = {'foo': 0.2, 'bar': 0.2, 'spam': 0.2, 'eggs': 0.2, 'ham': 0.2,
'foo2': 0.2, 'bar2': 0.2, 'spam2': 0.2, 'eggs2': 0.2, 'ham2': 0.2}
result = skinutils.get_pruned_influences_to_weights(influences_to_weights)
expected = {'foo': 0.0, 'bar': 0.2, 'spam': 0.0, 'eggs': 0.2, 'ham': 0.0,
'foo2': 0.0, 'bar2': 0.2, 'spam2': 0.0, 'eggs2': 0.2, 'ham2': 0.0}
self.assertDictEqual(expected, result)
class TestPruneExceedingInfluences(mayatest.MayaTestCase):
def test_prune_exceeding_influences(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=5)
influences_to_weights = skinutils.get_weighted_influences(test_cube.vtx[0], skincl)
skinutils.prune_exceeding_influences(test_cube.vtx[0], skincl, influences_to_weights)
result = skinutils.get_weighted_influences(test_cube.vtx[0], skincl)
self.assertEqual(4, len(result))
class TestGetNonNormalizedVerts(mayatest.MayaTestCase):
def test_zero_bad_verts(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=4)
skincl.setNormalizeWeights(2) # 2 == post normalize method
result = skinutils.get_non_normalized_vert_indexes(test_cube.vtx, skincl)
self.assertEqual(0, len(result))
def test_one_bad_vert(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=4)
skincl.setNormalizeWeights(2) # 2 == post normalize method
pm.skinPercent(skincl, test_cube.vtx[0], transformValue=(test_joints[0], 1.5))
result = skinutils.get_non_normalized_vert_indexes(test_cube.vtx, skincl)
self.assertEqual(1, len(result))
def test_returns_total(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=4)
skincl.setNormalizeWeights(2) # 2 == post normalize method
pm.skinPercent(skincl, test_cube.vtx[0], transformValue=(test_joints[0], 1.5))
pm.skinPercent(skincl, test_cube.vtx[1], transformValue=(test_joints[0], 1.5))
expected = {0: 2.25, 1: 2.25}
result = skinutils.get_non_normalized_vert_indexes(test_cube.vtx, skincl)
self.assertDictEqual(expected, result)
class TestMoveWeights(mayatest.MayaTestCase):
def setUp(self):
super(TestMoveWeights, self).setUp()
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
self.skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=4)
self.vert = test_cube.vtx[0]
self.origin_inf = test_joints[0]
self.destination_inf = test_joints[1]
self.initial_origin_weight = self.pm.skinPercent(self.skincl, self.vert, q=True, transform=self.origin_inf)
self.initial_destination_weight = self.pm.skinPercent(
self.skincl, self.vert, q=True, transform=self.destination_inf)
def test_move_weight_single_vert_expected_dest_weight(self):
# test_cube = self.create_cube()
# test_joints = [self.create_joint() for _ in range(5)]
# skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=4)
# vert = test_cube.vtx[0]
# origin_inf = test_joints[0]
# destination_inf = test_joints[1]
# initial_origin_weight = self.pm.skinPercent(skincl, vert, q=True, transform=origin_inf)
# initial_destination_weight = self.pm.skinPercent(skincl, vert, q=True, transform=destination_inf)
skinutils.move_weights(self.skincl, self.vert, self.origin_inf, self.destination_inf)
expected_dest_weight = self.initial_origin_weight + self.initial_destination_weight
result_dest_weight = self.pm.skinPercent(self.skincl, self.vert, q=True, transform=self.destination_inf)
self.assertEqual(expected_dest_weight, result_dest_weight)
def test_single_vert_expected_origin_weight(self):
skinutils.move_weights(self.skincl, self.vert, self.origin_inf, self.destination_inf)
expected_origin_weight = 0.0
result_origin_weight = self.pm.skinPercent(self.skincl, self.vert, q=True, transform=self.origin_inf)
self.assertEqual(expected_origin_weight, result_origin_weight)
class TestMaxInfluencesNormalizeWeightsDisabled(mayatest.MayaTestCase):
def test_max_influences_normalize_weights_disabled(self):
pass
class TestPruneExceedingSkinnedMesh(mayatest.MayaTestCase):
def test_prune_exceeding_skinned_mesh(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skincl = skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=5)
initial_influences = []
for vert in test_cube.vtx:
initial_inf = skinutils.get_weighted_influences(vert, skincl)
initial_influences.append(len(initial_inf))
expected_initial = [5, 5, 5, 5, 5, 5, 5, 5]
self.assertListEqual(expected_initial, initial_influences)
skinutils.prune_exceeding_skinned_mesh(test_cube, skincluster=skincl)
results = []
for vert in test_cube.vtx:
result = skinutils.get_weighted_influences(vert, skincl)
results.append(len(result))
expected = [4, 4, 4, 4, 4, 4, 4, 4]
self.assertListEqual(expected, results)
class TestDeltaMeshSkinning(mayatest.MayaTestCase):
def test_modifies_skinning(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
[pm.move(j, (1,0,0)) for j in test_joints]
skinutils.bind_mesh_to_joints(test_cube, test_joints, maximumInfluences=1)
start_infs = skinutils.get_weighted_influences(test_cube.vtx[0])
self.assertEqual(1, len(start_infs))
skinutils.apply_delta_mush_skinning(test_cube, cleanup=True)
after_infs = skinutils.get_weighted_influences(test_cube.vtx[0])
self.assertEqual(4, len(after_infs))
def test_clean_up_mush_nodes(self):
pass
def test_clean_up_extra_meshes(self):
pass
class TestApplyDeltaMush(mayatest.MayaTestCase):
def test_creates_mush_node(self):
test_cube = self.create_cube()
result = skinutils.apply_delta_mush(test_cube)
mush_nodes = pm.ls(type=pm.nt.DeltaMush)
self.assertEqual(mush_nodes, [result])
def test_default_settings(self):
test_cube = self.create_cube()
mush_node = skinutils.apply_delta_mush(test_cube)
self.scene_nodes.append(mush_node)
expected = {'smoothingIterations': 20,
'smoothingStep': 1.0,
'pinBorderVertices': False,
'envelope': 1.0,
'inwardConstraint': 0.0,
'outwardConstraint': 0.0,
'distanceWeight': 1.0,
'displacement': 1.0}
result = {'smoothingIterations': mush_node.smoothingIterations.get(),
'smoothingStep': mush_node.smoothingStep.get(),
'pinBorderVertices': mush_node.pinBorderVertices.get(),
'envelope': mush_node.envelope.get(),
'inwardConstraint': mush_node.inwardConstraint.get(),
'outwardConstraint': mush_node.outwardConstraint.get(),
'distanceWeight': mush_node.distanceWeight.get(),
'displacement': mush_node.displacement.get()}
self.assertDictEqual(expected, result)
def test_not_default_settings(self):
test_cube = self.create_cube()
kwargs = {'smoothingIterations': 10,
'smoothingStep': 0.5,
'pinBorderVertices': True,
'envelope': 0.5,
'inwardConstraint': 0.5,
'outwardConstraint': 1.0}
mush_node = skinutils.apply_delta_mush(test_cube, 0.0, 0.0, **kwargs)
self.scene_nodes.append(mush_node)
expected = {'distanceWeight': 0.0,
'displacement': 0.0}
expected.update(kwargs)
result = {'smoothingIterations': mush_node.smoothingIterations.get(),
'smoothingStep': mush_node.smoothingStep.get(),
'pinBorderVertices': mush_node.pinBorderVertices.get(),
'envelope': mush_node.envelope.get(),
'inwardConstraint': mush_node.inwardConstraint.get(),
'outwardConstraint': mush_node.outwardConstraint.get(),
'distanceWeight': mush_node.distanceWeight.get(),
'displacement': mush_node.displacement.get()}
self.assertDictEqual(expected, result)
class TestBakeDeformer(mayatest.MayaTestCase):
def test_one_skeleton(self):
source_cube = self.create_cube()
target_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skinutils.bind_mesh_to_joints(source_cube, test_joints)
target_skincl = skinutils.bind_mesh_to_joints(target_cube, test_joints)
self.scene_nodes.append(skinutils.apply_delta_mush(source_cube))
pm.skinPercent(target_skincl, target_cube.vtx, transformValue=(test_joints[-1], 1.0))
previous_val = pm.skinPercent(target_skincl, target_cube.vtx[0], query=True, transform=test_joints[-1])
# pm.skinPercent(skincluster, vertex, transformValue=pruned_infs_to_weights.items())
target_skincl = skinutils.bake_deformer_to_skin(source_cube, target_cube)
result = pm.skinPercent(target_skincl, target_cube.vtx[0], query=True, transform=test_joints[-1])
self.assertNotEqual(previous_val, result)
def test_two_skeletons(self):
source_cube = self.create_cube()
target_cube = self.create_cube()
source_joints = [self.create_joint() for _ in range(5)]
pm.select(clear=True)
target_joints = [self.create_joint() for _ in range(5)]
skinutils.bind_mesh_to_joints(source_cube, source_joints)
target_skincl = skinutils.bind_mesh_to_joints(target_cube, target_joints)
self.scene_nodes.append(skinutils.apply_delta_mush(source_cube))
pm.skinPercent(target_skincl, target_cube.vtx, transformValue=(target_joints[-1], 1.0))
previous_val = pm.skinPercent(target_skincl, target_cube.vtx[0], query=True, transform=target_joints[-1])
# pm.skinPercent(skincluster, vertex, transformValue=pruned_infs_to_weights.items())
target_skincl = skinutils.bake_deformer_to_skin(source_cube, target_cube, source_joints, target_joints)
result = pm.skinPercent(target_skincl, target_cube.vtx[0], query=True, transform=target_joints[-1])
self.assertNotEqual(previous_val, result)
def test_respects_max_influences(self):
source_cube = self.create_cube()
target_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skinutils.bind_mesh_to_joints(source_cube, test_joints)
skinutils.bind_mesh_to_joints(target_cube, test_joints)
self.scene_nodes.append(skinutils.apply_delta_mush(source_cube))
expected = 3
target_skincl = skinutils.bake_deformer_to_skin(source_cube, target_cube, max_influences=expected)
result = target_skincl.getMaximumInfluences()
self.assertEqual(expected, result)
def test_normalizes_weights(self):
source_cube = self.create_cube()
target_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skinutils.bind_mesh_to_joints(source_cube, test_joints)
target_skincl = skinutils.bind_mesh_to_joints(target_cube, test_joints)
target_skincl.setNormalizeWeights(False)
pm.skinPercent(target_skincl, target_cube.vtx, transformValue=(test_joints[-1], 2.0))
weights = [sum(pm.skinPercent(target_skincl, v, value=True, q=True)) for v in target_cube.vtx]
[self.assertLess(1.0, w) for w in weights]
self.scene_nodes.append(skinutils.apply_delta_mush(source_cube))
target_skincl = skinutils.bake_deformer_to_skin(source_cube, target_cube, cleanup=True)
# target_skincl.forceNormalizeWeights()
weights = [sum(pm.skinPercent(target_skincl, v, value=True, q=True)) for v in target_cube.vtx]
[self.assertGreaterEqual(1.0, w) for w in weights]
class CopyWeights(mayatest.MayaTestCase):
def test_simple(self):
source_cube = self.create_cube()
target_cube = self.create_cube()
source_joints = [self.create_joint() for _ in range(5)]
[pm.move(j, (0.1, 0.1, 0.1)) for j in source_joints]
source_skincl = skinutils.bind_mesh_to_joints(source_cube, source_joints)
expected = [pm.skinPercent(source_skincl, v, value=True, q=True) for v in source_cube.vtx]
pm.select(clear=True)
target_joints = [self.create_joint() for _ in range(5)]
[pm.move(j, (0.1, 0.1, 0.1)) for j in target_joints]
target_skincl = skinutils.bind_mesh_to_joints(target_cube, target_joints)
pm.skinPercent(target_skincl, target_cube.vtx, transformValue=(target_joints[-1], 1.0))
skinutils.copy_weights(source_cube, target_cube)
result = [pm.skinPercent(source_skincl, v, value=True, q=True) for v in source_cube.vtx]
for e, r in zip(expected, result):
[self.assertAlmostEqual(expected_weight, result_weight) for expected_weight, result_weight in zip(e, r)]
class TestGetRootFromSkinnedMesh(mayatest.MayaTestCase):
def test_get_root_joint_from_skinned_mesh(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(5)]
skinutils.bind_mesh_to_joints(test_cube, test_joints)
result = skinutils.get_root_joint_from_skinned_mesh(test_cube)
self.assertEqual(test_joints[0], result)
class TestGetVertsToWeightedInfluences(mayatest.MayaTestCase):
def test_get_verts_to_weighted_influences(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
expected = {}
inf_index = 0
for vert in test_cube.vtx:
expected[vert.index()] = {test_joints[inf_index]: 1.0}
pm.skinPercent(skin_cluster, vert, transformValue=expected[vert.index()].items())
inf_index += 1
if inf_index > 4:
inf_index = 0
result = skinutils.get_vert_indexes_to_weighted_influences(skin_cluster)
self.assertDictEqual(expected, result)
def test_multiple_influences_per_vert(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
expected = {}
inf_index = 0
weight_values = [0.3, 0.2, 0.4, 0.1]
for vert in test_cube.vtx:
inf_wts = {}
for weight in weight_values:
inf_wts[test_joints[inf_index]] = weight
inf_index += 1
if inf_index > 4:
inf_index = 0
pm.skinPercent(skin_cluster, vert, transformValue=inf_wts.items())
expected[vert.index()] = inf_wts
result = skinutils.get_vert_indexes_to_weighted_influences(skin_cluster)
self.assertDictEqual(expected, result)
def test_subset_of_meshes_verts(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
expected = {}
inf_index = 0
weight_values = [0.3, 0.2, 0.4, 0.1]
for vert in test_cube.vtx:
inf_wts = {}
for weight in weight_values:
inf_wts[test_joints[inf_index]] = weight
inf_index += 1
if inf_index > 4:
inf_index = 0
pm.skinPercent(skin_cluster, vert, transformValue=inf_wts.items())
expected[vert.index()] = inf_wts
for i in [0, 1, 7]:
expected.pop(i)
result = skinutils.get_vert_indexes_to_weighted_influences(skin_cluster, test_cube.vtx[2:6])
self.assertDictEqual(expected, result)
def test_skin_cluster_has_removed_influences(self):
"""An influence index can be greater than the length all influences in the skin_cluster"""
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(15)]
skin_cluster = self.pm.skinCluster(test_joints, test_cube)
for index in [13, 10, 9]:
skin_cluster.removeInfluence(test_joints[index])
self.scene_nodes.append(skin_cluster)
expected = {}
for vert in test_cube.vtx:
expected[vert.index()] = {test_joints[-1]: 1.0}
pm.skinPercent(skin_cluster, vert, transformValue=expected[vert.index()].items())
result = skinutils.get_vert_indexes_to_weighted_influences(skin_cluster)
self.assertDictEqual(expected, result)
def test_removed_influence_had_non_zero_weights_before(self):
test_cube = self.create_cube()
test_joints = [self.create_joint() for _ in range(15)]
skin_cluster = self.pm.skinCluster(test_joints, test_cube)
test_indices = [13, 10, 9]
for vert in test_cube.vtx:
for index in test_indices:
pm.skinPercent(skin_cluster, vert, transformValue=(test_joints[index], 0.5))
for index in test_indices[1:]:
skin_cluster.removeInfluence(test_joints[index])
expected = {}
for vert in test_cube.vtx:
expected[vert.index()] = {test_joints[0]: 1.0}
pm.skinPercent(skin_cluster, vert, transformValue=(expected[vert.index()].items()))
self.scene_nodes.append(skin_cluster)
result = skinutils.get_vert_indexes_to_weighted_influences(skin_cluster)
self.assertDictEqual(expected, result)
class TestGetInfluenceIndex(mayatest.MayaTestCase):
def test_influence_passed_as_pynode(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
expected = 3
result = skinutils.get_influence_index(test_joints[expected], skin_cluster)
self.assertEqual(expected, result)
def test_influence_passed_as_string(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
expected = 3
result = skinutils.get_influence_index(test_joints[expected].name(), skin_cluster)
self.assertEqual(expected, result)
def test_more_than_one_joint_with_same_name_pynode(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
dummy_joints = [self.create_joint() for _ in range(5)]
expected = 3
test_joints[expected].rename('foo')
dummy_joints[expected].rename('foo')
result = skinutils.get_influence_index(test_joints[expected], skin_cluster)
self.assertEqual(expected, result)
def test_more_than_one_joint_with_same_name_string(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
dummy_joints = [self.create_joint() for _ in range(5)]
expected = 3
test_joints[expected].rename('foo')
dummy_joints[expected].rename('foo')
result = skinutils.get_influence_index(test_joints[expected].nodeName(), skin_cluster)
self.assertEqual(expected, result)
class TestMoveWeightAndRemoveInfluence(mayatest.MayaTestCase):
def test_removes_influence(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
skinutils.move_weight_and_remove_influence(test_joints[-1], test_joints[0], skin_cluster)
self.assertFalse(test_joints[-1] in skin_cluster.getInfluence())
def test_moves_weights_to_parent(self):
test_cube, test_joints, skin_cluster = self.create_skinned_cube()
values = [0, 0.25, 0.25, 0.25, 0.25]
infs_to_wts = dict(zip(test_joints, values))
with skinutils.max_influences_normalize_weights_disabled(skin_cluster):
for vertex in test_cube.vtx:
pm.skinPercent(skin_cluster, vertex, transformValue=infs_to_wts.items())
skinutils.move_weight_and_remove_influence(test_joints[-1], test_joints[-2], skin_cluster)
result = skinutils.get_weighted_influences(test_cube.vtx[0], skin_cluster)
expected_values = [0.25, 0.25, 0.5]
expected = dict(zip(test_joints[1:-1], expected_values))
self.assertDictEqual(expected, result)
class TestCopyWeightsVertOrder(mayatest.MayaTestCase):
def test_simple(self):
source_test_cube, source_test_joints, source_skin_cluster = self.create_skinned_cube()
target_test_cube, target_test_joints, target_skin_cluster = self.create_skinned_cube()
inf_map = dict([(sj, [tj]) for sj, tj in zip(source_test_joints, target_test_joints)])
for vertex in source_test_cube.vtx:
pm.skinPercent(source_skin_cluster, vertex, transformValue=(source_test_joints[0], 1.0))
skinutils.copy_weights_vert_order(source_test_cube, target_test_cube, inf_map)
result = skinutils.get_weighted_influences(target_test_cube.vtx[0])
expected = {target_test_joints[0]: 1.0}
self.assertDictEqual(expected, result)
class TestGetInfluenceMapByInfluenceIndex(mayatest.MayaTestCase):
def test_update_inf_map_by_skincluster_index(self):
source_cube, source_joints, source_skin_cluster = self.create_skinned_cube()
target_cube, target_joints, target_skin_cluster = self.create_skinned_cube()
expected_map = dict([(x, [y]) for x, y in zip(source_joints, target_joints)])
result_map, result_remaining = skinutils.update_inf_map_by_skincluster_index(source_joints,
target_joints,
source_skin_cluster,
target_skin_cluster)
self.assertDictEqual(result_map, expected_map)
self.assertListEqual([], result_remaining)
def test_skincluster_index_influence_lists_order_differ(self):
source_cube, source_joints, source_skin_cluster = self.create_skinned_cube()
target_cube, target_joints, target_skin_cluster = self.create_skinned_cube()
expected_map = dict([(x, [y]) for x, y in zip(source_joints, target_joints)])
target_joints.reverse()
result_map, result_remaining = skinutils.update_inf_map_by_skincluster_index(source_joints,
target_joints,
source_skin_cluster,
target_skin_cluster)
self.assertDictEqual(result_map, expected_map)
self.assertListEqual([], result_remaining)
def test_more_source_influences(self):
source_cube, source_joints, source_skin_cluster = self.create_skinned_cube(joint_count=10)
target_cube, target_joints, target_skin_cluster = self.create_skinned_cube()
expected_map = dict([(x, [y]) for x, y in zip(source_joints, target_joints)])
result_map, result_remaining = skinutils.update_inf_map_by_skincluster_index(source_joints,
target_joints,
source_skin_cluster,
target_skin_cluster)
self.assertDictEqual(result_map, expected_map)
self.assertListEqual([], result_remaining)
def test_more_target_influences(self):
source_cube, source_joints, source_skin_cluster = self.create_skinned_cube()
target_cube, target_joints, target_skin_cluster = self.create_skinned_cube(joint_count=10)
expected_map = dict([(x, [y]) for x, y in zip(source_joints, target_joints)])
expected_remaining = target_joints[5:]
result_map, result_remaining = skinutils.update_inf_map_by_skincluster_index(source_joints,
target_joints,
source_skin_cluster,
target_skin_cluster)
self.assertDictEqual(result_map, expected_map)
self.assertListEqual(expected_remaining, result_remaining)
class TestCopyWeights(mayatest.MayaTestCase):
def test_copy_weights_vert_order_same_skeleton(self):
source_cube, source_joints, source_skincluster = self.create_skinned_cube()
target_cube = self.create_cube()
target_skincluster = skinutils.bind_mesh_to_joints(target_cube, source_joints)
transform_values = dict(itertools.zip_longest(source_joints[:4], [0.25], fillvalue=0.25))
transform_values[source_joints[-1]] = 0.0
pm.skinPercent(source_skincluster, source_cube.vtx[0], transformValue=transform_values.items())
source_weightedinfs = skinutils.get_weighted_influences(target_cube.vtx[0], target_skincluster)
transform_values = dict(itertools.zip_longest(source_joints[1:], [0.25], fillvalue=0.25))
transform_values[source_joints[0]] = 0.0
pm.skinPercent(target_skincluster, target_cube.vtx[0], transformValue=transform_values.items())
target_weightedinfs = skinutils.get_weighted_influences(target_cube.vtx[0], target_skincluster)
self.assertNotEqual(source_weightedinfs, target_weightedinfs)
skinutils.copy_weights_vert_order_inf_order(source_cube, target_cube, source_skincluster, target_skincluster)
expected = skinutils.get_weighted_influences(source_cube.vtx[0], source_skincluster)
result = skinutils.get_weighted_influences(target_cube.vtx[0], target_skincluster)
self.assertDictEqual(expected, result)
class TestGetBindPose(mayatest.MayaTestCase):
def test_get_bind_pose_from_skinned_mesh(self):
test_cube, test_joints, test_skincluster = self.create_skinned_cube()
expected = pm.ls(type='dagPose')[0]
result = skinutils.get_bind_pose_from_skinned_mesh(test_cube)
self.assertEqual(expected, result)
def test_multiple_bind_poses_on_skel(self):
test_cube, test_joints, test_skincluster = self.create_skinned_cube()
expected = pm.ls(type='dagPose')[0]
dummy_cube = self.create_cube()
test_joints[2].rotateX.set(30)
skinutils.bind_mesh_to_joints(dummy_cube, test_joints)
pm.dagPose(test_joints[0], bindPose=True, save=True)
bind_poses = pm.ls(type='dagPose')
self.assertEqual(3, len(bind_poses))
result = skinutils.get_bind_pose_from_skincluster(test_skincluster)
self.assertEqual(expected, result)
class TestDuplicateSkinnedMesh(mayatest.MayaTestCase):
def test_default_params(self):
test_cube, test_joints, test_skincluster = self.create_skinned_cube()
dup_cube, dup_cluster = skinutils.duplicate_skinned_mesh(test_cube)
self.scene_nodes.extend([dup_cube, dup_cluster])
self.assertListEqual(test_joints, dup_cluster.influenceObjects())
self.assertNotEqual(test_cube, dup_cube)
test_weights = skinutils.get_vert_indexes_to_weighted_influences(test_skincluster)
dup_weights = skinutils.get_vert_indexes_to_weighted_influences(dup_cluster)
self.assertDictEqual(test_weights, dup_weights)
def test_dup_skinnedmesh_and_skel(self):
test_cube, test_joints, test_skincluster = self.create_skinned_cube()
dup_cube, dup_root, dup_cluster = skinutils.duplicate_skinned_mesh_and_skeleton(test_cube)
self.scene_nodes.extend([dup_cube, dup_root, dup_cluster])
self.assertEqual(len(test_joints), len(dup_cluster.influenceObjects()))
self.assertNotEqual(test_joints, dup_cluster.influenceObjects())
self.assertNotEqual(test_cube, dup_cube)
def test_dup_namespace(self):
test_cube, test_joints, test_skincluster = self.create_skinned_cube()
pm.namespace(set=':')
self.create_namespace('foo')
dup_cube, dup_root, dup_cluster = skinutils.duplicate_skinned_mesh_and_skeleton(test_cube, dup_namespace='foo')
self.scene_nodes.extend([dup_cube, dup_root, dup_cluster])
expected_joint_names = [x.nodeName(stripNamespace=True) for x in skelutils.get_hierarchy_from_root(test_joints[0])]
result_joint_names = [x.nodeName(stripNamespace=True) for x in skelutils.get_hierarchy_from_root(dup_root)]
self.assertListEqual(expected_joint_names, result_joint_names)
self.assertNotEqual(test_joints, dup_cluster.influenceObjects())
self.assertNotEqual(test_cube, dup_cube)
self.assertEqual('foo', dup_root.parentNamespace())
|
nilq/baby-python
|
python
|
import array
import unittest
import pickle
import struct
import sys
from pyhmmer.easel import Vector, VectorF, VectorU8
class _TestVectorBase(object):
Vector = NotImplemented
def test_pickle(self):
v1 = self.Vector(range(6))
v2 = pickle.loads(pickle.dumps(v1))
self.assertSequenceEqual(v1, v2)
def test_pickle_protocol4(self):
v1 = self.Vector(range(6))
v2 = pickle.loads(pickle.dumps(v1, protocol=4))
self.assertEqual(v1.shape, v2.shape)
self.assertSequenceEqual(v1, v2)
self.assertSequenceEqual(memoryview(v1), memoryview(v2))
@unittest.skipUnless(sys.version_info >= (3, 8), "pickle protocol 5 requires Python 3.8+")
def test_pickle_protocol5(self):
v1 = self.Vector(range(6))
v2 = pickle.loads(pickle.dumps(v1, protocol=5))
self.assertEqual(v1.shape, v2.shape)
self.assertSequenceEqual(v1, v2)
self.assertSequenceEqual(memoryview(v1), memoryview(v2))
def test_empty_vector(self):
v1 = self.Vector([])
v2 = self.Vector.zeros(0)
v3 = self.Vector()
self.assertEqual(len(v1), 0)
self.assertEqual(len(v2), 0)
self.assertEqual(len(v3), 0)
self.assertEqual(v1, v2)
self.assertEqual(v1, v3)
self.assertFalse(v1)
self.assertFalse(v2)
self.assertFalse(v3)
if sys.implementation.name != "pypy":
v3 = self.Vector.zeros(3)
self.assertLess(sys.getsizeof(v1), sys.getsizeof(v3))
def test_init(self):
vec = self.Vector([1, 2, 3])
self.assertEqual(vec[0], 1)
self.assertEqual(vec[1], 2)
self.assertEqual(vec[2], 3)
def test_init_memcpy(self):
v1 = self.Vector([1, 2, 3])
a = array.array(v1.format, v1)
v2 = self.Vector(a)
self.assertEqual(v1, v2)
def test_init_error(self):
self.assertRaises(TypeError, self.Vector, 1)
self.assertRaises(TypeError, self.Vector.zeros, [1, 2, 3])
self.assertRaises(TypeError, self.Vector.zeros, "1")
def test_shape(self):
vec = self.Vector([1, 2, 3])
self.assertEqual(vec.shape, (3,))
vec2 = self.Vector.zeros(100)
self.assertEqual(vec2.shape, (100,))
vec3 = self.Vector.zeros(0)
self.assertEqual(vec3.shape, (0,))
def test_len(self):
vec = self.Vector([1, 2, 3])
self.assertEqual(len(vec), 3)
vec2 = self.Vector.zeros(100)
self.assertEqual(len(vec2), 100)
vec3 = self.Vector([])
self.assertEqual(len(vec3), 0)
def test_copy(self):
vec = self.Vector([1, 2, 3])
vec2 = vec.copy()
del vec
self.assertIsInstance(vec2, self.Vector)
self.assertEqual(vec2[0], 1)
self.assertEqual(vec2[1], 2)
self.assertEqual(vec2[2], 3)
vec3 = self.Vector([])
vec4 = vec3.copy()
self.assertEqual(vec3, vec4)
self.assertEqual(len(vec4), 0)
def test_reverse(self):
vec = self.Vector([1, 2, 3])
vec.reverse()
self.assertEqual(vec[0], 3)
self.assertEqual(vec[1], 2)
self.assertEqual(vec[2], 1)
vec2 = self.Vector([1, 2, 3, 4])
vec2.reverse()
self.assertEqual(vec2[0], 4)
self.assertEqual(vec2[1], 3)
self.assertEqual(vec2[2], 2)
self.assertEqual(vec2[3], 1)
vec3 = self.Vector([])
vec3.reverse()
self.assertEqual(vec3, self.Vector([]))
self.assertEqual(len(vec3), 0)
def test_add(self):
vec = self.Vector([1, 2, 3])
vec2 = vec + 1
self.assertEqual(vec2[0], 2)
self.assertEqual(vec2[1], 3)
self.assertEqual(vec2[2], 4)
with self.assertRaises(ValueError):
vec + self.Vector([1])
v2 = self.Vector([])
v3 = v2 + self.Vector([])
self.assertEqual(v3, self.Vector([]))
def test_iadd_scalar(self):
vec = self.Vector([1, 2, 3])
vec += 3
self.assertEqual(vec[0], 4)
self.assertEqual(vec[1], 5)
self.assertEqual(vec[2], 6)
v2 = self.Vector([])
v2 += 3
self.assertEqual(v2, self.Vector([]))
def test_iadd_vector(self):
vec = self.Vector([4, 5, 6])
vec += self.Vector([10, 11, 12])
self.assertEqual(vec[0], 14)
self.assertEqual(vec[1], 16)
self.assertEqual(vec[2], 18)
with self.assertRaises(ValueError):
vec += self.Vector([1])
v2 = self.Vector([])
v2 += self.Vector([])
self.assertEqual(v2, self.Vector([]))
def test_sub(self):
vec = self.Vector([1, 2, 3])
v2 = vec - 1
self.assertEqual(v2[0], 0)
self.assertEqual(v2[1], 1)
self.assertEqual(v2[2], 2)
v3 = self.Vector([8, 10, 12])
v4 = self.Vector([1, 2, 3])
v5 = v3 - v4
self.assertEqual(v5[0], 7)
self.assertEqual(v5[1], 8)
self.assertEqual(v5[2], 9)
def test_isub_scalar(self):
vec = self.Vector([4, 5, 6])
vec -= 2
self.assertEqual(vec[0], 2)
self.assertEqual(vec[1], 3)
self.assertEqual(vec[2], 4)
def test_isub_vector(self):
vec = self.Vector([4, 5, 6])
vec -= self.Vector([2, 3, 2])
self.assertEqual(vec[0], 2)
self.assertEqual(vec[1], 2)
self.assertEqual(vec[2], 4)
def test_mul_scalar(self):
vec = self.Vector([1, 2, 3])
v2 = vec * 3
self.assertEqual(v2[0], 3)
self.assertEqual(v2[1], 6)
self.assertEqual(v2[2], 9)
v2 = self.Vector([])
v3 = v2 * 3
self.assertEqual(v3, self.Vector([]))
def test_mul_vector(self):
vec = self.Vector([1, 2, 3])
v2 = self.Vector([3, 6, 9])
v3 = vec * v2
self.assertEqual(v3[0], 3)
self.assertEqual(v3[1], 12)
self.assertEqual(v3[2], 27)
v2 = self.Vector([])
v3 = v2 * self.Vector([])
self.assertEqual(v3, self.Vector([]))
def test_imul_scalar(self):
vec = self.Vector([1, 2, 3])
vec *= 3
self.assertEqual(vec[0], 3)
self.assertEqual(vec[1], 6)
self.assertEqual(vec[2], 9)
v2 = self.Vector([])
v2 *= 3
self.assertEqual(v2, self.Vector([]))
def test_matmul_vector(self):
u = self.Vector([4, 5, 6])
v = self.Vector([1, 2, 3])
self.assertEqual(u @ v, 1*4 + 2*5 + 3*6)
x = self.Vector([])
y = self.Vector([])
self.assertEqual(x @ y, 0)
def test_sum(self):
vec = self.Vector([1, 2, 3])
self.assertEqual(vec.sum(), 1 + 2 + 3)
vec2 = self.Vector([])
self.assertEqual(vec2.sum(), 0)
def test_slice(self):
vec = self.Vector([1, 2, 3, 4])
v1 = vec[:]
self.assertEqual(len(v1), 4)
self.assertEqual(v1[0], 1)
self.assertEqual(v1[-1], 4)
v2 = vec[1:3]
self.assertEqual(len(v2), 2)
self.assertEqual(v2[0], 2)
self.assertEqual(v2[1], 3)
v3 = vec[:-1]
self.assertEqual(len(v3), 3)
self.assertEqual(v3[-1], 3)
v4 = vec[0:10]
self.assertEqual(len(v4), 4)
self.assertEqual(v4[-1], 4)
with self.assertRaises(ValueError):
vec[::-1]
def test_min(self):
vec = self.Vector([1, 2, 3])
self.assertEqual(vec.min(), 1)
v2 = self.Vector([])
self.assertRaises(ValueError, v2.min)
def test_max(self):
vec = self.Vector([1, 2, 3])
self.assertEqual(vec.max(), 3)
v2 = self.Vector([])
self.assertRaises(ValueError, v2.max)
def test_argmin(self):
vec = self.Vector([4, 2, 8])
self.assertEqual(vec.argmin(), 1)
v2 = self.Vector([])
self.assertRaises(ValueError, v2.argmin)
def test_argmax(self):
vec = self.Vector([2, 8, 4])
self.assertEqual(vec.argmax(), 1)
v2 = self.Vector([])
self.assertRaises(ValueError, v2.argmax)
class TestVector(unittest.TestCase):
def test_abstract(self):
self.assertRaises(TypeError, Vector, [1, 2, 3])
self.assertRaises(TypeError, Vector.zeros, 1)
class TestVectorF(_TestVectorBase, unittest.TestCase):
Vector = VectorF
def test_strides(self):
vec = self.Vector([1, 2, 3])
sizeof_float = len(struct.pack('f', 1.0))
self.assertEqual(vec.strides, (sizeof_float,))
def test_normalize(self):
vec = self.Vector([1, 3])
vec.normalize()
self.assertEqual(vec[0], 1/4)
self.assertEqual(vec[1], 3/4)
vec2 = self.Vector([])
vec2.normalize()
def test_memoryview_tolist(self):
vec = self.Vector([1, 2, 3])
mem = memoryview(vec)
self.assertEqual(mem.tolist(), [1.0, 2.0, 3.0])
def test_neg(self):
vec = self.Vector([1, 2, 3])
v2 = -vec
self.assertEqual(v2[0], -1)
self.assertEqual(v2[1], -2)
self.assertEqual(v2[2], -3)
def test_div_scalar(self):
vec = self.Vector([1, 2, 3])
v2 = vec / 2
self.assertEqual(v2[0], 0.5)
self.assertEqual(v2[1], 1.0)
self.assertEqual(v2[2], 1.5)
v2 = self.Vector([])
v3 = v2 / 3
self.assertEqual(v3, self.Vector([]))
def test_div_vector(self):
vec = self.Vector([1, 2, 3])
v2 = self.Vector([2, 4, 6])
v3 = vec / v2
self.assertEqual(v3[0], 0.5)
self.assertEqual(v3[1], 0.5)
self.assertEqual(v3[2], 0.5)
v2 = self.Vector([])
v3 = v2 / self.Vector([])
self.assertEqual(v3, self.Vector([]))
def test_idiv_scalar(self):
vec = self.Vector([1, 2, 3])
vec /= 2
self.assertEqual(vec[0], 0.5)
self.assertEqual(vec[1], 1.0)
self.assertEqual(vec[2], 1.5)
vec = self.Vector([])
vec /= 3
self.assertEqual(vec, self.Vector([]))
def test_idiv_vector(self):
vec = self.Vector([1, 2, 3])
vec /= self.Vector([2, 4, 6])
self.assertEqual(vec[0], 0.5)
self.assertEqual(vec[1], 0.5)
self.assertEqual(vec[2], 0.5)
vec = self.Vector([])
vec /= self.Vector([])
self.assertEqual(vec, self.Vector([]))
class TestVectorU8(_TestVectorBase, unittest.TestCase):
Vector = VectorU8
def test_strides(self):
vec = self.Vector([1, 2, 3])
sizeof_u8 = len(struct.pack('B', 1))
self.assertEqual(vec.strides, (sizeof_u8,))
def test_isub_wrapping(self):
vec = self.Vector([0, 1, 2])
vec -= 1
self.assertEqual(vec[0], 255)
self.assertEqual(vec[1], 0)
self.assertEqual(vec[2], 1)
def test_sum_wrapping(self):
vec = self.Vector([124, 72, 116])
self.assertEqual(vec.sum(), (124 + 72 + 116) % 256)
def test_memoryview_tolist(self):
vec = self.Vector([1, 2, 3])
mem = memoryview(vec)
self.assertEqual(mem.tolist(), [1, 2, 3])
def test_eq_bytebuffer(self):
vec = self.Vector([1, 2, 3])
b1 = bytearray([1, 2, 3])
self.assertEqual(vec, b1)
b2 = array.array('B', [1, 2, 3])
self.assertEqual(vec, b2)
b3 = array.array('B', [1, 2, 3, 4])
self.assertNotEqual(vec, b3)
b4 = array.array('L', [1, 2, 3])
self.assertNotEqual(vec, b4)
def test_floordiv_scalar(self):
vec = self.Vector([1, 2, 3])
v2 = vec // 2
self.assertEqual(v2[0], 0)
self.assertEqual(v2[1], 1)
self.assertEqual(v2[2], 1)
v2 = self.Vector([])
v3 = v2 // 3
self.assertEqual(v3, self.Vector([]))
def test_floordiv_vector(self):
vec = self.Vector([1, 2, 3])
v2 = self.Vector([2, 4, 1])
v3 = vec // v2
self.assertEqual(v3[0], 0)
self.assertEqual(v3[1], 0)
self.assertEqual(v3[2], 3)
v2 = self.Vector([])
v3 = v2 // self.Vector([])
self.assertEqual(v3, self.Vector([]))
def test_ifloordiv_scalar(self):
vec = self.Vector([1, 2, 3])
vec //= 2
self.assertEqual(vec[0], 0)
self.assertEqual(vec[1], 1)
self.assertEqual(vec[2], 1)
vec = self.Vector([])
vec //= 3
self.assertEqual(vec, self.Vector([]))
def test_ifloordiv_vector(self):
vec = self.Vector([1, 2, 3])
vec //= self.Vector([2, 4, 6])
self.assertEqual(vec[0], 0)
self.assertEqual(vec[1], 0)
self.assertEqual(vec[2], 0)
vec = self.Vector([])
vec //= self.Vector([])
self.assertEqual(vec, self.Vector([]))
|
nilq/baby-python
|
python
|
from distutils.core import setup
import requests.certs
import py2exe
setup(
name='hogge',
version='1.0.1',
url='https://github.com/igortg/ir_clubchamps',
license='LGPL v3.0',
author='Igor T. Ghisi',
description='',
console=[{
"dest_base": "ir_clubchamps",
"script": "main.py",
}],
zipfile = None,
data_files = [(".", [requests.certs.where()])],
options={
"py2exe": {
"compressed": True,
"dll_excludes": ["msvcr100.dll"],
"excludes": ["Tkinter"],
"bundle_files": 1,
"dist_dir": "ir_clubchamps"
}
},
)
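# Note (a reading of the options above, not project documentation; verify against the
# py2exe docs): bundle_files=1 together with zipfile=None packs the libraries into the
# single generated executable, and shipping requests.certs.where() via data_files keeps
# the CA bundle next to the frozen build so HTTPS verification can still succeed.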
|
nilq/baby-python
|
python
|
import re
from abc import ABC
class TemplateFillerI(ABC):
def fill(self, template: str, entity: str, **kwargs):
return template.replace("XXX", entity)
class ItalianTemplateFiller(TemplateFillerI):
def __init__(self):
self._reduction_rules = {'diil': 'del', 'dilo': 'dello', 'dila': 'della', 'dii': 'dei', 'digli': 'degli',
'dile': 'delle', 'dil': 'dell\'',
'ail': 'al', 'alo': 'allo', 'ala': 'alla', 'ai': 'ai', 'agli': 'agli', 'ale': 'alle',
'dail': 'dal', 'dalo': 'dallo', 'dala': 'dalla', 'dai': 'dai', 'dagli': 'dagli',
'dale': 'dalle',
'inil': 'nel', 'inlo': 'nello', 'inla': 'nella', 'ini': 'nei', 'ingli': 'negli',
'inle': 'nelle',
'conil': 'col', 'conlo': 'cóllo', 'conla': 'cólla', 'coni': 'coi', 'congli': 'cogli',
'conle': 'cólle',
'suil': 'sul', 'sulo': 'sullo', 'sula': 'sulla', 'sui': 'sui', 'sugli': 'sugli',
'sule': 'sulle',
'peril': 'pel', 'perlo': 'pello', 'perla': 'pella', 'peri': 'pei', 'pergli': 'pegli',
'perle': 'pelle'}
self._template = "(?P<preposition>" + "|".join(["\\b" + preposition + "\\b"
for preposition in self._reduction_rules.keys()]) + ")"
self._finder = re.compile(self._template, re.IGNORECASE)
self._articles_gender = {'il': 'o', 'lo': 'o', 'i': 'i', 'gli': 'i', 'la': 'a', 'le': 'e'}
def fill(self, template: str, entity: str, **kwargs):
article = kwargs['article'].lower()
        article_in_entity = entity.lower().startswith(article)
if article:
if article_in_entity and re.search("(di|a|da|in|con|su|per)YYY", template):
entity = re.sub("\\b" + article + "\\b", "", entity, 1, re.IGNORECASE)
template = template.replace("YYY", article)
elif article_in_entity:
template = template.replace("YYY", "")
else:
template = template.replace("YYY", article)
template = self._reduce(template)
else:
template = template.replace("YYY", "")
gender = self._articles_gender.get(article, 'o')
template = template.replace("GGG", gender)
template = template.replace("XXX", entity)
if '\' ' + entity in template:
template = template.replace("\' ", "\'")
template = re.sub("\s{2,}", " ", template)
return template
def _reduce(self, template):
match = self._finder.search(template)
if match:
preposition = match.group('preposition').lower().strip()
template = template.replace(preposition, self._reduction_rules[preposition])
return template
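# Note: _reduce collapses the preposition+article pairs that fill() can produce into
# Italian articulated prepositions, e.g. a template containing "diil" becomes "del"
# and one containing "inla" becomes "nella" (see _reduction_rules above).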
class FrenchTemplateFiller(TemplateFillerI):
def __init__(self):
self._vowels = {'a', 'e', 'i', 'o', 'u', 'â', 'ê', 'î', 'ô', 'û', 'ë', 'ï', 'ü', 'y', 'ÿ', 'à', 'è', 'ù', 'é'}
def fill(self, template: str, entity: str, **kwargs):
if re.search("de\sXXX", template) and entity[0].lower() in self._vowels:
template = re.sub("de\sXXX", "d'XXX", template)
template = template.replace("XXX", entity)
template = re.sub("\s{2,}", " ", template)
return template.strip()
class GermanTemplateFiller(TemplateFillerI):
def fill(self, template: str, entity: str, **kwargs):
article = kwargs['article'].lower()
        article_in_entity = entity.lower().startswith(article)
if article_in_entity:
article = ""
template = re.sub("YYY", article, template)
template = template.replace("XXX", entity)
template = re.sub("\s{2,}", " ", template)
template = template.strip()
template = template[0].upper() + template[1:]
return template.strip()
class SpanishTemplateFiller(TemplateFillerI):
def __init__(self):
self._articles_gender = {'el': 'o', 'la': 'a', 'los': 'es', 'las': 'as'}
def fill(self, template: str, entity: str, **kwargs):
article = kwargs['article'].lower()
        article_in_entity = entity.lower().startswith(article)
skip = False
if article_in_entity and not re.search("(de)YYY", template):
skip = True
if article and not skip:
if article == "el" and re.search("(de)YYY", template):
template = template.replace("deYYY", 'del')
else:
template = template.replace("YYY", " " + article)
else:
template = template.replace("YYY", "")
gender = self._articles_gender.get(article, 'o')
template = template.replace("GGG", gender)
template = template.replace("XXX", entity)
template = re.sub("\s{2,}", " ", template)
return template
class TemplateFillerFactory(object):
@staticmethod
def make_filler(lang):
if lang == "en":
return TemplateFillerI()
if lang == "it":
return ItalianTemplateFiller()
if lang == "de":
return GermanTemplateFiller()
if lang == "es":
return SpanishTemplateFiller()
if lang == "fr":
return FrenchTemplateFiller()
return TemplateFillerI()
|
nilq/baby-python
|
python
|
import gc
import os
import cv2
import numpy as np
import torch
from SRL4RL import SRL4RL_path
from SRL4RL.rl.utils.runner import StateRunner
from SRL4RL.utils.nn_torch import numpy2pytorch, pytorch2numpy, save_model
from SRL4RL.utils.utils import createFolder, loadPickle
from SRL4RL.utils.utilsEnv import (
NCWH2WHC,
add_noise,
render_env,
reset_stack,
tensor2image,
update_video,
)
from SRL4RL.utils.utilsPlot import plot_xHat, plotEmbedding, visualizeMazeExplor
from SRL4RL.xsrl.arguments import is_with_discoveryPi
np2torch = lambda x, device: numpy2pytorch(x, differentiable=False, device=device)
def omega_last_layer(x):
return torch.sigmoid(x)
def sampleNormal(mu, sig):
noise = torch.randn_like(mu)
return mu + noise * sig, noise
def resetState(obs, alpha, beta, gamma, config):
device = torch.device(config["device"])
if len(obs.shape) > 3:
numEnv = obs.shape[0]
else:
numEnv = 1
state = np.random.normal(0, 0.02, [numEnv, config["state_dim"]])
# do not add noise at reset! obs = add_noise(obs)
state = initState(numEnv, state, np2torch(obs, device), alpha, beta, gamma, config)
return state
def init_action(size, config):
return np.zeros((size, config["action_dim"]))
def initState(size, states, x, alpha, beta, gamma, config):
device = torch.device(config["device"])
with torch.no_grad():
actions = init_action(size, config)
# Compute state
o_alpha = alpha(x)
o_beta = beta(
torch.cat((np2torch(states, device), np2torch(actions, device)), dim=1)
)
input_gamma = torch.cat((o_alpha, o_beta), dim=1)
states = pytorch2numpy(gamma(input_gamma))
return states
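# Reading of the encoder above (editor's summary, not an API contract): the state is
# updated as s_t = gamma([alpha(o_t), beta([s_{t-1}, a_{t-1}])]); at reset, s_{-1} is
# small Gaussian noise and a_{-1} is the zero action returned by init_action().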
def update_target_network(target, source, device=None):
if device:
source.to("cpu")
with torch.no_grad():
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
if device:
source.to(device)
return target
def normalizePi(pi, logPi, mu):
"""Apply squashing function.
See appendix C from https://arxiv.org/pdf/1812.05905.pdf.
"""
# action_max = envEval.action_space.high[0]
# action_min = envEval.action_space.low[0]
# action_scale = torch.tensor((action_max - action_min).item() / 2.)
# action_bias = torch.tensor((action_max + action_min) / 2.)
action_scale = 1
action_bias = 0
mu = torch.tanh(mu) * action_scale + action_bias
pi = torch.tanh(pi)
epsilon = 1e-6 # Avoid NaN (prevents division by zero or log of zero)
LogPi_jacobian = torch.log(action_scale * (1 - pi.pow(2)) + epsilon).sum(
-1, keepdim=True
)
logPi -= LogPi_jacobian
pi = pi * action_scale + action_bias
return pi, logPi, mu, LogPi_jacobian
def gaussian_logprob(noise, log_sig):
"""Compute Gaussian log probability."""
residual = (-0.5 * noise.pow(2) - log_sig).sum(-1, keepdim=True)
return residual - 0.5 * np.log(2 * np.pi) * noise.size(-1)
def policy_last_layer_op(s, pi_head, mu_tail, log_sig_tail, config):
head_out = pi_head(s)
mu = mu_tail(head_out)
log_sig_min = -10 # before: - config['action_dim'] * norm
log_sig_max = 2 # before: 12 * norm
log_sig = log_sig_tail(head_out) # +3
log_sig = torch.clamp(log_sig, min=log_sig_min, max=log_sig_max)
sig = log_sig.exp()
assert not torch.isnan(log_sig).any().item(), "isnan in log_sig!!"
log_sig_detach = log_sig
    # for reparameterization trick (mu + sig * N(0,1))
x_t, noise = sampleNormal(mu=mu, sig=sig)
logPi = gaussian_logprob(noise, log_sig)
pi, logPi, mu, LogPi_jacobian = normalizePi(x_t, logPi, mu)
assert not torch.isnan(head_out).any().item(), "isnan in head_out!!"
assert not torch.isnan(mu).any().item(), "isnan in mu!!"
return pi, logPi, log_sig_detach, mu, LogPi_jacobian.detach()
def policy_last_layer(
s,
pi_head,
mu_tail,
log_sig_tail,
config,
s_dvt=None,
pi_head_dvt=None,
mu_tail_dvt=None,
log_sig_tail_dvt=None,
save_pi_logs=False,
):
if s_dvt is not None:
pi_dvt, logPi_dvt, _, _, _ = policy_last_layer_op(
s_dvt, pi_head_dvt, mu_tail_dvt, log_sig_tail_dvt, config
)
pi, logPi, log_sig, mu, LogPi_jacobian = policy_last_layer_op(
s, pi_head, mu_tail, log_sig_tail, config
)
if save_pi_logs and (s_dvt is None):
return pi, logPi, log_sig.detach(), mu.detach(), LogPi_jacobian.detach()
elif save_pi_logs and (s_dvt is not None):
return (
pi,
logPi,
pi_dvt,
logPi_dvt,
log_sig.detach(),
mu.detach(),
LogPi_jacobian.detach(),
)
else:
return pi
def XSRL_nextObsEval(
alpha,
beta,
gamma,
omega,
config,
save_dir,
gradientStep=None,
saved_step=None,
suffix="last",
debug=False,
):
evaluate = suffix == "evaluate"
if evaluate:
path_eval = os.path.join(save_dir, "eval2obs")
createFolder(path_eval, "eval2obs already exist")
actionRepeat = config["actionRepeat"]
datasetEval_path = "testDatasets/testDataset_{}".format(config["new_env_name"])
if actionRepeat > 1:
datasetEval_path += "_noRepeatAction"
elif config["distractor"]:
datasetEval_path += "_withDistractor"
datasetEval_path += ".pkl"
datasetEval_path = os.path.join(SRL4RL_path, datasetEval_path)
dataset = loadPickle(datasetEval_path)
actions, observations, measures = (
dataset["actions"],
dataset["observations"],
dataset["measures"],
)
# if debug:
# last_index = actionRepeat * 200
# actions, observations, measures = actions[:-last_index], observations[:-last_index], measures[:-last_index]
measures = measures[1:][actionRepeat:][::actionRepeat]
"force the Garbage Collector to release unreferenced memory"
del dataset
gc.collect()
device = torch.device(config["device"])
Loss_obs = lambda x, y: torch.nn.MSELoss(reduction="sum")(x, y) / (
x.shape[0] * config["n_stack"]
)
loss_log = 0
print(" XSRL_nextObsEval (predicting next obs with PIeval_dataset) ......")
eval_steps = None
if config["new_env_name"] == "TurtlebotMazeEnv":
xHat_nextObsEval_step = 84
eval_steps = [87, 88, 101, 115, 117, 439, 440]
elif config["new_env_name"] == "HalfCheetahBulletEnv":
xHat_nextObsEval_step = 119
elif config["new_env_name"] == "InvertedPendulumSwingupBulletEnv":
xHat_nextObsEval_step = 45
elif config["new_env_name"] == "ReacherBulletEnv":
xHat_nextObsEval_step = 42
eval_steps = [14, 25, 396]
video_path = os.path.join(save_dir, "piEval_{}.mp4".format(suffix))
if config["new_env_name"] == "TurtlebotMazeEnv":
fps = 5
elif actionRepeat > 1:
fps = 20 // actionRepeat
else:
fps = 5
video_out = (
cv2.VideoWriter(
video_path,
cv2.VideoWriter_fourcc(*"mp4v"),
fps=fps,
frameSize=(int(588 * 2), 588),
)
if config["color"]
else cv2.VideoWriter(
video_path,
cv2.VideoWriter_fourcc(*"XVID"),
fps=fps,
frameSize=(int(588 * 2), 588),
isColor=0,
)
)
"init state with obs without noise"
if config["n_stack"] > 1:
nc = 3
observation = reset_stack(observations[0][None], config)
next_observation = reset_stack(observations[0][None], config)
else:
observation = observations[0][None]
with torch.no_grad():
stateExpl = resetState(observation, alpha, beta, gamma, config)
step_rep = 0
elapsed_steps = 0
len_traj = (len(observations) - 1) // actionRepeat - 1
assert len_traj == len(measures), "wrong division in len_traj"
all_states = np.zeros([len_traj, config["state_dim"]])
"observations[1:] -> remove reset obs and first actionRepeat time steps"
for step, (pi, next_obs) in enumerate(zip(actions, observations[1:])):
"Make a step"
if config["n_stack"] > 1:
if (step_rep + 1) > (config["actionRepeat"] - config["n_stack"]):
next_observation[
:, (step_rep - 1) * nc : ((step_rep - 1) + 1) * nc
] = next_obs
elif (step_rep + 1) == config["actionRepeat"]:
next_observation = next_obs[None]
step_rep += 1
if ((step + 1) % actionRepeat == 0) and (step + 1) > actionRepeat:
# (step + 1) > actionRepeat: let one iteration to better bootstrap the state estimation
step_rep = 0
TensA = numpy2pytorch(pi, differentiable=False, device=device).unsqueeze(
dim=0
)
"predict next states"
with torch.no_grad():
o_alpha = alpha(np2torch(observation, device))
o_beta = beta(torch.cat((np2torch(stateExpl, device), TensA), dim=1))
input_gamma = torch.cat((o_alpha, o_beta), dim=1)
s_next = gamma(input_gamma)
"Predict next observations of current elapsed_steps for all trajectories"
xHat = omega_last_layer(omega(s_next))
loss_log += pytorch2numpy(
Loss_obs(xHat, np2torch(next_observation, device))
)
"update video"
update_video(
im=255 * NCWH2WHC(next_observation[:, -3:, :, :]),
color=config["color"],
video_size=588,
video=video_out,
fpv=config["fpv"],
concatIM=255 * tensor2image(xHat[:, -3:, :, :]),
)
if type(eval_steps) is list:
saveIm = elapsed_steps in [xHat_nextObsEval_step] + eval_steps
name_ = "xHat_nextObsEval{}".format(elapsed_steps)
else:
saveIm = elapsed_steps == xHat_nextObsEval_step
name_ = "xHat_nextObsEval"
if saveIm:
"plot image to check the image prediction quality"
if config["n_stack"] > 1:
"saving other frames"
for step_r in range(config["n_stack"]):
name = "xHat_nextObsEval{}_frame{}".format(
elapsed_steps, step_r
)
plot_xHat(
NCWH2WHC(observation[:, step_r * nc : (step_r + 1) * nc]),
tensor2image(xHat[:, step_r * nc : (step_r + 1) * nc]),
imgTarget=NCWH2WHC(
next_observation[:, step_r * nc : (step_r + 1) * nc]
),
figure_path=save_dir,
with_nextObs=True,
name=name,
gradientStep=gradientStep,
suffix=suffix,
evaluate=evaluate,
)
else:
plot_xHat(
NCWH2WHC(observation[:, -3:, :, :]),
tensor2image(xHat[:, -3:, :, :]),
imgTarget=NCWH2WHC(next_observation[:, -3:, :, :]),
figure_path=save_dir,
with_nextObs=True,
name=name_,
gradientStep=gradientStep,
suffix=suffix,
evaluate=evaluate,
)
if elapsed_steps == xHat_nextObsEval_step:
if saved_step is not None:
plot_xHat(
NCWH2WHC(observation[:, -3:, :, :]),
tensor2image(xHat[:, -3:, :, :]),
imgTarget=NCWH2WHC(next_observation[:, -3:, :, :]),
figure_path=os.path.join(save_dir, "xHat_nextObsEval"),
with_nextObs=True,
name="xHat_nextObsEval",
gradientStep=gradientStep,
saved_step=saved_step,
)
if evaluate:
"plot image of all time steps"
plot_xHat(
NCWH2WHC(observation[:, -3:, :, :]),
tensor2image(xHat[:, -3:, :, :]),
imgTarget=NCWH2WHC(next_observation[:, -3:, :, :]),
figure_path=path_eval,
with_noise=config["with_noise"],
with_nextObs=True,
saved_step=elapsed_steps,
)
"save state"
all_states[elapsed_steps] = stateExpl[0]
elapsed_steps += 1
"update states"
stateExpl = pytorch2numpy(s_next)
"update inputs without noise for test"
# observation = add_noise(next_observation.copy(), noise_adder, config)
observation = next_observation.copy()
elif ((step + 1) % actionRepeat == 0) and (step + 1) == actionRepeat:
step_rep = 0
observation = next_observation.copy()
"Release everything if job is finished"
video_out.release()
cv2.destroyAllWindows()
loss_logNorm = loss_log / len_traj
print(" " * 100 + "done: nextObsEval = {:.3f}".format(loss_logNorm))
plotEmbedding(
"UMAP",
measures.copy(),
all_states,
figure_path=save_dir,
gradientStep=gradientStep,
saved_step=saved_step,
proj_dim=3,
suffix=suffix,
env_name=config["env_name"],
evaluate=evaluate,
)
plotEmbedding(
"PCA",
measures,
all_states,
figure_path=save_dir,
gradientStep=gradientStep,
saved_step=saved_step,
proj_dim=3,
suffix=suffix,
env_name=config["env_name"],
evaluate=evaluate,
)
"force the Garbage Collector to release unreferenced memory"
del (
actions,
observations,
measures,
video_out,
all_states,
stateExpl,
s_next,
observation,
next_observation,
xHat,
)
gc.collect()
return loss_logNorm
def piExplore2obs(
envExplor,
noise_adder,
alpha,
beta,
gamma,
omega,
pi_head,
mu_tail,
log_sig_tail,
config,
save_dir,
suffix="last",
debug=False,
evaluate=False,
saved_step=None,
):
device = torch.device(config["device"])
with_discoveryPi = is_with_discoveryPi(config)
if saved_step is None:
saved_step = ""
else:
saved_step = "_E{}".format(saved_step)
if config["env_name"] in ["TurtlebotEnv-v0", "TurtlebotMazeEnv-v0"]:
camera_id_eval = 1
imLabel = "map"
else:
camera_id_eval = -1
imLabel = "env"
if evaluate:
path_eval = os.path.join(save_dir, "piExplore2obs{}/".format(saved_step))
createFolder(path_eval, "piExplore2obs already exist")
path_eval_im = os.path.join(save_dir, "piExplore2im{}/".format(saved_step))
createFolder(path_eval_im, "piExplore2im already exist")
obs = envExplor.reset()
"init state with obs without noise"
if config["n_stack"] > 1:
nc = 3
actionRepeat = config["actionRepeat"]
observation = reset_stack(obs, config)
next_observation = reset_stack(obs, config)
else:
actionRepeat = 1
observation = obs
with torch.no_grad():
stateExpl = resetState(observation, alpha, beta, gamma, config)
eval_steps = 30 if debug else 500
video_path = os.path.join(save_dir, "piExplore_{}{}.mp4".format(suffix, saved_step))
fps = 5
video_out = (
cv2.VideoWriter(
video_path,
cv2.VideoWriter_fourcc(*"mp4v"),
fps=fps,
frameSize=(int(588 * 2), 588),
)
if config["color"]
else cv2.VideoWriter(
video_path,
cv2.VideoWriter_fourcc(*"XVID"),
fps=fps,
frameSize=(int(588 * 2), 588),
isColor=0,
)
)
print(" piExplore2obs (exploring and predicting next obs) ......")
for step in range(eval_steps):
"Make a step"
has_bump = True
num_bump = 0
while has_bump:
if evaluate:
assert num_bump < 500, "num_bump > 500"
num_bump += 1
if with_discoveryPi:
"update policy distribution and sample action"
with torch.no_grad():
TensA = policy_last_layer(
np2torch(stateExpl, "cpu"),
pi_head,
mu_tail,
log_sig_tail,
config=config,
).to(device)
pi = pytorch2numpy(TensA.squeeze(dim=0))
else:
pi = envExplor.action_space.sample()
TensA = numpy2pytorch(
pi, differentiable=False, device=device
).unsqueeze(dim=0)
if config["bumpDetection"]:
has_bump = envExplor.bump_detection(pi)
else:
has_bump = False
"Make a step"
for step_rep in range(actionRepeat):
obs, _, done, _ = envExplor.step(pi)
if config["n_stack"] > 1:
if (step_rep + 1) > (config["actionRepeat"] - config["n_stack"]):
next_observation[
:, (step_rep - 1) * nc : ((step_rep - 1) + 1) * nc
] = obs
elif (step_rep + 1) == actionRepeat:
assert step_rep < 2, "actionRepeat is already performed in env"
next_observation = obs
with torch.no_grad():
"predict next states"
o_alpha = alpha(np2torch(observation, device))
o_beta = beta(torch.cat((np2torch(stateExpl, device), TensA), dim=1))
input_gamma = torch.cat((o_alpha, o_beta), dim=1)
s_next = gamma(input_gamma)
"Predict next observations of current step for all trajectories"
xHat = omega_last_layer(omega(s_next))
"update video"
update_video(
im=255 * NCWH2WHC(next_observation[:, -3:, :, :]),
color=config["color"],
video_size=588,
video=video_out,
fpv=config["fpv"],
concatIM=255 * tensor2image(xHat[:, -3:, :, :]),
)
if evaluate:
im_high_render = (
render_env(
envExplor,
256,
False,
camera_id_eval,
config["color"],
downscaling=False,
)
/ 255.0
)
plot_xHat(
NCWH2WHC(observation[:, -3:, :, :]),
tensor2image(xHat[:, -3:, :, :]),
imgTarget=NCWH2WHC(next_observation[:, -3:, :, :]),
im_high_render=im_high_render,
imLabel=imLabel,
figure_path=path_eval,
with_noise=config["with_noise"],
with_nextObs=True,
saved_step=step,
)
im_high_render = render_env(
envExplor,
588,
False,
camera_id_eval,
config["color"],
downscaling=False,
)
cv2.imwrite(
path_eval_im + "ob_{:05d}".format(step) + ".png",
im_high_render[:, :, ::-1].astype(np.uint8),
)
"update inputs without noise for test"
# observation = add_noise(next_observation.copy(), noise_adder, config)
observation = next_observation.copy()
stateExpl = pytorch2numpy(s_next)
"Release everything if job is finished"
video_out.release()
cv2.destroyAllWindows()
"force the Garbage Collector to release unreferenced memory"
del video_out, stateExpl, s_next, observation, next_observation, xHat
gc.collect()
def getPiExplore(
envExplor,
noise_adder,
alpha,
beta,
gamma,
pi_head,
mu_tail,
log_sig_tail,
config,
save_dir,
n_epoch=None,
debug=False,
evaluate=False,
suffix="",
):
assert config["env_name"] in [
"TurtlebotEnv-v0",
"TurtlebotMazeEnv-v0",
], "getPiExplore only with Turtlebot"
device = torch.device(config["device"])
with_discoveryPi = is_with_discoveryPi(config)
observation = envExplor.reset()
with torch.no_grad():
stateExpl = resetState(observation, alpha, beta, gamma, config)
if debug:
eval_steps = [50, 100]
elif config["env_name"] == "TurtlebotEnv-v0":
eval_steps = [100, 200, 300]
elif config["env_name"] == "TurtlebotMazeEnv-v0":
eval_steps = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
robot_pos = np.zeros((eval_steps[-1] + 1, 2))
eval_i = 0
robot_pos[0] = envExplor.object.copy()
if n_epoch:
n_epoch_ = "-%06d" % n_epoch
else:
n_epoch_ = ""
print(" getPiExplore (exploring) ......")
for step in range(eval_steps[-1]):
"Make a step"
has_bump = True
num_bump = 0
while has_bump:
if evaluate:
assert num_bump < 500, "num_bump > 500"
num_bump += 1
if with_discoveryPi:
"update policy distribution and sample action"
with torch.no_grad():
TensA = policy_last_layer(
np2torch(stateExpl, "cpu"),
pi_head,
mu_tail,
log_sig_tail,
config=config,
).to(device)
pi = pytorch2numpy(TensA.squeeze(dim=0))
else:
pi = envExplor.action_space.sample()
TensA = numpy2pytorch(
pi, differentiable=False, device=device
).unsqueeze(dim=0)
if config["bumpDetection"]:
has_bump = envExplor.bump_detection(pi)
else:
has_bump = False
"Make a step"
obs, _, done, _ = envExplor.step(pi)
"store robot pos"
robot_pos[step + 1] = envExplor.object.copy()
if (step + 1) == eval_steps[eval_i]:
visualizeMazeExplor(
config["env_name"],
robot_pos=robot_pos[: eval_steps[eval_i]].copy(),
save_dir=save_dir,
name="explore{}{}{}".format(eval_steps[eval_i], n_epoch_, suffix),
)
eval_i += 1
next_observation = obs
"predict next states"
with torch.no_grad():
o_alpha = alpha(np2torch(observation, device))
o_beta = beta(torch.cat((np2torch(stateExpl, device), TensA), dim=1))
input_gamma = torch.cat((o_alpha, o_beta), dim=1)
s_next = gamma(input_gamma)
"update inputs without noise for test"
# observation = add_noise(next_observation.copy(), noise_adder, config)
observation = next_observation
stateExpl = pytorch2numpy(s_next)
"force the Garbage Collector to release unreferenced memory"
del robot_pos, s_next, stateExpl, observation, next_observation
gc.collect()
class XSRLRunner(StateRunner):
def __init__(self, config):
super().__init__(config)
self.alpha, self.beta, self.gamma = torch.load(
os.path.join(config["srl_path"], "state_model.pt"),
map_location=torch.device("cpu"),
)
self.alpha.eval(), self.beta.eval(), self.gamma.eval()
self.initState()
def resetState(self):
self.state = self.initState().to("cpu")
self.pi = np.zeros((self.action_dim))
def update_state(self, x, demo=False):
with torch.no_grad():
"predict next state"
inputs = add_noise(x, self.noise_adder, self.noiseParams)
o_alpha = self.alpha(inputs.to(self.device)).to("cpu")
"FNNs only faster with cpu"
o_beta = self.beta(
torch.cat((self.state, np2torch(self.pi, "cpu").unsqueeze(0)), dim=1)
)
input_gamma = torch.cat((o_alpha, o_beta), dim=1)
new_state = self.gamma(input_gamma)
if demo:
self.last_inputs = pytorch2numpy(inputs)[0][-3:, :, :].transpose(1, 2, 0)
self.state = new_state
return new_state
def save_state_model(self, save_path):
print("Saving models ......")
save_model([self.alpha, self.beta, self.gamma], save_path + "state_model")
def train(self, training=True):
self.alpha.train(training)
self.beta.train(training)
self.gamma.train(training)
def to_device(self, device="cpu"):
torchDevice = torch.device(device)
self.alpha.to(torchDevice)
self.beta.to("cpu")
self.gamma.to("cpu")
|
nilq/baby-python
|
python
|
# coding=utf-8
from __future__ import unicode_literals
from django.db import models
import pytz
import requests
from datetime import timedelta
import datetime
import math
import wargaming
from django.db.models.signals import pre_save
from django.db.models import Q
from django.contrib.postgres.fields import JSONField
from django.dispatch import receiver
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.utils.functional import cached_property
wot = wargaming.WoT(settings.WARGAMING_KEY, language='ru', region='ru')
wgn = wargaming.WGN(settings.WARGAMING_KEY, language='ru', region='ru')
def utc_now():
return datetime.datetime.now(tz=pytz.UTC)
def combine_dt(date, time):
return datetime.datetime.combine(date, time)
class TournamentInfo(dict):
    def __init__(self, province_id, seq=None, **kwargs):
        # Forward seq positionally; passing it as a keyword would just create a literal 'seq' key.
        super(TournamentInfo, self).__init__(seq or {}, **kwargs)
# {u'applications_decreased': False,
# u'apply_error_message': u'Чтобы подать заявку, войдите на сайт.',
# u'arena_name': u'Аэродром',
# u'available_applications_number': 0,
# u'battles': [],
# u'can_apply': False,
# u'front_id': u'campaign_05_ru_west',
# u'is_apply_visible': False,
# u'is_superfinal': False,
# u'next_round': None,
# u'next_round_start_time': u'19:15:00.000000',
# u'owner': None,
# u'pretenders': [{u'arena_battles_count': 49,
# u'arena_wins_percent': 38.78,
# u'cancel_action_id': None,
# u'clan_id': 94365,
# u'color': u'#b00a10',
# u'division_id': None,
# u'elo_rating_10': 1155,
# u'elo_rating_6': 1175,
# u'elo_rating_8': 1259,
# u'emblem_url': u'https://ru.wargaming.net/clans/media/clans/emblems/cl_365/94365/emblem_64x64_gm.png',
# u'fine_level': 0,
# u'id': 94365,
# u'landing': True,
# u'name': u'Deadly Decoy',
# u'tag': u'DECOY',
# u'xp': None}],
# u'province_id': u'herning',
# u'province_name': u'\u0425\u0435\u0440\u043d\u0438\u043d\u0433',
# u'province_pillage_end_datetime': None,
# u'province_revenue': 0,
# u'revenue_level': 0,
# u'round_number': 1,
# u'size': 32,
# u'start_time': u'19:00:00',
# u'turns_till_primetime': 11}
self.update(requests.get(
'https://ru.wargaming.net/globalmap/game_api/tournament_info?alias=%s' % province_id).json())
try:
province = Province.objects.get(province_id=self['province_id'], front__front_id=self['front_id'])
except Province.DoesNotExist:
return
arena_id = province.arena_id
owner = self['owner']
if owner:
update_clan_province_stat(arena_id, **owner)
for clan_data in self.clans_info.values():
update_clan_province_stat(arena_id, **clan_data)
@property
def clans_info(self):
clans = {}
for battle in self['battles']:
if 'first_competitor' in battle and battle['first_competitor']:
clans[battle['first_competitor']['id']] = battle['first_competitor']
if 'second_competitor' in battle and battle['second_competitor']:
clans[battle['second_competitor']['id']] = battle['second_competitor']
if isinstance(self['pretenders'], list):
for clan in self['pretenders']:
clans[clan['id']] = clan
if self['owner'] and self['owner']['id'] in clans:
del clans[self['owner']['id']]
return clans
@property
def pretenders(self):
return self.clans_info.keys()
def update_clan_province_stat(arena_id, tag, name, elo_rating_6, elo_rating_8, elo_rating_10,
arena_wins_percent, arena_battles_count, **kwargs):
pk = kwargs.get('id') or kwargs['clan_id']
clan = Clan.objects.update_or_create(id=pk, defaults={
'tag': tag, 'title': name,
'elo_6': elo_rating_6, 'elo_8': elo_rating_8,
'elo_10': elo_rating_10,
})[0]
ClanArenaStat.objects.update_or_create(clan=clan, arena_id=arena_id, defaults={
'wins_percent': arena_wins_percent,
'battles_count': arena_battles_count,
})
class Clan(models.Model):
tag = models.CharField(max_length=5, null=True)
title = models.CharField(max_length=255, null=True)
elo_6 = models.IntegerField(null=True)
elo_8 = models.IntegerField(null=True)
elo_10 = models.IntegerField(null=True)
def __repr__(self):
return '<Clan: %s>' % self.tag
def __str__(self):
return self.tag
def force_update(self):
clan_info = wgn.clans.info(clan_id=self.pk)[str(self.pk)]
self.tag = clan_info['tag']
self.title = clan_info['name']
self.save()
def as_json(self):
return {
'clan_id': self.pk,
'tag': self.tag,
'name': self.title,
'elo_6': self.elo_6,
'elo_8': self.elo_8,
'elo_10': self.elo_10,
}
def as_json_with_arena(self, arena_id):
data = self.as_json()
stat = self.arena_stats.filter(arena_id=arena_id)
if stat:
data['arena_stat'] = stat[0].as_json()
else:
data['arena_stat'] = ClanArenaStat(
clan=self,
arena_id=arena_id,
wins_percent=0,
battles_count=0,
).as_json()
return data
class Player(models.Model):
nickname = models.CharField(max_length=255)
clan = models.ForeignKey(Clan, null=True)
email = models.CharField(null=True, max_length=255)
password = models.CharField(null=True, max_length=255)
system_account = models.BooleanField(default=False)
class Front(models.Model):
front_id = models.CharField(max_length=254)
max_vehicle_level = models.IntegerField()
class Province(models.Model):
province_id = models.CharField(max_length=255)
front = models.ForeignKey(Front)
province_name = models.CharField(max_length=255)
province_owner = models.ForeignKey(Clan, on_delete=models.SET_NULL, null=True, blank=True)
arena_id = models.CharField(max_length=255)
arena_name = models.CharField(max_length=255)
prime_time = models.TimeField()
server = models.CharField(max_length=10)
def __repr__(self):
return '<Province: %s>' % self.province_id
def __str__(self):
return self.province_id
def force_update(self):
data = wot.globalmap.provinces(
front_id=self.front.front_id, province_id=self.province_id,
fields='arena_id,arena_name,province_name,prime_time,owner_clan_id,server')
if len(data) == 0:
raise Exception("Province '%s' not found on front '%s'", self.province_id, self.front.front_id)
data = data[0]
self.arena_id = data['arena_id']
self.arena_name = data['arena_name']
self.province_name = data['province_name']
self.prime_time = data['prime_time']
if data['owner_clan_id']:
self.province_owner = Clan.objects.get_or_create(pk=data['owner_clan_id'])[0]
self.server = data['server']
@cached_property
def tournament_info(self):
return TournamentInfo(self.province_id)
def as_json(self):
return {
'province_id': self.province_id,
'province_name': self.province_name,
'province_owner': self.province_owner and self.province_owner.as_json(),
'arena_id': self.arena_id,
'arena_name': self.arena_name,
'prime_time': self.prime_time,
'server': self.server,
'max_vehicle_level': self.front.max_vehicle_level,
}
class ClanArenaStat(models.Model):
clan = models.ForeignKey(Clan, related_name='arena_stats')
arena_id = models.CharField(max_length=255)
wins_percent = models.FloatField()
battles_count = models.IntegerField()
# level = models.IntegerField()
    # base = models.IntegerField(choices=((1, 'First base'), (2, 'Second Base')))
def as_json(self):
return {
'wins_percent': self.wins_percent,
'battles_count': self.battles_count,
}
# CLEAN MAP
# [{u'active_battles': [],
# u'arena_id': u'10_hills',
# u'arena_name': u'\u0420\u0443\u0434\u043d\u0438\u043a\u0438',
# u'attackers': [],
# u'battles_start_at': u'2016-11-23T19:15:00',
# u'competitors': [192,
# 3861,
# 45846,
# 61752,
# 80424,
# 82433,
# 146509,
# 170851,
# 179351,
# 190526,
# 200649,
# 201252,
# 219575],
# u'current_min_bet': 0,
# u'daily_revenue': 0,
# u'front_id': u'campaign_05_ru_west',
# u'front_name': u'\u041a\u0430\u043c\u043f\u0430\u043d\u0438\u044f: \u0417\u0430\u043f\u0430\u0434',
# u'is_borders_disabled': False,
# u'landing_type': u'tournament',
# u'last_won_bet': 0,
# u'max_bets': 32,
# u'neighbours': [u'herning', u'odense', u'uddevalla'],
# u'owner_clan_id': None,
# u'pillage_end_at': None,
# u'prime_time': u'19:15',
# u'province_id': u'aarhus',
# u'province_name': u'\u041e\u0440\u0445\u0443\u0441',
# u'revenue_level': 0,
# u'round_number': None,
# u'server': u'RU6',
# u'status': None,
# u'uri': u'/#province/aarhus',
# u'world_redivision': False}]
class ProvinceAssault(models.Model):
date = models.DateField() # On what date Assault was performed
province = models.ForeignKey(Province, # On what province
related_name='assaults')
current_owner = models.ForeignKey(Clan, related_name='+', null=True)
clans = models.ManyToManyField(Clan) # By which clans
prime_time = models.TimeField()
arena_id = models.CharField(max_length=255)
round_number = models.IntegerField(null=True)
landing_type = models.CharField(max_length=255, null=True)
status = models.CharField(max_length=20, default='FINISHED', null=True)
division = JSONField(null=True)
class Meta:
ordering = ('date', )
unique_together = ('date', 'province')
def __repr__(self):
return '<ProvinceAssault @%s: %s owned by %s>' % (
self.date, self.province.province_id, str(self.current_owner))
@cached_property
def datetime(self):
if isinstance(self.date, str):
self.date = datetime.date(*[int(i) for i in self.date.split('-')])
if isinstance(self.prime_time, str):
self.prime_time = datetime.time(*[int(i) for i in self.prime_time.split(':')])
return combine_dt(self.date, self.prime_time).replace(tzinfo=pytz.UTC)
@cached_property
def planned_times(self):
if utc_now() > self.datetime:
if isinstance(self.round_number, int):
round_number = self.round_number
else:
# Bug-fix: WGAPI can return None on round number if map is new
round_number = 1
else:
round_number = 1 # Bug-Fix: WGAPI return round number from previous day
clans_count = len(self.clans.all())
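        # Single-elimination bracket: ceil(log2(#clans)) rounds among the challengers,
        # plus one extra final round against the current owner (appended below).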
if clans_count > 0:
total_rounds = round_number + int(math.ceil(math.log(clans_count, 2))) - 1
else:
total_rounds = round_number - 1
times = [
self.datetime + timedelta(minutes=30) * i
for i in range(0, total_rounds)
]
if self.current_owner:
times.append(self.datetime + timedelta(minutes=30) * total_rounds)
return times
def clan_battles(self, clan):
max_rounds = len(self.planned_times)
existing_battles = {b.round: b for b in self.battles.filter(Q(clan_a=clan) | Q(clan_b=clan))}
res = []
for round_number in range(1, max_rounds + 1):
if round_number in existing_battles:
res.append(existing_battles[round_number])
else:
# create FAKE planned battle
pb = ProvinceBattle(
assault=self,
province=self.province,
arena_id=self.arena_id,
round=round_number,
)
if round_number <= self.round_number and self.status == 'STARTED':
pb.winner = clan
if round_number == max_rounds and self.current_owner:
pb.clan_a = self.current_owner
pb.clan_b = clan
res.append(pb)
return res
@cached_property
def max_rounds(self):
return len(self.planned_times)
def as_clan_json(self, clan, current_only=True):
if current_only:
battles = [b.as_json() for b in self.clan_battles(clan)
if b.round >= self.round_number and self.status != 'FINISHED'
or self.datetime > utc_now()]
else:
battles = [b.as_json() for b in self.clan_battles(clan)]
if self.current_owner == clan:
mode = 'defence'
battles = battles[-1:-2:-1]
else:
mode = 'attack'
return {
'mode': mode,
'province_info': self.province.as_json(),
'prime_time': self.datetime,
'clans': {c.pk: c.as_json_with_arena(self.arena_id) for c in self.clans.all()},
'battles': battles,
}
class ProvinceBattle(models.Model):
assault = models.ForeignKey(ProvinceAssault, related_name='battles')
province = models.ForeignKey(Province, related_name='battles')
arena_id = models.CharField(max_length=255)
clan_a = models.ForeignKey(Clan, related_name='+')
clan_b = models.ForeignKey(Clan, related_name='+')
winner = models.ForeignKey(Clan, null=True, related_name='battles_winner')
start_at = models.DateTimeField()
round = models.IntegerField()
class Meta:
ordering = ('round', 'start_at')
def __repr__(self):
clan_a_tag = clan_b_tag = province_id = None
try:
clan_a_tag = self.clan_a.tag
except ObjectDoesNotExist:
clan_a_tag = None
try:
clan_b_tag = self.clan_b.tag
except ObjectDoesNotExist:
clan_b_tag = None
try:
province_id = self.province.province_id
except ObjectDoesNotExist:
province_id = None
return '<Battle round %s: %s VS %s on %s>' % (self.round, clan_a_tag, clan_b_tag, province_id)
def __str__(self):
return repr(self)
@property
def round_datetime(self):
prime_time = self.province.prime_time
date = self.assault.date
return combine_dt(date, prime_time).replace(tzinfo=pytz.UTC) + timedelta(minutes=30) * (self.round - 1)
@property
def title(self):
power = self.assault.max_rounds - self.round - 1
if power == 0:
return 'Final'
else:
return 'Round 1 / %s' % (2 ** power)
def as_json(self):
try:
clan_a = self.clan_a
except ObjectDoesNotExist:
clan_a = None
try:
clan_b = self.clan_b
except ObjectDoesNotExist:
clan_b = None
return {
'planned_start_at': self.round_datetime,
'real_start_at': self.start_at,
'clan_a': clan_a.as_json_with_arena(self.arena_id) if clan_a else None,
'clan_b': clan_b.as_json_with_arena(self.arena_id) if clan_b else None,
'winner': self.winner.as_json() if self.winner else None
}
class ProvinceTag(models.Model):
date = models.DateField()
tag = models.CharField(max_length=255)
province_id = models.CharField(max_length=255)
def __repr__(self):
return "<ProvinceTag %s: %s@%s>" % (self.date, self.tag, self.province_id)
@receiver(pre_save, sender=Clan)
def fetch_minimum_clan_info(sender, instance, **kwargs):
if (not instance.tag or not instance.title) and instance.pk:
instance.force_update()
elif not instance.pk and instance.tag:
info = [i for i in wgn.clans.list(search=instance.tag) if i['tag'] == instance.tag]
if len(info) == 1:
instance.pk = info[0]['clan_id']
instance.title = info[0]['name']
else:
# No clan with such tag, do not allow such Clan
instance.tag = None
instance.title = None
@receiver(pre_save, sender=Province)
def fetch_minimum_province_info(sender, instance, **kwargs):
required_fields = ['province_name', 'arena_id', 'arena_name', 'prime_time', 'server']
for field in required_fields:
if not getattr(instance, field):
instance.force_update()
|
nilq/baby-python
|
python
|
"""
python setup.py sdist
twine upload dist/*
"""
import cv2
if cv2.cuda.getCudaEnabledDeviceCount() > 0:
print("检测到cuda环境")
|
nilq/baby-python
|
python
|
import librosa as lr
import numpy as np
def mu_law_encoding(data, mu):
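    # mu-law companding: f(x) = sign(x) * ln(1 + mu*|x|) / ln(1 + mu), mapping [-1, 1] to [-1, 1]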
mu_x = np.sign(data) * np.log(1 + mu * np.abs(data)) / np.log(mu + 1)
return mu_x
def mu_law_expansion(data, mu):
s = np.sign(data) * (np.exp(np.abs(data) * np.log(mu + 1)) - 1) / mu
return s
def quantize_data(data, classes):
mu_x = mu_law_encoding(data, classes)
bins = np.linspace(-1, 1, classes)
quantized = np.digitize(mu_x, bins) - 1
return quantized
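# `list_all_audio_files` is used by create_chunks below but is not defined in this
# snippet; a minimal sketch (assuming any file with a common audio extension under
# `location` should be included):
import os
def list_all_audio_files(location):
    audio_extensions = ('.wav', '.mp3', '.flac', '.ogg')
    audio_files = []
    for root, _, filenames in os.walk(location):
        for filename in filenames:
            if filename.lower().endswith(audio_extensions):
                audio_files.append(os.path.join(root, filename))
    return sorted(audio_files)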
def create_chunks(location):
print("create dataset from audio files at", location)
files = list_all_audio_files(location)
processed_files = []
for i, file in enumerate(files):
print(" processed " + str(i) + " of " + str(len(files)) + " files")
file_data, _ = lr.load(path=file,
sr=None,
mono=True)
quantized_data = quantize_data(file_data, 256).astype(np.uint8)
processed_files.append(quantized_data)
return processed_files
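# Quick round-trip sanity check (illustrative only, not part of the original script):
# encode a test tone, expand it back, and confirm the reconstruction error is tiny.
if __name__ == "__main__":
    signal = np.sin(np.linspace(0, 2 * np.pi, 1000)).astype(np.float32)
    encoded = mu_law_encoding(signal, 255)
    decoded = mu_law_expansion(encoded, 255)
    print("max round-trip error:", np.max(np.abs(signal - decoded)))
    print("quantized value range:", quantize_data(signal, 256).min(), quantize_data(signal, 256).max())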
|
nilq/baby-python
|
python
|
from random import randint
cpu = randint(0,5)
usuario = int(input('Enter a number between 0 and 5: '))
if cpu == usuario:
    print('\033[33mYou got it, ya rascal!')
else:
    print('You missed it, ya dope')
|
nilq/baby-python
|
python
|
from DBMS_Software.queryProcessor.ReadGlobalDataDictionary import readGlobalDataDictionary
from DBMS_Software.queryProcessor.ReadGlobalDataDictionary import fetchFileFromGCP
import os
def createSQLDump():
print("Enter the TableName:")
TableName = input()
tableLocation = readGlobalDataDictionary(TableName)
if(tableLocation == 'RemoteLocation'):
fetchFileFromGCP(TableName)
FileExtension = ".txt"
FileName = TableName + FileExtension # Framing the FileName
metaFileExtension = 'MetaData.txt'
metaDatafileName = TableName + metaFileExtension
    with open(metaDatafileName, 'r') as FileObject:
        Lines = FileObject.readlines()
    dump_dir = 'E:/SQLDump_Extraction'
    if not os.path.exists(dump_dir):
        os.makedirs(dump_dir)
    # Copy the metadata file into the dump directory
    filepath = os.path.join(dump_dir, metaDatafileName)
    with open(filepath, "a") as f:
        f.writelines(Lines)
    # Copy the table data file the same way (the original snippet stops after opening
    # the output file; mirroring the metadata copy is the assumed continuation)
    filepath = os.path.join(dump_dir, FileName)
    with open(FileName, 'r') as TableObject, open(filepath, "a") as f:
        f.writelines(TableObject.readlines())
|
nilq/baby-python
|
python
|
"""STACK Configs."""
import os
import yaml
config = yaml.load(open('stack/config.yml', 'r'), Loader=yaml.FullLoader)
PROJECT_NAME = config['PROJECT_NAME']
STAGE = config.get('STAGE') or 'dev'
# primary bucket
BUCKET = config['BUCKET']
# Additional environement variable to set in the task/lambda
TASK_ENV: dict = dict()
# Existing VPC to point ECS/LAMBDA stacks towards. Defaults to creating a new
# VPC if no ID is supplied.
VPC_ID = os.environ.get("VPC_ID") or config['VPC_ID']
################################################################################
# #
# ECS #
# #
################################################################################
# Min/Max Number of ECS images
MIN_ECS_INSTANCES: int = config['MIN_ECS_INSTANCES']
MAX_ECS_INSTANCES: int = config['MAX_ECS_INSTANCES']
# CPU value | Memory value
# 256 (.25 vCPU) | 0.5 GB, 1 GB, 2 GB
# 512 (.5 vCPU) | 1 GB, 2 GB, 3 GB, 4 GB
# 1024 (1 vCPU) | 2 GB, 3 GB, 4 GB, 5 GB, 6 GB, 7 GB, 8 GB
# 2048 (2 vCPU) | Between 4 GB and 16 GB in 1-GB increments
# 4096 (4 vCPU) | Between 8 GB and 30 GB in 1-GB increments
TASK_CPU: int = config['TASK_CPU']
TASK_MEMORY: int = config['TASK_MEMORY']
################################################################################
# #
# LAMBDA #
# #
################################################################################
TIMEOUT: int = config['TIMEOUT']
MEMORY: int = config['MEMORY']
# stack skips setting concurrency if this value is 0
# the stack will instead use unreserved lambda concurrency
MAX_CONCURRENT: int = 500 if STAGE == "prod" else config['MAX_CONCURRENT']
# Cache
CACHE_NODE_TYPE = config['CACHE_NODE_TYPE']
CACHE_ENGINE = config['CACHE_ENGINE']
CACHE_NODE_NUM = config['CACHE_NODE_NUM']
|
nilq/baby-python
|
python
|
"""
Script for testing purposes.
"""
import zmq
def run(port=5555):
context = zmq.Context()
# using zmq.ROUTER
socket = context.socket(zmq.ROUTER)
# bind socket
socket.bind('tcp://*:{}'.format(port))
while True:
msg = socket.recv_multipart()
print('Received message {}'.format(msg))
socket.send_multipart([msg[0], b'', b'RECEIVED'])
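# A minimal companion client for manual testing (not part of the original script;
# host and port are assumptions). A REQ socket prepends an empty delimiter frame,
# so the ROUTER above receives [identity, b'', payload] and replies in kind.
def run_client(port=5555):
    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    socket.connect('tcp://localhost:{}'.format(port))
    socket.send(b'ping')
    print('Reply: {}'.format(socket.recv()))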
if __name__ == '__main__':
run()
|
nilq/baby-python
|
python
|
from itertools import product
from string import ascii_lowercase
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestCounting:
def test_cumcount(self):
df = DataFrame([["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"])
g = df.groupby("A")
sg = g.A
expected = Series([0, 1, 2, 0, 3])
tm.assert_series_equal(expected, g.cumcount())
tm.assert_series_equal(expected, sg.cumcount())
def test_cumcount_empty(self):
ge = DataFrame().groupby(level=0)
se = Series(dtype=object).groupby(level=0)
# edge case, as this is usually considered float
e = Series(dtype="int64")
tm.assert_series_equal(e, ge.cumcount())
tm.assert_series_equal(e, se.cumcount())
def test_cumcount_dupe_index(self):
df = DataFrame(
[["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=[0] * 5
)
g = df.groupby("A")
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
tm.assert_series_equal(expected, g.cumcount())
tm.assert_series_equal(expected, sg.cumcount())
def test_cumcount_mi(self):
mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
df = DataFrame([["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=mi)
g = df.groupby("A")
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=mi)
tm.assert_series_equal(expected, g.cumcount())
tm.assert_series_equal(expected, sg.cumcount())
def test_cumcount_groupby_not_col(self):
df = DataFrame(
[["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=[0] * 5
)
g = df.groupby([0, 0, 0, 1, 0])
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
tm.assert_series_equal(expected, g.cumcount())
tm.assert_series_equal(expected, sg.cumcount())
def test_ngroup(self):
df = DataFrame({"A": list("aaaba")})
g = df.groupby("A")
sg = g.A
expected = Series([0, 0, 0, 1, 0])
tm.assert_series_equal(expected, g.ngroup())
tm.assert_series_equal(expected, sg.ngroup())
def test_ngroup_distinct(self):
df = DataFrame({"A": list("abcde")})
g = df.groupby("A")
sg = g.A
expected = Series(range(5), dtype="int64")
tm.assert_series_equal(expected, g.ngroup())
tm.assert_series_equal(expected, sg.ngroup())
def test_ngroup_one_group(self):
df = DataFrame({"A": [0] * 5})
g = df.groupby("A")
sg = g.A
expected = Series([0] * 5)
tm.assert_series_equal(expected, g.ngroup())
tm.assert_series_equal(expected, sg.ngroup())
def test_ngroup_empty(self):
ge = DataFrame().groupby(level=0)
se = Series(dtype=object).groupby(level=0)
# edge case, as this is usually considered float
e = Series(dtype="int64")
tm.assert_series_equal(e, ge.ngroup())
tm.assert_series_equal(e, se.ngroup())
def test_ngroup_series_matches_frame(self):
df = DataFrame({"A": list("aaaba")})
s = Series(list("aaaba"))
tm.assert_series_equal(df.groupby(s).ngroup(), s.groupby(s).ngroup())
def test_ngroup_dupe_index(self):
df = DataFrame({"A": list("aaaba")}, index=[0] * 5)
g = df.groupby("A")
sg = g.A
expected = Series([0, 0, 0, 1, 0], index=[0] * 5)
tm.assert_series_equal(expected, g.ngroup())
tm.assert_series_equal(expected, sg.ngroup())
def test_ngroup_mi(self):
mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
df = DataFrame({"A": list("aaaba")}, index=mi)
g = df.groupby("A")
sg = g.A
expected = Series([0, 0, 0, 1, 0], index=mi)
tm.assert_series_equal(expected, g.ngroup())
tm.assert_series_equal(expected, sg.ngroup())
def test_ngroup_groupby_not_col(self):
df = DataFrame({"A": list("aaaba")}, index=[0] * 5)
g = df.groupby([0, 0, 0, 1, 0])
sg = g.A
expected = Series([0, 0, 0, 1, 0], index=[0] * 5)
tm.assert_series_equal(expected, g.ngroup())
tm.assert_series_equal(expected, sg.ngroup())
def test_ngroup_descending(self):
df = DataFrame(["a", "a", "b", "a", "b"], columns=["A"])
g = df.groupby(["A"])
ascending = Series([0, 0, 1, 0, 1])
descending = Series([1, 1, 0, 1, 0])
tm.assert_series_equal(descending, (g.ngroups - 1) - ascending)
tm.assert_series_equal(ascending, g.ngroup(ascending=True))
tm.assert_series_equal(descending, g.ngroup(ascending=False))
def test_ngroup_matches_cumcount(self):
# verify one manually-worked out case works
df = DataFrame(
[["a", "x"], ["a", "y"], ["b", "x"], ["a", "x"], ["b", "y"]],
columns=["A", "X"],
)
g = df.groupby(["A", "X"])
g_ngroup = g.ngroup()
g_cumcount = g.cumcount()
expected_ngroup = Series([0, 1, 2, 0, 3])
expected_cumcount = Series([0, 0, 0, 1, 0])
tm.assert_series_equal(g_ngroup, expected_ngroup)
tm.assert_series_equal(g_cumcount, expected_cumcount)
def test_ngroup_cumcount_pair(self):
# brute force comparison for all small series
for p in product(range(3), repeat=4):
df = DataFrame({"a": p})
g = df.groupby(["a"])
order = sorted(set(p))
ngroupd = [order.index(val) for val in p]
cumcounted = [p[:i].count(val) for i, val in enumerate(p)]
tm.assert_series_equal(g.ngroup(), Series(ngroupd))
tm.assert_series_equal(g.cumcount(), Series(cumcounted))
def test_ngroup_respects_groupby_order(self):
np.random.seed(0)
df = DataFrame({"a": np.random.choice(list("abcdef"), 100)})
for sort_flag in (False, True):
g = df.groupby(["a"], sort=sort_flag)
df["group_id"] = -1
df["group_index"] = -1
for i, (_, group) in enumerate(g):
df.loc[group.index, "group_id"] = i
for j, ind in enumerate(group.index):
df.loc[ind, "group_index"] = j
tm.assert_series_equal(Series(df["group_id"].values), g.ngroup())
tm.assert_series_equal(Series(df["group_index"].values), g.cumcount())
@pytest.mark.parametrize(
"datetimelike",
[
[Timestamp(f"2016-05-{i:02d} 20:09:25+00:00") for i in range(1, 4)],
[Timestamp(f"2016-05-{i:02d} 20:09:25") for i in range(1, 4)],
[Timedelta(x, unit="h") for x in range(1, 4)],
[Period(freq="2W", year=2017, month=x) for x in range(1, 4)],
],
)
def test_count_with_datetimelike(self, datetimelike):
# test for #13393, where DataframeGroupBy.count() fails
# when counting a datetimelike column.
df = DataFrame({"x": ["a", "a", "b"], "y": datetimelike})
res = df.groupby("x").count()
expected = DataFrame({"y": [2, 1]}, index=["a", "b"])
expected.index.name = "x"
tm.assert_frame_equal(expected, res)
def test_count_with_only_nans_in_first_group(self):
# GH21956
df = DataFrame({"A": [np.nan, np.nan], "B": ["a", "b"], "C": [1, 2]})
result = df.groupby(["A", "B"]).C.count()
mi = MultiIndex(levels=[[], ["a", "b"]], codes=[[], []], names=["A", "B"])
expected = Series([], index=mi, dtype=np.int64, name="C")
tm.assert_series_equal(result, expected, check_index_type=False)
def test_count_groupby_column_with_nan_in_groupby_column(self):
# https://github.com/pandas-dev/pandas/issues/32841
df = DataFrame({"A": [1, 1, 1, 1, 1], "B": [5, 4, np.NaN, 3, 0]})
res = df.groupby(["B"]).count()
expected = DataFrame(
index=Index([0.0, 3.0, 4.0, 5.0], name="B"), data={"A": [1, 1, 1, 1]}
)
tm.assert_frame_equal(expected, res)
def test_groupby_count_dateparseerror(self):
dr = date_range(start="1/1/2012", freq="5min", periods=10)
# BAD Example, datetimes first
ser = Series(np.arange(10), index=[dr, np.arange(10)])
grouped = ser.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
ser = Series(np.arange(10), index=[np.arange(10), dr])
grouped = ser.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
tm.assert_series_equal(result, expected)
def test_groupby_timedelta_cython_count():
df = DataFrame(
{"g": list("ab" * 2), "delt": np.arange(4).astype("timedelta64[ns]")}
)
expected = Series([2, 2], index=Index(["a", "b"], name="g"), name="delt")
result = df.groupby("g").delt.count()
tm.assert_series_equal(expected, result)
def test_count():
n = 1 << 15
dr = date_range("2015-08-30", periods=n // 10, freq="T")
df = DataFrame(
{
"1st": np.random.choice(list(ascii_lowercase), n),
"2nd": np.random.randint(0, 5, n),
"3rd": np.random.randn(n).round(3),
"4th": np.random.randint(-10, 10, n),
"5th": np.random.choice(dr, n),
"6th": np.random.randn(n).round(3),
"7th": np.random.randn(n).round(3),
"8th": np.random.choice(dr, n) - np.random.choice(dr, 1),
"9th": np.random.choice(list(ascii_lowercase), n),
}
)
for col in df.columns.drop(["1st", "2nd", "4th"]):
df.loc[np.random.choice(n, n // 10), col] = np.nan
df["9th"] = df["9th"].astype("category")
for key in ["1st", "2nd", ["1st", "2nd"]]:
left = df.groupby(key).count()
right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
tm.assert_frame_equal(left, right)
def test_count_non_nulls():
# GH#5610
# count counts non-nulls
df = DataFrame(
[[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, np.nan]],
columns=["A", "B", "C"],
)
count_as = df.groupby("A").count()
count_not_as = df.groupby("A", as_index=False).count()
expected = DataFrame([[1, 2], [0, 0]], columns=["B", "C"], index=[1, 3])
expected.index.name = "A"
tm.assert_frame_equal(count_not_as, expected.reset_index())
tm.assert_frame_equal(count_as, expected)
count_B = df.groupby("A")["B"].count()
tm.assert_series_equal(count_B, expected["B"])
def test_count_object():
df = DataFrame({"a": ["a"] * 3 + ["b"] * 3, "c": [2] * 3 + [3] * 3})
result = df.groupby("c").a.count()
expected = Series([3, 3], index=Index([2, 3], name="c"), name="a")
tm.assert_series_equal(result, expected)
df = DataFrame({"a": ["a", np.nan, np.nan] + ["b"] * 3, "c": [2] * 3 + [3] * 3})
result = df.groupby("c").a.count()
expected = Series([1, 3], index=Index([2, 3], name="c"), name="a")
tm.assert_series_equal(result, expected)
def test_count_cross_type():
# GH8169
vals = np.hstack(
(np.random.randint(0, 5, (100, 2)), np.random.randint(0, 2, (100, 2)))
)
df = DataFrame(vals, columns=["a", "b", "c", "d"])
df[df == 2] = np.nan
expected = df.groupby(["c", "d"]).count()
for t in ["float32", "object"]:
df["a"] = df["a"].astype(t)
df["b"] = df["b"].astype(t)
result = df.groupby(["c", "d"]).count()
tm.assert_frame_equal(result, expected)
def test_lower_int_prec_count():
df = DataFrame(
{
"a": np.array([0, 1, 2, 100], np.int8),
"b": np.array([1, 2, 3, 6], np.uint32),
"c": np.array([4, 5, 6, 8], np.int16),
"grp": list("ab" * 2),
}
)
result = df.groupby("grp").count()
expected = DataFrame(
{"a": [2, 2], "b": [2, 2], "c": [2, 2]}, index=Index(list("ab"), name="grp")
)
tm.assert_frame_equal(result, expected)
def test_count_uses_size_on_exception():
class RaisingObjectException(Exception):
pass
class RaisingObject:
def __init__(self, msg="I will raise inside Cython"):
super().__init__()
self.msg = msg
def __eq__(self, other):
# gets called in Cython to check that raising calls the method
raise RaisingObjectException(self.msg)
df = DataFrame({"a": [RaisingObject() for _ in range(4)], "grp": list("ab" * 2)})
result = df.groupby("grp").count()
expected = DataFrame({"a": [2, 2]}, index=Index(list("ab"), name="grp"))
tm.assert_frame_equal(result, expected)
|
nilq/baby-python
|
python
|
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Terser rules for Bazel
The Terser rules run the Terser JS minifier with Bazel.
Wraps the Terser CLI documented at https://github.com/terser-js/terser#command-line-usage
## Installation
Add the `@bazel/terser` npm package to your `devDependencies` in `package.json`.
## Installing with user-managed dependencies
If you didn't use the `yarn_install` or `npm_install` rule, you'll have to declare a rule in your root `BUILD.bazel` file to execute terser:
```python
# Create a terser rule to use in terser_minified#terser_bin
# attribute when using user-managed dependencies
nodejs_binary(
name = "terser_bin",
entry_point = "//:node_modules/terser/bin/uglifyjs",
# Point bazel to your node_modules to find the entry point
data = ["//:node_modules"],
)
```
"""
load(":terser_minified.bzl", _terser_minified = "terser_minified")
terser_minified = _terser_minified
|
nilq/baby-python
|
python
|
"""
Referral answer related API endpoints.
"""
from django.db.models import Q
from django.http import Http404
from django_fsm import TransitionNotAllowed
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.permissions import BasePermission, IsAuthenticated
from rest_framework.response import Response
from .. import models
from ..forms import ReferralAnswerForm
from ..serializers import ReferralAnswerSerializer
from .permissions import NotAllowed
class CanCreateAnswer(BasePermission):
"""Permission to create a ReferralAnswer through the API."""
def has_permission(self, request, view):
"""
Members of a unit related to a referral can create answers for said referral.
"""
referral = view.get_referral(request)
return (
request.user.is_authenticated
and referral.units.filter(members__id=request.user.id).exists()
)
class CanRetrieveAnswer(BasePermission):
"""Permission to retrieve a ReferralAnswer through the API."""
def has_permission(self, request, view):
"""
Members of a unit related to a referral can retrieve answers for said referral.
"""
answer = view.get_object()
return (
request.user.is_authenticated
and answer.referral.units.filter(members__id=request.user.id).exists()
)
class CanUpdateAnswer(BasePermission):
"""Permission to update a ReferralAnswer through the API."""
def has_permission(self, request, view):
"""
Only the answer's author can update a referral answer.
"""
answer = view.get_object()
return request.user == answer.created_by
class ReferralAnswerViewSet(viewsets.ModelViewSet):
"""
API endpoints for referral answers.
"""
permission_classes = [NotAllowed]
queryset = models.ReferralAnswer.objects.all()
serializer_class = ReferralAnswerSerializer
def get_permissions(self):
"""
Manage permissions for default methods separately, delegating to @action defined
permissions for other actions.
"""
if self.action == "list":
permission_classes = [IsAuthenticated]
elif self.action == "create":
permission_classes = [CanCreateAnswer]
elif self.action == "retrieve":
permission_classes = [CanRetrieveAnswer]
elif self.action == "update":
permission_classes = [CanUpdateAnswer]
else:
try:
permission_classes = getattr(self, self.action).kwargs.get(
"permission_classes"
)
except AttributeError:
permission_classes = self.permission_classes
return [permission() for permission in permission_classes]
def get_referral(self, request):
"""
Helper: get the related referral, return an error if it does not exist.
"""
referral_id = request.data.get("referral") or request.query_params.get(
"referral"
)
try:
referral = models.Referral.objects.get(id=referral_id)
except models.Referral.DoesNotExist as error:
raise Http404(
f"Referral {request.data.get('referral')} not found"
) from error
return referral
def list(self, request, *args, **kwargs):
"""
Let users get a list of referral answers. Users need to filter them by their related
referral. We use the queryset & filter to manage what a given user is allowed to see.
"""
referral_id = self.request.query_params.get("referral", None)
if referral_id is None:
return Response(
status=400,
data={
"errors": ["ReferralAnswer list requests need a referral parameter"]
},
)
queryset = (
self.get_queryset()
.filter(
# The referral author is only allowed to see published answers
Q(
referral__user=request.user,
state=models.ReferralAnswerState.PUBLISHED,
referral__id=referral_id,
)
# Members of the referral's linked units are allowed to see all answers
| Q(
referral_id=referral_id,
referral__units__members=request.user,
)
)
.distinct()
)
queryset = queryset.order_by("-created_at")
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
def create(self, request, *args, **kwargs):
"""
Create a new referral answer as the client issues a POST on the referralanswers endpoint.
"""
# Make sure the referral exists and return an error otherwise.
referral = self.get_referral(request)
form = ReferralAnswerForm(
{
"content": request.data.get("content") or "",
"created_by": request.user,
"referral": referral,
"state": models.ReferralAnswerState.DRAFT,
},
)
if not form.is_valid():
return Response(status=400, data=form.errors)
referral_answer = form.save()
# Make sure the referral can support a new draft before creating attachments.
try:
referral.draft_answer(referral_answer)
referral.save()
except TransitionNotAllowed:
# If the referral cannot support a new draft answer, delete the answer
# we just created.
referral_answer.delete()
return Response(
status=400,
data={
"errors": {
f"Transition DRAFT_ANSWER not allowed from state {referral.state}."
}
},
)
for attachment_dict in request.data.get("attachments") or []:
try:
referral_answer.attachments.add(
models.ReferralAnswerAttachment.objects.get(
id=attachment_dict["id"]
)
)
referral_answer.save()
except models.ReferralAnswerAttachment.DoesNotExist:
# Since we have already created the ReferralAnswer, there's not much of a point
# in bailing out now with an error: we'd rather fail silently and let the user
# re-add the attachment if needed.
pass
return Response(status=201, data=ReferralAnswerSerializer(referral_answer).data)
def update(self, request, *args, **kwargs):
"""
Update an existing referral answer.
"""
instance = self.get_object()
# Make sure the referral exists and return an error otherwise.
referral = self.get_referral(request)
# Users can only modify their own referral answers. For other users' answers,
# they're expected to use the "Revise" feature
if not request.user.id == instance.created_by.id:
return Response(status=403)
form = ReferralAnswerForm(
{
"content": request.data.get("content") or "",
"created_by": request.user,
"referral": referral,
"state": instance.state,
},
instance=instance,
)
if not form.is_valid():
return Response(status=400, data=form.errors)
referral_answer = form.save()
return Response(status=200, data=ReferralAnswerSerializer(referral_answer).data)
@action(
detail=True,
methods=["post"],
permission_classes=[CanUpdateAnswer],
)
# pylint: disable=invalid-name
def remove_attachment(self, request, pk):
"""
Remove an attachment from this answer.
We're using an action route on the ReferralAnswer instead of a DELETE on the attachment
as the attachment can be linked to more than one answer.
"""
answer = self.get_object()
if answer.state == models.ReferralAnswerState.PUBLISHED:
return Response(
status=400,
data={
"errors": ["attachments cannot be removed from a published answer"]
},
)
try:
attachment = answer.attachments.get(id=request.data.get("attachment"))
except models.ReferralAnswerAttachment.DoesNotExist:
return Response(
status=400,
data={
"errors": [
(
f"referral answer attachment {request.data.get('attachment')} "
"does not exist"
)
]
},
)
answer.attachments.remove(attachment)
answer.refresh_from_db()
return Response(status=200, data=ReferralAnswerSerializer(answer).data)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='BookInfo',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='PeopleInfo',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=10)),
('gender', models.BooleanField()),
('book', models.ForeignKey(to='Book.BookInfo')),
],
),
]
|
nilq/baby-python
|
python
|
from tests.system.common import CondoorTestCase, StopTelnetSrv, StartTelnetSrv
from tests.dmock.dmock import SunHandler
from tests.utils import remove_cache_file
import condoor
class TestSunConnection(CondoorTestCase):
@StartTelnetSrv(SunHandler, 10023)
def setUp(self):
CondoorTestCase.setUp(self)
@StopTelnetSrv()
def tearDown(self):
pass
def test_sun_connection(self):
remove_cache_file()
urls = ["telnet://admin:admin@127.0.0.1:10023", "telnet://admin:admin@host1"]
conn = condoor.Connection("host", urls, log_session=self.log_session, log_level=self.log_level)
with self.assertRaises(condoor.ConnectionTimeoutError):
conn.connect(self.logfile_condoor)
conn.disconnect()
#with self.assertRaises(condoor.ConnectionTimeoutError):
# conn.reconnect(30)
    def test_sun_connection_wrong_password(self):
urls = ["telnet://admin:wrong@127.0.0.1:10023", "telnet://admin:admin@host1"]
conn = condoor.Connection("host", urls, log_session=self.log_session, log_level=self.log_level)
with self.assertRaises(condoor.ConnectionAuthenticationError):
conn.connect(self.logfile_condoor)
conn.disconnect()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# encoding=utf-8
#codeby 道长且阻
#email @ydhcui/QQ664284092
from core.plugin import BaseHostPlugin
import re
import socket
import binascii
import hashlib
import struct
import time
class MongodbNoAuth(BaseHostPlugin):
bugname = "Mongodb 未授权访问"
bugrank = "高危"
def filter(self,host):
return host.port == 27017 or host.service == 'mongodb'
def verify(self,host,user='',pwd='',timeout=10):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
try:
sock.connect((host.host,int(host.port)))
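            # Raw legacy OP_QUERY message (opcode 2004) against admin.$cmd asking for
            # `ismaster`; this works pre-auth and confirms the target speaks the MongoDB wire protocol.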
data = binascii.a2b_hex("3a000000a741000000000000d4070000"
"0000000061646d696e2e24636d640000"
"000000ffffffff130000001069736d61"
"73746572000100000000")
sock.send(data)
result = sock.recv(1024)
if b"ismaster" in result:
data = binascii.a2b_hex("480000000200000000000000d40700"
"000000000061646d696e2e24636d64"
"000000000001000000210000000267"
"65744c6f6700100000007374617274"
"75705761726e696e67730000")
sock.send(data)
result = sock.recv(1024)
if b"totalLinesWritten" in result:
self.bugaddr = "%s:%s@%s:%s"%(user,pwd,host.host,host.port)
self.bugreq = "username:%s,password:%s" % (user,pwd)
self.bugres = str(result)
return True
except Exception as e:
print(e)
finally:
sock.close()
|
nilq/baby-python
|
python
|
"""
Example of how to make a MuJoCo environment using the Gym library.
"""
from pathlib import Path
from gym.envs.mujoco.mujoco_env import MujocoEnv
from gym.utils import EzPickle
class SpiderEnv(MujocoEnv, EzPickle):
"""
Spider environment for RL. The task is for the spider to move to the target button.
The agent will get a sparse reward of 1.0 for stepping on the button.
"""
def __init__(self, action_repeat=1):
"""
Constructor for :class:`SpiderEnv`.
:param action_repeat: Number of times action should be repeated in MuJoCo
between each RL time step
"""
EzPickle.__init__(self)
self._has_button_been_pressed_before = False
MujocoEnv.__init__(
self,
str(Path("../../mujoco/spider.xml").resolve()),
frame_skip=action_repeat,
)
def reset_model(self):
"""
Reset the spider's degrees of freedom:
- qpos (joint positions); and
- qvel (joint velocities)
"""
self.set_state(self.init_qpos, self.init_qvel)
self._has_button_been_pressed_before = False
return self.state_vector()
def step(self, _action):
"""
Accepts an :param:`_action`, advances the environment by a single RL time step,
and returns a tuple (observation, reward, done, info).
:param _action: An act provided by the RL agent
:return: A tuple containing an observation, a reward, whether the episode has
ended, and auxiliary information
"""
self.do_simulation(_action, self.frame_skip)
_observation = self.state_vector()
_reward = self._reward()
_done = self._has_button_been_pressed_before or self._is_button_pressed()
if not self._has_button_been_pressed_before and _done:
self._has_button_been_pressed_before = True
return _observation, _reward, _done, {}
def _is_button_pressed(self):
"""
        Returns whether the button is currently being pressed.
:return: True if the button is currently pressed, False otherwise
"""
return self.data.sensordata[0] > 0
def _reward(self):
"""
Returns a sparse reward from the environment.
i.e if the button is being pressed, return 1.0 otherwise return 0.0.
:return: A reward from the environment
"""
return float(self._is_button_pressed())
# Example of how the environment could be used
if __name__ == "__main__":
env = SpiderEnv(action_repeat=20)
for episode in range(3):
observation = env.reset()
for t in range(1000):
# Image observation
# See `gym.envs.mujoco.mujoco_env.MujocoEnv` for more info about params
pixels = env.render()
print("Observation: ", observation)
# Figure out an action...
action = env.action_space.sample()
print("Action: ", action)
observation, reward, done, info = env.step(action)
if done:
print("Episode {} finished after {} timesteps".format(episode, t + 1))
break
env.close()
|
nilq/baby-python
|
python
|
# coding: utf-8
"""
Jamf Pro API
    ## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used solely for documentation purposes. # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import jamf
from jamf.models.computer_general import ComputerGeneral # noqa: E501
from jamf.rest import ApiException
class TestComputerGeneral(unittest.TestCase):
"""ComputerGeneral unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ComputerGeneral
        include_optional is a boolean: when False only required
params are included, when True both required and
optional params are included """
# model = jamf.models.computer_general.ComputerGeneral() # noqa: E501
if include_optional :
return ComputerGeneral(
name = 'Boalime',
last_ip_address = '247.185.82.186',
last_reported_ip = '247.185.82.186',
jamf_binary_version = '9.27',
platform = 'Mac',
barcode1 = '5 12345 678900',
barcode2 = '5 12345 678900',
asset_tag = '304822',
remote_management = jamf.models.computer_remote_management.ComputerRemoteManagement(
managed = True,
management_username = 'rootname',
management_password = 'example password', ),
supervised = True,
mdm_capable = jamf.models.computer_mdm_capability.ComputerMdmCapability(
capable = True,
capable_users = ["admin","rootadmin"], ),
report_date = '2018-10-31T18:04:13Z',
last_contact_time = '2018-10-31T18:04:13Z',
last_cloud_backup_date = '2018-10-31T18:04:13Z',
last_enrolled_date = '2018-10-31T18:04:13Z',
mdm_profile_expiration = '2018-10-31T18:04:13Z',
initial_entry_date = 'Wed Oct 31 00:00:00 GMT 2018',
distribution_point = 'distribution point name',
enrollment_method = jamf.models.enrollment_method.EnrollmentMethod(
id = '1',
object_name = 'user@domain.com',
object_type = 'User-initiated - no invitation', ),
site = jamf.models.v1_site.V1Site(
id = '1',
name = 'Eau Claire', ),
itunes_store_account_active = True,
enrolled_via_automated_device_enrollment = True,
user_approved_mdm = True,
extension_attributes = [
jamf.models.computer_extension_attribute.ComputerExtensionAttribute(
definition_id = '23',
name = 'Some Attribute',
description = 'Some Attribute defines how much Foo impacts Bar.',
enabled = True,
multi_value = True,
values = ["foo","bar"],
data_type = 'STRING',
options = ["foo","bar"],
input_type = 'TEXT', )
]
)
else :
return ComputerGeneral(
)
def testComputerGeneral(self):
"""Test ComputerGeneral"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
from mock.mock import patch
import os
import pytest
import ca_test_common
import ceph_volume_simple_activate
fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
fake_id = '42'
fake_uuid = '0c4a7eca-0c2a-4c12-beff-08a80f064c52'
fake_path = '/etc/ceph/osd/{}-{}.json'.format(fake_id, fake_uuid)
class TestCephVolumeSimpleActivateModule(object):
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
def test_with_check_mode(self, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
'_ansible_check_mode': True
})
m_exit_json.side_effect = ca_test_common.exit_json
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert not result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == 0
assert not result['stdout']
assert not result['stderr']
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_with_failure(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = 'error'
rc = 2
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == rc
assert result['stderr'] == stderr
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_all_osds(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_all': True
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--all']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.object(os.path, 'exists', return_value=True)
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_path_exists(self, m_run_command, m_exit_json, m_os_path):
ca_test_common.set_module_args({
'path': fake_path
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--file', fake_path]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.object(os.path, 'exists', return_value=False)
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
def test_activate_path_not_exists(self, m_fail_json, m_os_path):
ca_test_common.set_module_args({
'path': fake_path
})
m_fail_json.side_effect = ca_test_common.fail_json
with pytest.raises(ca_test_common.AnsibleFailJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['msg'] == '{} does not exist'.format(fake_path)
assert result['rc'] == 1
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_without_systemd(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
'systemd': False
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid, '--no-systemd']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
@patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_with_container(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == [fake_container_binary,
'run', '--rm', '--privileged',
'--ipc=host', '--net=host',
'-v', '/etc/ceph:/etc/ceph:z',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'-v', '/run/lvm/:/run/lvm/',
'-v', '/run/lock/lvm/:/run/lock/lvm/',
'--entrypoint=ceph-volume', fake_container_image,
'--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
|
nilq/baby-python
|
python
|
import glob
import matplotlib.pyplot as plt
import pickle
import numpy as np
import os
import sys
from argparse import ArgumentParser
from utils import get_params_dict
def parseArgs():
"""Parse command line arguments
Returns
-------
a : argparse.ArgumentParser
"""
parser = ArgumentParser(description='Post process the ROC and PRC data to generate the corresponding plots.')
parser.add_argument('-v', '--verbose',dest='verbose', action='store_true',
default=False, help="verbose output [default is quiet running]")
parser.add_argument('-o','--outDir',dest='out_dir',type=str,
        action='store', help="output directory. Default: results/ directory (will be created if it doesn't exist).", default='results')
parser.add_argument('-t','--type', dest='type',type=str,
action='store',help="Plot type: either ROC or PRC. Default: ROC", default='ROC')
parser.add_argument('--suffix', dest='suffix',type=str,
action='store',help="A unique suffix to add to plot name. Default '' (empty string)", default='')
parser.add_argument('--curve20',dest='useCurve20', action='store_true',
        default=False, help="Plot ROC/PRC curve capped at 0.2 on the X-axis (zoom-in version). Default: False")
parser.add_argument('infofile',type=str,
help='The text file containing names and locations of each experiment for which the ROC/PRC curve will be generated.')
args = parser.parse_args()
return args
def roc_prc_curve(arg_space, exp_dict):
suffix = '_'+arg_space.suffix if len(arg_space.suffix) > 0 else arg_space.suffix
curve20 = '_curve20' if arg_space.useCurve20 else ''
#some colors to be used for individual curves.
colors = ['darkorange', 'saddlebrown', 'crimson', 'rebeccapurple', 'limegreen', 'teal', 'dimgray']
out_dir = arg_space.out_dir.strip('/')+'/'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
pckl_text = ''
xval,yval = '',''
areaType = ''
if arg_space.type == 'ROC':
areaType = 'AUC'
pckl_text = 'roc'
xval,yval = 'fpr','tpr'
plt.plot([0,1],[0,1],'k--')
elif arg_space.type == 'PRC':
areaType = 'AUPRC'
pckl_text = 'prc'
xval,yval = 'recall','precision'
plt.plot([0,1],[0.5,0.5],'k--')
else:
print('invalid argument! --type can only have one of the following values: ROC or PRC')
return
count = 0
for key in exp_dict:
if arg_space.verbose:
            print('Running for: %s' % key)
label = key
with open(exp_dict[key]+'/modelRes_%s.pckl'%pckl_text, 'rb') as f:
pckl = pickle.load(f)
stats = np.loadtxt(exp_dict[key]+'/modelRes_results.txt',delimiter='\t',skiprows=1)
Xval = pckl[xval]
Yval = pckl[yval]
if arg_space.type == 'ROC':
test_stat = round(stats[-2],2)
else:
test_stat = round(stats[-1],2)
clr = colors[count]
plt.plot(Xval, Yval, lw=1, label='%s (%s = %.2f)'%(label,areaType,test_stat), color=clr)
count += 1
plt.grid(which='major',axis='both',linestyle='--', linewidth=1)
if arg_space.useCurve20:
plt.xlim(0, 0.2)
if arg_space.type == 'ROC':
plt.ylim(0, 0.6)
plt.xlabel('False positive rate',fontsize=10.5)
plt.ylabel('True positive rate',fontsize=10.5)
plt.legend(loc=4, fontsize=10.5)
else:
plt.ylim(0.5, 1)
plt.xlabel('Recall',fontsize=10.5)
plt.ylabel('Precision',fontsize=10.5)
plt.legend(loc=1, fontsize=10.5)
#plt.title('Precision-Recall curves')
else:
plt.xlim(0, 1)
plt.ylim(0, 1)
if arg_space.type == 'ROC':
plt.xlabel('False positive rate',fontsize=10.5)
plt.ylabel('True positive rate',fontsize=10.5)
plt.legend(loc=4, fontsize=10.5)
else:
plt.xlabel('Recall',fontsize=10.5)
plt.ylabel('Precision',fontsize=10.5)
plt.legend(loc=3, fontsize=10.5)
#plt.title('Precision-Recall curves')
plt.savefig(out_dir+'%s_curves_selected%s%s.pdf'%(pckl_text.upper(),curve20,suffix))
plt.savefig(out_dir+'%s_curves_selected%s%s.png'%(pckl_text.upper(),curve20,suffix))
plt.clf()
def main():
arg_space = parseArgs()
#create params dictionary
params_dict = get_params_dict(arg_space.infofile)
#print(params_dict)
roc_prc_curve(arg_space, params_dict)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from django.shortcuts import render_to_response, render
from django.contrib.auth.decorators import login_required
from grid_core.managers import GridManager
@login_required
def account_deshbord(request):
allfriends = GridManager.get_friends_user(request.user)
allgroups = GridManager.get_group_user(request.user)
return render(
request, "grid_my/dashbord-my.html",
{'friends': allfriends, 'groups': allgroups}
)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pydbgen/pbclass/data_define.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pydbgen/pbclass/data_define.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n!pydbgen/pbclass/data_define.proto\x1a google/protobuf/descriptor.proto:0\n\x07is_date\x12\x1d.google.protobuf.FieldOptions\x18\xd7\x86\x03 \x01(\x08:4\n\x0bis_datetime\x12\x1d.google.protobuf.FieldOptions\x18\xd8\x86\x03 \x01(\x08\x62\x06proto3')
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
IS_DATE_FIELD_NUMBER = 50007
is_date = _descriptor.FieldDescriptor(
name='is_date', full_name='is_date', index=0,
number=50007, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR)
IS_DATETIME_FIELD_NUMBER = 50008
is_datetime = _descriptor.FieldDescriptor(
name='is_datetime', full_name='is_datetime', index=1,
number=50008, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR)
DESCRIPTOR.extensions_by_name['is_date'] = is_date
DESCRIPTOR.extensions_by_name['is_datetime'] = is_datetime
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(is_date)
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(is_datetime)
# @@protoc_insertion_point(module_scope)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import sys
import logging
import getpass
from optparse import OptionParser
import sleekxmpp
# Python versions before 3.0 do not use UTF-8 encoding
# by default. To ensure that Unicode is handled properly
# throughout SleekXMPP, we will set the default encoding
# ourselves to UTF-8.
if sys.version_info < (3, 0):
from sleekxmpp.util.misc_ops import setdefaultencoding
setdefaultencoding('utf8')
else:
raw_input = input
class IBBReceiver(sleekxmpp.ClientXMPP):
"""
A basic example of creating and using an in-band bytestream.
"""
def __init__(self, jid, password):
sleekxmpp.ClientXMPP.__init__(self, jid, password)
self.register_plugin('xep_0030') # Service Discovery
self.register_plugin('xep_0047', {
'auto_accept': True
}) # In-band Bytestreams
# The session_start event will be triggered when
# the bot establishes its connection with the server
# and the XML streams are ready for use. We want to
        # listen for this event so that we can initialize
# our roster.
self.add_event_handler("session_start", self.start)
self.add_event_handler("ibb_stream_start", self.stream_opened, threaded=True)
self.add_event_handler("ibb_stream_data", self.stream_data)
def start(self, event):
"""
Process the session_start event.
Typical actions for the session_start event are
requesting the roster and broadcasting an initial
presence stanza.
Arguments:
event -- An empty dictionary. The session_start
event does not provide any additional
data.
"""
self.send_presence()
self.get_roster()
def accept_stream(self, iq):
"""
Check that it is ok to accept a stream request.
Controlling stream acceptance can be done via either:
- setting 'auto_accept' to False in the plugin
configuration. The default is True.
- setting 'accept_stream' to a function which accepts
an Iq stanza as its argument, like this one.
The accept_stream function will be used if it exists, and the
auto_accept value will be used otherwise.
"""
return True
def stream_opened(self, stream):
print('Stream opened: %s from %s' % (stream.sid, stream.peer_jid))
# You could run a loop reading from the stream using stream.recv(),
# or use the ibb_stream_data event.
def stream_data(self, event):
print(event['data'])
if __name__ == '__main__':
# Setup the command line arguments.
optp = OptionParser()
# Output verbosity options.
optp.add_option('-q', '--quiet', help='set logging to ERROR',
action='store_const', dest='loglevel',
const=logging.ERROR, default=logging.INFO)
optp.add_option('-d', '--debug', help='set logging to DEBUG',
action='store_const', dest='loglevel',
const=logging.DEBUG, default=logging.INFO)
optp.add_option('-v', '--verbose', help='set logging to COMM',
action='store_const', dest='loglevel',
const=5, default=logging.INFO)
# JID and password options.
optp.add_option("-j", "--jid", dest="jid",
help="JID to use")
optp.add_option("-p", "--password", dest="password",
help="password to use")
opts, args = optp.parse_args()
# Setup logging.
logging.basicConfig(level=opts.loglevel,
format='%(levelname)-8s %(message)s')
if opts.jid is None:
opts.jid = raw_input("Username: ")
if opts.password is None:
opts.password = getpass.getpass("Password: ")
xmpp = IBBReceiver(opts.jid, opts.password)
# If you are working with an OpenFire server, you may need
# to adjust the SSL version used:
# xmpp.ssl_version = ssl.PROTOCOL_SSLv3
# If you want to verify the SSL certificates offered by a server:
# xmpp.ca_certs = "path/to/ca/cert"
# Connect to the XMPP server and start processing XMPP stanzas.
if xmpp.connect():
# If you do not have the dnspython library installed, you will need
# to manually specify the name of the server if it does not match
# the one in the JID. For example, to use Google Talk you would
# need to use:
#
# if xmpp.connect(('talk.google.com', 5222)):
# ...
xmpp.process(block=True)
print("Done")
else:
print("Unable to connect.")
|
nilq/baby-python
|
python
|
import datetime as dt
from datetime import datetime
from datetime import timedelta
from .error import WinnowError
valid_rel_date_values = (
"last_full_week",
"last_two_full_weeks",
"last_7_days",
"last_14_days",
"last_30_days",
"last_45_days",
"last_60_days",
"next_7_days",
"next_14_days",
"next_30_days",
"next_45_days",
"next_60_days",
'next_week',
"current_week",
"current_month",
"current_and_next_month",
"current_year",
"last_month",
"next_month",
"next_year",
"past",
"past_and_today",
"future",
"future_and_today",
"yesterday",
"today",
"tomorrow",
"past_and_future",
"two_weeks_past_end_of_month",
)
def interpret_date_range(drange):
drange = drange.lower().replace(' ', '_')
today = datetime.now()
a_few_seconds = timedelta(seconds=3)
one_day = timedelta(days=1)
start_of_day = dt.time()
beginning_today = datetime.combine(today.date(), start_of_day)
end_today = beginning_today + one_day
weekstart = datetime.combine(today.date(), start_of_day) - timedelta(days=(today.isoweekday() % 7))
seven_days = timedelta(days=7)
fourteen_days = timedelta(days=14)
thirty_days = timedelta(days=30)
fortyfive_days = timedelta(days=45)
if drange == 'last_full_week':
return weekstart - seven_days, weekstart
elif drange == 'last_two_full_weeks':
return weekstart - fourteen_days, weekstart
elif drange == 'last_7_days':
return today - seven_days, today + a_few_seconds
elif drange == 'last_14_days':
return today - fourteen_days, today + a_few_seconds
elif drange == 'last_30_days':
return today - thirty_days, today + a_few_seconds
elif drange == 'last_45_days':
return today - fortyfive_days, today + a_few_seconds
elif drange == 'last_60_days':
return today - (2 * thirty_days), today + a_few_seconds
elif drange == 'next_7_days':
return today, today + seven_days
elif drange == 'next_14_days':
return today, today + fourteen_days
elif drange == 'next_30_days':
return today, today + thirty_days
elif drange == 'next_45_days':
return today, today + fortyfive_days
elif drange == 'next_60_days':
return today, today + (2 * thirty_days)
elif drange == 'next_week':
return weekstart + seven_days, weekstart + seven_days + seven_days
elif drange == 'current_week':
return weekstart, weekstart + seven_days
elif drange == 'current_month':
return first_day_of_month(today), last_day_of_month(today)
elif drange == 'current_and_next_month':
start_of_current = first_day_of_month(today)
return start_of_current, last_day_of_month(start_of_current + fortyfive_days)
    elif drange == 'current_and_next_year':
        # base_month was undefined here; assume a January-based year, matching the 'next_year' branch.
        next_year = last_day_of_year(today, base_month=1) + timedelta(days=2)
        return first_day_of_year(today, base_month=1), last_day_of_year(next_year, base_month=1)
    elif drange == 'two_weeks_past_end_of_month':
        return first_day_of_month(today), last_day_of_month(today) + fourteen_days
    elif drange == 'two_weeks_past_end_of_year':
        # base_month was undefined here; assume a January-based year, matching the 'next_year' branch.
        return first_day_of_year(today, base_month=1), last_day_of_year(today, base_month=1) + fourteen_days
elif drange == 'current_year':
return (datetime(year=today.year, month=1, day=1),
datetime(year=today.year+1, month=1, day=1) - dt.datetime.resolution)
elif drange == 'next_year':
next_year = last_day_of_year(today, base_month=1)
return first_day_of_year(next_year + seven_days, base_month=1), last_day_of_year(next_year + seven_days, base_month=1)
elif drange == 'last_month':
last_month = first_day_of_month(today) - timedelta(days=2)
return first_day_of_month(last_month), last_day_of_month(last_month)
elif drange == 'next_month':
next_month = last_day_of_month(today) + timedelta(days=2)
return first_day_of_month(next_month), last_day_of_month(next_month)
elif drange == 'past':
return datetime.fromtimestamp(0), beginning_today - timedelta(microseconds=1)
elif drange == 'past_and_today':
return datetime.fromtimestamp(0), today
elif drange == 'future':
return today, datetime(year=today.year+1000, month=1, day=1)
elif drange == 'future_and_today':
return beginning_today, datetime(year=today.year+1000, month=1, day=1)
elif drange == 'past_and_future':
return datetime.fromtimestamp(0), datetime(year=today.year+1000, month=1, day=1)
elif drange == 'yesterday':
return beginning_today - one_day, beginning_today
elif drange == 'today':
return beginning_today, end_today
elif drange == 'tomorrow':
return end_today, end_today + one_day
else:
raise WinnowError("unknown date description '{}'".format(drange))
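# NOTE (editor's sketch, not part of the original excerpt): interpret_date_range calls
# first_day_of_month/last_day_of_month/first_day_of_year/last_day_of_year, which are
# neither defined nor imported in this file. Minimal assumed implementations follow
# (base_month models a fiscal-year start month); treat them as a hedged sketch only.
def first_day_of_month(d):
    return datetime(year=d.year, month=d.month, day=1)
def last_day_of_month(d):
    next_month = datetime(year=d.year + (d.month == 12), month=(d.month % 12) + 1, day=1)
    return next_month - dt.datetime.resolution
def first_day_of_year(d, base_month=1):
    year = d.year if d.month >= base_month else d.year - 1
    return datetime(year=year, month=base_month, day=1)
def last_day_of_year(d, base_month=1):
    start = first_day_of_year(d, base_month)
    return datetime(year=start.year + 1, month=base_month, day=1) - dt.datetime.resolution
# Example: interpret_date_range('last_7_days') returns a (start, end) datetime pair;
# any value outside valid_rel_date_values raises WinnowError.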
|
nilq/baby-python
|
python
|
"""
Create a function that prints a hollow rectangle in the terminal.
number = int(input("Please enter an integer: "))  # 5
for row in range(number):
if row == 0 or row == number - 1:
print("*" * number)
else:
print("*%s*" % (" " * (number - 2)))
"""
def print_rectangle(number):
for row in range(number):
if row == 0 or row == number - 1:
print("*" * number)
else:
print("*%s*" % (" " * (number - 2)))
print_rectangle(8)
|
nilq/baby-python
|
python
|
import os.path
# manage descriptive name here...
def input_file_to_output_name(filename):
get_base_file = os.path.basename(filename)
base_filename = get_base_file.split('.')[0]
# base_filename = '/pipeline_data/' + base_filename
return base_filename
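# Hedged usage sketch (not part of the original module): the helper keeps only the part
# of the filename before the first dot, e.g. '/data/run1/sample.fastq.gz' -> 'sample'.
if __name__ == "__main__":
    print(input_file_to_output_name("/data/run1/sample.fastq.gz"))  # prints: sample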
|
nilq/baby-python
|
python
|
# Import Modules
from module.Mask_RCNN.mrcnn import config as maskconfig
from module.Mask_RCNN.mrcnn import model as maskmodel
from module.Mask_RCNN.mrcnn import visualize
import tensorflow as tf
import numpy as np
import warnings
import json
import cv2
import os
# Ignore warnings
old_v = tf.compat.v1.logging.get_verbosity()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
warnings.filterwarnings(action='ignore')
# Initialize Directories
MODEL_DIR = "../../../data/weight/mask_rcnn_fashion_0006.h5"
LABEL_DIR = "../../../data/image/mask_rcnn/label_descriptions.json"
MASK_DIR = "../../../module/Mask_RCNN"
IMG_DIR = "test1.jpg"
# Initialize NUM_CATS, IMAGE_SIZE
NUM_CATS = 46
IMAGE_SIZE = 512
# Load Label Descriptions to label_descriptions
with open(LABEL_DIR) as f:
label_descriptions = json.load(f)
# From label_descriptions['categories'] to label_names
label_names = [x['name'] for x in label_descriptions['categories']]
# Setup Configuration
class InferenceConfig(maskconfig.Config):
    NAME = "fashion"
    NUM_CLASSES = NUM_CATS + 1  # +1 for the background class
    BACKBONE = 'resnet101'
    IMAGE_MIN_DIM = IMAGE_SIZE
    IMAGE_MAX_DIM = IMAGE_SIZE
    IMAGE_RESIZE_MODE = 'none'
    RPN_ANCHOR_SCALES = (16, 32, 64, 128, 256)
    DETECTION_MIN_CONFIDENCE = 0.70
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
# Execute Inference Configuration
inference_config = InferenceConfig()
# Load Weight File
model = maskmodel.MaskRCNN(mode='inference', config=inference_config, model_dir=MASK_DIR)
model.load_weights(MODEL_DIR, by_name=True)
# Resize Image from image_path
def resize_image(image_path):
temp = cv2.imread(image_path)
temp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB)
temp = cv2.resize(temp, (IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_AREA)
return temp
# Since the submission system does not permit overlapped masks, we have to fix them
def refine_masks(masks, rois):
areas = np.sum(masks.reshape(-1, masks.shape[-1]), axis=0)
mask_index = np.argsort(areas)
union_mask = np.zeros(masks.shape[:-1], dtype=bool)
for m in mask_index:
masks[:, :, m] = np.logical_and(masks[:, :, m], np.logical_not(union_mask))
union_mask = np.logical_or(masks[:, :, m], union_mask)
for m in range(masks.shape[-1]):
mask_pos = np.where(masks[:, :, m] == True)
if np.any(mask_pos):
y1, x1 = np.min(mask_pos, axis=1)
y2, x2 = np.max(mask_pos, axis=1)
rois[m, :] = [y1, x1, y2, x2]
return masks, rois
# Python code to remove duplicate elements
def remove(duplicate):
final_list = []
duplicate_list = []
for num in duplicate:
if num not in final_list:
final_list.append(num)
else:
duplicate_list.append(num)
return final_list, duplicate_list
# Single Image Masking
img = cv2.imread(IMG_DIR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
result = model.detect([resize_image(IMG_DIR)], verbose=1)
r = result[0]
if r['masks'].size > 0:
masks = np.zeros((img.shape[0], img.shape[1], r['masks'].shape[-1]), dtype=np.uint8)
for m in range(r['masks'].shape[-1]):
masks[:, :, m] = cv2.resize(r['masks'][:, :, m].astype('uint8'),
(img.shape[1], img.shape[0]), interpolation=cv2.INTER_NEAREST)
y_scale = img.shape[0] / IMAGE_SIZE
x_scale = img.shape[1] / IMAGE_SIZE
rois = (r['rois'] * [y_scale, x_scale, y_scale, x_scale]).astype(int)
masks, rois = refine_masks(masks, rois)
else:
masks, rois = r['masks'], r['rois']
visualize.display_instances(img, rois, masks, r['class_ids'],
['bg'] + label_names, r['scores'],
title='camera1', figsize=(12, 12))
visualize.display_top_masks(img, masks, r['class_ids'], label_names, limit=8)
|
nilq/baby-python
|
python
|
from django.urls import path
from api import views
app_name = "api"
urlpatterns = [path("signup/", views.SignUp.as_view(), name="signup")]
|
nilq/baby-python
|
python
|
import os
from glob import glob
from os.path import join, basename
import numpy as np
from utils.data_utils import default_loader
from . import CDDataset
class OSCDDataset(CDDataset):
__BAND_NAMES = (
'B01', 'B02', 'B03', 'B04', 'B05', 'B06',
'B07', 'B08', 'B8A', 'B09', 'B10', 'B11', 'B12'
)
def __init__(
self,
root, phase='train',
transforms=(None, None, None),
repeats=1,
subset='val',
cache_level=1
):
super().__init__(root, phase, transforms, repeats, subset)
# cache_level=0 for no cache, 1 to cache labels, 2 and higher to cache all.
self.cache_level = int(cache_level)
if self.cache_level > 0:
self._pool = dict()
def _read_file_paths(self):
image_dir = join(self.root, "Onera Satellite Change Detection dataset - Images")
target_dir = join(self.root, "Onera Satellite Change Detection dataset - Train Labels")
txt_file = join(image_dir, "train.txt")
# Read cities
with open(txt_file, 'r') as f:
cities = [city.strip() for city in f.read().strip().split(',')]
if self.subset == 'train':
# For training, use the first 11 pairs
cities = cities[:-3]
else:
# For validation and test, use the remaining 3 pairs
cities = cities[-3:]
# Use resampled images
t1_list = [[join(image_dir, city, "imgs_1_rect", band+'.tif') for band in self.__BAND_NAMES] for city in cities]
t2_list = [[join(image_dir, city, "imgs_2_rect", band+'.tif') for band in self.__BAND_NAMES] for city in cities]
tar_list = [join(target_dir, city, 'cm', city+'-cm.tif') for city in cities]
return t1_list, t2_list, tar_list
def fetch_image(self, image_paths):
key = '-'.join(image_paths[0].split(os.sep)[-3:-1])
if self.cache_level >= 2:
image = self._pool.get(key, None)
if image is not None:
return image
image = np.stack([default_loader(p) for p in image_paths], axis=-1).astype(np.float32)
if self.cache_level >= 2:
self._pool[key] = image
return image
def fetch_target(self, target_path):
key = basename(target_path)
if self.cache_level >= 1:
tar = self._pool.get(key, None)
if tar is not None:
return tar
# In the tif labels, 1 stands for NC and 2 for C,
# thus a -1 offset is added.
        tar = (default_loader(target_path) - 1).astype(bool)
if self.cache_level >= 1:
self._pool[key] = tar
return tar
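# Hedged usage note (not part of the original module): the loader assumes the official
# OSCD folder layout under `root`, e.g.
#   ds = OSCDDataset("/data/OSCD", subset='train', cache_level=1)
# Indexing behaviour (e.g. ds[0]) depends on the CDDataset base class, which is not
# shown in this excerpt.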
|
nilq/baby-python
|
python
|
"""All the url endpoint hooks for facebook"""
import os
from sanic.response import json, text
from sanic import Blueprint
from .base import FacebookResponse
from taggo.parsers import FacebookYamlExecutor
VERIFY_TOKEN = os.environ.get("VF_TOKEN")
fb = Blueprint('facebook', url_prefix="/fb")
@fb.post('/recieve_message')
async def recieve_message(request):
data = request.json
fb_resp = FacebookResponse(page_type=data["object"],
entries=data.get("entry"),
executor=request.app.config["command"])
await fb_resp.send()
return json({
"reply": "success"
})
@fb.get("/recieve_message")
async def ping_pong(request):
if request.raw_args.get("hub.verify_token") == VERIFY_TOKEN:
return text(request.raw_args.get("hub.challenge"))
else:
return text("Error")
@fb.get('/')
async def ping(request):
return text("Hi! Nice to meet you")
|
nilq/baby-python
|
python
|
from flask import render_template, url_for, request, redirect, session, flash
from home_password.models.user import User
from home_password.models.site import Site
from flask_login import login_user, current_user, logout_user
from flask import Blueprint
main = Blueprint('main', __name__)
@main.route('/')
@main.route('/login', methods=["GET",'POST'])
def login():
if request.method == "POST":
user = User.query.filter_by(username=request.form["username"]).first()
if user is not None and user.valid_login(request.form["password"]):
login_user(user)
if user.is_admin:
return redirect(url_for('admin.home'))
else:
return redirect(url_for('users.home'))
else:
flash("incorrect login","error")
return render_template('users/login.html')
@main.route('/logout')
def logout():
logout_user()
return redirect(url_for('main.login'))
|
nilq/baby-python
|
python
|
"""*Text handling functions*."""
import json
import subprocess
import sys
from os.path import basename, splitext
from pathlib import Path
from urllib.parse import urlparse
from loguru import logger as log
import iscc_sdk as idk
__all__ = [
"text_meta_extract",
"text_extract",
"text_name_from_uri",
]
TEXT_META_MAP = {
"custom:iscc_name": "name",
"custom:iscc_description": "description",
"custom:iscc_meta": "meta",
"dc:title": "name",
"dc:description": "description",
"dc:creator": "creator",
"dc:rights": "rights",
}
def text_meta_extract(fp):
# type: (str) -> dict
"""
Extract metadata from text document file.
:param str fp: Filepath to text document file.
:return: Metadata mapped to IsccMeta schema
:rtype: dict
"""
args = ["--metadata", "-j", "--encoding=UTF-8", fp]
result = idk.run_tika(args)
meta = json.loads(result.stdout.decode(sys.stdout.encoding, errors="ignore"))
mapped = dict()
done = set()
for tag, mapped_field in TEXT_META_MAP.items():
if mapped_field in done: # pragma nocover
continue
value = meta.get(tag)
if value:
if isinstance(value, list):
value = ", ".join(value)
log.debug(f"Mapping text metadata: {tag} -> {mapped_field} -> {value}")
mapped[mapped_field] = value
done.add(mapped_field)
return mapped
def text_extract(fp):
# type: (str) -> str
"""
Extract plaintext from a text document.
    :param str fp: Filepath to text document file.
:return: Extracted plaintext
:rtype: str
"""
args = ["--text", "--encoding=UTF-8", fp]
result = idk.run_tika(args)
text = result.stdout.decode(encoding="UTF-8").strip()
if not text:
raise idk.IsccExtractionError(f"No text extracted from {basename(fp)}")
return result.stdout.decode(encoding="UTF-8")
def text_name_from_uri(uri):
# type: (str, Path) -> str
"""
Extract "filename" part of an uri without file extension to be used as fallback title for an
asset if no title information can be acquired.
:param str uri: Url or file path
:return: derived name (might be an empty string)
:rtype: str
"""
if isinstance(uri, Path):
result = urlparse(uri.as_uri())
else:
result = urlparse(uri)
base = basename(result.path) if result.path else basename(result.netloc)
name = splitext(base)[0]
name = name.replace("-", " ")
name = name.replace("_", " ")
return name
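# Hedged usage sketch (not part of the original module; assumes iscc_sdk and loguru are
# installed so the imports above succeed). text_name_from_uri itself only uses the
# standard library:
if __name__ == "__main__":
    assert text_name_from_uri("https://example.com/some_file-name.txt") == "some file name"
    assert text_name_from_uri(Path("/tmp/my_document.pdf")) == "my document"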
|
nilq/baby-python
|
python
|
import data_processor
import model_lib
if __name__ == "__main__":
train_set = data_processor.read_dataset("preprocessed/training_nopestudio.json")
valid_set = data_processor.read_dataset("preprocessed/validation_nopestudio.json")
combined_set = data_processor.read_dataset("preprocessed/dataset_nopestudio.json")
if train_set is None:
        print("No preprocessed training data found. Generating it now.")
train_set = data_processor.process_dataset("TRAIN")
data_processor.write_dataset("training.json", train_set)
if valid_set is None:
        print("No preprocessed validation data found. Generating it now.")
valid_set = data_processor.process_dataset("VALID")
data_processor.write_dataset("validation.json", valid_set)
if combined_set is None:
        print("No preprocessed combined dataset found. Generating it now.")
combined_set = data_processor.combine_dataset(
train_set,
valid_set
)
data_processor.write_dataset("dataset.json", combined_set)
combined_X = combined_set["data"]
combined_y = combined_set["target"]
while True:
        print("Enter the desired evaluation method:")
print("1: holdout validation")
print("2: k-fold cross validation")
        print("Any other value will terminate the process")
evaluate_type = input()
if evaluate_type != "1" and evaluate_type != "2":
            print("Invalid value entered. Terminating process")
break
        val = input("Enter the model to evaluate (any invalid value terminates the process): ")
model = model_lib.load_model(model=val, random_state=41)
if model is None:
            print("Invalid value entered. Terminating process")
break
# pipe = make_pipeline(
# StandardScaler(),
# model
# )
if evaluate_type == "1":
model.fit(
train_set["data"],
train_set["target"],
)
model_lib.evaluate(
valid_set["data"],
valid_set["target"],
model
)
else:
model_lib.evaluate_kfold(combined_X, combined_y, model)
|
nilq/baby-python
|
python
|
import numba as nb
import numpy as np
class Zobrist(object):
MAX_RAND = pow(10, 16)
BLACK_TABLE = np.random.seed(3) or np.random.randint(MAX_RAND, size=(8, 8))
WHITE_TABLE = np.random.seed(7) or np.random.randint(MAX_RAND, size=(8, 8))
@staticmethod
def from_state(state):
return Zobrist.hash(state.board,
Zobrist.BLACK_TABLE,
Zobrist.WHITE_TABLE)
@staticmethod
def update_action(previous, action, player):
return Zobrist.update(previous, action,
Zobrist.BLACK_TABLE,
Zobrist.WHITE_TABLE,
[player])
@staticmethod
def update_flip(previous, flip):
return Zobrist.update(previous, flip,
Zobrist.BLACK_TABLE,
Zobrist.WHITE_TABLE,
[1, -1])
@staticmethod
@nb.jit(nopython=True, nogil=True, cache=True)
def hash(board, black_table, white_table):
result = 0
for row, col in zip(*np.where(board == 1)):
result ^= black_table[row, col]
for row, col in zip(*np.where(board == -1)):
result ^= white_table[row, col]
return result
@staticmethod
@nb.jit(nopython=True, nogil=True, cache=True)
def update(previous, square, black_table, white_table, players):
result = previous
row, col = square
for player in players:
if player == 1:
result ^= black_table[row, col]
elif player == -1:
result ^= white_table[row, col]
return result
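# Hedged usage sketch (not part of the original module): an incremental hash update for a
# single placed stone should equal rehashing the whole board from scratch.
if __name__ == "__main__":
    board = np.zeros((8, 8), dtype=np.int64)
    empty_hash = Zobrist.hash(board, Zobrist.BLACK_TABLE, Zobrist.WHITE_TABLE)
    board[3, 4] = 1  # place a black stone
    full_hash = Zobrist.hash(board, Zobrist.BLACK_TABLE, Zobrist.WHITE_TABLE)
    incremental = Zobrist.update_action(empty_hash, (3, 4), 1)
    assert full_hash == incremental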
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
CloudFormation Custom::FindImage resource handler.
"""
# pylint: disable=C0103
from datetime import datetime
from logging import DEBUG, getLogger
import re
from typing import Any, Dict, List, Tuple
import boto3
from iso8601 import parse_date
log = getLogger("cfntoolkit.ec2")
log.setLevel(DEBUG)
def find_image(event: Dict[str, Any]) -> Dict[str, Any]:
"""
Custom::FindImage resource
Locates the latest version of an AMI/AKI/ARI with given attributes.
"""
if event["RequestType"] not in ("Create", "Update"):
return {}
rp = dict(event["ResourceProperties"])
filters = {} # type: Dict[str, Any]
try:
owner = rp["Owner"]
except KeyError:
raise ValueError("Owner must be specified")
add_filters(rp, filters)
# Convert the filters dict to a list of {Name: key, Value: values} dicts
ec2_filters = [{"Name": key, "Values": values}
for key, values in filters.items()]
ec2 = boto3.client("ec2")
result = ec2.describe_images(Owners=[owner], Filters=ec2_filters)
images = result.get("Images")
if not images:
raise ValueError("No AMIs found that match the filters applied.")
images = filter_names_and_descriptions(images, rp)
preferred_virtualization_type = rp.get("PreferredVirtualizationType")
preferred_root_device_type = rp.get("PreferredRootDeviceType")
def sort_key(image: Dict[str, Any]) -> Tuple[bool, bool, datetime]:
"""
Prioritize AMI preferences.
"""
date = parse_date(image["CreationDate"])
is_preferred_virtualization_type = (
preferred_virtualization_type is None or
image["VirtualizationType"] == preferred_virtualization_type)
is_preferred_root_device_type = (
preferred_root_device_type is None or
image["RootDeviceType"] == preferred_root_device_type)
return (is_preferred_virtualization_type,
is_preferred_root_device_type,
date)
images.sort(key=sort_key, reverse=True)
image_ids = [image["ImageId"] for image in images]
return {
"ImageId": image_ids[0],
"MatchingImageIds": image_ids,
}
# EC2 instance families that only support paravirtualization.
PV_ONLY_INSTANCE_FAMILIES = {"c1", "m1", "m2", "t1",}
# EC2 instance families that support either paravirtualization or HVM.
PV_HVM_INSTANCE_FAMILIES = {"c3", "hi1", "hs1", "m3",}
# EC2 instance families that have instance storage.
INSTANCE_STORE_FAMILIES = {
"c1", "c3", "cc2", "cg1", "cr1", "d2", "g2", "f1", "hi1", "hs1", "i2",
"i3", "m1", "m2", "m3", "r3", "x1",
}
# Keys for various fields so we catch subtle misspellings
KEY_REQPROP_ARCHITECTURE = "Architecture"
KEY_REQPROP_ENA_SUPPORT = "EnaSupport"
KEY_REQPROP_PLATFORM = "Platform"
KEY_REQPROP_ROOT_DEVICE_TYPE = "RootDeviceType"
KEY_REQPROP_VIRTUALIZATION_TYPE = "VirtualizationType"
KEY_EC2_ARCHITECTURE = "architecture"
KEY_EC2_ENA_SUPPORT = "ena-support"
KEY_EC2_PLATFORM = "platform"
KEY_EC2_ROOT_DEVICE_TYPE = "root-device-type"
KEY_EC2_VIRTUALIZATION_TYPE = "virtualization-type"
HVM = "hvm"
PARAVIRTUAL = "paravirtual"
EBS = "ebs"
# These request properties are embedded in the filter directly (though
# renamed), with the value encapsulated as a list.
DIRECT_FILTERS = {
KEY_REQPROP_ARCHITECTURE: KEY_EC2_ARCHITECTURE,
KEY_REQPROP_ENA_SUPPORT: KEY_EC2_ENA_SUPPORT,
KEY_REQPROP_PLATFORM: KEY_EC2_PLATFORM,
KEY_REQPROP_ROOT_DEVICE_TYPE: KEY_EC2_ROOT_DEVICE_TYPE,
KEY_REQPROP_VIRTUALIZATION_TYPE: KEY_EC2_VIRTUALIZATION_TYPE,
}
def add_filters(
request_properties: Dict[str, Any],
filters: Dict[str, List]) -> None:
"""
add_filters(request_properties: Dict[Str, Any],
filters: Dict[str, Any]) -> None:
Examine request_properties for appropriate values and apply them to the
filters list.
"""
for key in DIRECT_FILTERS:
if key in request_properties:
value = request_properties.pop(key)
filter_key = DIRECT_FILTERS.get(key)
filters[filter_key] = listify(value)
add_instance_type_filter(request_properties, filters)
return
def add_instance_type_filter(
request_properties: Dict[str, Any], filters: Dict[str, List]) -> None:
"""
add_instance_type_filter(
request_properties: Dict[str, Any], filters: List) -> None
Examine request_properties for an instance_type filter
"""
instance_type = request_properties.pop("InstanceType", None)
if instance_type is None:
return
if "." in instance_type:
instance_family = instance_type[:instance_type.find(".")]
else:
instance_family = instance_type
if instance_family in PV_ONLY_INSTANCE_FAMILIES:
# PV-only instance types
log.debug("instance_family=%s filters=%s", instance_family, filters)
if (filters.get(KEY_EC2_VIRTUALIZATION_TYPE, [PARAVIRTUAL]) !=
[PARAVIRTUAL]):
raise ValueError(
"VirtualizationType must be paravirtual for %s instance "
"types" % (instance_type,))
filters[KEY_EC2_VIRTUALIZATION_TYPE] = [PARAVIRTUAL]
# Ignore Switch hitting instance types (c3, etc.); assume all newer
# instance families are HVM-only.
elif instance_family not in PV_HVM_INSTANCE_FAMILIES:
if filters.get(KEY_EC2_VIRTUALIZATION_TYPE, [HVM]) != [HVM]:
raise ValueError(
"VirtualizationType must be hvm for %s instance types" %
(instance_type,))
filters[KEY_EC2_VIRTUALIZATION_TYPE] = [HVM]
if instance_family not in INSTANCE_STORE_FAMILIES:
# EBS-only root volume types.
if filters.get(KEY_EC2_ROOT_DEVICE_TYPE, [EBS]) != [EBS]:
raise ValueError(
"RootDeviceType must be ebs for %s instance types" %
(instance_type,))
filters["root-device-type"] = ["ebs"]
return
def filter_names_and_descriptions(
images: List, request_properties: Dict[str, Any]) -> List:
"""
filter_names_and_descriptions(
images: List, request_properties: Dict[str, Any]) -> List:
Filter image names and descriptions according to the rules given in
request_properties.
"""
for include_exclude in ["Included", "Excluded"]:
for param in ["Description", "Name"]:
key = "%s%ss" % (include_exclude, param)
value = request_properties.get(key)
if not value:
continue
regex = regex_string_list(listify(value))
# maybe_not is a passthrough when including, reverses the logic
# test when excluding.
if include_exclude == "Included":
maybe_not = lambda x: x
else:
maybe_not = lambda x: not x
images = [im for im in images
if maybe_not(regex.search(im[param]))]
if not images:
raise ValueError(
"No AMIs found that passed the %s filter" % key)
return images
def listify(value):
"""
Encapsulate value in a list if it isn't already.
"""
if isinstance(value, list):
return value
return [value]
def regex_string_list(sl: List[str]):
"""
Compile a list of strings into a regular expression.
"""
return re.compile("|".join(["(?:%s)" % el for el in sl]))
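# Hedged usage sketch (not part of the original handler; assumes boto3 and iso8601 are
# installed so the imports above succeed). The two small helpers behave as follows:
if __name__ == "__main__":
    assert listify("amzn-ami-hvm-*") == ["amzn-ami-hvm-*"]
    pattern = regex_string_list(["ubuntu", "debian"])
    assert pattern.search("ubuntu-xenial-16.04-amd64") is not None
    assert pattern.search("centos-7-x86_64") is None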
|
nilq/baby-python
|
python
|
import rsa
from django.db import models
import base64
class RSAFieldMixin(object):
def loadKeys(self, keys=[]):
if len(keys) == 0:
(pubkey, privkey) = rsa.newkeys(512)
keys.append(pubkey)
keys.append(privkey)
elif len(keys) == 2:
pubkey = keys[0]
privkey = keys[1]
else:
raise Exception("Invaild key array passed")
keys[0] = pubkey
keys[1] = privkey
return keys
def encrypt(self, value):
cryptoText = value.encode('utf8')
crypt = rsa.encrypt(cryptoText, self.loadKeys()[0])
return crypt.hex()
def decrypt(self, value):
value = bytes.fromhex(value)
text = rsa.decrypt(value, self.loadKeys()[1])
return text
def get_internal_type(self):
"""
To treat everything as text
"""
return 'CharField'
def get_prep_value(self, value):
if value:
return self.encrypt(value)
return None
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return value
def from_db_value(self, value, expression, connection):
return self.to_python(value)
def to_python(self, value):
if value is None:
return value
value = self.decrypt(value)
return super(RSAFieldMixin, self).to_python(value.decode('utf8'))
class RSACharField(RSAFieldMixin, models.CharField):
pass
class RSATextField(RSAFieldMixin, models.TextField):
pass
class RSADateTimeField(RSAFieldMixin, models.DateTimeField):
pass
class RSAIntegerField(RSAFieldMixin, models.IntegerField):
pass
class RSADateField(RSAFieldMixin, models.DateField):
pass
class RSAFloatField(RSAFieldMixin, models.FloatField):
pass
class RSAEmailField(RSAFieldMixin, models.EmailField):
pass
class RSABooleanField(RSAFieldMixin, models.BooleanField):
pass
class RSABinaryField(RSAFieldMixin, models.BinaryField):
pass
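# Hedged usage sketch (not part of the original module; assumes the rsa and Django
# packages are installed). The encrypt/decrypt round trip can be exercised directly on
# the mixin, since both methods only use the key pair cached by loadKeys(); note that
# decrypt() returns bytes.
if __name__ == "__main__":
    field = RSAFieldMixin()
    token = field.encrypt("secret value")
    assert field.decrypt(token) == b"secret value"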
|
nilq/baby-python
|
python
|
import tensorflow as tf # for deep learning
import pathlib # for loading path libs
# data loader class
class DataLoader():
# init method
def __init__(self, path_to_dir):
self.__path_to_dir = pathlib.Path(path_to_dir)
# proecess image method
# @tf.function
def process_image(self, image_data):
image_raw = tf.io.read_file(image_data)
image_decoded = tf.image.decode_jpeg(image_raw) # decode a raw image
return (
tf.image.resize(image_decoded, [192, 192]) / 255.0
) # normalize and resize an image
    # retrieve root labels
def retrive_root_labels(self):
all_image_list = self.__path_to_dir.glob("*/*")
# convert image labels to str
self.__all_image_paths = [str(image) for image in all_image_list]
# extract all the labels
root_labels = [
label.name for label in self.__path_to_dir.glob("*/") if label.is_dir()
]
# encode root labels into dic
root_labels = dict((name, index) for index, name in enumerate(root_labels))
# extract the labels of each images
        all_images_labels = [
            root_labels[pathlib.Path(image).parent.name] for image in self.__all_image_paths
        ]
# return all the labels and root labels
return all_images_labels, self.__all_image_paths, root_labels
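# Hedged usage sketch (not part of the original module): assumes an image folder laid out
# as <root>/<class_name>/<image>.jpg; the path below is illustrative only.
if __name__ == "__main__":
    loader = DataLoader("data/flower_photos")
    labels, paths, class_index = loader.retrive_root_labels()
    print(len(paths), "images across", len(class_index), "classes")
    first_image = loader.process_image(paths[0])  # float tensor resized to 192x192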
|
nilq/baby-python
|
python
|
import json
from wtforms import widgets
class CheckboxInput(widgets.CheckboxInput):
def __call__(self, field, **kwargs):
kwargs.update({"class_": "checkbox-field"})
rendered_field = super().__call__(field, **kwargs)
return widgets.HTMLString(
"""
%s<label class="state" for="%s"> </label>
"""
% (rendered_field, field.id)
)
class FileInput(widgets.FileInput):
def __call__(self, field, **kwargs):
kwargs.update(
{"@change": "count = $event.target.files.length", "class": "d-hidden"}
)
rendered_field = super().__call__(field, **kwargs)
return widgets.HTMLString(
"""
<label x-data="{count: 0}" class="file-field input-group">
<div class="info" x-text="count ? count + ' files(s) selected' : 'Choose file(s)'"></div>
%s
<span class="button button-secondary input-group-addon">Browse</span>
</label>
"""
% rendered_field
)
class HorizontalSelect(widgets.Select):
def __init__(self):
self.multiple = True
def __call__(self, field, **kwargs):
kwargs.update(
{"x-ref": "field", "class": "d-hidden", "@change": "ev = $event.timeStamp"}
)
rendered_field = super().__call__(field, **kwargs)
return widgets.HTMLString(
"""
<div class="select-multi-field"
x-data="{ ev: null }"
@set-one="
$refs.field.options[$event.detail.key].selected = $event.detail.selected;
$dispatch('propagate');
"
@set-all="
Object.keys($refs.field.options).forEach(key => $refs.field.options[key].selected = $event.detail);
$dispatch('propagate');
"
@propagate="$refs.field.dispatchEvent(new Event('change'))"
>
%s
<div class="row">
<div class="col-12 col-sm-6 col-md-5 col-lg-4">
<div class="title">
<a href="#" class="pull-right" @click.prevent="$dispatch('set-all', true)">Choose all</a>
Available
</div>
<ul>
<template x-for="key in Object.keys($refs.field.options)" :key="key">
<li x-show="!$refs.field.options[key].selected">
<a href="#"
@click.prevent="$dispatch('set-one', {key, selected: true})"
x-text="$refs.field.options[key].label"
></a>
</li>
</template>
</ul>
</div>
<div class="col-12 col-sm-6 col-md-5 col-lg-4">
<div class="title">
<a href="#" class="pull-right" @click.prevent="$dispatch('set-all', false)">Remove all</a>
Selected
</div>
<ul>
<template x-for="key in Object.keys($refs.field.options)" :key="key">
<li x-show="$refs.field.options[key].selected">
<a href="#"
@click.prevent="$dispatch('set-one', {key, selected: false})"
x-text="$refs.field.options[key].label"
></a>
</li>
</template>
</ul>
</div>
</div>
</div>
"""
% rendered_field
)
class PasswordInput(widgets.PasswordInput):
def __call__(self, field, **kwargs):
kwargs.update({":type": "show ? 'text' : 'password'"})
rendered_field = super().__call__(field, **kwargs)
return widgets.HTMLString(
"""
<div class="password-field icon-input" x-data="{ show: false }">
%s
<span class="fa" :class="{'fa-eye': !show, 'fa-eye-slash': show}" @click="show = !show"></span>
</div>
"""
% rendered_field
)
class RadioInput(widgets.RadioInput):
def __call__(self, field, **kwargs):
kwargs.update({"class_": "radio-field"})
rendered_field = super().__call__(field, **kwargs)
return widgets.HTMLString(
"""
%s<label class="state" for="%s"> </label>
"""
% (rendered_field, field.id)
)
class Select(widgets.Select):
def __call__(self, field, **kwargs):
rendered_field = super().__call__(field, **kwargs)
return widgets.HTMLString(
"""
<div class="select-field icon-input">
%s
<span class="fa fa-caret-down"></span>
</div>
"""
% rendered_field
)
class TagsInput(widgets.TextInput):
def __call__(self, field, **kwargs):
kwargs.update({":value": "JSON.stringify(tags)", "class": "d-hidden"})
rendered_field = super().__call__(field, **kwargs)
return widgets.HTMLString(
"""
<div x-data='{ tags: %s, newTag: "" }'>
%s
<div class="tags-field">
<template x-for="tag in tags" :key="tag">
<span class="tag">
<span x-text="tag"></span>
<a href="#"
@click.prevent="tags = tags.filter(i => i !== tag)">
<i class="fa fa-times"></i>
</a>
</span>
</template>
<input placeholder="add a new tag ..."
x-model="newTag"
@keydown.enter.prevent="
if (newTag.trim() !== ''
&& tags.indexOf(newTag.trim()) == -1
) tags.push(newTag.trim()); newTag = ''"
@keydown.backspace="if (newTag === '') tags.pop()"
>
</div>
</div>
"""
% (json.dumps(field.data), rendered_field)
)
|
nilq/baby-python
|
python
|
import heterocl as hcl
import numpy as np
def test_zero_allocate():
def kernel(A):
with hcl.for_(0, 10) as i:
with hcl.for_(i, 10) as j:
A[j] += i
return hcl.compute((0,), lambda x: A[x], "B")
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
p = hcl.Platform.aws_f1
p.config(compiler="vitis", mode="debug", backend="vhls")
try:
f = hcl.build(s, p)
except:
print("passed")
|
nilq/baby-python
|
python
|
import abc
class LayerBase(object):
"""Base class for most layers; each layer contains information which is
added on top of the regulation, such as definitions, internal citations,
keyterms, etc."""
__metaclass__ = abc.ABCMeta
# @see layer_type
INLINE = 'inline'
PARAGRAPH = 'paragraph'
SEARCH_REPLACE = 'search_replace'
@abc.abstractproperty
def shorthand(self):
"""A short description for this layer. This is used in query strings
and the like to define which layers should be used"""
raise NotImplementedError
@abc.abstractproperty
def data_source(self):
"""Data is pulled from the API; this field indicates the name of the
endpoint to pull data from"""
raise NotImplementedError
@abc.abstractproperty
def layer_type(self):
"""Layer data can be applied in a few ways, attaching itself to a
node, replacing text based on offset, or replacing text based on
searching. Which type is this layer?"""
raise NotImplementedError
class InlineLayer(LayerBase):
"""Represents a layer which replaces text by looking at offsets"""
layer_type = LayerBase.INLINE
@abc.abstractmethod
def replacement_for(self, original, data):
"""Given the original text and the relevant data from a layer, create
a (string) replacement, by, for example, running the data through a
template"""
raise NotImplementedError
def apply_layer(self, text, label_id):
"""Entry point when processing the regulation tree. Given the node's
text and its label_id, yield all replacement text"""
data_with_offsets = ((entry, start, end)
for entry in self.layer.get(label_id, [])
for (start, end) in entry['offsets'])
for data, start, end in data_with_offsets:
start, end = int(start), int(end)
original = text[start:end]
replacement = self.replacement_for(original, data)
yield (original, replacement, (start, end))
class SearchReplaceLayer(LayerBase):
"""Represents a layer which replaces text by searching for and replacing a
specific substring. Also accounts for the string appearing multiple times
(via the 'locations' field)"""
layer_type = LayerBase.SEARCH_REPLACE
_text_field = 'text' # All but key terms follow this convention...
@abc.abstractmethod
def replacements_for(self, text, data):
"""Given the original text and the relevant data from a layer, create
a (string) replacement, by, for example, running the data through a
template. Returns a generator"""
raise NotImplementedError
def apply_layer(self, label_id):
"""Entry point when processing the regulation tree. Given the node's
label_id, attempt to find relevant layer data in self.layer"""
for entry in self.layer.get(label_id, []):
text = entry[self._text_field]
for replacement in self.replacements_for(text, entry):
yield (text, replacement, entry['locations'])
|
nilq/baby-python
|
python
|
import os
import hashlib
from download.url_image_downloader import UrlImageDownloader
def test_download_image_from_url():
url = ('https://upload.wikimedia.org/wikipedia/commons/thumb/9/9f/RacingFlagsJune2007.jpg/575px-'
'RacingFlagsJune2007.jpg')
image_path = 'test.jpg'
# download the image
downloader = UrlImageDownloader(url, image_path)
downloader.download()
md5 = hashlib.md5()
# calculate md5 hash of the downloaded image
with open(image_path, "rb") as file:
for chunk in iter(lambda: file.read(4096), b""):
md5.update(chunk)
assert os.path.isfile(image_path)
assert md5.hexdigest() == '82a8ebf6719a24b52dec3fa6856d4870'
# remove the downloaded image
os.remove(image_path)
|
nilq/baby-python
|
python
|
#!/router/bin/python
from trex_general_test import CTRexGeneral_Test
from tests_exceptions import *
from interfaces_e import IFType
from nose.tools import nottest
from misc_methods import print_r
class CTRexNbar_Test(CTRexGeneral_Test):
"""This class defines the NBAR testcase of the T-Rex traffic generator"""
def __init__(self, *args, **kwargs):
super(CTRexNbar_Test, self).__init__(*args, **kwargs)
self.unsupported_modes = ['loopback'] # obviously no NBar in loopback
pass
def setUp(self):
super(CTRexNbar_Test, self).setUp() # launch super test class setUp process
# self.router.kill_nbar_flows()
self.router.clear_cft_counters()
self.router.clear_nbar_stats()
def match_classification (self):
nbar_benchmark = self.get_benchmark_param("nbar_classification")
test_classification = self.router.get_nbar_stats()
print "TEST CLASSIFICATION:"
print test_classification
missmatchFlag = False
missmatchMsg = "NBAR classification contians a missmatch on the following protocols:"
fmt = '\n\t{0:15} | Expected: {1:>3.2f}%, Got: {2:>3.2f}%'
noise_level = 0.045 # percents
for cl_intf in self.router.get_if_manager().get_if_list(if_type = IFType.Client):
client_intf = cl_intf.get_name()
# removing noise classifications
for key, value in test_classification[client_intf]['percentage'].items():
if value <= noise_level:
print 'Removing noise classification: %s' % key
del test_classification[client_intf]['percentage'][key]
if len(test_classification[client_intf]['percentage']) != (len(nbar_benchmark) + 1): # adding 'total' key to nbar_benchmark
raise ClassificationMissmatchError ('The total size of classification result does not match the provided benchmark.')
for protocol, bench in nbar_benchmark.iteritems():
if protocol != 'total':
try:
bench = float(bench)
protocol = protocol.replace('_','-')
protocol_test_res = test_classification[client_intf]['percentage'][protocol]
deviation = 100 * abs(bench/protocol_test_res - 1) # percents
difference = abs(bench - protocol_test_res)
if (deviation > 10 and difference > noise_level): # allowing 10% deviation and 'noise_level'% difference
missmatchFlag = True
missmatchMsg += fmt.format(protocol, bench, protocol_test_res)
except KeyError as e:
missmatchFlag = True
print e
print "Changes missmatchFlag to True. ", "\n\tProtocol {0} isn't part of classification results on interface {intf}".format( protocol, intf = client_intf )
missmatchMsg += "\n\tProtocol {0} isn't part of classification results on interface {intf}".format( protocol, intf = client_intf )
except ZeroDivisionError as e:
print "ZeroDivisionError: %s" % protocol
pass
if missmatchFlag:
self.fail(missmatchMsg)
def test_nbar_simple(self):
# test initialization
deviation_compare_value = 0.03 # default value of deviation - 3%
self.router.configure_basic_interfaces()
self.router.config_pbr(mode = "config")
self.router.config_nbar_pd()
mult = self.get_benchmark_param('multiplier')
core = self.get_benchmark_param('cores')
ret = self.trex.start_trex(
c = core,
m = mult,
p = True,
nc = True,
d = 100,
f = 'avl/sfr_delay_10_1g.yaml',
l = 1000)
trex_res = self.trex.sample_to_run_finish()
# trex_res is a CTRexResult instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
print ("\nLATEST RESULT OBJECT:")
print trex_res
print ("\nLATEST DUMP:")
print trex_res.get_latest_dump()
self.check_general_scenario_results(trex_res, check_latency = False)
# test_norm_cpu = 2*(trex_res.result['total-tx']/(core*trex_res.result['cpu_utilization']))
trex_tx_pckt = trex_res.get_last_value("trex-global.data.m_total_tx_pkts")
cpu_util = trex_res.get_last_value("trex-global.data.m_cpu_util")
cpu_util_hist = trex_res.get_value_list("trex-global.data.m_cpu_util")
print "cpu util is:", cpu_util
print cpu_util_hist
test_norm_cpu = 2 * trex_tx_pckt / (core * cpu_util)
print "test_norm_cpu is:", test_norm_cpu
if self.get_benchmark_param('cpu2core_custom_dev'):
# check this test by custom deviation
deviation_compare_value = self.get_benchmark_param('cpu2core_dev')
print "Comparing test with custom deviation value- {dev_val}%".format( dev_val = int(deviation_compare_value*100) )
# need to be fixed !
#if ( abs((test_norm_cpu/self.get_benchmark_param('cpu_to_core_ratio')) - 1) > deviation_compare_value):
# raise AbnormalResultError('Normalized bandwidth to CPU utilization ratio exceeds benchmark boundaries')
self.match_classification()
assert True
@nottest
def test_rx_check (self):
# test initialization
self.router.configure_basic_interfaces()
self.router.config_pbr(mode = "config")
self.router.config_nbar_pd()
mult = self.get_benchmark_param('multiplier')
core = self.get_benchmark_param('cores')
sample_rate = self.get_benchmark_param('rx_sample_rate')
ret = self.trex.start_trex(
c = core,
m = mult,
p = True,
nc = True,
rx_check = sample_rate,
d = 100,
f = 'cap2/sfr.yaml',
l = 1000)
trex_res = self.trex.sample_to_run_finish()
# trex_res is a CTRexResult instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
print ("\nLATEST RESULT OBJECT:")
print trex_res
print ("\nLATEST DUMP:")
print trex_res.get_latest_dump()
self.check_general_scenario_results(trex_res)
self.check_CPU_benchmark(trex_res, 10)
# if trex_res.result['rx_check_tx']==trex_res.result['rx_check_rx']: # rx_check verification shoud pass
# assert trex_res.result['rx_check_verification'] == "OK"
# else:
# assert trex_res.result['rx_check_verification'] == "FAIL"
# the name intentionally not matches nose default pattern, including the test should be specified explicitly
def NBarLong(self):
self.router.configure_basic_interfaces()
self.router.config_pbr(mode = "config")
self.router.config_nbar_pd()
mult = self.get_benchmark_param('multiplier')
core = self.get_benchmark_param('cores')
ret = self.trex.start_trex(
c = core,
m = mult,
p = True,
nc = True,
d = 18000, # 5 hours
f = 'avl/sfr_delay_10_1g.yaml',
l = 1000)
trex_res = self.trex.sample_to_run_finish()
# trex_res is a CTRexResult instance- and contains the summary of the test results
# you may see all the results keys by simply calling here for 'print trex_res.result'
print ("\nLATEST RESULT OBJECT:")
print trex_res
self.check_general_scenario_results(trex_res, check_latency = False)
def tearDown(self):
CTRexGeneral_Test.tearDown(self)
pass
if __name__ == "__main__":
pass
|
nilq/baby-python
|
python
|
from rest_framework import permissions
from rest_framework.reverse import reverse
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
# Write permissions are only allowed to the owner of the snippet.
return obj.owner == request.user
class IsOwnerCheck(permissions.BasePermission):
def has_permission(self, request, view):
"""
map={"view_name":{"path_info","method "}
}
"""
maps = {
'book_list': {'url': '/demo-service/api/v1/book/', 'method': 'GET'},
'book_create': {'url': '/api/v1/book/', 'method': 'POST'}
}
results = False
view_name = view.get_view_name()
print(view_name,"xxxxxxxxxxx")
if view_name in maps.keys() and request.method in permissions.SAFE_METHODS:
mapper = maps.get(view_name)
user_role_url = mapper.get('url',None)
user_role_url_method = 'GET'
# user_role_url = request.user.permission.url
# user_role_url_method = request.user.permission.method.upper()
print(request.method,request.path_info)
if user_role_url == request.path_info and user_role_url_method ==request.method:
return True
else:
return False
else:
return False
def has_object_permission(self, request, view, obj):
""" view表示当前视图, obj为数据对象 """
return True
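# --- Usage sketch (illustrative only) ---
# How these permission classes would typically be attached to a DRF view; the
# Book model, serializer and viewset named here are hypothetical and not part
# of this module.
#
# from rest_framework import viewsets
#
# class BookViewSet(viewsets.ModelViewSet):
#     queryset = Book.objects.all()
#     serializer_class = BookSerializer
#     # DRF calls has_permission() for every request and has_object_permission()
#     # for object-level actions such as retrieve/update/destroy.
#     permission_classes = [IsOwnerCheck, IsOwnerOrReadOnly]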
|
nilq/baby-python
|
python
|
from ms_deisotope.peak_dependency_network.intervals import Interval, IntervalTreeNode
from glycan_profiling.task import TaskBase
from .chromatogram import Chromatogram
class ChromatogramForest(TaskBase):
"""An an algorithm for aggregating chromatograms from peaks of close mass
weighted by intensity.
This algorithm assumes that mass accuracy is correlated with intensity, so
the most intense peaks should most accurately reflect their true neutral mass.
The expected input is a list of (scan id, peak) pairs. This list is sorted by
descending peak intensity. For each pair, using binary search, locate the nearest
existing chromatogram in :attr:`chromatograms`. If the nearest chromatogram is within
:attr:`error_tolerance` ppm of the peak's neutral mass, add this peak to that
chromatogram, otherwise create a new chromatogram containing this peak and insert
it into :attr:`chromatograms` while preserving the overall sortedness. This algorithm
is carried out by :meth:`aggregate_unmatched_peaks`
This process may produce chromatograms with large gaps in them, which
may or may not be acceptable. To break gapped chromatograms into separate
entities, the :class:`ChromatogramFilter` type has a method :meth:`split_sparse`.
Attributes
----------
chromatograms : list of Chromatogram
A list of growing Chromatogram objects, ordered by neutral mass
count : int
The number of peaks accumulated
error_tolerance : float
The mass error tolerance between peaks and possible chromatograms (in ppm)
scan_id_to_rt : callable
A callable object to convert scan ids to retention time.
"""
def __init__(self, chromatograms=None, error_tolerance=1e-5, scan_id_to_rt=lambda x: x):
if chromatograms is None:
chromatograms = []
self.chromatograms = sorted(chromatograms, key=lambda x: x.neutral_mass)
self.error_tolerance = error_tolerance
self.scan_id_to_rt = scan_id_to_rt
self.count = 0
def __len__(self):
return len(self.chromatograms)
def __iter__(self):
return iter(self.chromatograms)
def __getitem__(self, i):
if isinstance(i, (int, slice)):
return self.chromatograms[i]
else:
return [self.chromatograms[j] for j in i]
def find_insertion_point(self, peak):
index, matched = binary_search_with_flag(
self.chromatograms, peak.neutral_mass, self.error_tolerance)
return index, matched
def find_minimizing_index(self, peak, indices):
best_index = None
best_error = float('inf')
for index_case in indices:
chroma = self[index_case]
err = abs(chroma.neutral_mass - peak.neutral_mass) / peak.neutral_mass
if err < best_error:
best_index = index_case
best_error = err
return best_index
def handle_peak(self, scan_id, peak):
if len(self) == 0:
index = [0]
matched = False
else:
index, matched = self.find_insertion_point(peak)
if matched:
chroma = self.chromatograms[self.find_minimizing_index(peak, index)]
most_abundant_member = chroma.most_abundant_member
chroma.insert(scan_id, peak, self.scan_id_to_rt(scan_id))
if peak.intensity < most_abundant_member:
chroma.retain_most_abundant_member()
else:
chroma = Chromatogram(None)
chroma.created_at = "forest"
chroma.insert(scan_id, peak, self.scan_id_to_rt(scan_id))
self.insert_chromatogram(chroma, index)
self.count += 1
def insert_chromatogram(self, chromatogram, index):
# TODO: Review this index arithmetic, the output isn't sorted.
index = index[0]  # take the first candidate index from the list produced by binary_search_with_flag
if index != 0:
self.chromatograms.insert(index + 1, chromatogram)
else:
if len(self) == 0:
new_index = index
else:
x = self.chromatograms[index]
if x.neutral_mass < chromatogram.neutral_mass:
new_index = index + 1
else:
new_index = index
self.chromatograms.insert(new_index, chromatogram)
def aggregate_unmatched_peaks(self, *args, **kwargs):
import warnings
warnings.warn("Instead of calling aggregate_unmatched_peaks, call aggregate_peaks", stacklevel=2)
self.aggregate_peaks(*args, **kwargs)
def aggregate_peaks(self, scan_id_peaks_list, minimum_mass=300, minimum_intensity=1000.):
unmatched = sorted(scan_id_peaks_list, key=lambda x: x[1].intensity, reverse=True)
for scan_id, peak in unmatched:
if peak.neutral_mass < minimum_mass or peak.intensity < minimum_intensity:
continue
self.handle_peak(scan_id, peak)
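# --- Usage sketch (illustrative only) ---
# The aggregation loop described in the class docstring, in miniature. The names
# below (scan_rt_lookup, picked_peaks) are placeholders: ChromatogramForest itself
# only reads .neutral_mass and .intensity from each peak, but the underlying
# Chromatogram nodes may expect a richer deconvoluted-peak interface, so this is
# a sketch rather than a runnable test.
#
# forest = ChromatogramForest(error_tolerance=1e-5, scan_id_to_rt=scan_rt_lookup)
# forest.aggregate_peaks(picked_peaks, minimum_mass=300, minimum_intensity=1000.)
# chromatograms = list(forest)   # growing chromatograms, ordered by neutral mass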
class ChromatogramMerger(TaskBase):
def __init__(self, chromatograms=None, error_tolerance=1e-5):
if chromatograms is None:
chromatograms = []
self.chromatograms = sorted(chromatograms, key=lambda x: x.neutral_mass)
self.error_tolerance = error_tolerance
self.count = 0
self.verbose = False
def __len__(self):
return len(self.chromatograms)
def __iter__(self):
return iter(self.chromatograms)
def __getitem__(self, i):
if isinstance(i, (int, slice)):
return self.chromatograms[i]
else:
return [self.chromatograms[j] for j in i]
def find_candidates(self, new_chromatogram):
index, matched = binary_search_with_flag(
self.chromatograms, new_chromatogram.neutral_mass, self.error_tolerance)
return index, matched
def merge_overlaps(self, new_chromatogram, chromatogram_range):
has_merged = False
query_mass = new_chromatogram.neutral_mass
for chroma in chromatogram_range:
cond = (chroma.overlaps_in_time(new_chromatogram) and abs(
(chroma.neutral_mass - query_mass) / query_mass) < self.error_tolerance and
not chroma.common_nodes(new_chromatogram))
if cond:
chroma.merge(new_chromatogram)
has_merged = True
break
return has_merged
def find_insertion_point(self, new_chromatogram):
return binary_search_exact(
self.chromatograms, new_chromatogram.neutral_mass)
def handle_new_chromatogram(self, new_chromatogram):
if len(self) == 0:
index = [0]
matched = False
else:
index, matched = self.find_candidates(new_chromatogram)
if matched:
chroma = self[index]
has_merged = self.merge_overlaps(new_chromatogram, chroma)
if not has_merged:
insertion_point = self.find_insertion_point(new_chromatogram)
self.insert_chromatogram(new_chromatogram, [insertion_point])
else:
self.insert_chromatogram(new_chromatogram, index)
self.count += 1
def insert_chromatogram(self, chromatogram, index):
if index[0] != 0:
self.chromatograms.insert(index[0] + 1, chromatogram)
else:
if len(self) == 0:
new_index = index[0]
else:
x = self.chromatograms[index[0]]
if x.neutral_mass < chromatogram.neutral_mass:
new_index = index[0] + 1
else:
new_index = index[0]
self.chromatograms.insert(new_index, chromatogram)
def aggregate_chromatograms(self, chromatograms):
unmatched = sorted(chromatograms, key=lambda x: x.total_signal, reverse=True)
for chroma in unmatched:
self.handle_new_chromatogram(chroma)
def flatten_tree(tree):
output_queue = []
input_queue = [tree]
while input_queue:
next_node = input_queue.pop()
output_queue.append(next_node)
next_right = next_node.right
if next_right is not None:
input_queue.append(next_right)
next_left = next_node.left
if next_left is not None:
input_queue.append(next_left)
return output_queue[::-1]
def layered_traversal(nodes):
return sorted(nodes, key=lambda x: (x.level, x.center), reverse=True)
class ChromatogramOverlapSmoother(object):
def __init__(self, chromatograms, error_tolerance=1e-5):
self.retention_interval_tree = build_rt_interval_tree(chromatograms)
self.error_tolerance = error_tolerance
self.solution_map = {None: []}
self.chromatograms = self.smooth()
def __iter__(self):
return iter(self.chromatograms)
def __getitem__(self, i):
return self.chromatograms[i]
def __len__(self):
return len(self.chromatograms)
def aggregate_interval(self, tree):
chromatograms = [interval[0] for interval in tree.contained]
chromatograms.extend(self.solution_map[tree.left])
chromatograms.extend(self.solution_map[tree.right])
merger = ChromatogramMerger(error_tolerance=self.error_tolerance)
merger.aggregate_chromatograms(chromatograms)
self.solution_map[tree] = list(merger)
return merger
def smooth(self):
nodes = layered_traversal(flatten_tree(self.retention_interval_tree))
for node in nodes:
self.aggregate_interval(node)
final = self.solution_map[self.retention_interval_tree]
result = ChromatogramMerger()
result.aggregate_chromatograms(final)
return list(result)
def binary_search_with_flag(array, mass, error_tolerance=1e-5):
lo = 0
n = hi = len(array)
while hi != lo:
mid = (hi + lo) // 2
x = array[mid]
err = (x.neutral_mass - mass) / mass
if abs(err) <= error_tolerance:
i = mid - 1
# Sweep toward lower indices to find the low end of the matching range
while i > 0:
x = array[i]
err = (x.neutral_mass - mass) / mass
if abs(err) <= error_tolerance:
i -= 1
continue
else:
break
low_end = i
i = mid + 1
# Sweep toward higher indices to find the high end of the matching range
while i < n:
x = array[i]
err = (x.neutral_mass - mass) / mass
if abs(err) <= error_tolerance:
i += 1
continue
else:
break
high_end = i
return list(range(low_end, high_end)), True
elif (hi - lo) == 1:
return [mid], False
elif err > 0:
hi = mid
elif err < 0:
lo = mid
return [0], False  # empty input; return a list so callers can index it like the other branches
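# --- Worked example (not part of the original module) ---
# binary_search_with_flag only reads .neutral_mass from the array elements, so a
# small namedtuple is enough to show the (candidate_indices, matched) convention.
from collections import namedtuple

_MassOnly = namedtuple('_MassOnly', ['neutral_mass'])

def _binary_search_example():
    masses = [_MassOnly(m) for m in (1000.0, 1500.0, 1500.0008, 2000.0)]
    hit, matched = binary_search_with_flag(masses, 1500.0004, error_tolerance=1e-5)
    # matched is True; hit is a list of candidate indices around the two 1500.*
    # entries (the sweep may over-include a neighbour; callers narrow the list
    # with find_minimizing_index).
    miss, matched_miss = binary_search_with_flag(masses, 1750.0, error_tolerance=1e-5)
    # matched_miss is False; miss holds a single insertion-point candidate.
    return hit, matched, miss, matched_miss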
def binary_search_exact(array, mass):
lo = 0
hi = len(array)
while hi != lo:
mid = (hi + lo) // 2
x = array[mid]
err = (x.neutral_mass - mass)
if err == 0:
return mid
elif (hi - lo) == 1:
return mid
elif err > 0:
hi = mid
else:
lo = mid
def smooth_overlaps(chromatogram_list, error_tolerance=1e-5):
chromatogram_list = sorted(chromatogram_list, key=lambda x: x.neutral_mass)
out = []
last = chromatogram_list[0]
i = 1
while i < len(chromatogram_list):
current = chromatogram_list[i]
mass_error = abs((last.neutral_mass - current.neutral_mass) / current.neutral_mass)
if mass_error <= error_tolerance:
if last.overlaps_in_time(current):
last = last.merge(current)
last.created_at = "smooth_overlaps"
else:
out.append(last)
last = current
else:
out.append(last)
last = current
i += 1
out.append(last)
return out
class ChromatogramRetentionTimeInterval(Interval):
def __init__(self, chromatogram):
super(ChromatogramRetentionTimeInterval, self).__init__(
chromatogram.start_time, chromatogram.end_time, [chromatogram])
self.neutral_mass = chromatogram.neutral_mass
self.start_time = self.start
self.end_time = self.end
self.data['neutral_mass'] = self.neutral_mass
def build_rt_interval_tree(chromatogram_list, interval_tree_type=IntervalTreeNode):
intervals = list(map(ChromatogramRetentionTimeInterval, chromatogram_list))
interval_tree = interval_tree_type.build(intervals)
return interval_tree
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('documents', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='InformationDocument',
fields=[
('document_ptr', models.OneToOneField(primary_key=True, auto_created=True, to='documents.Document', serialize=False, parent_link=True)),
],
options={
'verbose_name_plural': 'Information documents',
'verbose_name': 'Information document',
'abstract': False,
'permissions': (('view_informationdocument', 'User/Group is allowed to view that document'),),
},
bases=('documents.document',),
),
]
|
nilq/baby-python
|
python
|
import re
class Command:
def __init__(self, name, register, jump_addr=None):
self.name = name
self.register = register
self.jump_addr = jump_addr
class Program:
def __init__(self, commands, registers):
self.commands = commands
self.registers = registers
self.instr_ptr = 0
def exec_next_command(self):
cmd = self.commands[self.instr_ptr]
if cmd.name == "hlf":
self.registers[cmd.register] //= 2
self.instr_ptr += 1
elif cmd.name == "tpl":
self.registers[cmd.register] *= 3
self.instr_ptr += 1
elif cmd.name == "inc":
self.registers[cmd.register] += 1
self.instr_ptr += 1
elif cmd.name == "jmp":
self.instr_ptr += cmd.jump_addr
elif cmd.name == "jie":
self.instr_ptr += cmd.jump_addr if self.registers[cmd.register] % 2 == 0 else 1
elif cmd.name == "jio":
self.instr_ptr += cmd.jump_addr if self.registers[cmd.register] == 1 else 1
else:
raise ValueError("Unsupported command: ", cmd.name)
def run(self):
while self.instr_ptr < len(self.commands):
self.exec_next_command()
def solve(commands):
pgm = Program(commands, {"a": 0, "b": 0})
pgm.run()
return pgm.registers["b"]
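# --- Worked example (hand-written, not from the puzzle input) ---
# A tiny program showing how the interpreter above executes: "inc a" sets a to 1,
# "jio a, +2" jumps over the next instruction because a == 1, so "tpl a" is
# skipped and the final "inc a" leaves a == 2.
def _example_run():
    demo = [
        Command("inc", "a"),
        Command("jio", "a", 2),
        Command("tpl", "a"),
        Command("inc", "a"),
    ]
    pgm = Program(demo, {"a": 0, "b": 0})
    pgm.run()
    return pgm.registers["a"]  # -> 2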
def parse(file_name):
with open(file_name, "r") as f:
commands = []
for line in f.readlines():
if any([cmd in line for cmd in ["inc", "tpl", "hlf"]]):
_, cmd, r, _ = re.split(r"([a-z]+) ([a|b])", line)
commands.append(Command(cmd, r))
elif "jmp" in line:
_, cmd, jmp_addr, _ = re.split(r"([a-z]+) ([+|-][0-9]+)", line)
commands.append(Command(cmd, None, int(jmp_addr)))
if any([cmd in line for cmd in ["jie", "jio"]]):
_, cmd, r, jmp_addr, _ = re.split(r"([a-z]+) ([a|b]), ([+\-0-9]+)", line)
commands.append(Command(cmd, r, int(jmp_addr)))
return commands
if __name__ == '__main__':
print(solve(parse("data.txt")))
|
nilq/baby-python
|
python
|
if __name__ == "__main__":
import argparse
import os
import torch
import torch.nn as nn
import torch.optim as optim
from mnistconvnet import MNISTConvNet
from mnist_data_loader import mnist_data_loader
from sgdol import SGDOL
# Parse input arguments.
parser = argparse.ArgumentParser(description='MNIST CNN SGDOL')
parser.add_argument('--use-cuda', action='store_true', default=False,
help='allow the use of CUDA (default: False)')
parser.add_argument('--seed', type=int, default=0, metavar='S',
help='random seed (default: 0)')
parser.add_argument('--train-epochs', type=int, default=30, metavar='N',
help='number of epochs to train (default: 30)')
parser.add_argument('--train-batchsize', type=int, default=100,
help='batchsize in training (default: 100)')
parser.add_argument('--dataroot', type=str, default='./data',
help='location to save the dataset (default: ./data)')
parser.add_argument('--optim-method', type=str, default='SGDOL',
choices=['SGDOL', 'Adam', 'SGD', 'Adagrad'],
help='the optimizer to be employed (default: SGDOL)')
parser.add_argument('--smoothness', type=float, default=10.0, metavar='M',
help='to be used in SGDOL (default: 10)')
parser.add_argument('--alpha', type=float, default=10.0,
help='to be used in SGDOL (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate of the chosen optimizer (default: 0.001)')
args = parser.parse_args()
# Set the random seed for reproducibility.
torch.manual_seed(args.seed)
# Load data.
kwargs = {}
dataset_info = mnist_data_loader(root_dir=args.dataroot,
batch_size=args.train_batchsize,
valid_ratio=0,
**kwargs)
train_loader = dataset_info[0]
test_loader = dataset_info[4]
# Check the availability of GPU.
use_cuda = args.use_cuda and torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# Initialize the neural network model and move it to GPU if needed.
net = MNISTConvNet()
net.to(device)
# Define the loss function.
criterion = nn.CrossEntropyLoss()
# Select optimizer.
optim_method = args.optim_method
if optim_method == 'SGDOL':
optimizer = SGDOL(net.parameters(),
smoothness=args.smoothness,
alpha=args.alpha)
elif optim_method == 'SGD':
optimizer = optim.SGD(net.parameters(),
lr=args.lr)
elif optim_method == 'Adagrad':
optimizer = optim.Adagrad(net.parameters(),
lr=args.lr)
elif optim_method == 'Adam':
optimizer = optim.Adam(net.parameters(),
lr=args.lr)
else:
raise ValueError("Invalid optimization method: {}".format(optim_method))
# Train the model.
all_train_losses = []
for epoch in range(args.train_epochs):
# Train the model for one epoch.
net.train()
for data in train_loader:
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
num_grads = 1 if args.optim_method != 'SGDOL' else 2
for _ in range(num_grads):
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# Evaluate the trained model over all training samples.
net.eval()
running_loss = 0.0
with torch.no_grad():
for data in train_loader:
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = net(inputs)
loss = criterion(outputs, labels)
running_loss += loss.item()
avg_train_loss = running_loss / len(train_loader)
all_train_losses.append(avg_train_loss)
print('Epoch %d: Training Loss: %.4f' % (epoch + 1, avg_train_loss))
# Evaluate the test error of the final model.
net.eval()
correct = 0
total = 0
with torch.no_grad():
for data in test_loader:
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
correct += (predicted == labels).sum().item()
total += labels.size(0)
test_accu = 1.0 * correct / total
print('Final Test Accuracy: %.4f\n' % (test_accu))
# Write log files.
if optim_method == 'SGDOL':
opt_para = args.smoothness
else:
opt_para = args.lr
if not os.path.exists('logs'):
os.makedirs('logs')
train_loss_fname = ''.join(['logs/',
'{0}'.format(optim_method),
'_training_loss.txt'])
with open(train_loss_fname, 'a') as f:
f.write('{0}, {1}\n'.format(opt_para, all_train_losses))
test_error_fname = ''.join(['logs/',
'{0}'.format(optim_method),
'_test_error.txt'])
with open(test_error_fname, 'a') as f:
f.write('{0}, {1}\n'.format(opt_para, test_accu))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# @Time : 2019/9/8 14:18
# @Author : zhoujun
import os
import cv2
import torch
import subprocess
import numpy as np
import pyclipper
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
def de_shrink(poly, r=1.5):
d_i = cv2.contourArea(poly) * r / cv2.arcLength(poly, True)  # offset distance: area * ratio / perimeter
pco = pyclipper.PyclipperOffset()
pco.AddPath(poly, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
shrinked_poly = np.array(pco.Execute(d_i))
return shrinked_poly
def decode(preds, threshold=0.2, min_area=5):
"""
在输出上使用sigmoid 将值转换为置信度,并使用阈值来进行文字和背景的区分
:param preds: 网络输出
:param scale: 网络的scale
:param threshold: sigmoid的阈值
:return: 最后的输出图和文本框
"""
if subprocess.call(['make', '-C', BASE_DIR]) != 0: # return value
raise RuntimeError('Cannot compile pse: {}'.format(BASE_DIR))
from .pse import get_points, get_num
shrink_map = preds[0, :, :].detach().cpu().numpy()
score_map = shrink_map.astype(np.float32)
shrink_map = shrink_map > threshold
label_num, label = cv2.connectedComponents(shrink_map.astype(np.uint8), connectivity=4)
bbox_list = []
label_points = get_points(label, score_map, label_num)
for label_value, label_point in label_points.items():
score_i = label_point[0]
label_point = label_point[2:]
points = np.array(label_point, dtype=int).reshape(-1, 2)
if points.shape[0] < min_area:
continue
# if score_i < 0.93:
# continue
rect = cv2.minAreaRect(points)
poly = cv2.boxPoints(rect)
shrinked_poly = de_shrink(poly)
if shrinked_poly.size == 0:
continue
rect = cv2.minAreaRect(shrinked_poly)
shrinked_poly = cv2.boxPoints(rect).astype(int)
if cv2.contourArea(shrinked_poly) < 100:
continue
bbox_list.append([shrinked_poly[1], shrinked_poly[2], shrinked_poly[3], shrinked_poly[0]])
return label, np.array(bbox_list)
def decode_py(preds, threshold=0.2, min_area=5):
shrink_map = preds[0, :, :].detach().cpu().numpy()
# score_map = shrink_map.astype(np.float32)
shrink_map = shrink_map > threshold
label_num, label = cv2.connectedComponents(shrink_map.astype(np.uint8), connectivity=4)
bbox_list = []
for label_idx in range(1, label_num):
points = np.array(np.where(label == label_idx)).transpose((1, 0))[:, ::-1]
if points.shape[0] < min_area:
continue
# score_i = np.mean(score_map[label == label_idx])
# if score_i < 0.93:
# continue
rect = cv2.minAreaRect(points)
poly = cv2.boxPoints(rect).astype(int)
shrinked_poly = de_shrink(poly)
if shrinked_poly.size == 0:
continue
rect = cv2.minAreaRect(shrinked_poly)
shrinked_poly = cv2.boxPoints(rect).astype(int)
if cv2.contourArea(shrinked_poly) < 100:
continue
bbox_list.append([shrinked_poly[1], shrinked_poly[2], shrinked_poly[3], shrinked_poly[0]])
return label, np.array(bbox_list)
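# --- Usage sketch (illustrative only) ---
# preds is expected to be a (C, H, W) tensor whose first channel is the shrink
# (confidence) map; a typical post-processing call after a forward pass might be:
#
# with torch.no_grad():
#     preds = model(image_tensor)[0]    # `model` is a hypothetical detector here
# label_map, boxes = decode_py(preds, threshold=0.7, min_area=5)
# # boxes is an array of 4-point polygons in (x, y) image coordinates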
|
nilq/baby-python
|
python
|
count = 0
print('Before', count)
for thing in [9, 41, 12, 3, 74, 15]:
count += 1
# zork = zork + 1
print(count, thing)
print('After', count)
|
nilq/baby-python
|
python
|
# import src.stacking.argus_models
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2017 by ExopyPulses Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Gaussian shapes
"""
import numpy as np
from atom.api import Callable, Str
from ..utils.entry_eval import exec_entry
from exopy_pulses.pulses.shapes.base_shape import AbstractShape
DEFAULT_FORMULA = \
'''def c(self, time, unit):
return 0.5*np.ones(len(time))'''
class ArbitraryShape(AbstractShape):
""" Shape defined entirely by the user.
"""
#: Formula used to compute the shape of the pulse. It is compiled as
#: a function using exec which must be of the following signature:
#: c(self, time, unit) and return the pulse amplitude as a numpy array.
#: 'time' is a numpy array which represents the times at which to compute
#: the pulse
#: 'unit' is the unit in which the time is expressed.
#: During compilation, all the sequence local variables can be accessed
#: (using the {} notation).
formula = Str(DEFAULT_FORMULA).tag(pref=True)
def eval_entries(self, root_vars, sequence_locals, missing, errors):
""" Evaluate the amplitude of the pulse.
Parameters
----------
root_vars : dict
Global variables. As shapes and modulation cannot update them an
empty dict is passed.
sequence_locals : dict
Known locals variables for the pulse sequence.
missing : set
Set of variables missing to evaluate some entries in the sequence.
errors : dict
Errors which occurred when trying to compile the pulse sequence.
Returns
-------
result : bool
Flag indicating whether or not the evaluation succeeded.
"""
# Executing the formula :
res, err = self.build_compute_function(sequence_locals, missing)
return res
def compute(self, time, unit):
""" Computes the shape of the pulse at a given time.
Parameters
----------
time : ndarray
Times at which to compute the modulation.
unit : str
Unit in which the time is expressed.
Returns
-------
shape : ndarray
Amplitude of the pulse.
"""
shape = self._shape_factory(self, time, unit)
assert np.max(shape) <= 1.0
assert np.min(shape) >= -1.0
return shape
def build_compute_function(self, sequence_locals, missing):
"""Build the compute function from the formula.
"""
try:
loc = exec_entry(self.formula, sequence_locals, missing)
if not loc:
return False, {}
self._shape_factory = loc['c']
except Exception:
return False, {}
return True, {}
# --- Private API ---------------------------------------------------------
#: Runtime build shape computer.
_shape_factory = Callable()
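# --- Illustrative formula (a sketch, not a shipped default) ---
# A Gaussian envelope written against the documented c(self, time, unit)
# signature; the centre and width are hard-coded for the example, whereas a real
# sequence would usually inject them through the {} local-variable substitution
# mentioned above. It stays within the [-1, 1] range asserted by compute().
GAUSSIAN_FORMULA_EXAMPLE = \
'''def c(self, time, unit):
    center = 0.5 * (time[0] + time[-1])
    width = 0.1 * (time[-1] - time[0]) + 1e-12
    return np.exp(-0.5 * ((time - center) / width)**2)'''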
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
"""
initial
"""
class InitialBlock(nn.Module):
def __init__(self, in_channels, out_channels, bias=False, relu=True):
super(InitialBlock, self).__init__()
if (relu):
activation = nn.ReLU
else:
activation = nn.PReLU
# main branch
self.main_branch = nn.Conv2d(in_channels, out_channels - 3, kernel_size=3, stride=2, padding=1, bias=bias)
# another branch
self.ext_branch = nn.MaxPool2d(3, stride=2, padding=1)
self.bn = nn.BatchNorm2d(out_channels)
self.out_relu = activation()
def forward(self, x):
x1 = self.main_branch(x)
x2 = self.ext_branch(x)
out = torch.cat((x1, x2), 1)
out = self.bn(out)
return self.out_relu(out)
"""
Regular (non-downsampling) bottleneck, optionally dilated or asymmetric
"""
class Bottleneck(nn.Module):
def __init__(self,
channels,
internal_ratio=4,
kernel_size=3,
padding=0,
dilation=1,
asymmetric=False,
dropout_prob=0,
bias=False,
relu=True):
super().__init__()
"""
internal_ratio check
"""
if internal_ratio <= 1 or internal_ratio > channels:
raise RuntimeError("Value out of range. Expected value in the "
"interval [1, {0}], got internal_scale={1}."
.format(channels, internal_ratio))
internal_channels = channels // internal_ratio
if (relu):
activation = nn.ReLU
else:
activation = nn.PReLU
"""
Main branch first 1x1
"""
self.ext_conv1 = nn.Sequential(
nn.Conv2d(channels, internal_channels, kernel_size=1, stride=1, bias=bias),
nn.BatchNorm2d(internal_channels),
activation())
"""
use an asymmetric (factorized) convolution when requested
"""
if asymmetric:
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=(kernel_size, 1),
stride=1,
padding=(padding, 0),
dilation=dilation,
bias=bias),
nn.BatchNorm2d(internal_channels),
activation(),
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=(1, kernel_size),
stride=1,
padding=(0, padding),
dilation=dilation,
bias=bias),
nn.BatchNorm2d(internal_channels),
activation())
else:
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=kernel_size,
stride=1,
padding=padding,
dilation=dilation,
bias=bias),
nn.BatchNorm2d(internal_channels),
activation())
"""
1x1
"""
self.ext_conv3 = nn.Sequential(
nn.Conv2d(internal_channels, channels, kernel_size=1, stride=1, bias=bias),
nn.BatchNorm2d(channels),
activation())
"""
regularization (spatial dropout)
"""
self.ext_regul = nn.Dropout2d(p=dropout_prob)
"""
activation
"""
self.out_activation = activation()
def forward(self, x):
main = x
# print(type(x))
# print("==========")
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
out = main + ext
return self.out_activation(out)
"""
Bottleneck with downsample
"""
class DownsamplingBottleneck(nn.Module):
def __init__(self, in_channels, out_channels, internal_ratio=4, return_indices=False, dropout_prob=0, bias=False,
relu=True):
super(DownsamplingBottleneck, self).__init__()
self.return_indices = return_indices
"""
internal_ratio check
"""
if internal_ratio <= 1 or internal_ratio > in_channels:
raise RuntimeError("Value out of range. Expected value in the "
"interval [1, {0}], got internal_scale={1}."
.format(in_channels, internal_ratio))
internal_channels = in_channels // internal_ratio
if (relu):
activation = nn.ReLU
else:
activation = nn.PReLU
"""
MaxPool2d
"""
self.main_max1 = nn.MaxPool2d(2, stride=2, return_indices=return_indices)
"""
2x2 2 downsample
"""
self.ext_conv1 = nn.Sequential(
nn.Conv2d(in_channels, internal_channels, kernel_size=2, stride=2, bias=bias),
nn.BatchNorm2d(internal_channels),
activation())
self.ext_conv2 = nn.Sequential(
nn.Conv2d(internal_channels, internal_channels, kernel_size=3, stride=1, padding=1, bias=bias),
nn.BatchNorm2d(internal_channels),
activation())
self.ext_conv3 = nn.Sequential(
nn.Conv2d(internal_channels, out_channels, kernel_size=1, stride=1, bias=bias),
nn.BatchNorm2d(out_channels),
activation())
self.ext_regul = nn.Dropout2d(p=dropout_prob)
self.out_activation = activation()
def forward(self, x):
if (self.return_indices):
main, max_indices = self.main_max1(x)
else:
main = self.main_max1(x)
max_indices = None  # keep the return value well-defined when indices are not requested
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Main branch channel padding
n, ch_ext, h, w = ext.size()
ch_main = main.size()[1]
padding = torch.zeros(n, ch_ext - ch_main, h, w)
# Before concatenating, check if main is on the CPU or GPU and
# convert padding accordingly
if main.is_cuda:
padding = padding.cuda()
# Concatenate
main = torch.cat((main, padding), 1)
# Add main and extension branches
out = main + ext
return self.out_activation(out), max_indices
"""
Bottleneck with upsampling
"""
class UpsamplingBottleneck(nn.Module):
def __init__(self, in_channels, out_channels, internal_ratio=4, dropout_prob=0, bias=False, relu=True):
super(UpsamplingBottleneck, self).__init__()
if internal_ratio <= 1 or internal_ratio > in_channels:
raise RuntimeError("Value out of range. Expected value in the "
"interval [1, {0}], got internal_scale={1}. "
.format(in_channels, internal_ratio))
internal_channels = in_channels // internal_ratio
if relu:
activation = nn.ReLU
else:
activation = nn.PReLU
self.main_conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(out_channels))
self.main_unpool1 = nn.MaxUnpool2d(kernel_size=2)
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
in_channels, internal_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(internal_channels), activation())
"""
Transposed convolution
"""
self.ext_tconv1 = nn.ConvTranspose2d(
internal_channels,
internal_channels,
kernel_size=2,
stride=2,
bias=bias)
self.ext_tconv1_bnorm = nn.BatchNorm2d(internal_channels)
self.ext_tconv1_activation = activation()
# 1x1 expansion convolution
self.ext_conv2 = nn.Sequential(
nn.Conv2d(internal_channels, out_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(out_channels), activation())
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after concatenating the branches
self.out_activation = activation()
def forward(self, x, max_indices, output_size):
# Main branch shortcut
main = self.main_conv1(x)
main = self.main_unpool1(main, max_indices, output_size=output_size)
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_tconv1(ext, output_size=output_size)
ext = self.ext_tconv1_bnorm(ext)
ext = self.ext_tconv1_activation(ext)
ext = self.ext_conv2(ext)
ext = self.ext_regul(ext)
# Add main and extension branches
out = main + ext
return self.out_activation(out)
class ENet(nn.Module):
def __init__(self):
super(ENet, self).__init__()
binary_seg=2
embedding_dim=5
num_classes=8
encoder_relu = False
decoder_relu = True
## init
self.initial_block = InitialBlock(3, 16, relu=encoder_relu)
# Stage 1 - Encoder -share
self.downsample1_0 = DownsamplingBottleneck(16, 64, return_indices=True, dropout_prob=0.01, relu=encoder_relu)
self.regular1_1 = Bottleneck(64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_2 = Bottleneck(64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_3 = Bottleneck(64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_4 = Bottleneck(64, padding=1, dropout_prob=0.01, relu=encoder_relu)
# Stage 2 - Encoder
self.downsample2_0 = DownsamplingBottleneck(64, 128, return_indices=True, dropout_prob=0.1, relu=encoder_relu)
self.regular2_1 = Bottleneck(128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_2 = Bottleneck(128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric2_3 = Bottleneck(128, kernel_size=5, padding=2, asymmetric=True, dropout_prob=0.1,
relu=encoder_relu)
self.dilated2_4 = Bottleneck(128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.regular2_5 = Bottleneck(128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_6 = Bottleneck(128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric2_7 = Bottleneck(128, kernel_size=5, asymmetric=True, padding=2, dropout_prob=0.1,
relu=encoder_relu)
self.dilated2_8 = Bottleneck(128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# Stage 3 - Encoder -for binary
self.b_regular3_0 = Bottleneck(128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.b_dilated3_1 = Bottleneck(128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.b_asymmetric3_2 = Bottleneck(128, kernel_size=5, padding=2, asymmetric=True, dropout_prob=0.1,
relu=encoder_relu)
self.b_dilated3_3 = Bottleneck(128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.b_regular3_4 = Bottleneck(128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.b_dilated3_5 = Bottleneck(128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.b_asymmetric3_6 = Bottleneck(128, kernel_size=5, asymmetric=True, padding=2, dropout_prob=0.1,
relu=encoder_relu)
self.b_dilated3_7 = Bottleneck(128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# Stage 3 - Encoder -for embedded
self.e_regular3_0 = Bottleneck(128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.e_dilated3_1 = Bottleneck(128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.e_asymmetric3_2 = Bottleneck(128, kernel_size=5, padding=2, asymmetric=True, dropout_prob=0.1,
relu=encoder_relu)
self.e_dilated3_3 = Bottleneck(128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.e_regular3_4 = Bottleneck(128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.e_dilated3_5 = Bottleneck(128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.e_asymmetric3_6 = Bottleneck(128, kernel_size=5, asymmetric=True, padding=2, dropout_prob=0.1,
relu=encoder_relu)
self.e_dilated3_7 = Bottleneck(128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# binary branch
self.upsample_binary_4_0 = UpsamplingBottleneck(128, 64, dropout_prob=0.1, relu=decoder_relu)
self.regular_binary_4_1 = Bottleneck(64, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.regular_binary_4_2 = Bottleneck(64, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.upsample_binary_5_0 = UpsamplingBottleneck(64, 16, dropout_prob=0.1, relu=decoder_relu)
self.regular_binary_5_1 = Bottleneck(16, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.binary_transposed_conv = nn.ConvTranspose2d(16, binary_seg, kernel_size=3, stride=2, padding=1, bias=False)
# embedding branch
self.upsample_embedding_4_0 = UpsamplingBottleneck(128, 64, dropout_prob=0.1, relu=decoder_relu)
self.regular_embedding_4_1 = Bottleneck(64, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.regular_embedding_4_2 = Bottleneck(64, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.upsample_embedding_5_0 = UpsamplingBottleneck(64, 16, dropout_prob=0.1, relu=decoder_relu)
self.regular_embedding_5_1 = Bottleneck(16, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.embedding_transposed_conv = nn.ConvTranspose2d(16, embedding_dim, kernel_size=3, stride=2, padding=1,
bias=False)
def forward(self, x):
# TODO
# Initial block
##256x512
input_size = x.size()
##batch_size, 16, 128x256
x = self.initial_block(x)
# Stage 1 - Encoder-share
##64x128
stage1_input_size = x.size()
x, max_indices1_0 = self.downsample1_0(x)
#->2,64,64,128
x = self.regular1_1(x)
x = self.regular1_2(x)
x = self.regular1_3(x)
x = self.regular1_4(x)
# Stage 2 - Encoder -share
##2,128,32,64
stage2_input_size = x.size()
x, max_indices2_0 = self.downsample2_0(x)
x = self.regular2_1(x)
x = self.dilated2_2(x)
x = self.asymmetric2_3(x)
x = self.dilated2_4(x)
x = self.regular2_5(x)
x = self.dilated2_6(x)
x = self.asymmetric2_7(x)
x = self.dilated2_8(x)
# Stage 3 - Encoder
##2,128, 32x64
b_x = self.b_regular3_0(x)
b_x = self.b_dilated3_1(b_x)
b_x = self.b_asymmetric3_2(b_x)
b_x = self.b_dilated3_3(b_x)
b_x = self.b_regular3_4(b_x)
b_x = self.b_dilated3_5(b_x)
b_x = self.b_asymmetric3_6(b_x)
b_x = self.b_dilated3_7(b_x)
e_x = self.e_regular3_0(x)
e_x = self.e_dilated3_1(e_x)
e_x = self.e_asymmetric3_2(e_x)
e_x = self.e_dilated3_3(e_x)
e_x = self.e_regular3_4(e_x)
e_x = self.e_dilated3_5(e_x)
e_x = self.e_asymmetric3_6(e_x)
e_x = self.e_dilated3_7(e_x)
# binary branch 2,64,64,128
x_binary = self.upsample_binary_4_0(b_x, max_indices2_0, output_size=stage2_input_size)
x_binary = self.regular_binary_4_1(x_binary)
x_binary = self.regular_binary_4_2(x_binary)
x_binary = self.upsample_binary_5_0(x_binary, max_indices1_0, output_size=stage1_input_size)# 2,16,128,256
x_binary = self.regular_binary_5_1(x_binary)
binary_final_logits = self.binary_transposed_conv(x_binary, output_size=input_size)#2,1,256,512
# embedding branch
x_embedding = self.upsample_embedding_4_0(e_x, max_indices2_0, output_size=stage2_input_size)
x_embedding = self.regular_embedding_4_1(x_embedding)
x_embedding = self.regular_embedding_4_2(x_embedding)
x_embedding = self.upsample_embedding_5_0(x_embedding, max_indices1_0, output_size=stage1_input_size)
x_embedding = self.regular_embedding_5_1(x_embedding)
instance_notfinal_logits = self.embedding_transposed_conv(x_embedding, output_size=input_size)
return binary_final_logits, instance_notfinal_logits
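# --- Shape sanity check (illustrative sketch) ---
# The comments in forward() assume 256x512 inputs; under that assumption the
# binary branch should come back as (N, 2, 256, 512) and the embedding branch
# as (N, 5, 256, 512) with the defaults set in __init__.
if __name__ == '__main__':
    net = ENet()
    dummy = torch.randn(2, 3, 256, 512)
    binary_logits, embedding_logits = net(dummy)
    print(binary_logits.shape, embedding_logits.shape)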
|
nilq/baby-python
|
python
|
'''
A flask application for controlled experiment on
the attention on clickbait healdines
'''
# imports
from flask import Flask, render_template, url_for, redirect, request, jsonify, session
from flask_session import Session
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime, date, timedelta
import random , string
import json
import datetime
import requests
# import os
# initializing the App and database
app = Flask(__name__)
SESSION_TYPE = 'filesystem'
app.config['SQLALCHEMY_DATABASE_URI']='sqlite:///store.db'
db = SQLAlchemy(app)
app.config.from_object(__name__)
Session(app)
#-------------------------------------------------
# model for storage of page transactions
class Transactions(db.Model):
timestamp = db.Column(db.String)
ip=db.Column(db.String)
tran_id = db.Column(db.String, primary_key=True)
u_id = db.Column(db.String)
article_id = db.Column(db.String)
position = db.Column(db.Integer)
time_before_click = db.Column(db.String)
time_on_page = db.Column(db.String)
sequence = db.Column(db.Integer)
class Users(db.Model):
timestamp = db.Column(db.String)
u_id = db.Column(db.String, primary_key=True)
age = db.Column(db.String)
gender = db.Column(db.String)
residence = db.Column(db.String)
edu_level = db.Column(db.String)
edu_stream = db.Column(db.String)
news_source = db.Column(db.String)
news_interest = db.Column(db.String)
#-------------------------------------------------
# function for generation of random string
def generate_random_string(stringLength=10):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
# to generate 6 news objects
def generate_news_objects():
news = []
choices = [0,0,0,1,1,1]
random.shuffle(choices)
for i in range(0,6):
if(choices[i] == 0) :
headline = json_data['articles'][i]['cb_headline']
article_id = str(i)+'0'
else:
headline = json_data['articles'][i]['ncb_headline']
article_id = str(i)+'1'
paragraphs = json_data['articles'][i]['paragraphs']
news.append({
'headline':headline,
'paragraphs':paragraphs,
'article_id':article_id
})
random.shuffle(news)
return news
# read data json file
with open('data.json') as file:
json_file = file.read()
json_data = json.loads(json_file)
#-------------------------------------------------
# PAGE 1
# app route : root
@app.route('/')
def index():
session['articles_visited'] = []
session['sequence'] = 0
session['u_id'] = generate_random_string(10)
return render_template('index.html')
# PAGE 2
# app route : launch
@app.route('/launch')
def launch():
session['news_objects'] = generate_news_objects()
return render_template('launch.html')
# PAGE 3
# app route : headlines
@app.route('/headlines')
def headlines():
news_objects = session.get('news_objects')
sequence = session.get('sequence')
h0 = news_objects[0]['headline']
h1 = news_objects[1]['headline']
h2 = news_objects[2]['headline']
h3 = news_objects[3]['headline']
h4 = news_objects[4]['headline']
h5 = news_objects[5]['headline']
return render_template('headlines.html', h0=h0, h1=h1, h2=h2, h3=h3, h4=h4, h5=h5, sequence=sequence)
# PAGE 4
# app route : article
@app.route('/article')
def article():
news_objects = session.get('news_objects')
# generate transaction id
session['transaction_id'] = generate_random_string(15)
# position of news link on web matrix
session['position'] = request.args.get('position')
# time spent on page before clicking on news link
session['time_spent'] = request.args.get('time_spent')
news_piece = news_objects[int(session.get('position'))]
session['article_id'] = news_piece['article_id']
headline = news_piece['headline']
paragraphs = news_piece['paragraphs']
# add article id to visited array, for recall test
session['articles_visited'].append(session.get('article_id'))
return render_template('article.html', headline=headline, paragraphs=paragraphs)
# PAGE 5
# app route : log_transactions
@app.route('/log_transaction')
def log_transaction():
u_id = session.get('u_id')
sequence = session.get('sequence')
position = session.get('position')
time_spent = session.get('time_spent')
article_id = session.get('article_id')
transaction_id = session.get('transaction_id')
session['sequence'] = sequence + 1
sequence = session.get('sequence')
ts = datetime.datetime.now().timestamp()
read_time = request.args.get('read_time')
ip = request.remote_addr
new_transaction = Transactions(timestamp=ts,ip=ip,tran_id=transaction_id,u_id=u_id,article_id=article_id,\
position=position,time_before_click=time_spent,time_on_page=read_time, sequence=sequence)
db.session.add(new_transaction)
db.session.commit()
if sequence == 3:
sequence = 0
# return redirect('/recall_test')
return redirect('/details')
else:
return redirect('/headlines')
# app route : end
@app.route('/end')
def end():
return render_template('end.html')
@app.route('/details')
def details():
return render_template('details.html')
# save demographic form data submission
@app.route('/form_data', methods=['GET', 'POST'])
def form_data():
u_id = session.get('u_id')
age = request.args.get('age')
gender = request.args.get('gender')
residence = request.args.get('residence')
edu_level = request.args.get('education_level')
edu_stream = request.args.get('education_stream')
news_source = request.args.get('newsSource')
news_interest = request.args.get('newsInterest')
ts = datetime.datetime.now().timestamp()
new_user = Users(timestamp=ts,u_id=u_id,age=age,gender=gender,residence=residence, edu_level=edu_level, edu_stream=edu_stream,news_source=news_source, news_interest=news_interest)
db.session.add(new_user)
db.session.commit()
return redirect('/end')
# ---------------------------------------
if __name__ == "__main__":
app.run(debug=True)
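# --- Setup note (sketch, not part of the original app) ---
# The Transactions and Users tables must exist before the first request; with
# Flask-SQLAlchemy that is typically done once, e.g. from an interactive shell:
#
# from app import db   # assuming this file is saved as app.py
# db.create_all()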
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Scrape all the quotes (author, text and tags) from http://quotes.toscrape.com/
# and follow the pagination links with Scrapy.
import scrapy
class QuotesSpider(scrapy.Spider):
name = "quotes"
allowed_domains = ["toscrape.com"]
start_urls = ['http://quotes.toscrape.com']
def parse(self, response):
self.log('I just visited: ' + response.url)
for quote in response.css('div.quote'):
item = {
'author_name':quote.css('small.author::text').extract_first(),
'text':quote.css('span.text::text').extract_first(),
'tags':quote.css('a.tag::text').extract(),
}
yield item
#follow pagination link
next_page_url = response.css('li.next > a::attr(href)').extract_first()
if next_page_url:
next_page_url = response.urljoin(next_page_url)
yield scrapy.Request(url=next_page_url, callback=self.parse)
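# --- Running the spider (usage note) ---
# Placed inside a Scrapy project's spiders/ package, the crawl can be launched
# and its items exported with the standard CLI, e.g.:
#   scrapy crawl quotes -o quotes.json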
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
from fmow_helper import (
BASELINE_CATEGORIES, MIN_WIDTHS, WIDTHS, centrality, softmax, lerp, create_submission,
csv_parse, read_merged_Plog
)
BASELINE_CNN_NM = 'baseline/data/output/predictions/soft-predictions-cnn-no_metadata.txt'
BASELINE_CNN = 'baseline/data/output/predictions/soft-predictions-cnn-with_metadata.txt'
BASELINE_LSTM = 'baseline/data/output/predictions/soft-predictions-lstm-with_metadata.txt'
def P_baseline():
"""
Baseline predicted probabilities, ensembled from:
- CNN, no metadata
- CNN, with metadata
- LSTM, with metadata
"""
nP_nm_cnn = pd.read_csv(BASELINE_CNN_NM, names=BASELINE_CATEGORIES, index_col=0).sort_index()
nP_cnn = pd.read_csv(BASELINE_CNN, names=BASELINE_CATEGORIES, index_col=0).sort_index()
P_lstm = pd.read_csv(BASELINE_LSTM, names=BASELINE_CATEGORIES, index_col=0).sort_index()
P_cnn = nP_cnn.div(nP_cnn.sum(1).round(), 0)
P_nm_cnn = nP_nm_cnn.div(nP_nm_cnn.sum(1).round(), 0)
P_m_test = lerp(0.56, P_cnn, P_lstm)
P_test = lerp(0.07, P_m_test, P_nm_cnn)
return P_test
def P_no_baseline():
"""
Predicted probabilities before ensembling with baseline.
"""
test = csv_parse('working/metadata/boxes-test-rgb.csv')
Plog_test = read_merged_Plog()
Plog = Plog_test.groupby(test.ID).mean()
df = test.groupby('ID').first()
# The prediction above doesn't use any image metadata.
# We remedy that by applying basic priors about the dataset.
assert Plog.index.isin(df.index).all()
assert df.width_m.isin([500, 1500, 5000]).all()
Plog = Plog.apply(lambda ser:
ser.where(df.width_m >= MIN_WIDTHS[ser.name], -np.inf) - 1.2 * ~df.width_m.loc[ser.index].isin(WIDTHS[ser.name])
if ser.name!='false_detection' else ser)
df2 = df.loc[Plog.index]
r = centrality(df2)
Plog['false_detection'] += (.5 + .7 * (df2.width_m==500)) * (2. * (r>=.3) - .5) - 1
return softmax(Plog)
def P_ensemble():
"""
Predicted probabilities for each class.
"""
eps = 1e-6
Plog_mix = lerp(0.71, np.log(P_baseline()+eps), np.log(P_no_baseline()+eps))
Plog_mix['false_detection'] -= 0.43
P_mix = softmax(Plog_mix)
P_mix['flooded_road'] = lerp(0.4, P_mix['flooded_road']**.5, pd.read_csv(BASELINE_LSTM, names=BASELINE_CATEGORIES, index_col=0).sort_index()['flooded_road']**.5)**2
P_mix = P_mix.div(P_mix.sum(1), 0)
return P_mix
def submission():
"""
Returns a single prediction for each object.
"""
return create_submission(P_ensemble())
if __name__ == '__main__':
import sys
output_file, = sys.argv[1:]
submission().to_csv(output_file)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
"""
Copyright 2014 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import signal
import argparse
import cgi
import BaseHTTPServer
import base64
import json
import errno
import requests
import threading
import psutil
import socket
import subprocess
import shlex
import time
import copy
import binascii
from Crypto.Hash import SHA256 as HashAlg
from Crypto.PublicKey import RSA as CryptoKey
from Crypto import Random
from Crypto.Signature import PKCS1_PSS as CryptoSigner
import logging
logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
log = logging.getLogger()
log.setLevel( logging.INFO )
import syndicate
import syndicate.ms.syntool as syntool
import syndicate.util.watchdog as watchdog
import syndicate.util.provisioning as provisioning
import syndicate.observer.cred as observer_cred
# watchdog names
SYNDICATE_UG_WATCHDOG_NAME = "syndicate-ug"
SYNDICATE_RG_WATCHDOG_NAME = "syndicate-rg"
SYNDICATE_AG_WATCHDOG_NAME = "syndicate-ag"
#-------------------------------
def make_UG_argv( program, syndicate_url, principal_id, volume_name, gateway_name, key_password, user_pkey_pem, mountpoint, hostname=None, debug=False ):
# NOTE: run in foreground; watchdog handles the rest
hostname_str = ""
if hostname is not None:
hostname_str = "-H %s" % hostname
debug_str = ""
if debug:
debug_str = "-d2"
return "%s -f %s -m %s -u %s -v %s -g %s -K %s -P '%s' %s %s" % (program, debug_str, syndicate_url, principal_id, volume_name, gateway_name, key_password, user_pkey_pem, hostname_str, mountpoint )
#-------------------------------
def make_RG_argv( program, syndicate_url, principal_id, volume_name, gateway_name, key_password, user_pkey_pem, hostname=None, debug=False ):
hostname_str = ""
if hostname is not None:
hostname_str = "-H %s" % hostname
debug_str = ""
if debug:
debug_str = "-d2"
return "%s %s -m %s -u %s -v %s -g %s -K %s -P '%s' %s" % (program, debug_str, syndicate_url, principal_id, volume_name, gateway_name, key_password, user_pkey_pem, hostname_str)
#-------------------------------
def start_UG( syndicate_url, principal_id, volume_name, gateway_name, key_password, user_pkey_pem, mountpoint, uid_name=None, gid_name=None, hostname=None, debug=False ):
# generate the command, and pipe it over
# NOTE: do NOT execute the command directly! it contains sensitive information on argv,
# which should NOT become visible to other users via /proc
command_str = make_UG_argv( SYNDICATE_UG_WATCHDOG_NAME, syndicate_url, principal_id, volume_name, gateway_name, key_password, user_pkey_pem, mountpoint, hostname=hostname, debug=debug )
log.info("Starting UG (%s)" % SYNDICATE_UG_WATCHDOG_NAME )
# start the watchdog
pid = watchdog.run( SYNDICATE_UG_WATCHDOG_NAME, [SYNDICATE_UG_WATCHDOG_NAME, '-v', volume_name, '-m', mountpoint], command_str, uid_name=uid_name, gid_name=gid_name )
if pid < 0:
log.error("Failed to make UG watchdog %s, rc = %s" % (SYNDICATE_UG_WATCHDOG_NAME, pid))
return pid
#-------------------------------
def start_RG( syndicate_url, principal_id, volume_name, gateway_name, key_password, user_pkey_pem, uid_name=None, gid_name=None, hostname=None, debug=False ):
# generate the command, and pipe it over
# NOTE: do NOT execute the command directly! it contains sensitive information on argv,
# which should NOT become visible to other users via /proc
command_str = make_RG_argv( SYNDICATE_RG_WATCHDOG_NAME, syndicate_url, principal_id, volume_name, gateway_name, key_password, user_pkey_pem, hostname=hostname, debug=debug )
log.info("Starting RG (%s)" % SYNDICATE_RG_WATCHDOG_NAME )
# start the watchdog
pid = watchdog.run( SYNDICATE_RG_WATCHDOG_NAME, [SYNDICATE_RG_WATCHDOG_NAME, '-R', '-v', volume_name], command_str, uid_name=uid_name, gid_name=gid_name )
if pid < 0:
log.error("Failed to make RG watchdog %s, rc = %s" % (SYNDICATE_RG_WATCHDOG_NAME, pid))
return pid
#-------------------------------
def stop_gateway_watchdog( pid ):
# stop a watchdog, given a PID.
# return 0 on success, -1 on error
# tell the watchdog to die, so it shuts down the UG
try:
os.kill( pid, signal.SIGTERM )
except OSError, oe:
if oe.errno != errno.ESRCH:
# NOT due to the process dying after we checked for it
log.exception(oe)
return -1
except Exception, e:
log.exception(e)
return -1
return 0
#-------------------------------
def stop_UG( volume_name, mountpoint=None ):
# stop a UG, given its mountpoint and volume name
# this method is idempotent
query_attrs = { "volume": volume_name }
if mountpoint is not None:
query_attrs["mountpoint"] = mountpoint
mounted_UGs = watchdog.find_by_attrs( SYNDICATE_UG_WATCHDOG_NAME, query_attrs )
if len(mounted_UGs) > 0:
for proc in mounted_UGs:
rc = stop_gateway_watchdog( proc.pid )
if rc != 0:
return rc
return 0
#-------------------------------
def stop_RG( volume_name ):
# stop an RG
running_RGs = watchdog.find_by_attrs( SYNDICATE_RG_WATCHDOG_NAME, {"volume": volume_name} )
if len(running_RGs) > 0:
for proc in running_RGs:
rc = stop_gateway_watchdog( proc.pid )
if rc != 0:
return rc
return 0
#-------------------------------
def ensure_UG_running( syndicate_url, principal_id, volume_name, gateway_name, key_password, user_pkey_pem, mountpoint=None, check_only=False, uid_name=None, gid_name=None, hostname=None, debug=False ):
"""
Ensure that a User Gateway is running on a particular mountpoint.
Return 0 on success
Return negative on error.
"""
if mountpoint is None:
log.error("Missing mountpout. Pass mountpoint=...")
return -errno.EINVAL
# make sure a mountpoint exists
rc = ensure_UG_mountpoint_exists( mountpoint, uid_name=uid_name, gid_name=gid_name )
if rc != 0:
log.error("Failed to ensure mountpoint %s exists" % mountpoint)
return rc
# is there a UG running at this mountpoint?
mounted_UGs = watchdog.find_by_attrs( SYNDICATE_UG_WATCHDOG_NAME, {"volume": volume_name, "mountpoint": mountpoint} )
if len(mounted_UGs) == 1:
# we're good!
logging.info("UG for %s at %s already running; PID = %s" % (volume_name, mountpoint, mounted_UGs[0].pid))
return mounted_UGs[0].pid
elif len(mounted_UGs) > 1:
# too many! probably in the middle of starting up
logging.error("Multiple UGs running for %s on %s...?" % (volume_name, mountpoint))
return -errno.EAGAIN
else:
logging.error("No UG running for %s on %s" % (volume_name, mountpoint))
if not check_only:
pid = start_UG( syndicate_url, principal_id, volume_name, gateway_name, key_password, user_pkey_pem, mountpoint, uid_name=uid_name, gid_name=gid_name, hostname=hostname, debug=debug )
if pid < 0:
log.error("Failed to start UG in %s at %s, rc = %s" % (volume_name, mountpoint, pid))
return pid
else:
return 0
#-------------------------
def check_UG_mounted( mountpoint, fstype=None ):
"""
See if a UG is mounted, by walking /proc/mounts
"""
fd = None
mounts = None
try:
fd = open("/proc/mounts", "r")
mounts = fd.read()
fd.close()
except IOError, ie:
logging.error("Failed to read /proc/mounts, errno = %s" % ie.errno )
return -ie.errno
except OSError, oe:
logging.error("Failed to read /proc/mounts, errno = %s" % oe.errno )
return -oe.errno
finally:
if fd is not None:
fd.close()
fd = None
mount_lines = mounts.strip().split("\n")
for mount in mount_lines:
# format: FS MOUNTPOINT ...
mount_parts = mount.split()
mount_fstype = mount_parts[2]
mount_dir = mount_parts[1]
if mount_dir.rstrip("/") == mountpoint.rstrip("/"):
# something's mounted here...
if fstype is not None:
if fstype == mount_fstype:
return True
else:
# something else is mounted here
return False
else:
# we don't care about the fstype
return True
# nothing mounted here
return False
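# Illustrative parsing sketch (the entry below is made up, not taken from a real host):
# a /proc/mounts line such as
#   "syndicate-ug /mnt/syndicate/vol1 fuse.syndicate rw 0 0"
# splits so that mount_parts[1] is the mount directory "/mnt/syndicate/vol1" and
# mount_parts[2] is the filesystem type "fuse.syndicate".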
#-------------------------
def ensure_UG_not_mounted( mountpoint, UG_fstype=None ):
"""
Ensure that a directory does not have a UG running on it.
Return 0 on success, negative otherwise
"""
if not os.path.exists( mountpoint ):
return 0
mounted = check_UG_mounted( mountpoint, fstype=UG_fstype )
if mounted:
# try unmounting
rc = subprocess.call(["/bin/fusermount", "-u", mountpoint], stderr=None )
if rc != 0:
# fusermount failed...
logging.error("Failed to unmount %s, fusermount exit status %s" % (mountpoint, rc))
return -errno.EPERM
else:
# verify unmounted
mounted = check_UG_mounted( mountpoint, fstype=UG_fstype )
if mounted:
# still mounted, so the unmount failed
logging.error("Failed to unmount %s" % mountpoint)
return -errno.EAGAIN
return 0
#-------------------------------
def ensure_UG_stopped( volume_name, mountpoint=None, UG_fstype=None ):
"""
Ensure a UG is no longer running.
"""
# stop the process
rc = stop_UG( volume_name, mountpoint=mountpoint )
if rc != 0:
log.error("Failed to stop UG in %s at %s, rc = %s" % (volume_name, mountpoint, rc))
if mountpoint is not None:
# ensure it's not mounted
rc = ensure_UG_not_mounted( mountpoint, UG_fstype=UG_fstype )
if rc != 0:
logging.error("Failed to ensure UG is not mounted on %s, rc = %s" % (mountpoint, rc))
return rc
# remove the directory
ensure_UG_mountpoint_absent( mountpoint )
return rc
#-------------------------------
def ensure_RG_running( syndicate_url, principal_id, volume_name, gateway_name, key_password, user_pkey_pem, check_only=False, uid_name=None, gid_name=None, hostname=None, debug=False ):
"""
Ensure an RG is running. Return the PID on success.
"""
# is there an RG running for this volume?
running_RGs = watchdog.find_by_attrs( SYNDICATE_RG_WATCHDOG_NAME, {"volume": volume_name} )
if len(running_RGs) == 1:
# we're good!
logging.info("RG for %s already running; PID = %s" % (volume_name, running_RGs[0].pid))
return running_RGs[0].pid
elif len(running_RGs) > 1:
# too many! probably in the middle of starting up
logging.error("Multiple RGs running for %s...?" % (volume_name))
return -errno.EAGAIN
else:
logging.error("No RG running for %s" % (volume_name))
if not check_only:
pid = start_RG( syndicate_url, principal_id, volume_name, gateway_name, key_password, user_pkey_pem, uid_name=uid_name, gid_name=gid_name, hostname=hostname, debug=debug )
if pid < 0:
log.error("Failed to start RG in %s, rc = %s" % (volume_name, pid))
return pid
else:
# not running
return -errno.ENOENT
#-------------------------------
def ensure_RG_stopped( volume_name ):
"""
Ensure that the RG is stopped.
"""
rc = stop_RG( volume_name )
if rc != 0:
log.error("Failed to stop RG in %s, rc = %s" % (volume_name, rc))
return rc
#-------------------------------
def ensure_AG_running( syndicate_url, principal_id, volume_name, gateway_name, key_password, user_pkey_pem, check_only=False, uid_name=None, gid_name=None, hostname=None, debug=False ):
# TODO
pass
#-------------------------------
def ensure_AG_stopped( volume_name ):
# TODO
pass
#-------------------------------
def make_UG_mountpoint_path( mountpoint_dir, volume_name ):
"""
Generate the path to a mountpoint.
"""
vol_dirname = volume_name.replace("/", ".")
vol_mountpoint = os.path.join( mountpoint_dir, vol_dirname )
return vol_mountpoint
#-------------------------------
def ensure_UG_mountpoint_exists( mountpoint, uid_name=None, gid_name=None ):
"""
Make a mountpoint (i.e. a directory)
"""
rc = 0
try:
os.makedirs( mountpoint, mode=0777 )
if uid_name is not None and gid_name is not None:
os.system("chown %s.%s %s" % (uid_name, gid_name, mountpoint))
return 0
except OSError, oe:
if oe.errno != errno.EEXIST:
return -oe.errno
else:
return 0
except Exception, e:
log.exception(e)
return -errno.EPERM
#-------------------------
def ensure_UG_mountpoint_absent( mountpoint ):
"""
Ensure that a mountpoint no longer exists
"""
try:
os.rmdir( mountpoint )
except OSError, oe:
if oe.errno != errno.ENOENT:
log.error("Failed to remove unused mountpoint %s, errno = %s" % (mountpoint, oe.errno))
except IOError, ie:
if ie.errno != errno.ENOENT:
log.error("Failed to remove unused mountpoint %s, errno = %s" % (mountpoint, ie.errno))
#-------------------------
def list_running_gateways_by_volume():
"""
Find the set of running gateways, grouped by volume.
return a dictionary with the structure of:
{ volume_name : { gateway_type: { "pids": [gateway_pid] } } }
"""
watchdog_names = {
"UG": SYNDICATE_UG_WATCHDOG_NAME,
"RG": SYNDICATE_RG_WATCHDOG_NAME,
"AG": SYNDICATE_AG_WATCHDOG_NAME
}
watchdog_name_to_type = dict( [(v, k) for (k, v) in watchdog_names.items()] )
ret = {}
for gateway_type in ["UG", "RG", "AG"]:
watchdog_name = watchdog_names[ gateway_type ]
running_watchdog_procs = watchdog.find_by_attrs( watchdog_name, {} )
# from these, find out which volumes
for running_watchdog_proc in running_watchdog_procs:
cmdline = watchdog.get_proc_cmdline( running_watchdog_proc )[0]
watchdog_attrs = watchdog.parse_proc_attrs( cmdline )
# find the volume name
volume_name = watchdog_attrs.get("volume", None)
if volume_name is None:
# nothing to do
continue
if not ret.has_key( volume_name ):
# add volume record
ret[volume_name] = {}
if not ret[volume_name].has_key( gateway_type ):
# add gateway record
ret[volume_name][gateway_type] = {}
if not ret[volume_name][gateway_type].has_key( "pids" ):
# add pids list
ret[volume_name][gateway_type]["pids"] = []
ret[volume_name][gateway_type]["pids"].append( running_watchdog_proc.pid )
return ret
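# Illustrative example of the structure returned above (volume name and PIDs are made up):
# { "vol1": { "UG": { "pids": [1234] }, "RG": { "pids": [1240] } } }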
#-------------------------
def gateway_directives_from_volume_info( volume_info, local_hostname, slice_secret ):
"""
Extract gateway directives from an observer's description of the volume for this host.
"""
gateway_directives = {
"UG": {},
"RG": {},
"AG": {}
}
volume_name = volume_info[ observer_cred.OPENCLOUD_VOLUME_NAME ]
gateway_name_prefix = volume_info[ observer_cred.OPENCLOUD_SLICE_GATEWAY_NAME_PREFIX ]
# get what we need...
try:
RG_hostname = local_hostname
AG_hostname = local_hostname
# global hostnames (i.e. multiple instantiations of the same gateway) override local hostnames.
if volume_info[ observer_cred.OPENCLOUD_SLICE_AG_GLOBAL_HOSTNAME ] is not None:
AG_hostname = volume_info[ observer_cred.OPENCLOUD_SLICE_AG_GLOBAL_HOSTNAME ]
if volume_info[ observer_cred.OPENCLOUD_SLICE_RG_GLOBAL_HOSTNAME ] is not None:
RG_hostname = volume_info[ observer_cred.OPENCLOUD_SLICE_RG_GLOBAL_HOSTNAME ]
gateway_directives["UG"]["instantiate"] = volume_info[ observer_cred.OPENCLOUD_SLICE_INSTANTIATE_UG ]
gateway_directives["UG"]["run"] = volume_info[ observer_cred.OPENCLOUD_SLICE_RUN_UG ]
gateway_directives["UG"]["port"] = volume_info[ observer_cred.OPENCLOUD_SLICE_UG_PORT ]
gateway_directives["UG"]["closure"] = volume_info[ observer_cred.OPENCLOUD_SLICE_UG_CLOSURE ]
gateway_directives["UG"]["name"] = provisioning.make_gateway_name( gateway_name_prefix, "UG", volume_name, local_hostname )
gateway_directives["UG"]["key_password"] = provisioning.make_gateway_private_key_password( gateway_directives["UG"]["name"], slice_secret )
gateway_directives["UG"]["hostname"] = local_hostname
gateway_directives["RG"]["instantiate"] = volume_info[ observer_cred.OPENCLOUD_SLICE_INSTANTIATE_RG ]
gateway_directives["RG"]["run"] = volume_info[ observer_cred.OPENCLOUD_SLICE_RUN_RG ]
gateway_directives["RG"]["port"] = volume_info[ observer_cred.OPENCLOUD_SLICE_RG_PORT ]
gateway_directives["RG"]["closure"] = volume_info[ observer_cred.OPENCLOUD_SLICE_RG_CLOSURE ]
gateway_directives["RG"]["name"] = provisioning.make_gateway_name( gateway_name_prefix, "RG", volume_name, RG_hostname )
gateway_directives["RG"]["key_password"] = provisioning.make_gateway_private_key_password( gateway_directives["RG"]["name"], slice_secret )
gateway_directives["RG"]["hostname"] = RG_hostname
gateway_directives["AG"]["instantiate"] = volume_info[ observer_cred.OPENCLOUD_SLICE_INSTANTIATE_AG ]
gateway_directives["AG"]["run"] = volume_info[ observer_cred.OPENCLOUD_SLICE_RUN_AG ]
gateway_directives["AG"]["port"] = volume_info[ observer_cred.OPENCLOUD_SLICE_AG_PORT ]
gateway_directives["AG"]["closure"] = volume_info[ observer_cred.OPENCLOUD_SLICE_AG_CLOSURE ]
gateway_directives["AG"]["name"] = provisioning.make_gateway_name( gateway_name_prefix, "AG", volume_name, AG_hostname )
gateway_directives["AG"]["key_password"] = provisioning.make_gateway_private_key_password( gateway_directives["AG"]["name"], slice_secret )
gateway_directives["AG"]["hostname"] = AG_hostname
except Exception, e:
log.exception(e)
log.error("Invalid configuration for Volume %s" % volume_name)
return None
return gateway_directives
#-------------------------
def apply_instantion_and_runchange( gateway_directives, inst_funcs, runchange_funcs ):
"""
Apply instantiation and runchange functions over gateways, based on observer directives.
inst_funcs must be a dict of {"gateway_type" : callable(bool)} that changes the instantiation of the gateway.
runchange_funcs must be a dict of {"gateway_type" : callable(bool)} that changes the running status of a gateway.
"""
# run alloc functions
for gateway_type in ["UG", "RG", "AG"]:
try:
gateway_name = gateway_directives[ gateway_type ][ "name" ]
instantiation_status = gateway_directives[ gateway_type ][ "instantiate" ]
rc = inst_funcs[ gateway_type ]( instantiation_status )
assert rc is not None, "Failed to set instantiation = %s for %s %s with %s, rc = %s" % (instantiation_status, gateway_type, gateway_name, inst_funcs[ gateway_type ], rc )
except Exception, e:
log.exception(e)
return -errno.EPERM
# run runchange funcs
for gateway_type in ["UG", "RG", "AG"]:
try:
gateway_name = gateway_directives[ gateway_type ][ "name" ]
run_status = gateway_directives[ gateway_type ][ "run" ]
rc = runchange_funcs[ gateway_type ]( run_status )
assert rc == 0, "Failed to set running = %s for %s %s with %s, rc = %s" % (run_status, gateway_type, gateway_name, runchange_funcs[ gateway_type ], rc)
except Exception, e:
log.exception(e)
return -errno.EPERM
return 0
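# Illustrative sketch only (not part of the original module): shows the shape of the
# callable maps expected by apply_instantion_and_runchange. Each callable receives the
# desired state for its gateway type and returns 0 on success (any non-None value is
# enough for the instantiation map).
def _example_noop_directive_application( gateway_directives ):
    noop = lambda desired_state: 0
    inst_funcs = { "UG": noop, "RG": noop, "AG": noop }
    runchange_funcs = { "UG": noop, "RG": noop, "AG": noop }
    return apply_instantion_and_runchange( gateway_directives, inst_funcs, runchange_funcs )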
#-------------------------
def start_stop_volume( config, volume_info, slice_secret, client=None, hostname=None, gateway_uid_name=None, gateway_gid_name=None, debug=False ):
"""
Ensure that the instantiation and run status of the gateways for a volume match what the observer thinks it is.
This method is idempotent.
"""
volume_name = volume_info[ observer_cred.OPENCLOUD_VOLUME_NAME ]
# get what we need...
try:
syndicate_url = volume_info[ observer_cred.OPENCLOUD_SYNDICATE_URL ]
principal_id = volume_info[ observer_cred.OPENCLOUD_VOLUME_OWNER_ID ]
principal_pkey_pem = volume_info[ observer_cred.OPENCLOUD_PRINCIPAL_PKEY_PEM ]
except:
log.error("Invalid configuration for Volume %s" % volume_name)
return -errno.EINVAL
if client is None:
# connect to syndicate
client = syntool.Client( principal_id, syndicate_url, user_pkey_pem=principal_pkey_pem, debug=config['debug'] )
mountpoint_dir = config['mountpoint_dir']
UG_mountpoint_path = make_UG_mountpoint_path( mountpoint_dir, volume_name )
volume_name = volume_info[ observer_cred.OPENCLOUD_VOLUME_NAME ]
if hostname is None:
hostname = socket.gethostname()
# build up the set of directives
gateway_directives = gateway_directives_from_volume_info( volume_info, hostname, slice_secret )
rc = apply_gateway_directives( client, syndicate_url, principal_id, principal_pkey_pem, volume_name, gateway_directives, UG_mountpoint_path,
gateway_uid_name=gateway_uid_name, gateway_gid_name=gateway_gid_name, debug=debug )
if rc != 0:
log.error("Failed to apply gateway directives to synchronize %s, rc = %s" % (volume_name, rc))
return rc
#-------------------------
def apply_gateway_directives( client, syndicate_url, principal_id, principal_pkey_pem, volume_name, gateway_directives, UG_mountpoint_path,
gateway_uid_name=None, gateway_gid_name=None, debug=False ):
"""
Apply the set of gateway directives.
"""
# functions that instantiate gateways.
# NOTE: they all take the same arguments, so what we're about to do is totally valid
inst_funcs_to_type = {
"UG": provisioning.ensure_UG_exists,
"RG": provisioning.ensure_RG_exists,
"AG": provisioning.ensure_AG_exists
}
# inner function for instantiating a gateway
def _gateway_inst_func( gateway_type, should_instantiate ):
log.info("Switch %s for %s to instantiation '%s'" % (gateway_type, volume_name, should_instantiate))
if should_instantiate == True:
new_gateway = inst_funcs_to_type[gateway_type]( client,
principal_id,
volume_name,
gateway_directives[gateway_type]["name"],
gateway_directives[gateway_type]["hostname"],
gateway_directives[gateway_type]["port"],
gateway_directives[gateway_type]["key_password"] )
if new_gateway is not None:
return 0
else:
return -errno.EPERM
elif should_instantiate == False:
rc = provisioning.ensure_gateway_absent( client, gateway_directives[gateway_type]["name"] )
if rc == True:
return 0
else:
return -errno.EPERM
else:
return 0
# construct partially-evaluated instantiation functions
inst_funcs = {
"UG": lambda should_instantiate: _gateway_inst_func( "UG", should_instantiate ),
"RG": lambda should_instantiate: _gateway_inst_func( "RG", should_instantiate ),
"AG": lambda should_instantiate: _gateway_inst_func( "AG", should_instantiate )
}
# inner function for ensuring a UG is running
def _runchange_UG( should_run ):
log.info("Switch UG for %s to run status '%s'" % (volume_name, should_run))
if should_run == True:
rc = ensure_UG_running( syndicate_url,
principal_id,
volume_name,
gateway_directives["UG"]["name"],
gateway_directives["UG"]["key_password"],
principal_pkey_pem,
mountpoint=UG_mountpoint_path,
check_only=False,
uid_name=gateway_uid_name,
gid_name=gateway_gid_name,
hostname=gateway_directives['UG']['hostname'],
debug=debug )
if rc < 0:
return rc
else:
return 0
elif should_run == False:
return ensure_UG_stopped( volume_name, mountpoint=UG_mountpoint_path )
else:
return 0
# inner function for ensuring an RG is running
def _runchange_RG( should_run ):
log.info("Switch RG for %s to run status '%s'" % (volume_name, should_run))
if should_run == True:
rc = ensure_RG_running( syndicate_url,
principal_id,
volume_name,
gateway_directives["RG"]["name"],
gateway_directives["RG"]["key_password"],
principal_pkey_pem,
check_only=False,
uid_name=gateway_uid_name,
gid_name=gateway_gid_name,
hostname=gateway_directives['RG']['hostname'],
debug=debug )
if rc < 0:
return rc
else:
return 0
elif should_run == False:
return ensure_RG_stopped( volume_name )
else:
return 0
# inner function for ensuring an AG is running
def _runchange_AG( should_run ):
log.info("Switch RG for %s to run status '%s'" % (volume_name, should_run))
if should_run == True:
rc = ensure_AG_running( syndicate_url,
principal_id,
volume_name,
gateway_directives["AG"]["name"],
gateway_directives["AG"]["key_password"],
principal_pkey_pem,
check_only=False,
uid_name=gateway_uid_name,
gid_name=gateway_gid_name,
hostname=gateway_directives['AG']['hostname'],
debug=debug )
if rc < 0:
return rc
else:
return 0
elif should_run == False:
return ensure_AG_stopped( volume_name )
else:
return 0
# functions that start gateways
runchange_funcs = {
"UG": lambda should_run: _runchange_UG( should_run ),
"RG": lambda should_run: _runchange_RG( should_run ),
"AG": lambda should_run: _runchange_AG( should_run )
}
rc = apply_instantion_and_runchange( gateway_directives, inst_funcs, runchange_funcs )
if rc != 0:
log.error("Failed to alter gateway status for volume %s, rc = %s" % (volume_name, rc) )
return rc
#-------------------------
def start_stop_all_volumes( config, volume_info_list, slice_secret, hostname=None, ignored=[], gateway_uid_name=None, gateway_gid_name=None, debug=False ):
"""
Synchronize the states of all volumes on this host, stopping any volumes that are no longer attached.
"""
success_volumes = []
failed_volumes = []
# methods that stop gateways, and take the volume name as their only argument
stoppers = {
"UG": ensure_UG_stopped, # NOTE: mountpoint can be ignored if we only care about the volume
"RG": ensure_RG_stopped,
"AG": ensure_AG_stopped
}
for volume_info in volume_info_list:
volume_name = volume_info[ observer_cred.OPENCLOUD_VOLUME_NAME ]
# get what we need...
try:
syndicate_url = volume_info[ observer_cred.OPENCLOUD_SYNDICATE_URL ]
principal_id = volume_info[ observer_cred.OPENCLOUD_VOLUME_OWNER_ID ]
principal_pkey_pem = volume_info[ observer_cred.OPENCLOUD_PRINCIPAL_PKEY_PEM ]
except:
log.error("Invalid configuration for Volume %s" % volume_name)
continue
# connect to syndicate
client = syntool.Client( principal_id, syndicate_url, user_pkey_pem=principal_pkey_pem, debug=config['debug'] )
log.info("Sync volume %s" % volume_name )
rc = start_stop_volume( config, volume_info, slice_secret, client=client, hostname=hostname, gateway_uid_name=gateway_uid_name, gateway_gid_name=gateway_gid_name, debug=debug )
if rc == 0:
log.info("Successfully sync'ed %s" % volume_name )
success_volumes.append( volume_name )
else:
log.error("Failed to sync volume %s, rc = %s" % (volume_name, rc))
failed_volumes.append( volume_name )
# find the running gateways
running_gateways = list_running_gateways_by_volume()
for volume_name, gateway_info in running_gateways.items():
# this volume isn't present, and we're not ignoring it?
if volume_name not in success_volumes and volume_name not in failed_volumes and volume_name not in ignored:
# volume isn't attached... kill all of its gateways
for gateway_type in ["UG", "RG", "AG"]:
rc = stoppers[gateway_type]( volume_name )
if rc != 0:
log.error("Failed to stop %s for %s, rc = %s" % (gateway_type, volume_name, rc))
failed_volumes.append( volume_name )
if len(failed_volumes) != 0:
return -errno.EAGAIN
else:
return 0
|
nilq/baby-python
|
python
|
import os
from setuptools import find_packages, setup
def read(*parts):
filename = os.path.join(os.path.dirname(__file__), *parts)
with open(filename, encoding="utf-8") as fp:
return fp.read()
setup(
name="django-formtools",
use_scm_version={"version_scheme": "post-release", "local_scheme": "dirty-tag"},
setup_requires=["setuptools_scm"],
url="https://django-formtools.readthedocs.io/en/latest/",
license="BSD",
description="A set of high-level abstractions for Django forms",
long_description=read("README.rst"),
long_description_content_type="text/x-rst",
author="Django Software Foundation",
author_email="foundation@djangoproject.com",
packages=find_packages(exclude=["tests", "tests.*"]),
include_package_data=True,
install_requires=["Django>=2.2"],
python_requires=">=3.6",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.1",
"Framework :: Django :: 3.2",
"Framework :: Django :: 4.0",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Internet :: WWW/HTTP",
],
zip_safe=False,
)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import cStringIO as StringIO
from fnmatch import fnmatch
import difflib
import os
import sys
def get_name(filename):
return os.path.splitext(filename)[0]
def list_dir(dir_path, filter_func):
return sorted(filter(filter_func, os.listdir(dir_path)), key=get_name)
def main():
test_dir = os.path.dirname(os.path.realpath(__file__))
testcase_dir = os.path.join(test_dir, 'testcases')
testcase_file = os.path.join(test_dir, 'testcases.js')
def is_testcase_file(filename):
return (
fnmatch(filename, '*.html') and
not fnmatch(filename, 'manual-test*') and
not fnmatch(filename, 'disabled-*'))
new_testcases = StringIO.StringIO()
new_testcases.write("""\
// This file is automatically generated by test/update-testcases.py.
// Disable tests by adding them to test/disabled-testcases
""")
new_testcases.write('var tests = [\n \'')
new_testcases.write(
'\',\n \''.join(list_dir(testcase_dir, is_testcase_file)))
new_testcases.write('\',\n];\n')
new_testcases.seek(0)
new_testcases_lines = new_testcases.readlines()
current_testcases_lines = file(testcase_file).readlines()
lines = list(difflib.unified_diff(
current_testcases_lines, new_testcases_lines,
fromfile=testcase_file, tofile=testcase_file))
if len(lines) == 0:
sys.stdout.write('Nothing to do\n')
sys.exit(0)
if not '--dry-run' in sys.argv:
file(testcase_file, 'w').write(''.join(new_testcases_lines))
sys.stdout.write(
'Updating %s with the following diff.\n' % testcase_file)
for line in lines:
sys.stdout.write(line)
sys.exit(1)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from ..utils import get_offset, verify_series
def ohlc4(open_, high, low, close, offset=None, **kwargs):
"""Indicator: OHLC4"""
# Validate Arguments
open_ = verify_series(open_)
high = verify_series(high)
low = verify_series(low)
close = verify_series(close)
offset = get_offset(offset)
# Calculate Result
ohlc4 = 0.25 * (open_ + high + low + close)
# Offset
if offset != 0:
ohlc4 = ohlc4.shift(offset)
# Name & Category
ohlc4.name = "OHLC4"
ohlc4.category = 'overlap'
return ohlc4
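# Minimal usage sketch (illustrative only; assumes this module is importable as part of
# pandas_ta and that pandas Series are passed in):
#
#   import pandas as pd
#   o, h, l, c = pd.Series([1.0]), pd.Series([2.0]), pd.Series([0.5]), pd.Series([1.5])
#   ohlc4(o, h, l, c)   # 0.25 * (1.0 + 2.0 + 0.5 + 1.5) == 1.25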
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2021 Incisive Technology Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
DO NOT EDIT THIS FILE!
This module is automatically generated using the hikaru.build program that turns
a Kubernetes swagger spec into the code for the hikaru.model module.
"""
from hikaru.meta import HikaruBase, HikaruDocumentBase
from typing import Optional, List, Dict
from dataclasses import dataclass, field
@dataclass
class RawExtension(HikaruBase):
"""
RawExtension is used to hold extensions in external versions. To use this, make a
field which has RawExtension as its type in your external, versioned struct, and
Object in your internal struct. You also need to register your various plugin types.
// Internal package: type MyAPIObject struct { runtime.TypeMeta `json:",inline"`
MyPlugin runtime.Object `json:"myPlugin"` } type PluginA struct { AOption string
`json:"aOption"` } // External package: type MyAPIObject struct { runtime.TypeMeta
`json:",inline"` MyPlugin runtime.RawExtension `json:"myPlugin"` } type PluginA struct
{ AOption string `json:"aOption"` } // On the wire, the JSON will look something like
this: { "kind":"MyAPIObject", "apiVersion":"v1", "myPlugin": { "kind":"PluginA",
"aOption":"foo", }, } So what happens? Decode first uses json or yaml to unmarshal the
serialized data into your external MyAPIObject. That causes the raw JSON to be stored,
but not unpacked. The next step is to copy (using pkg/conversion) into the internal
struct. The runtime package's DefaultScheme has conversion functions installed which
will unpack the JSON stored in RawExtension, turning it into the correct object type,
and storing it in the Object. (TODO: In the case where the object is of an unknown
type, a runtime.Unknown object will be created and stored.)
Full name: io.k8s.apimachinery.pkg.runtime.RawExtension
Attributes:
"""
class IntOrString(str):
"""
IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML
marshalling and unmarshalling, it produces or consumes the inner type. This allows you
to have, for example, a JSON field that can accept a name or number.
Full name: io.k8s.apimachinery.pkg.util.intstr.IntOrString
"""
class Quantity(str):
"""
Quantity is a fixed-point representation of a number. It provides convenient
marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64()
accessors. The serialization format is: <quantity> ::= <signedNumber><suffix> (Note
that <suffix> may be empty, from the "" case in <decimalSI>.) <digit> ::= 0 | 1 | ...
| 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> |
<digits>. | .<digits> <sign> ::= "+" | "-" <signedNumber> ::= <number> |
<sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI>
::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See:
http://physics.nist.gov/cuu/Units/binary.html) <decimalSI> ::= m | "" | k | M | G | T
| P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
<decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber> No matter which of the
three exponent forms is used, no quantity may represent a number greater than 2^63-1
in magnitude, nor may it have more than 3 decimal places. Numbers larger or more
precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be
extended in the future if we require larger or smaller quantities. When a Quantity is
parsed from a string, it will remember the type of suffix it had, and will use the
same type again when it is serialized. Before serializing, Quantity will be put in
"canonical form". This means that Exponent/suffix will be adjusted up or down (with a
corresponding increase or decrease in Mantissa) such that: a. No precision is lost b.
No fractional digits will be emitted c. The exponent (or suffix) is as large as
possible. The sign will be omitted unless the number is negative. Examples: 1.5 will
be serialized as "1500m" 1.5Gi will be serialized as "1536Mi" Note that the quantity
will NEVER be internally represented by a floating point number. That is the whole
point of this exercise. Non-canonical values will still parse as long as they are well
formed, but will be re-emitted in their canonical form. (So always use canonical form,
or don't diff.) This format is intended to make it difficult to use these numbers
without writing some sort of special handling code in the hopes that that will cause
implementors to also use a fixed point implementation.
Full name: io.k8s.apimachinery.pkg.api.resource.Quantity
"""
@dataclass
class Info(HikaruBase):
"""
Info contains versioning information. how we'll want to distribute that information.
Full name: io.k8s.apimachinery.pkg.version.Info
Attributes:
buildDate:
compiler:
gitCommit:
gitTreeState:
gitVersion:
goVersion:
major:
minor:
platform:
"""
buildDate: str
compiler: str
gitCommit: str
gitTreeState: str
gitVersion: str
goVersion: str
major: str
minor: str
platform: str
globs = dict(globals())
__all__ = [c.__name__ for c in globs.values()
if type(c) == type]
del globs
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2.7
import socket
import sys
import os
import json
import time
import serial
import availablePorts
import argparse
DATA_AMOUNT = 1024
MAXLINE = 40
def getArgs():
parser = argparse.ArgumentParser(prog=sys.argv[0])
parser.add_argument('-p','--port',type=int,default=10000,dest='port',help="the socket port, defaults to 10000")
parser.add_argument('serial_port',default=None,nargs='?',help="the serial port, e.g., '/dev/tty.wchusbserial1410'")
return vars(parser.parse_args())
def sendBytes(ser, bytesToSend):
try:
ser.write(bytesToSend)
response = ""
while True:
response += ser.read(10).decode('utf-8')
#print("resp:"+response)
if len(response) > 0 and response[-1] == '\4':
response = response[:-1] # remove 0x04
print("response:"+response)
break
time.sleep(0.1)
except KeyboardInterrupt:
pass
except Exception as ex:
print("Exception in sendBytes.")
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
return response
def moveCursor(ser, horizontal, vertical):
print("Moving cursor %d microspaces horizontally and %d microspaces vertically" % (horizontal, vertical))
# The horizontal and vertical microspaces are capped at +-32767
# If either value is negative, we will convert it to two's complement
# which will be easy to read on the Arduino
#
# We will convert each value to a 2-byte value in little endian
# format to transfer
if horizontal < 0:
horizontal += 65535 + 1 # two's complement conversion
hb0 = horizontal & 0xff # little byte
hb1 = (horizontal >> 8) & 0xff # big byte
if vertical < 0:
vertical += 65535 + 1 # two's complement conversion
vb0 = vertical & 0xff # little byte
vb1 = (vertical >> 8) & 0xff # big byte
bytesToSend = chr(0x05) + chr(hb0) + chr(hb1) + chr(vb0) + chr(vb1)
response = sendBytes(ser, bytesToSend)
return response
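# Illustrative helper (not used by the original code): shows the 16-bit two's-complement,
# little-endian encoding applied above. For example _encode_microspace_value(-300)
# returns (0xd4, 0xfe) and _encode_microspace_value(500) returns (0xf4, 0x01).
def _encode_microspace_value(value):
    if value < 0:
        value += 65535 + 1 # two's complement conversion
    return (value & 0xff, (value >> 8) & 0xff)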
def resetTypewriter(ser):
print("Resetting typewriter...")
bytesToSend = chr(0x04)
response = sendBytes(ser, bytesToSend)
#response = "Typewriter reset."
print(response)
return response
def returnCursor(ser,vertical):
print("Returning cursor...")
if vertical < 0:
vertical += 65535 + 1 # two's complement conversion
vb0 = vertical & 0xff # little byte
vb1 = (vertical >> 8) & 0xff # big byte
bytesToSend = chr(0x06) + chr(vb0) + chr(vb1)
response = sendBytes(ser, bytesToSend)
#response="Returned cursor to beginning of line."
print(response)
return response
def getMicrospaces(ser):
print("Getting microspace count...")
bytesToSend = chr(0x08)
response = sendBytes(ser, bytesToSend)
#response="Returned cursor to beginning of line."
print(response)
return response
def sendCharacters(ser, stringToPrint, spacing):
print('Sending "%s" with spacing %d...' % (stringToPrint,spacing))
# get the text length
textLen = len(stringToPrint)
# first two bytes are the file length (max: 65K)
# sent in little-endian format
stringHeader = chr(0x00) + chr(textLen & 0xff) + chr(textLen >> 8) + chr(spacing)
try:
# read MAXLINE characters at a time and send
while len(stringToPrint) > 0:
chars = stringToPrint[:MAXLINE]
stringToPrint = stringToPrint[MAXLINE:]
if chars == '':
break
ser.write(bytearray(stringHeader + chars,'utf-8'))
stringHeader = '' # not needed any more
if len(stringToPrint) > 0:
#print("sleeping")
#print("to print: " + stringToPrint)
time.sleep(3) # wait for characters to print
#sys.stdout.write(chars)
#sys.stdout.flush()
response = ""
while True:
response += ser.read(10).decode('utf-8')
#print("resp:"+response)
if len(response) > 0 and response[-1] == '\4':
response = response[:-1] # remove '\4'
break
time.sleep(0.1)
except KeyboardInterrupt:
pass
print("response: ")
print(response)
return response
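# Worked example of the header built above (values are made up): for a 300-character
# string printed with spacing 2, the header bytes are 0x00, then the length 300 in
# little-endian order (0x2c, 0x01), then the spacing byte 0x02.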
def runServer(ser,port):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = ('localhost', port)
print('starting up on %s port %s' % server_address)
sock.bind(server_address)
# Listen for incoming connections
sock.listen(1)
while True:
# Wait for a connection
print('Ready to receive commands!')
print('Waiting for a connection')
connection, client_address = sock.accept()
fullData = ''
try:
print('connection from %s port %s' % client_address)
# Receive the data in small chunks and retransmit it
while True:
data = connection.recv(DATA_AMOUNT)
if data:
# print('received "%s"' % data)
fullData += data
else:
print('no more data from %s port %s' % client_address)
args = json.loads(fullData)
if args['command'] == 'movecursor':
reply = moveCursor(ser, args['horizontal'],args['vertical'])
elif args['command'] == 'reset':
reply = resetTypewriter(ser)
elif args['command'] == 'return':
reply = returnCursor(ser,args['vertical'])
elif args['command'] == 'characters':
st = args['string_to_print']
if len(st) > 0:
reply = sendCharacters(ser, st,args['spacing'])
else:
reply = "Empty string, no characters sent."
elif args['command'] == 'microspaces':
reply = getMicrospaces(ser)
else:
reply = "not a known command"
connection.sendall(reply)
# print('sending "%s" to typewriter' % args)
connection.sendall('\0')
break
except Exception as ex:
print("Exception in runServer.")
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
finally:
# Clean up the connection
connection.close()
print
def setupSerial(portChoice):
print("Setting up...")
# if HARDCODED_PORT is '', then the user will get a choice
#HARDCODED_PORT = '/dev/tty.wchusbserial1410'
HARDCODED_PORT = ''
# choose port
if portChoice == None:
portChoiceInt = 0
if HARDCODED_PORT == '':
ports = availablePorts.serial_ports()
if len(ports) == 1:
# just choose the first
print("Choosing: " + ports[0])
portChoice = ports[0]
else:
if portChoiceInt == 0:
print("Please choose a port:")
for idx,p in enumerate(ports):
print("\t"+str(idx+1)+") "+p)
portChoiceInt = int(input())
portChoice = ports[portChoiceInt-1]
else:
portChoice = HARDCODED_PORT
# set up serial port
ser = serial.Serial(portChoice, 115200, timeout=0.1)
# wait a bit
time.sleep(2)
return ser
if __name__ == '__main__':
args = getArgs()
try:
ser = setupSerial(args['serial_port'])
runServer(ser,args['port'])
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
finally:
print("Closing serial connection.")
ser.close()
|
nilq/baby-python
|
python
|
# coding: utf-8
# In[1]:
import netCDF4
# In[2]:
#url = 'http://52.70.199.67:8080/opendap/ugrids/RENCI/maxele.63.nc'
url = 'http://ingria.coas.oregonstate.edu/opendap/ACTZ/ocean_his_3990_04-Dec-2015.nc'
# In[3]:
nc = netCDF4.Dataset(url)
# In[4]:
nc.variables.keys()
# In[5]:
nc.variables['lat_rho']
# In[6]:
nc.variables['lat_rho'][:5,:5]
# In[ ]:
|
nilq/baby-python
|
python
|
from django.db import models
from django.conf import settings
from mainapp.models import Product
class Order(models.Model):
FORMING = 'FM'
SENT_TO_PROCEED = 'STP'
PROCEEDED = 'PRD'
PAID = 'PD'
READY = 'RDY'
CANCEL = 'CNC'
ORDER_STATUS_CHOICES = (
(FORMING, 'формируется'),
(SENT_TO_PROCEED, 'отправлен в обработку'),
(PAID, 'оплачен'),
(PROCEEDED, 'обрабатывается'),
(READY, 'готов к выдаче'),
(CANCEL, 'отменен'),
)
user = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
created = models.DateTimeField(verbose_name='создан', auto_now_add=True)
updated = models.DateTimeField(verbose_name='обновлен', auto_now=True)
status = models.CharField(verbose_name='статус',
max_length=3,
choices=ORDER_STATUS_CHOICES,
default=FORMING)
is_active = models.BooleanField(verbose_name='активен', default=True)
class Meta:
ordering = ('-created',)
verbose_name = 'заказ'
verbose_name_plural = 'заказы'
def __str__(self):
return 'Текущий заказ: {}'.format(self.id)
# def get_total_quantity(self):
# items = self.orderitems.select_related()
# return sum(list(map(lambda x: x.quantity, items)))
def get_product_type_quantity(self):
items = self.orderitems.select_related()
return len(items)
# def get_total_cost(self):
# items = self.orderitems.select_related()
# return sum(list(map(lambda x: x.quantity * x.product.price, items)))
def get_summary(self):
items = self.orderitems.select_related()
return {
'total_cost': sum(list(map(lambda x: x.quantity * x.product.price,
items))),
'total_quantity': sum(list(map(lambda x: x.quantity, items)))
}
# override the method that deletes the object
def delete(self):
for item in self.orderitems.select_related():
item.product.quantity += item.quantity
item.product.save()
self.is_active = False
self.save()
class OrderItemQuerySet(models.QuerySet):
def delete(self, *args, **kwargs):
for object in self:
object.product.quantity += object.quantity
object.product.save()
super(OrderItemQuerySet, self).delete(*args, **kwargs)
class OrderItem(models.Model):
objects = OrderItemQuerySet.as_manager()
order = models.ForeignKey(Order,
related_name="orderitems",
on_delete=models.CASCADE)
product = models.ForeignKey(Product,
verbose_name='продукт',
on_delete=models.CASCADE)
quantity = models.PositiveIntegerField(verbose_name='количество',
default=0)
def get_product_cost(self):
return self.product.price * self.quantity
|
nilq/baby-python
|
python
|
import gluonts.mx.model.predictor as pred
from kensu.gluonts.ksu_utils.dataset_helpers import make_dataset_reliable
from kensu.utils.helpers import eventually_report_in_mem
from gluonts.dataset.common import ListDataset
from kensu.utils.kensu_provider import KensuProvider
from kensu.gluonts.model.forecast import SampleForecast
class RepresentableBlockPredictor(pred.RepresentableBlockPredictor):
def predict(self, Y, *args, **kwargs):
Y, old_Field, dep_fields = make_dataset_reliable(Y)
original_result = list(super(RepresentableBlockPredictor, self).predict(dataset=Y, *args, **kwargs))
if isinstance(Y, ListDataset):
Y.list_data = old_Field
deps = []
kensu = KensuProvider().instance()
for element in dep_fields:
orig_ds = eventually_report_in_mem(
kensu.extractors.extract_data_source(element, kensu.default_physical_location_ref,
logical_naming=kensu.logical_naming))
orig_sc = eventually_report_in_mem(kensu.extractors.extract_schema(orig_ds, element))
deps.append(orig_sc)
def e(iterable):
for b in iterable:
b.__class__ = SampleForecast
b.dependencies = deps
yield b
result = e(original_result)
return result
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from __future__ import print_function
from select import select
import termios
import os
import sys
import optparse
import subprocess
import random
import time
#import cv2
import curses
#from awscli.customizations.emr.constants import TRUE
from keras.optimizers import RMSprop, Adam
from keras.layers.recurrent import LSTM
from keras.models import Sequential, load_model
from keras.layers import Dense, Conv2D, Flatten
from keras.callbacks import TensorBoard
#import readscreen3
import numpy as np
import pandas as pd
import datetime
from time import time
import matplotlib.pyplot as plt
from operator import add
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
def get_options():
optParser = optparse.OptionParser()
optParser.add_option("--nogui", action="store_true",
default=False, help="run the commandline version of sumo")
options, args = optParser.parse_args()
return options
def constrained_sum_sample_pos(n, total):
"""Return a randomly chosen list of n positive integers summing to total.
Each such list is equally likely to occur."""
dividers = sorted(random.sample(range(1, total), n - 1))
return [a - b for a, b in zip(dividers + [total], [0] + dividers)]
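# Worked example (illustrative only): for n=4 and total=100, the sampled dividers might
# be [20, 45, 70]; zipping [20, 45, 70, 100] with [0, 20, 45, 70] and subtracting gives
# [20, 25, 25, 30], i.e. four positive integers that sum to 100.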
def generate_routefile_random(episode_length, total_vehicles):
N_ROADS = 4
division = constrained_sum_sample_pos(N_ROADS, total_vehicles)
traffic = []
for i in np.arange(len(division)):
traffic.append(division[i] * 0.6)
traffic.append(division[i] * 0.2)
traffic.append(division[i] * 0.2)
with open("data/cross.rou.xml", "w") as routes:
print("""<routes>
<route id="r0" edges="51o 1i 2o 52i"/>
<route id="r1" edges="51o 1i 4o 54i"/>
<route id="r2" edges="51o 1i 3o 53i"/>
<route id="r3" edges="54o 4i 3o 53i"/>
<route id="r4" edges="54o 4i 1o 51i"/>
<route id="r5" edges="54o 4i 2o 52i"/>
<route id="r6" edges="52o 2i 1o 51i"/>
<route id="r7" edges="52o 2i 4o 54i"/>
<route id="r8" edges="52o 2i 3o 53i"/>
<route id="r9" edges="53o 3i 4o 54i"/>
<route id="r10" edges="53o 3i 1o 51i"/>
<route id="r11" edges="53o 3i 2o 52i"/>""", file=routes)
for i in np.arange(len(traffic)):
print(
'<flow id="mixed%i" begin="0" end="%i" number="%i" route="r%i" type="mixed" departLane="random" departPosLat="random"/>' % (
i, episode_length, traffic[i], i), file = routes)
print("</routes>", file=routes)
print('TRAFFIC CONFIGURATION - ')
for i in np.arange(len(traffic)):
print('Lane %i - %i' % (i+1, traffic[i]))
# The program looks like this
# <tlLogic id="0" type="static" programID="0" offset="0">
# the locations of the tls are NESW
# <phase duration="31" state="GrGr"/>
# <phase duration="6" state="yryr"/>
# <phase duration="31" state="rGrG"/>
# <phase duration="6" state="ryry"/>
# </tlLogic>
def generate_routefile(left_qty, up_qty):
with open("data/cross.rou.xml", "w") as routes:
print("""<routes>
<!--<vTypeDistribution id="mixed">-->
<!--<vType id="car" vClass="passenger" speedDev="0.2" latAlignment="compact" probability="0.3"/>-->
<!--<vType id="moped" vClass="moped" speedDev="0.4" latAlignment="compact" probability="0.7"/>-->
<!--</vTypeDistribution>-->
<route id="r0" edges="51o 1i 2o 52i"/>
<route id="r1" edges="51o 1i 4o 54i"/>
<route id="r2" edges="51o 1i 3o 53i"/>
<route id="r3" edges="54o 4i 3o 53i"/>
<route id="r4" edges="54o 4i 1o 51i"/>
<route id="r5" edges="54o 4i 2o 52i"/>
<route id="r6" edges="52o 2i 1o 51i"/>
<route id="r7" edges="52o 2i 4o 54i"/>
<route id="r8" edges="52o 2i 3o 53i"/>
<route id="r9" edges="53o 3i 4o 54i"/>
<route id="r10" edges="53o 3i 1o 51i"/>
<route id="r11" edges="53o 3i 2o 52i"/>
<vehicle id='motorcycle0' type='motorcycle' route='r0' depart='5'/>
<vehicle id='motorcycle1' type='motorcycle' route='r1' depart='5'/>
<vehicle id='motorcycle2' type='motorcycle' route='r2' depart='5'/>
<vehicle id='motorcycle3' type='motorcycle' route='r3' depart='5'/>
<vehicle id='motorcycle4' type='motorcycle' route='r4' depart='5'/>
<vehicle id='motorcycle5' type='motorcycle' route='r5' depart='10'/>
<vehicle id='motorcycle6' type='motorcycle' route='r6' depart='10'/>
<vehicle id='motorcycle7' type='motorcycle' route='r7' depart='10'/>
<vehicle id='motorcycle8' type='motorcycle' route='r8' depart='10'/>
<vehicle id='motorcycle9' type='motorcycle' route='r9' depart='10'/>
<vehicle id='passenger10' type='passenger' route='r10' depart='15'/>
<vehicle id='passenger11' type='passenger' route='r11' depart='15'/>
<vehicle id='passenger12' type='passenger' route='r0' depart='15'/>
<vehicle id='passenger13' type='passenger' route='r1' depart='15'/>
<vehicle id='passenger14' type='passenger' route='r2' depart='15'/>
<vehicle id='passenger15' type='passenger' route='r3' depart='20'/>
<vehicle id='passenger16' type='passenger' route='r4' depart='20'/>
<vehicle id='passenger17' type='passenger' route='r5' depart='20'/>
<vehicle id='passenger18' type='passenger' route='r6' depart='20'/>
<vehicle id='passenger19' type='passenger' route='r7' depart='20'/>
<vehicle id='passenger/van20' type='passenger/van' route='r8' depart='25'/>
<vehicle id='passenger/van21' type='passenger/van' route='r9' depart='25'/>
<vehicle id='passenger/van22' type='passenger/van' route='r10' depart='25'/>
<vehicle id='passenger/van23' type='passenger/van' route='r11' depart='25'/>
<vehicle id='passenger/van24' type='passenger/van' route='r0' depart='25'/>
<vehicle id='passenger/van25' type='passenger/van' route='r1' depart='30'/>
<vehicle id='passenger/van26' type='passenger/van' route='r2' depart='30'/>
<vehicle id='passenger/van27' type='passenger/van' route='r3' depart='30'/>
<vehicle id='passenger/van28' type='passenger/van' route='r4' depart='30'/>
<vehicle id='passenger/van29' type='passenger/van' route='r5' depart='30'/>
<vehicle id='truck30' type='truck' route='r6' depart='35'/>
<vehicle id='truck31' type='truck' route='r7' depart='35'/>
<vehicle id='truck32' type='truck' route='r8' depart='35'/>
<vehicle id='truck33' type='truck' route='r9' depart='35'/>
<vehicle id='truck34' type='truck' route='r10' depart='35'/>
<vehicle id='truck35' type='truck' route='r11' depart='40'/>
<vehicle id='truck36' type='truck' route='r0' depart='40'/>
<vehicle id='truck37' type='truck' route='r1' depart='40'/>
<vehicle id='truck38' type='truck' route='r2' depart='40'/>
<vehicle id='truck39' type='truck' route='r3' depart='40'/>
<vehicle id='bus40' type='bus' route='r4' depart='45'/>
<vehicle id='bus41' type='bus' route='r5' depart='45'/>
<vehicle id='bus42' type='bus' route='r6' depart='45'/>
<vehicle id='bus43' type='bus' route='r7' depart='45'/>
<vehicle id='bus44' type='bus' route='r8' depart='45'/>
<vehicle id='bus45' type='bus' route='r9' depart='50'/>
<vehicle id='bus46' type='bus' route='r10' depart='50'/>
<vehicle id='bus47' type='bus' route='r11' depart='50'/>
<vehicle id='bus48' type='bus' route='r0' depart='50'/>
<vehicle id='bus49' type='bus' route='r1' depart='50'/>
<vehicle id='bicycle50' type='bicycle' route='r2' depart='55'/>
<vehicle id='bicycle51' type='bicycle' route='r3' depart='55'/>
<vehicle id='bicycle52' type='bicycle' route='r4' depart='55'/>
<vehicle id='bicycle53' type='bicycle' route='r5' depart='55'/>
<vehicle id='bicycle54' type='bicycle' route='r6' depart='55'/>
<vehicle id='bicycle55' type='bicycle' route='r7' depart='60'/>
<vehicle id='bicycle56' type='bicycle' route='r8' depart='60'/>
<vehicle id='bicycle57' type='bicycle' route='r9' depart='60'/>
<vehicle id='bicycle58' type='bicycle' route='r10' depart='60'/>
<vehicle id='bicycle59' type='bicycle' route='r11' depart='60'/>
<vehicle id='motorcycle60' type='motorcycle' route='r0' depart='65'/>
<vehicle id='motorcycle61' type='motorcycle' route='r1' depart='65'/>
<vehicle id='motorcycle62' type='motorcycle' route='r2' depart='65'/>
<vehicle id='motorcycle63' type='motorcycle' route='r3' depart='65'/>
<vehicle id='motorcycle64' type='motorcycle' route='r4' depart='65'/>
<vehicle id='motorcycle65' type='motorcycle' route='r5' depart='70'/>
<vehicle id='motorcycle66' type='motorcycle' route='r6' depart='70'/>
<vehicle id='motorcycle67' type='motorcycle' route='r7' depart='70'/>
<vehicle id='motorcycle68' type='motorcycle' route='r8' depart='70'/>
<vehicle id='motorcycle69' type='motorcycle' route='r9' depart='70'/>
<vehicle id='passenger70' type='passenger' route='r10' depart='75'/>
<vehicle id='passenger71' type='passenger' route='r11' depart='75'/>
<vehicle id='passenger72' type='passenger' route='r0' depart='75'/>
<vehicle id='passenger73' type='passenger' route='r1' depart='75'/>
<vehicle id='passenger74' type='passenger' route='r2' depart='75'/>
<vehicle id='passenger75' type='passenger' route='r3' depart='80'/>
<vehicle id='passenger76' type='passenger' route='r4' depart='80'/>
<vehicle id='passenger77' type='passenger' route='r5' depart='80'/>
<vehicle id='passenger78' type='passenger' route='r6' depart='80'/>
<vehicle id='passenger79' type='passenger' route='r7' depart='80'/>
<vehicle id='passenger/van80' type='passenger/van' route='r8' depart='85'/>
<vehicle id='passenger/van81' type='passenger/van' route='r9' depart='85'/>
<vehicle id='passenger/van82' type='passenger/van' route='r10' depart='85'/>
<vehicle id='passenger/van83' type='passenger/van' route='r11' depart='85'/>
<vehicle id='passenger/van84' type='passenger/van' route='r0' depart='85'/>
<vehicle id='passenger/van85' type='passenger/van' route='r1' depart='90'/>
<vehicle id='passenger/van86' type='passenger/van' route='r2' depart='90'/>
<vehicle id='passenger/van87' type='passenger/van' route='r3' depart='90'/>
<vehicle id='passenger/van88' type='passenger/van' route='r4' depart='90'/>
<vehicle id='passenger/van89' type='passenger/van' route='r5' depart='90'/>
<vehicle id='truck90' type='truck' route='r6' depart='95'/>
<vehicle id='truck91' type='truck' route='r7' depart='95'/>
<vehicle id='truck92' type='truck' route='r8' depart='95'/>
<vehicle id='truck93' type='truck' route='r9' depart='95'/>
<vehicle id='truck94' type='truck' route='r10' depart='95'/>
<vehicle id='truck95' type='truck' route='r11' depart='100'/>
<vehicle id='truck96' type='truck' route='r0' depart='100'/>
<vehicle id='truck97' type='truck' route='r1' depart='100'/>
<vehicle id='truck98' type='truck' route='r2' depart='100'/>
<vehicle id='truck99' type='truck' route='r3' depart='100'/>
<vehicle id='bus100' type='bus' route='r4' depart='105'/>
<vehicle id='bus101' type='bus' route='r5' depart='105'/>
<vehicle id='bus102' type='bus' route='r6' depart='105'/>
<vehicle id='bus103' type='bus' route='r7' depart='105'/>
<vehicle id='bus104' type='bus' route='r8' depart='105'/>
<vehicle id='bus105' type='bus' route='r9' depart='110'/>
<vehicle id='bus106' type='bus' route='r10' depart='110'/>
<vehicle id='bus107' type='bus' route='r11' depart='110'/>
<vehicle id='bus108' type='bus' route='r0' depart='110'/>
<vehicle id='bus109' type='bus' route='r1' depart='110'/>
<vehicle id='bicycle110' type='bicycle' route='r2' depart='115'/>
<vehicle id='bicycle111' type='bicycle' route='r3' depart='115'/>
<vehicle id='bicycle112' type='bicycle' route='r4' depart='115'/>
<vehicle id='bicycle113' type='bicycle' route='r5' depart='115'/>
<vehicle id='bicycle114' type='bicycle' route='r6' depart='115'/>
<vehicle id='bicycle115' type='bicycle' route='r7' depart='120'/>
<vehicle id='bicycle116' type='bicycle' route='r8' depart='120'/>
<vehicle id='bicycle117' type='bicycle' route='r9' depart='120'/>
<vehicle id='bicycle118' type='bicycle' route='r10' depart='120'/>
<vehicle id='bicycle119' type='bicycle' route='r11' depart='120'/>
<vehicle id='motorcycle120' type='motorcycle' route='r0' depart='125'/>
<vehicle id='motorcycle121' type='motorcycle' route='r1' depart='125'/>
<vehicle id='motorcycle122' type='motorcycle' route='r2' depart='125'/>
<vehicle id='motorcycle123' type='motorcycle' route='r3' depart='125'/>
<vehicle id='motorcycle124' type='motorcycle' route='r4' depart='125'/>
<vehicle id='motorcycle125' type='motorcycle' route='r5' depart='130'/>
<vehicle id='motorcycle126' type='motorcycle' route='r6' depart='130'/>
<vehicle id='motorcycle127' type='motorcycle' route='r7' depart='130'/>
<vehicle id='motorcycle128' type='motorcycle' route='r8' depart='130'/>
<vehicle id='motorcycle129' type='motorcycle' route='r9' depart='130'/>
<vehicle id='passenger130' type='passenger' route='r10' depart='135'/>
<vehicle id='passenger131' type='passenger' route='r11' depart='135'/>
<vehicle id='passenger132' type='passenger' route='r0' depart='135'/>
<vehicle id='passenger133' type='passenger' route='r1' depart='135'/>
<vehicle id='passenger134' type='passenger' route='r2' depart='135'/>
<vehicle id='passenger135' type='passenger' route='r3' depart='140'/>
<vehicle id='passenger136' type='passenger' route='r4' depart='140'/>
<vehicle id='passenger137' type='passenger' route='r5' depart='140'/>
<vehicle id='passenger138' type='passenger' route='r6' depart='140'/>
<vehicle id='passenger139' type='passenger' route='r7' depart='140'/>
<vehicle id='passenger/van140' type='passenger/van' route='r8' depart='145'/>
<vehicle id='passenger/van141' type='passenger/van' route='r9' depart='145'/>
<vehicle id='passenger/van142' type='passenger/van' route='r10' depart='145'/>
<vehicle id='passenger/van143' type='passenger/van' route='r11' depart='145'/>
<vehicle id='passenger/van144' type='passenger/van' route='r0' depart='145'/>
<vehicle id='passenger/van145' type='passenger/van' route='r1' depart='150'/>
<vehicle id='passenger/van146' type='passenger/van' route='r2' depart='150'/>
<vehicle id='passenger/van147' type='passenger/van' route='r3' depart='150'/>
<vehicle id='passenger/van148' type='passenger/van' route='r4' depart='150'/>
<vehicle id='passenger/van149' type='passenger/van' route='r5' depart='150'/>
<vehicle id='truck150' type='truck' route='r6' depart='155'/>
<vehicle id='truck151' type='truck' route='r7' depart='155'/>
<vehicle id='truck152' type='truck' route='r8' depart='155'/>
<vehicle id='truck153' type='truck' route='r9' depart='155'/>
<vehicle id='truck154' type='truck' route='r10' depart='155'/>
<vehicle id='truck155' type='truck' route='r11' depart='160'/>
<vehicle id='truck156' type='truck' route='r0' depart='160'/>
<vehicle id='truck157' type='truck' route='r1' depart='160'/>
<vehicle id='truck158' type='truck' route='r2' depart='160'/>
<vehicle id='truck159' type='truck' route='r3' depart='160'/>
<vehicle id='bus160' type='bus' route='r4' depart='165'/>
<vehicle id='bus161' type='bus' route='r5' depart='165'/>
<vehicle id='bus162' type='bus' route='r6' depart='165'/>
<vehicle id='bus163' type='bus' route='r7' depart='165'/>
<vehicle id='bus164' type='bus' route='r8' depart='165'/>
<vehicle id='bus165' type='bus' route='r9' depart='170'/>
<vehicle id='bus166' type='bus' route='r10' depart='170'/>
<vehicle id='bus167' type='bus' route='r11' depart='170'/>
<vehicle id='bus168' type='bus' route='r0' depart='170'/>
<vehicle id='bus169' type='bus' route='r1' depart='170'/>
<vehicle id='bicycle170' type='bicycle' route='r2' depart='175'/>
<vehicle id='bicycle171' type='bicycle' route='r3' depart='175'/>
<vehicle id='bicycle172' type='bicycle' route='r4' depart='175'/>
<vehicle id='bicycle173' type='bicycle' route='r5' depart='175'/>
<vehicle id='bicycle174' type='bicycle' route='r6' depart='175'/>
<vehicle id='bicycle175' type='bicycle' route='r7' depart='180'/>
<vehicle id='bicycle176' type='bicycle' route='r8' depart='180'/>
<vehicle id='bicycle177' type='bicycle' route='r9' depart='180'/>
<vehicle id='bicycle178' type='bicycle' route='r10' depart='180'/>
<vehicle id='bicycle179' type='bicycle' route='r11' depart='180'/>
<vehicle id='motorcycle180' type='motorcycle' route='r0' depart='185'/>
<vehicle id='motorcycle181' type='motorcycle' route='r1' depart='185'/>
<vehicle id='motorcycle182' type='motorcycle' route='r2' depart='185'/>
<vehicle id='motorcycle183' type='motorcycle' route='r3' depart='185'/>
<vehicle id='motorcycle184' type='motorcycle' route='r4' depart='185'/>
<vehicle id='motorcycle185' type='motorcycle' route='r5' depart='190'/>
<vehicle id='motorcycle186' type='motorcycle' route='r6' depart='190'/>
<vehicle id='motorcycle187' type='motorcycle' route='r7' depart='190'/>
<vehicle id='motorcycle188' type='motorcycle' route='r8' depart='190'/>
<vehicle id='motorcycle189' type='motorcycle' route='r9' depart='190'/>
<vehicle id='passenger190' type='passenger' route='r10' depart='195'/>
<vehicle id='passenger191' type='passenger' route='r11' depart='195'/>
<vehicle id='passenger192' type='passenger' route='r0' depart='195'/>
<vehicle id='passenger193' type='passenger' route='r1' depart='195'/>
<vehicle id='passenger194' type='passenger' route='r2' depart='195'/>
<vehicle id='passenger195' type='passenger' route='r3' depart='200'/>
<vehicle id='passenger196' type='passenger' route='r4' depart='200'/>
<vehicle id='passenger197' type='passenger' route='r5' depart='200'/>
<vehicle id='passenger198' type='passenger' route='r6' depart='200'/>
<vehicle id='passenger199' type='passenger' route='r7' depart='200'/>
</routes>
""", file=routes)
lastVeh = 0
vehNr = 0
try:
sys.path.append(os.path.join(os.path.dirname(
__file__), '..', '..', '..', '..', "tools")) # tutorial in tests
sys.path.append(os.path.join(os.environ.get("SUMO_HOME", os.path.join(
os.path.dirname(__file__), "..", "..", "..")), "tools")) # tutorial in docs
from sumolib import checkBinary # noqa
except ImportError:
sys.exit(
"please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation (it should contain folders 'bin', 'tools' and 'docs')")
options = get_options()
# this script has been called from the command line. It will start sumo as a
# server, then connect and run
if options.nogui:
sumoBinary = checkBinary('sumo')
else:
sumoBinary = checkBinary('sumo-gui')
# first, generate the route file for this simulation
# this is the normal way of using traci. sumo is started as a
# subprocess and then the python script connects and runs
print("TraCI Started")
# State = State_Lengths()
# print(State.get_tails())
# states = State.get_tails
# runner = Runner()
# print(Runner().run)
def getPhaseState(transition_time):
num_lanes = 4
num_phases = 4
phase = traci.trafficlight.getPhase("0")
phaseState = np.zeros((transition_time,num_lanes,num_phases))
for i in range(transition_time):
for j in range(num_lanes):
phaseState[i][j][phase] = 1
return phaseState
def getState(transition_time): # made the order changes
newState = []
avg_qlength = 0
# transition_time_step_leftcount = 0
# transition_time_step_rightcount = 0
# transition_time_step_topcount = 0
# transition_time_step_bottomcount = 0
avg_leftcount = 0
avg_rightcount = 0
avg_bottomcount = 0
avg_topcount = 0
for _ in range(transition_time):
traci.simulationStep()
leftcount = 0
rightcount = 0
topcount = 0
bottomcount = 0
vehicleList = traci.vehicle.getIDList()
print("Traffic : ")
for id in vehicleList:
x, y = traci.vehicle.getPosition(id)
if x<110 and x>60 and y<130 and y>120:
leftcount+=1
else :
if x<120 and x>110 and y<110 and y>60:
bottomcount+=1
else :
if x<180 and x>130 and y<120 and y>110:
rightcount+=1
else :
if x<130 and x>120 and y<180 and y>130:
topcount+=1
print("Left : ", leftcount)
print("Right : ", rightcount)
print("Top : ", topcount)
print("Bottom : ", bottomcount)
avg_topcount += topcount
avg_bottomcount += bottomcount
avg_leftcount += leftcount
avg_rightcount += rightcount
# transition_time_step_bottomcount+= bottomcount
# transition_time_step_leftcount+= leftcount
# transition_time_step_rightcount+= rightcount
# transition_time_step_topcount+= topcount
state = [bottomcount / 40,
rightcount / 40,
topcount / 40,
leftcount / 40
]
avg_qlength += ((bottomcount + rightcount + topcount + leftcount)/4)
newState.insert(0, state)
# print (state)
# df = pd.DataFrame([[, 2]], columns=['a', 'b'])
# params_dict =
avg_qlength /= transition_time
avg_leftcount /= transition_time
avg_topcount /= transition_time
avg_rightcount /= transition_time
avg_bottomcount /= transition_time
avg_lane_qlength = [avg_leftcount, avg_topcount, avg_rightcount, avg_bottomcount]
newState = np.array(newState)
phaseState = getPhaseState(transition_time)
newState = np.dstack((newState, phaseState))
newState = np.expand_dims(newState, axis=0)
return newState, avg_qlength, avg_lane_qlength
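# Note on the returned state (added for clarity): every simulation step contributes a
# 4-vector of scaled lane counts, and getPhaseState() appends a one-hot encoding of the
# current traffic-light phase, so np.dstack() yields a (transition_time, 4, 5) array and
# np.expand_dims() turns it into (1, transition_time, 4, 5), the input shape that
# build_model() below expects.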
print("here")
import traci
def makeMove(action, transition_time):
if action == 1:
traci.trafficlight.setPhase("0", (int(traci.trafficlight.getPhase("0")) + 1) % 4)
# traci.simulationStep()
# traci.simulationStep()
# traci.simulationStep()
# traci.simulationStep()
return getState(transition_time)
def getReward(this_state, this_new_state):
num_lanes = 4
qLengths1 = []
qLengths2 = []
for i in range(num_lanes):
qLengths1.append(this_state[0][0][i][0])
qLengths2.append(this_new_state[0][0][i][0])
qLengths11 = [x + 1 for x in qLengths1]
qLengths21 = [x + 1 for x in qLengths2]
q1 = np.prod(qLengths11)
q2 = np.prod(qLengths21)
# print("Old State with product : ", q1)
#
# print("New State with product : ", q2)
#
#
# if q1 > q2:
# this_reward = 1
# else:
# this_reward = -1
this_reward = q1 - q2
if this_reward > 0:
this_reward = 1
elif this_reward < 0:
this_reward = -1
elif q2 > 1:
this_reward = -1
else:
this_reward = 0
return this_reward
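# Note on the reward above (added): it is the sign of the decrease of the product of
# (queue length + 1) over the four lanes, i.e. +1 if congestion shrank, -1 if it grew
# (or stayed unchanged while queues remain), and 0 only when both states are empty.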
def getRewardAbsolute(this_state, this_new_state):
num_lanes = 4
qLengths1 = []
qLengths2 = []
for i in range(num_lanes):
qLengths1.append(this_state[0][0][i][0])
qLengths2.append(this_new_state[0][0][i][0])
qLengths11 = [x + 1 for x in qLengths1]
qLengths21 = [x + 1 for x in qLengths2]
q1 = np.prod(qLengths11)
q2 = np.prod(qLengths21)
# print("Old State with product : ", q1)
#
# print("New State with product : ", q2)
#
#
# if q1 > q2:
# this_reward = 1
# else:
# this_reward = -1
this_reward = q1 - q2
return this_reward
def build_model(transition_time):
num_hidden_units_cnn = 10
num_actions = 2
model = Sequential()
model.add(Conv2D(num_hidden_units_cnn, kernel_size=(transition_time, 1), strides=1, activation='relu', input_shape=(transition_time, 4,5)))
# model.add(LSTM(8))
model.add(Flatten())
model.add(Dense(20, activation='relu'))
model.add(Dense(num_actions, activation='linear'))
opt = RMSprop(lr=0.00025)
model.compile(loss='mse', optimizer=opt)
return model
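# Note (added): the network above maps a (transition_time, 4, 5) state tensor (lane counts
# plus one-hot phase, as built by getState) to Q-values for the two actions used by
# makeMove(): 0 keeps the current phase, 1 advances to the next phase.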
def getWaitingTime(laneID):
return traci.lane.getWaitingTime(laneID)
num_episode = 1
discount_factor = 0.9
#epsilon = 1
epsilon_start = 1
epsilon_end = 0.01
epsilon_decay_steps = 3000
Average_Q_lengths = []
params_dict = [] #for graph writing
sum_q_lens = 0
AVG_Q_len_perepisode = []
transition_time = 8
target_update_time = 20
q_estimator_model = load_model("models/single intersection models/tradeoff_models_absreward/model_15.h5")
replay_memory_init_size = 150
replay_memory_size = 8000
batch_size = 32
print(q_estimator_model.summary())
epsilons = np.linspace(epsilon_start, epsilon_end, epsilon_decay_steps)
#generate_routefile_random(episode_time, num_vehicles)
#generate_routefile(290,10)
traci.start([sumoBinary, "-c", "data/cross.sumocfg",
"--tripinfo-output", "tripinfo.xml"])
traci.trafficlight.setPhase("0", 0)
nA = 2
total_t = 0
for episode in range(num_episode):
traci.load(["--start", "-c", "data/cross.sumocfg",
"--tripinfo-output", "tripinfo.xml"])
traci.trafficlight.setPhase("0", 0)
state, _, _ = getState(transition_time)
counter = 0
stride = 0
length_data_avg = []
count_data = []
delay_data_avg = []
delay_data_min = []
delay_data_max = []
delay_data_time = []
current_left_time = 0
current_top_time = 0
current_bottom_time = 0
current_right_time = 0
overall_lane_qlength = [0, 0, 0, 0]
num_cycles = 0
num_qlength_instances = 0
while traci.simulation.getMinExpectedNumber() > 0:
print("Episode # ", episode)
# print("Waiting time on lane 1i_0 = ",getWaitingTime("1i_0"))
print("Inside episode counter", counter)
counter += 1
total_t += 1
# batch_experience = experience[:batch_history]
prev_phase = traci.trafficlight.getPhase("0")
action = np.argmax(q_estimator_model.predict(state))
new_state, qlength, avg_lane_qlength = makeMove(action, transition_time)
new_phase = traci.trafficlight.getPhase("0")
print("Previous phase = ", prev_phase)
print("New phase = ", new_phase)
vehicleList = traci.vehicle.getIDList()
num_vehicles = len(vehicleList)
print("Number of cycles = ", num_cycles)
if num_vehicles:
avg = 0
max = 0
mini = 100
for id in vehicleList:
time = traci.vehicle.getAccumulatedWaitingTime(id)
if time > max:
max = time
if time < mini:
mini = time
avg += time
avg /= num_vehicles
delay_data_avg.append(avg)
delay_data_max.append(max)
delay_data_min.append(mini)
length_data_avg.append(qlength)
count_data.append(num_vehicles)
delay_data_time.append(traci.simulation.getCurrentTime() / 1000)
if traci.simulation.getCurrentTime() / 1000 < 2100:
overall_lane_qlength = list(map(add, overall_lane_qlength, avg_lane_qlength))
num_qlength_instances += 1
if prev_phase == 3 and new_phase == 0:
num_cycles += 1
if prev_phase == 0:
current_bottom_time += transition_time
if prev_phase == 1:
current_right_time += transition_time
if prev_phase == 2:
current_top_time += transition_time
if prev_phase == 3:
current_left_time += transition_time
state = new_state
overall_lane_qlength[:] = [x / num_qlength_instances for x in overall_lane_qlength]
current_right_time /= num_cycles
current_top_time /= num_cycles
current_left_time /= num_cycles
current_bottom_time /= num_cycles
avg_free_time = [current_left_time, current_top_time, current_right_time, current_bottom_time]
plt.plot(delay_data_time, delay_data_avg, 'b-', label='avg')
#plt.plot(delay_data_time, delay_data_min, 'g-', label='min')
#plt.plot(delay_data_time, delay_data_max,'r-', label='max')
plt.legend(loc='upper left')
plt.ylabel('Waiting time per minute')
plt.xlabel('Time in simulation (in s)')
plt.figure()
plt.plot(delay_data_time, length_data_avg, 'b-', label='avg')
plt.legend(loc='upper left')
plt.ylabel('Average Queue Length')
plt.xlabel('Time in simulation (in s)')
plt.figure()
plt.plot(delay_data_time, count_data, 'b-', label='avg')
plt.legend(loc='upper left')
plt.ylabel('Average Number of Vehicles in Map')
plt.xlabel('Time in simulation (in s)')
plt.figure()
label = ['Obstacle Lane abs reward', 'Top Lane w/ traffic', 'Right lane', 'Bottom lane']
index = np.arange(len(label))
plt.bar(index, avg_free_time, color=['red', 'green', 'blue', 'blue'])
plt.xlabel('Lane')
plt.ylabel('Average Green Time per Cycle')
plt.xticks(index, label)
plt.figure()
label = ['Obstacle Lane abs reward', 'Top Lane w/ traffic', 'Right lane', 'Bottom lane']
index = np.arange(len(label))
plt.bar(index, overall_lane_qlength, color=['red', 'green', 'blue', 'blue'])
plt.xlabel('Lane')
plt.ylabel('Average Q-length every 8 seconds')
plt.xticks(index, label)
plt.show()
AVG_Q_len_perepisode.append(sum_q_lens / 702)
sum_q_lens = 0
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 3 10:51:05 2016
@author: dyanni3
"""
# %% imports and prep
from threading import Lock
import numpy as np
from numpy.random import rand as r
from collections import defaultdict as d, defaultdict
from PIL import Image
from functools import reduce
from util import int2color, int2color_tuple, count_colors, has_colors
# RED = 0.2295
# RED = 0.1841900
# BLUE = 0.00254
# BLUE = 0.01234
RED = 1.0 / float(0xe41a1c)
BLUE = 1.0 / float(0x377eb8)
# BLUE = 1.0 / 0x4daf4a
class Lattice(object):
def __init__(self, size=100, slider=0, onlyRedBlue=False,
redAdvantage=1, blueAdvantage=1, defKillers=False, density=1,
numRatio=1, redGrowth=1, blueGrowth=1, deathRate=100000000,
antibioticDeath=1):
"""
:type slider: float, optional
if slider is 0 then only killing happens, if slider is 1 then only "random death"
and for a range between it's a mixture. Default 0.
:type onlyRedBlue: bool, optional
True means the lattice contains only red and blue bacteria. Defaults to False
:type size: int or tuple of ints, optional
Size of the lattice. If the given size is an int, the lattice is assumed to be
square, i.e. size=[value, value]. For a non-square lattice, use size=[x,y]. Defaults
to 100 for [100,100] lattice.
:type redAdvantage: float, optional
killing disparity, 1 means equal killers. Defaults to 1
:type blueAdvantage: float, optional
killing disparity, 1 means equal killers. Defaults to 1
:type redGrowth: float, optional
1 for equal growth. Defaults to 1
:type blueGrowth: float, optional
1 for equal growth. Defaults to 1
:type defKillers: bool, optional
if True (defective killers), then red and blue cells can't kill each other. Defaults
to False
:type density: float, optional
overall cell density at initialization of the lattice. Defaults to 1
:type numRatio: float, optional
overall number ratio (number of blue/ total number of cells). Default 1
"""
self.onlyRedBlue = onlyRedBlue
self.slider = slider
self.redGrowth = redGrowth
self.blueGrowth = blueGrowth
self.redAdvantage = redAdvantage
self.blueAdvantage = blueAdvantage
self.defKillers = defKillers
self.density = density
self.numRatio = numRatio
self.size = size
self.generation = 0
self.lock = Lock()
self.surface = None
self.counts = (0, 0, 0) # number of red, blue, green pixels
try:
self.x, self.y = size[1], size[0]
except TypeError:
self.x, self.y = size, size
self.rgb_image = np.empty((self.x, self.y, 3), dtype=np.uint8)
# if defective killers set to true then there's no random death either
# (no killing, no random death)
if defKillers:
self.slider = 0
self.lattice, self.killdict = self.create_red_blue_lattice(density, numRatio) \
if onlyRedBlue else \
self.create_other_lattice(density)
self.to_rgb_image()
def create_other_lattice(self, density):
"""
initialize the lattice with a bunch of different types of cells
(represented as different colors)
:param density:
"""
lattice = r(self.x, self.y)
if density != 1:
for bug in np.ravel(lattice):
if r() > density:
lattice[lattice == bug] = 0
# killdict is a hashtable containing the killing effectiveness for each color
killdict = d(list) # type: defaultdict[Any, float]
killdict[0] = 0
for color in np.ravel(lattice):
killdict[color] = r()
killdict[0] = 0
return lattice, killdict
def create_red_blue_lattice(self, density, numRatio):
"""
initialize the lattice to contain only red and blue cells and empty sites,
chosen randomly according to numRatio and density
:param density:
:param numRatio:
:return:
"""
try:
if density != 1:
return np.random.choice(
[0, RED, BLUE],
p=[1.0 - density, density * (1.0 - numRatio), density * numRatio],
size=(self.x, self.y)), None
else:
return np.random.choice([RED, BLUE], size=(self.x, self.y)), None
except ValueError:
print("ERROR: Density should be an integer or float")
exit(-1)
def set(self, i, j, value):
"""
Sets lattice value at pixel (i,j). Also updates rgb_image(i,j)
as well as red/blue counts.
:param i:
:param j:
:param value:
"""
self.lattice[i, j] = value
prev = has_colors(self.rgb_image[i, j])
color = self.rgb_image[i, j] = int2color(value)
self.surface.set_at((i, j), color)
x = has_colors(self.rgb_image[i, j])
c = self.counts
self.counts = (c[0] + x[0] - prev[0],
c[1] + x[1] - prev[1],
c[2] + x[2] - prev[2])
def evolve(self, n_steps=1):
"""
main function, moves the lattice forward n steps in time
:param n_steps:
"""
for t in range(n_steps):
self.generation += 1
# pick lattice site
i, j = self.random_site
# random death happens if slider>random float in [0,1]
if self.slider > r():
self.random_death(i, j)
# else killing/filling a la IBM happens
else:
n_blue, n_enemy, n_red, neighborhood = \
self.get_neighborhood(i, j)
# site is filled with red bact
if self.onlyRedBlue and self.is_red(i, j):
self.kill_red(i, j, n_blue, self.thresh)
# site is filled with a blue bacteria
elif self.onlyRedBlue and self.is_blue(i, j):
self.kill_blue(i, j, n_red, self.thresh)
elif n_enemy > 0 and not self.is_empty(i, j):
if self.has_enough_enemies(i, j, neighborhood):
self.kill(i, j)
# FILLING ....... #########
elif self.is_empty(i, j):
if self.onlyRedBlue and n_red + n_blue > 0:
self.fill_red_or_blue(i, j, n_blue, n_red)
elif n_enemy > 0:
if not self.fill_with_neighbor_color(i, j, neighborhood):
continue
@property
def thresh(self):
return 0.5 if self.x == 1 else 2
def get_neighborhood(self, i, j):
# get the neighborhood of the ith,jth 'pixel'
neighborhood = self.lattice[i - 1:i + 2, j - 1:j + 2]
# find number of species one (red, RED),
# species two (blue, BLUE)
n_blue = np.size(neighborhood[neighborhood == BLUE])
n_red = np.size(neighborhood[neighborhood == RED])
# total number of differently colored cells in neighborhood
n_enemy = np.size(neighborhood[neighborhood != self.lattice[i, j]])
return n_blue, n_enemy, n_red, neighborhood
def is_empty(self, i, j):
return self.lattice[i, j] == 0
def is_red(self, i, j):
return self.lattice[i, j] == RED
def is_blue(self, i, j):
return self.lattice[i, j] == BLUE
def fill_red_or_blue(self, i, j, n_blue, n_red):
if ((n_red * self.redGrowth + n_blue * self.blueGrowth) * r()) > 2:
if n_red * self.redGrowth * r() > n_blue * self.blueGrowth * r():
self.set(i, j, RED)
else:
self.set(i, j, BLUE)
else:
self.kill(i, j)
def fill_with_neighbor_color(self, i, j, neighborhood):
# find all the other colors in neighborhood
choices = np.ravel(neighborhood[neighborhood != 0])
# if no other cells in neighborhood then stay empty
if choices.size == 0:
self.kill(i, j)
return False
# fill with one of the other colors in neighborhood
# (according to number of cells)
choices = list(choices)
choices2 = [choice * (1 - self.killdict[choice]) for choice in choices]
choices2 = [choice / len(choices2) for choice in choices2]
zeroprob = 1 - sum(choices2)
choices2.append(zeroprob)
choices2 = np.array(choices2)
choices.append(0)
choices = np.array(choices)
self.set(i, j, np.random.choice(choices, p=choices2))
# self.lattice[i,j]=np.random.choice(np.ravel(neighborhood[neighborhood!=0]))
return True
def kill_blue(self, i, j, n_red, thresh):
if n_red * r() * self.redAdvantage > thresh and not self.defKillers:
self.set(i, j, 0)
def kill_red(self, i, j, n_blue, thresh):
"""
if number of blue cells * their killing advantage * random number > 2,
kill this red bacteria (replace with empty site)
:param i:
:param j:
:param n_blue:
:param thresh:
"""
if n_blue * r() * self.blueAdvantage > thresh and not self.defKillers:
self.kill(i, j)
def has_enough_enemies(self, i, j, neighborhood):
return self.enemy_weight(i, j, neighborhood) * r() > 2
def enemy_weight(self, i, j, neighborhood):
enemy_weight = 0
for enemy in np.ravel(neighborhood):
if enemy != 0 and enemy != self.lattice[i, j]:
try:
enemy_weight += self.killdict[enemy]
except TypeError:
print("ERROR")
pass
# enemy_weight=enemy_weight+self.killdict[enemy][0];
return enemy_weight
def kill(self, i, j):
self.set(i, j, 0)
def random_death(self, i, j):
self.set(i, j, np.random.choice(np.ravel(
self.lattice[i - 1:i + 2, j - 1:j + 2])))
@property
def random_site(self):
try:
j = np.random.randint(1, self.y - 2)
i = np.random.randint(1, self.x - 2)
except ValueError:
# this will happen if you've chosen your lattice to be one dimensional
i = 0
j = np.random.randint(0, self.y - 1)
return i, j
def to_rgb_image(self):
"""
Convert the lattice to an RGB image array and update the red/blue pixel counts
"""
r, g, b = (0, 0, 0)
# img = np.empty((self.x, self.y, 3), dtype=np.uint8)
for i in range(self.x):
for j in range(self.y):
x = self.lattice[i, j]
self.rgb_image[i, j] = int2color(x)
r += 1 if x == RED else 0
b += 1 if x == BLUE else 0
self.counts = (r, g, b)
return self.rgb_image
def view(self):
"""
Convert lattice to an image
:return:
RGB image of the lattice
"""
lu = list(map(int2color_tuple, np.ravel(self.lattice[:, :])))
imu = Image.new('RGB', [self.lattice.shape[1], self.lattice.shape[0]])
imu.putdata(lu)
print(reduce(count_colors, lu, [0, 0, 0]))
return imu
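# Usage note (added): Lattice.evolve() ends up calling set(), which draws via
# self.surface.set_at(); __init__ leaves self.surface as None, so a drawing surface with a
# set_at() method (e.g. a pygame Surface sized (x, y)) has to be assigned to lattice.surface
# before evolve() is called.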
|
nilq/baby-python
|
python
|
import tensorflow as tf
import numpy as np
from load_data import load_data
import sklearn.preprocessing as prep
from tensorflow.examples.tutorials.mnist import input_data
from sklearn.metrics import accuracy_score
class LR(object):
def __init__(self,
n_input=750,
n_class=2,
learning_rate=0.001,
):
self.x = tf.placeholder(tf.float32, [None, n_input])
self.y = tf.placeholder(tf.float32, [None, n_class])
self.w = tf.Variable(tf.zeros([n_input, n_class], dtype=tf.float32))
self.b = tf.Variable(tf.zeros([n_class], dtype=tf.float32))
self.init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(self.init)
self.pred = tf.nn.softmax(tf.add(tf.matmul(self.x, self.w), self.b))
# self.pred_ = np.argmax(self.pred, axis=1)
self.cost = tf.reduce_mean(-tf.reduce_sum(self.y*tf.log(self.pred), reduction_indices=1))
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(self.cost)
def fit(self, X, Y, train_epoch=25, batch_size=100):
for epoch in range(train_epoch):
total_batch = int(X.shape[0] / batch_size)
avg_cost = 0.
for i in range(total_batch):
batch_x = X[i * batch_size: (i + 1) * batch_size]
batch_y = Y[i * batch_size: (i + 1) * batch_size]
_, c = self.sess.run([self.optimizer, self.cost], feed_dict={self.x: batch_x, self.y: batch_y})
avg_cost += c/total_batch
# print 'epoch%s,' % str(epoch + 1), 'cost:', avg_cost
def predict_proba(self, X):
return self.sess.run(self.pred, feed_dict={self.x: X})
# def predict(self, X):
# return self.sess.run(self.pred_, feed_dict={self.x: X})
def test_LR():
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
def standard_scale(X_train, X_test):
preprocessor = prep.StandardScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
X_train, X_test, y_train, y_test = mnist.train.images, mnist.test.images, mnist.train.labels, mnist.test.labels
X_train, X_test = standard_scale(X_train, X_test)
print(y_train.shape)
lr = LR(n_input=784, n_class=10)
lr.fit(X_train, y_train)
y_test_pred = lr.predict_proba(X_test)
y_pred = np.argmax(y_test_pred, axis=1)
print(y_test)
print(accuracy_score(y_pred, np.argmax(y_test, axis=1)))
if __name__ == "__main__":
test_LR()
|
nilq/baby-python
|
python
|
# --------------------------------------------------------
# High Resolution Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Rao Fu, RainbowSecret
# --------------------------------------------------------
import os
import pdb
import logging
import torch.nn as nn
BN_MOMENTUM = 0.1
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
class BasicBlock(nn.Module):
"""Only replce the second 3x3 Conv with the TransformerBlocker"""
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
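if __name__ == "__main__":
# Minimal smoke test (added sketch, not part of the original file). It assumes torch is
# installed and checks that BasicBlock preserves the tensor shape when stride=1 and no
# downsample module is given, which the residual addition requires.
import torch
block = BasicBlock(inplanes=64, planes=64)
x = torch.randn(1, 64, 56, 56)
assert block(x).shape == x.shape
print("BasicBlock smoke test passed")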
|
nilq/baby-python
|
python
|
from fixtures.builder import FixtureBuilder
def build():
fixture = FixtureBuilder('TUFTestFixtureDelegated')\
.create_target('testtarget.txt')\
.publish(with_client=True)\
.delegate('unclaimed', ['level_1_*.txt'])\
.create_target('level_1_target.txt', signing_role='unclaimed')\
.publish(with_client=True)
# === Point of No Return ===
# Past this point, we don't re-export the client. This supports testing the
# client's own ability to pick up and trust new data from the repository.
fixture.add_key('targets')\
.add_key('snapshot')\
.invalidate()\
.publish()\
.revoke_key('targets')\
.revoke_key('snapshot')\
.invalidate()\
.publish()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Uso: test_fs.py part_file_name")
exit(1)
# testa tamanho do FS virtual total
statinfo = os.stat(sys.argv[1])
if statinfo.st_size != 4194304:
print("Tamanho invalido. Deve ter exatamente 4Mb (4194304).")
exit(1)
with open(sys.argv[1], "rb") as f:
# testa integridade do boot block
for i in xrange(1024):
b = f.read(1)
if ord(b) != 0xbb:
print("Boot block invalido no offset %d." % i)
exit(1)
print("Boot block: OK...")
# testa integridade do header da FAT16
b = f.read(2)
if not(ord(b[0]) == 0xff and ord(b[1]) == 0xfd):
print("Header da FAT16 invalido: identificador do boot record invalido.")
exit(1)
for i in xrange(8):
b = f.read(2)
if not(ord(b[0]) == 0xff and ord(b[1]) == 0xfe):
print("Header da FAT16 invalido: corpo do header FAT16 invalido.")
exit(1)
b = f.read(2)
if not(ord(b[0]) == 0xff and ord(b[1]) == 0xff):
print("Header da FAT16 invalido: end of FAT16 invalido.")
exit(1)
print("FAT header: OK...")
print("Filesystem: OK!")
|
nilq/baby-python
|
python
|
'''
We are given some logs of wood and want to cut them into smaller pieces of equal length; at least k pieces are required. Naturally, the longer each piece the better, so compute the maximum length of the small pieces that can be obtained.
Example
Sample 1
Input:
L = [232, 124, 456]
k = 7
Output: 114
Explanation: we can cut 7 pieces of 114 cm each, but not 7 pieces of 115 cm.
Sample 2
Input:
L = [1, 2, 3]
k = 7
Output: 0
Explanation: obviously we cannot meet the requirement.
Challenge
O(n log Len), where Len is the maximum length among the n logs
Notice
Wood lengths are measured in centimeters. The original logs have positive integer lengths, and the cut pieces must also have integer lengths. If at least k pieces cannot be obtained, return 0.
'''
class Solution:
"""
@param L: Given n pieces of wood with length L[i]
@param k: An integer
@return: The maximum length of the small pieces
Algorithm: binary search on the answer
The problem gives n logs L[i] that must be cut into at least k pieces of equal length.
Enumerating every candidate piece length directly costs up to O(n*maxL), so we binary-search on the answer to speed up the enumeration.
Set left = 0 (the minimum possible piece length) and right = max_L (the longest log, since the answer can never exceed it); mid = (left + right) / 2.
If a piece length of mid cannot meet the requirement, mid is too long, so we search the interval [left, mid];
if it can, the answer may be even longer, so we search [mid, right].
In the check function we sum floor(L[i] / mid) over all logs and compare the sum with k: if it is smaller, mid is infeasible; otherwise mid is feasible.
Because the loop condition is left + 1 < right, the final answer ends up in left (the code also checks right before returning).
Complexity analysis
Time complexity O(nlog(L))
the cost of the binary search
Space complexity O(size(L))
only the array L
"""
# todo: value-based binary search, as covered in the Jiuzhang algorithm advanced course; similar problems: robot jumping, copy books
def woodCut(self, L, k):
# write your code here
len_L = len(L)
if len_L == 0:
return 0
max_L = 0
for i in range(len_L):
max_L = max(max_L, L[i])
left, right = 0, max_L
def check(mid):
cou = 0
# count how many pieces we can cut at the current length
for i in range(len_L):
cou += (int)(L[i] / mid)
# if at least k pieces can be obtained, return True
if cou >= k:
return True
# otherwise return False
return False
while left + 1 < right:
mid = (int)(left + (right - left) / 2)
if check(mid): # feasible, so try longer lengths in [mid, right]
left = mid
else: # infeasible, so try shorter lengths in [left, mid]
right = mid
if check(right):
return right
return left
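if __name__ == "__main__":
# Usage sketch (added); the expected values are the two samples from the docstring above:
# 7 pieces of length 114 can be cut from [232, 124, 456], while [1, 2, 3] cannot yield 7 pieces.
solution = Solution()
assert solution.woodCut([232, 124, 456], 7) == 114
assert solution.woodCut([1, 2, 3], 7) == 0
print("woodCut examples passed")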
|
nilq/baby-python
|
python
|
import logging
from autobahn.twisted.websocket import WebSocketServerProtocol
logger = logging.getLogger(__name__)
class PsutilRemoteServerProtocol(WebSocketServerProtocol):
def onConnect(self, request):
logger.info("Client connecting: {}".format(request.peer))
def onOpen(self):
logger.info("Opening connection")
self.factory.register(self)
def onClose(self, wasClean, code, reason):
logger.info("Closing connection: {}".format(reason))
self.factory.unregister(self)
|
nilq/baby-python
|
python
|
DEFAULT_SYSTEM = 'frontera.tacc.utexas.edu'
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""Positive Negative.
Given 2 int values, return True if one is negative and one is positive.
Except if the parameter "negative" is True, then return
True only if both are negative.
source: https://codingbat.com/prob/p162058
"""
def pos_neg(a: int, b: int, negative: bool) -> bool:
"""Differences in signed digits.
Return True if:
- negative is True and both a,b < 0.
- negative is False and
((a > 0 and b < 0) or (a < 0 and b > 0).
Return False otherwise.
"""
if negative:
return (a < 0 and b < 0)
return (a > 0 and b < 0) or (a < 0 and b > 0)
if __name__ == "__main__":
assert pos_neg(1, -1, False) is True
assert pos_neg(-1, 1, False) is True
assert pos_neg(-4, -5, True) is True
assert pos_neg(-4, -5, False) is False
assert pos_neg(-4, 5, False) is True
assert pos_neg(-4, 5, True) is False
assert pos_neg(1, 1, False) is False
assert pos_neg(-1, -1, False) is False
assert pos_neg(1, -1, True) is False
assert pos_neg(-1, 1, True) is False
assert pos_neg(1, 1, True) is False
assert pos_neg(-1, -1, True) is True
assert pos_neg(5, -5, False) is True
assert pos_neg(-6, 6, False) is True
assert pos_neg(-5, -6, False) is False
assert pos_neg(-2, -1, False) is False
assert pos_neg(1, 2, False) is False
assert pos_neg(-5, 6, True) is False
assert pos_neg(-5, -5, True) is True
print('Passed')
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
from gensim.models import Word2Vec
from sklearn.decomposition import TruncatedSVD
from sklearn.model_selection import StratifiedKFold
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
def create_groupby_features(df, group_columns_list, method_dict, add_to_original_data=False, suffix=""):
"""Create statistical features by grouing 'group_columns_list' and compute stats on other columns
specified in method_dict.
Parameters
----------
df : pandas dataframe
Feature dataframe.
group_columns_list : list
List of columns you want to group with, could be multiple columns.
method_dict: dict
Dictionary used to create stats variables,
should be {'feature_1': ['method_1', 'method_2'],
'feature_2': ['method_1', 'method_2']},
if method is a lambda, use function inplace of method string.
add_to_original_data: boolean
Only keep stats or add stats variable to raw data, default False.
Returns
-------
df_copy : pandas dataframe
New pandas dataframe with grouped columns and statistic columns.
Examples
--------
create_groupby_features(df=data,
group_columns_list=['class'],
method_dict={'before': ['count','mean']})
"""
assert type(group_columns_list) == list, "group_columns_list should be a list"
df_copy = df.copy()
grouped = df_copy.groupby(group_columns_list)
the_stats = grouped.agg(method_dict)
if suffix != "":
the_stats.columns = [
"".join(group_columns_list) + "_LV_" + "_".join(x[::-1]) + "_" + str(suffix)
for x in the_stats.columns.ravel()
]
else:
the_stats.columns = [
"".join(group_columns_list) + "_LV_" + "_".join(x[::-1]) for x in the_stats.columns.ravel()
]
the_stats.reset_index(inplace=True)
if not add_to_original_data:
df_copy = the_stats
else:
df_copy = pd.merge(
left=df_copy[group_columns_list], right=the_stats, on=group_columns_list, how="left"
).reset_index(drop=True)
return df_copy
def create_svd_interaction_features(
data, col_tobe_grouped, col_tobe_computed, tfidf=True, n_components=1, verbose=False
):
"""Extract col_tobe_grouped level information utilize information of col_tobe_computed by using SVD.
Parameters
----------
data : pandas dataframe
col_tobe_grouped : list
[str, str, str, ...]
col_tobe_computed : str
tfidf : bool
If true, use tfidf to extract information
If false, use count to extract information
n_components: int
Number of columns to generate
verbose: bool
If true, show debug information.
If false, do not show debug information.
Returns
-------
result : pandas dataframe
col_tobe_grouped level dataframe, columns are information about col_tobe_computed.
Examples
--------
create_svd_interaction_features(data=df, col_tobe_grouped=['user_id'],
col_tobe_computed='item_id', tfidf=True, n_components=2)
(the column names above are hypothetical and only illustrate the call signature)
"""
if verbose:
print("col_tobe_grouped:{} | col_tobe_computed:{}".format(col_tobe_grouped, col_tobe_computed))
print("dataset shape: {}".format(data.shape))
# Step1: Generate dataframe that to be embedded
data_tobe_embedded = data.groupby(col_tobe_grouped)[col_tobe_computed].agg(
lambda x: " ".join(list([str(y) for y in x]))
)
if verbose:
print("\nData shape to be embedded: {}".format(data_tobe_embedded.shape))
print(data_tobe_embedded[:2])
# Step2: Choose appropriate vectorizer
if tfidf:
vectorizer = TfidfVectorizer(tokenizer=lambda x: x.split(" "))
else:
vectorizer = CountVectorizer(tokenizer=lambda x: x.split(" "))
# Step3: Create vectorizer
data_embedded_vector = vectorizer.fit_transform(data_tobe_embedded)
if verbose:
print("\nData shape embedded vector: {}".format(data_embedded_vector.shape))
# Step4: Embed information of col_tobe_computed into col_tobe_grouped level
svd = TruncatedSVD(n_components=n_components, random_state=2019)
data_embedded_reduce = svd.fit_transform(data_embedded_vector)
result = pd.DataFrame(data_embedded_reduce)
if tfidf:
result.columns = [
"_".join(col_tobe_grouped) + "_{}_svd_tfidf_{}".format(col_tobe_computed, index)
for index in range(n_components)
]
else:
result.columns = [
"_".join(col_tobe_grouped) + "_{}_svd_count_{}".format(col_tobe_computed, index)
for index in range(n_components)
]
result[col_tobe_grouped] = data_tobe_embedded.reset_index()[col_tobe_grouped]
if verbose:
print("Data shape embedded svd: {}".format(data_embedded_reduce.shape))
print(result[:2])
return result
def create_w2v_interaction_features(data, col1, col2, n_components, window_size, verbose=False):
"""Extract col1 level information utilize information of col2 by using word2vec.
Parameters
----------
data : pandas dataframe
col1 : str
col2 : str
n_components: int
Number of columns to generate.
window_size: int
Window size of word2vec method.
verbose: bool
If true, show debug information.
If false, do not show debug information.
Returns
-------
result : pandas dataframe
col1 level dataframe, columns are information about col2.
Examples
--------
create_w2v_interaction_features(data=df, col1='item_id', col2='user_id',
n_components=8, window_size=5)
(the column names above are hypothetical and only illustrate the call signature)
"""
if verbose:
print("col1:{} | col2:{}".format(col1, col2))
print("dataset shape: {}".format(data.shape))
# Step1: Generate dataframe that to be embedded.
data_tobe_embedded = data.groupby([col2])[col1].agg(lambda x: list([str(y) for y in x]))
list_tobe_embedded = list(data_tobe_embedded.values)
if verbose:
print("\nData shape to be embedded: {}".format(data_tobe_embedded.shape))
print(data_tobe_embedded[:2])
# Step2: Do word embedding.
w2v = Word2Vec(list_tobe_embedded, size=n_components, window=window_size, min_count=1)
keys = list(w2v.wv.vocab.keys())
dict_w2v = {}
for key in keys:
dict_w2v[key] = w2v.wv[key]
result = pd.DataFrame(dict_w2v).T.reset_index()
# Step3: Rename new columns/
result.columns = [col1] + [col1 + "_{}_w2v_{}".format(col2, index) for index in range(n_components)]
result[col1] = result[col1].astype(data[col1].dtype)
return result
class TargetEncodingSmoothing(BaseEstimator, TransformerMixin):
def __init__(self, columns_names, k, f):
""" Target encoding class.
Parameters
----------
columns_names : list
Columns to be encoded.
k : float
Inflection point, i.e. the value of N at which the smoothing function equals 0.5.
f : float
Steepness, a value which controls how steep the smoothing function is.
"""
self.columns_names = columns_names
self.learned_values = {}
self.dataset_mean = np.nan
self.k = k
self.f = f
def smoothing_func(self, N):
return 1 / (1 + np.exp(-(N - self.k) / self.f))
def fit(self, X, y, **fit_params):
""" Fit target encodings.
Parameters
----------
X : pandas.DataFrame
Pandas dataframe which contains features.
y : numpy array
Target values.
Returns
-------
Class
"""
X_ = X.copy()
X_["__target__"] = y
self.learned_values = {}
self.dataset_mean = np.mean(y)
for c in [x for x in X_.columns if x in self.columns_names]:
stats = X_[[c, "__target__"]].groupby(c)["__target__"].agg(["mean", "size"])
# Compute weight.
stats["alpha"] = self.smoothing_func(stats["size"])
# Take weighted sum of 2 means: dataset mean and level mean.
stats["__target__"] = stats["alpha"] * stats["mean"] + (1 - stats["alpha"]) * self.dataset_mean
# Keep weighted target and raw encoded columns.
stats = stats.drop([x for x in stats.columns if x not in ["__target__", c]], axis=1).reset_index()
# Save into dict
self.learned_values[c] = stats
return self
def transform(self, X, **fit_params):
""" Transform fitted target encoding information into X.
Parameters
----------
X : pandas.DataFrame
Pandas dataframe which contains features.
Returns
-------
pandas.DataFrame
Transformed values.
"""
# Get raw values.
transformed_X = X[self.columns_names].copy()
# Transform encoded information into raw values.
for c in transformed_X.columns:
transformed_X[c] = transformed_X[[c]].merge(self.learned_values[c], on=c, how="left")["__target__"]
# Fill y dataset mean into missing values.
transformed_X = transformed_X.fillna(self.dataset_mean)
transformed_X.columns = [d + "_smooth_te" for d in transformed_X.columns]
return transformed_X
def fit_transform(self, X, y, **fit_params):
""" Fit and Transform
Parameters
----------
X : pandas.DataFrame
Pandas dataframe which contains features.
y : numpy array
Target values.
Returns
-------
pandas.DataFrame
Transformed values.
"""
self.fit(X, y)
return self.transform(X)
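# Usage sketch for TargetEncodingSmoothing (added; the column and target values below are
# made up for illustration):
#
# df = pd.DataFrame({"city": ["a", "a", "b", "b", "b", "c"]})
# y = np.array([1, 0, 1, 1, 0, 1])
# te = TargetEncodingSmoothing(columns_names=["city"], k=2, f=1)
# encoded = te.fit_transform(df, y) # single column 'city_smooth_te'
#
# Rare levels (small group size N) get alpha close to 0, so their encoding is shrunk toward
# the dataset mean, while frequent levels keep most of their own level mean.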
def get_CV_target_encoding(data, y, encoder, cv=5):
""" Add cross validation noise into training target encoding.
Parameters
----------
data : pandas.DataFrame
Pandas dataframe which contains features.
y : numpy array
Target values.
encoder : TargetEncodingSmoothing
TargetEncodingSmoothing Instance
cv : int, optional
Cross validation fold, by default 5
Returns
-------
pandas.DataFrame
Out-of-fold target-encoded columns, returned in the original row order.
"""
# Create cross validation schema.
skf = StratifiedKFold(n_splits=cv, random_state=2019, shuffle=True)
result = []
# Do cross validation.
for train_index, test_index in skf.split(data, y):
encoder.fit(data.iloc[train_index, :].reset_index(drop=True), y[train_index])
tmp = encoder.transform(data.iloc[test_index, :].reset_index(drop=True))
tmp["index"] = test_index
result.append(tmp)
# Concat all folds.
result = pd.concat(result, ignore_index=True)
# Recover to default order.
result = result.sort_values("index").reset_index(drop=True).drop("index", axis=1)
return result
class TargetEncodingExpandingMean(BaseEstimator, TransformerMixin):
def __init__(self, columns_names):
self.columns_names = columns_names
self.learned_values = {}
self.dataset_mean = np.nan
def fit(self, X, y, **fit_params):
X_ = X.copy()
self.learned_values = {}
self.dataset_mean = np.mean(y)
X_["__target__"] = y
for c in [x for x in X_.columns if x in self.columns_names]:
stats = X_[[c, "__target__"]].groupby(c)["__target__"].agg(["mean", "size"])
stats["__target__"] = stats["mean"]
stats = stats.drop([x for x in stats.columns if x not in ["__target__", c]], axis=1).reset_index()
self.learned_values[c] = stats
return self
def transform(self, X, **fit_params):
transformed_X = X[self.columns_names].copy()
for c in transformed_X.columns:
transformed_X[c] = (transformed_X[[c]].merge(self.learned_values[c], on=c, how="left"))["__target__"]
transformed_X = transformed_X.fillna(self.dataset_mean)
transformed_X.columns = [d + "_expand_te" for d in transformed_X.columns]
return transformed_X
def fit_transform(self, X, y, **fit_params):
self.fit(X, y)
# Expanding mean transform
X_ = X[self.columns_names].copy().reset_index(drop=True)
X_["__target__"] = y
X_["index"] = X_.index
X_transformed = pd.DataFrame()
for c in self.columns_names:
X_shuffled = X_[[c, "__target__", "index"]].copy()
X_shuffled = X_shuffled.sample(n=len(X_shuffled), replace=False)
X_shuffled["cnt"] = 1
X_shuffled["cumsum"] = X_shuffled.groupby(c, sort=False)["__target__"].apply(lambda x: x.shift().cumsum())
X_shuffled["cumcnt"] = X_shuffled.groupby(c, sort=False)["cnt"].apply(lambda x: x.shift().cumsum())
X_shuffled["encoded"] = X_shuffled["cumsum"] / X_shuffled["cumcnt"]
X_shuffled["encoded"] = X_shuffled["encoded"].fillna(self.dataset_mean)
X_transformed[c] = X_shuffled.sort_values("index")["encoded"].values
X_transformed.columns = [d + "_expand_te" for d in X_transformed.columns]
return X_transformed
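# Worked example of the expanding-mean transform above (added for illustration): for a
# category column whose shuffled order is [A, A, A] with targets [1, 0, 1], the shifted
# cumulative sums give encodings [dataset_mean, 1/1, (1 + 0)/2] = [dataset_mean, 1.0, 0.5];
# each row only sees the targets of rows that came before it, which limits target leakage.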
def create_expand_noise_te_features(df_train, y_train, df_test, columns_names):
"""[summary]
Parameters
----------
df_train : pandas.DataFrame
Pandas dataframe which contains features.
y_train : numpy array
Train target
df_test : pandas.DataFrame
Pandas dataframe which contains features.
columns_names : list
Columns to be encoded.
k : float
Inflection point, that's the point where f(x) is equal 0.5.
f : float
Steepness, a value which controls how step is our function.
cv_noise : int, optional
[description], by default 5
Returns
-------
[type]
[description]
"""
te = TargetEncodingExpandingMean(columns_names=columns_names)
X_train = te.fit_transform(df_train, y_train)
X_test = te.transform(df_test)
return X_train, X_test
def create_smooth_noise_te_features(df_train, y_train, df_test, columns_names, k, f, cv_noise=5):
"""[summary]
Parameters
----------
df_train : pandas.DataFrame
Pandas dataframe which contains features.
y_train : numpy array
Train target
df_test : pandas.DataFrame
Pandas dataframe which contains features.
columns_names : list
Columns to be encoded.
k : float
Inflection point, that's the point where f(x) is equal 0.5.
f : float
Steepness, a value which controls how step is our function.
cv_noise : int, optional
[description], by default 5
Returns
-------
[type]
[description]
"""
te = TargetEncodingSmoothing(columns_names=columns_names, k=k, f=f)
X_train = get_CV_target_encoding(df_train, y_train, te, cv=cv_noise)
te.fit(df_train, y_train)
X_test = te.transform(df_test)
return X_train, X_test
def create_noise_te_features_forlocal_cv(data, y, columns_names, k, f, n_splits=5, cv_noise=5):
""" Load features and target, then generate target encoded values to correspoding train and valid.
Parameters
----------
data : pandas.DataFrame
Pandas dataframe which contains features.
y : numpy array
Target values.
columns_names : list
Columns to be encoded.
k : float
Inflection point, i.e. the group size at which the smoothing function equals 0.5.
f : float
Steepness, a value which controls how steep the smoothing function is.
n_splits : int optional
Cross validation fold, by default 5
cv_noise : int optional
Noise cross validation fold, by default 5
Returns
-------
X_train : pandas.DataFrame
Train encoded columns.
X_valid : pandas.DataFrame
Valid encoded columns.
"""
skf = StratifiedKFold(n_splits=n_splits, random_state=2019, shuffle=True)
for train_index, valid_index in skf.split(data, y):
train_x = data.loc[train_index, columns_names].reset_index(drop=True)
valid_x = data.loc[valid_index, columns_names].reset_index(drop=True)
train_y, valid_y = y[train_index], y[valid_index]
te = TargetEncodingSmoothing(columns_names=columns_names, k=k, f=f)
X_train = get_CV_target_encoding(train_x, train_y, te, cv=cv_noise)
te.fit(train_x, train_y)
X_valid = te.transform(valid_x).values
return X_train, X_valid
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-01 05:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Interface', '0003_auto_20171201_0503'),
]
operations = [
migrations.AddField(
model_name='huntuser',
name='current_landmark',
field=models.IntegerField(default=0),
),
]
|
nilq/baby-python
|
python
|