max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
venv/lib/python3.7/site-packages/google/type/postal_address_pb2.py | nicholasadamou/StockBird | 15 | 16200 | <gh_stars>10-100
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/type/postal_address.proto
import sys
# _b: identity on Python 2 (str is already bytes), latin1-encode on Python 3,
# so the serialized descriptor literals below stay byte-exact on both.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


# File-level descriptor embedding the serialized
# google/type/postal_address.proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='google/type/postal_address.proto',
  package='google.type',
  syntax='proto3',
  serialized_options=_b('\n\017com.google.typeB\022PostalAddressProtoP\001ZFgoogle.golang.org/genproto/googleapis/type/postaladdress;postaladdress\242\002\003GTP'),
  serialized_pb=_b('\n google/type/postal_address.proto\x12\x0bgoogle.type\"\xfd\x01\n\rPostalAddress\x12\x10\n\x08revision\x18\x01 \x01(\x05\x12\x13\n\x0bregion_code\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t\x12\x13\n\x0bpostal_code\x18\x04 \x01(\t\x12\x14\n\x0csorting_code\x18\x05 \x01(\t\x12\x1b\n\x13\x61\x64ministrative_area\x18\x06 \x01(\t\x12\x10\n\x08locality\x18\x07 \x01(\t\x12\x13\n\x0bsublocality\x18\x08 \x01(\t\x12\x15\n\raddress_lines\x18\t \x03(\t\x12\x12\n\nrecipients\x18\n \x03(\t\x12\x14\n\x0corganization\x18\x0b \x01(\tBu\n\x0f\x63om.google.typeB\x12PostalAddressProtoP\x01ZFgoogle.golang.org/genproto/googleapis/type/postaladdress;postaladdress\xa2\x02\x03GTPb\x06proto3')
)


# Descriptor for the google.type.PostalAddress message and its 11 fields.
_POSTALADDRESS = _descriptor.Descriptor(
  name='PostalAddress',
  full_name='google.type.PostalAddress',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='revision', full_name='google.type.PostalAddress.revision', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='region_code', full_name='google.type.PostalAddress.region_code', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='language_code', full_name='google.type.PostalAddress.language_code', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='postal_code', full_name='google.type.PostalAddress.postal_code', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='sorting_code', full_name='google.type.PostalAddress.sorting_code', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='administrative_area', full_name='google.type.PostalAddress.administrative_area', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='locality', full_name='google.type.PostalAddress.locality', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='sublocality', full_name='google.type.PostalAddress.sublocality', index=7,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='address_lines', full_name='google.type.PostalAddress.address_lines', index=8,
      number=9, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='recipients', full_name='google.type.PostalAddress.recipients', index=9,
      number=10, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='organization', full_name='google.type.PostalAddress.organization', index=10,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=50,
  serialized_end=303,
)

DESCRIPTOR.message_types_by_name['PostalAddress'] = _POSTALADDRESS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class built via the protobuf reflection machinery.
PostalAddress = _reflection.GeneratedProtocolMessageType('PostalAddress', (_message.Message,), dict(
  DESCRIPTOR = _POSTALADDRESS,
  __module__ = 'google.type.postal_address_pb2'
  # @@protoc_insertion_point(class_scope:google.type.PostalAddress)
  ))
_sym_db.RegisterMessage(PostalAddress)

DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 1.632813 | 2 |
dataprep/tests/data_connector/test_integration.py | dylanzxc/dataprep | 1 | 16201 | from ...data_connector import Connector
from os import environ
def test_data_connector() -> None:
    """Live integration test of the Yelp data connector.

    Requires the DATAPREP_DATA_CONNECTOR_YELP_TOKEN environment variable and
    network access. Exercises basic querying, schema introspection,
    pagination (_count=120) and the provider's hard result cap (under 1000
    even when 10000 rows are requested).
    """
    token = environ["DATAPREP_DATA_CONNECTOR_YELP_TOKEN"]
    dc = Connector("yelp", _auth={"access_token": token})
    df = dc.query("businesses", term="ramen", location="vancouver")
    assert len(df) > 0
    dc.info()
    schema = dc.show_schema("businesses")
    assert len(schema) > 0
    # _count drives pagination across multiple API pages.
    df = dc.query("businesses", _count=120, term="ramen", location="vancouver")
    assert len(df) == 120
    # Requesting more than the provider allows is truncated, not an error.
    df = dc.query("businesses", _count=10000, term="ramen", location="vancouver")
    assert len(df) < 1000
| 2.4375 | 2 |
plot_helpers.py | aspuru-guzik-group/QNODE | 14 | 16202 | <filename>plot_helpers.py
import os
import torch
import numpy as np
import math
import matplotlib.pyplot as plt
from matplotlib import cm
from qutip import *
import imageio
# Global plot styling: larger axis labels and LaTeX text rendering with a
# Computer Modern serif font (usetex requires a working TeX installation).
plt.rcParams['axes.labelsize'] = 16
from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)
def animate_recon(xt, xm, xe, title=''):
    """Animate truth / reconstruction / extrapolation trajectories into a gif.

    Args:
        xt, xm, xe: arrays of shape [timesteps, 3] — training dynamics,
            latent-ODE reconstruction and latent-ODE extrapolation.
        title: output name; the gif is written to exp/<title>.gif.
    """
    images = []
    for x, label, col in zip([xt, xm, xe], ['training dynamics', 'latent neural ode reconstruction', 'latent neural ode extrapolation'], ['black', 'limegreen', 'blue']):
        for i, v in enumerate(x):
            bloch = bloch_format(Bloch())
            bloch.add_vectors(v)
            bloch.vector_color = [col]
            bloch.render()
            # Trace the path covered so far; axes are permuted as (y, -x, z)
            # to match the qutip Bloch rendering orientation used here.
            s = x[:i+1]
            bloch.axes.plot(s[:,1], -s[:,0], s[:,2], color=col)
            # Overlay earlier stages for visual context.
            if label == 'latent neural ode reconstruction':
                bloch.axes.plot(xt[:,1], -xt[:,0], xt[:,2], color='black')
            if label == 'latent neural ode extrapolation':
                bloch.axes.plot(xt[:,1], -xt[:,0], xt[:,2], color='black')
                bloch.axes.plot(xm[:,1], -xm[:,0], xm[:,2], color='limegreen')
            plt.suptitle(label, fontdict={'color': col})
            plt.savefig('exp/temp_file.png')
            images.append(imageio.imread('exp/temp_file.png'))
    imageio.mimsave('exp/'+title+'.gif', images, duration=0.05)
def plot_bloch_vectors(xm, title=''):
    """Draw a set of Bloch vectors on one styled sphere and save as a pdf.

    Args:
        xm: array of shape [n_points, 3] of Bloch vectors.
        title: suffix for the output file exp/bvecs<title>.pdf.
    """
    bloch = bloch_format(Bloch())
    for i, vm in enumerate(xm):
        bloch.add_vectors(vm)
    bloch.vector_color = ['black']
    bloch.render()
    plt.suptitle(r'interpolated initial states $|\Psi_0 \rangle $')
    plt.savefig('exp/bvecs'+title+'.pdf', bbox_inches='tight')
def animate_traj(xt, title=''):
    """Animate a single trajectory into exp/<title>.gif.

    Args:
        xt: array of shape [timesteps, 3] of Bloch vectors.
        title: output gif name (without extension).
    """
    images = []
    for i, vt in enumerate(xt):
        bloch = bloch_format(Bloch())
        bloch.add_vectors(vt)
        bloch.vector_color = ['black']
        bloch.render()
        # Trace the path covered so far (axes permuted as (y, -x, z)).
        t = xt[:i+1]
        bloch.axes.plot(t[:,1], -t[:,0], t[:,2], color='black', label='dynamics')
        plt.savefig('exp/temp_file.png', bbox_inches='tight')
        images.append(imageio.imread('exp/temp_file.png'))
    imageio.mimsave('exp/'+title+'.gif', images, duration=0.05)
def animate_recon_(xt, xm, title=''):
    """Animate ground truth and reconstruction simultaneously into a gif.

    Args:
        xt: ground-truth trajectory, shape [timesteps, 3] (drawn black).
        xm: latent-ODE reconstruction, same shape (drawn limegreen).
        title: output gif name; written to exp/<title>.gif.
    """
    images = []
    for i, (vt, vm) in enumerate(zip(xt, xm)):
        bloch = bloch_format(Bloch())
        bloch.add_vectors(vt)
        bloch.add_vectors(vm)
        bloch.vector_color = ['black', 'limegreen']
        bloch.render()
        # Trace both paths covered so far (axes permuted as (y, -x, z)).
        t = xt[:i+1]
        m = xm[:i+1]
        bloch.axes.plot(t[:,1], -t[:,0], t[:,2], color='black', label='train')
        bloch.axes.plot(m[:,1], -m[:,0], m[:,2], color='limegreen', label='neural ode')
        plt.suptitle('latent neural ode --', fontdict={'color':'limegreen'})
        plt.title('True quantum dynamics', fontdict={'color':'black'})
        plt.savefig('exp/temp_file.png')
        images.append(imageio.imread('exp/temp_file.png'))
    imageio.mimsave('exp/'+title+'.gif', images, duration=0.05)
def animate_single_traj(x, title=''):
    """Animate a single Bloch trajectory (vectors + points) into a gif.

    Fix: removed a leftover debug ``print(v, s[-1])`` that spammed stdout on
    every frame.

    Args:
        x: array of shape [timesteps, 3] of Bloch vectors.
        title: suffix for the output file exp/traj<title>.gif.
    """
    images = []
    for i, v in enumerate(x):
        bloch = Bloch()
        bloch.add_vectors(v)
        bloch.add_points(v)
        bloch.render()
        # Trace the path covered so far (axes permuted as (y, -x, z)).
        s = x[:i+1]
        bloch.axes.plot(s[:,1], -s[:,0], s[:,2], color='limegreen')
        plt.savefig('exp/temp_file.png')
        images.append(imageio.imread('exp/temp_file.png'))
    imageio.mimsave('exp/traj'+title+'.gif', images, duration=0.125)
    os.remove('exp/temp_file.png')
def plot_traj_bloch(x, title='', col='limegreen', view=[0,90]):
    """Plot one trajectory on a styled Bloch sphere and save to exp/<title>.

    Args:
        x: array of shape [timesteps, 3].
        title: output file name (relative to exp/).
        col: line color.
        view: [azimuth, elevation] camera angles (e.g. [-40, 30] for a tilt).
    """
    bloch = bloch_format(Bloch(), view)
    bloch.render()
    # Axes permuted as (y, -x, z) to match the Bloch rendering orientation.
    bloch.axes.plot(x[:,1], -x[:,0], x[:,2], color=col)
    plt.savefig('exp/'+title)
def construct_gif(xs, title=''):
    """Render each trajectory in ``xs`` as a still Bloch plot and combine
    the frames into exp/<title>.gif.

    Fix: dropped an unused colormap computation (``cm.get_cmap``/``cols``
    were built but never consumed) and the unused loop index.

    Args:
        xs: iterable of trajectories, each of shape [timesteps, 3].
        title: output gif name (without extension).
    """
    images = []
    for x in xs:
        filename = 'temp_file.png'
        plot_traj_bloch(x, filename)
        images.append(imageio.imread('exp/' + filename))
    imageio.mimsave('exp/' + title + '.gif', images, duration=0.5)
    os.remove('exp/temp_file.png')
def bloch_format(bloch, view=[0, 90]):
    """Apply the house style to a qutip Bloch sphere and return it.

    Sets a faint gray wireframe, a nearly transparent whitesmoke sphere,
    blanks out all axis labels, and points the camera at ``view``
    ([azimuth, elevation]).
    """
    styling = {
        'frame_color': 'gray',
        'frame_num': 6,
        'frame_alpha': 0.15,
        'sphere_alpha': 0.1,
        'sphere_color': 'whitesmoke',
        'view': view,
        'ylabel': ['', ''],
        'xlabel': ['', ''],
        'zlabel': ['', ''],
    }
    for attribute, value in styling.items():
        setattr(bloch, attribute, value)
    return bloch
def slerp(val, low, high):
    """Spherical linear interpolation between vectors ``low`` and ``high``.

    ``val`` in [0, 1] selects the position along the arc between the two
    (normalized) directions. When the vectors are (anti)parallel, so the
    arc is degenerate (sin(omega) == 0), falls back to straight-line
    interpolation (the L'Hopital limit).
    """
    unit_low = low / np.linalg.norm(low)
    unit_high = high / np.linalg.norm(high)
    cos_omega = np.clip(np.dot(unit_low, unit_high), -1, 1)
    omega = np.arccos(cos_omega)
    sin_omega = np.sin(omega)
    if sin_omega == 0.:
        # Degenerate arc: plain LERP.
        return (1.0 - val) * low + val * high
    weight_low = np.sin((1.0 - val) * omega) / sin_omega
    weight_high = np.sin(val * omega) / sin_omega
    return weight_low * low + weight_high * high
def get_latent_interp(z1, z2, num_steps, linear=False):
    """Interpolate between two latent vectors.

    Fix: removed a leftover ``print(ratios)`` debug statement.

    Args:
        z1, z2: endpoint latent vectors.
        num_steps: number of interpolation points, endpoints included.
        linear: if True use straight-line (LERP) interpolation,
            otherwise spherical interpolation via ``slerp``.

    Returns:
        List of ``num_steps`` interpolated vectors from z1 to z2.
    """
    zs = []
    for ratio in np.linspace(0, 1, num_steps):
        if linear:
            v = (1.0 - ratio) * z1 + ratio * z2
        else:
            v = slerp(ratio, z1, z2)
        zs.append(v)
    return zs
def normalize(a):
    """Shift ``a`` so its (real-part) minimum is 0, then scale the peak
    magnitude to 1."""
    shifted = a - np.real(a).min()
    return shifted / np.abs(shifted).max()
def norm(s):
    """Euclidean norm of ``s`` taken along its last axis."""
    return np.sum(s ** 2, -1) ** 0.5
def get_interpolate(model, data, i, j, n_steps=8):
    """Encode two training trajectories and interpolate between their latents.

    Args:
        model: latent ODE model exposing ``encode(trajs, ts, reconstruct)``.
        data: dataset with ``train_time_steps`` and ``train_expect_data``
            attributes (numpy arrays).
        i, j: indices of the two endpoint trajectories.
        n_steps: number of interpolation points, endpoints included.

    Returns:
        List of ``n_steps`` slerp-interpolated latent vectors.
    """
    nts = len(data.train_time_steps)
    ts = torch.from_numpy(data.train_time_steps).float()
    # Double-bracket indexing keeps a leading batch dimension of size 1.
    x1 = data.train_expect_data[[i]]
    x2 = data.train_expect_data[[j]]
    trajs = np.concatenate((x1, x2), axis=0).reshape((2, nts, 3))
    trajs = torch.from_numpy(trajs).float()
    z0 = model.encode(trajs, ts, reconstruct=True)
    z1, z2 = z0[0,:], z0[1,:]
    zs = get_latent_interp(z1, z2, n_steps)
    return zs
def round_3sf(num_list):
    """Round each number in ``num_list`` to three significant figures.

    Fix: the original raised ``ValueError`` on 0 because ``math.log10``
    is undefined there; zero is now passed through unchanged.

    Args:
        num_list: iterable of numbers.

    Returns:
        List of numbers rounded to 3 significant figures.
    """
    trimmed = []
    for num in num_list:
        if num == 0:
            trimmed.append(num)
        else:
            digits = 3 - int(math.floor(math.log10(abs(num)))) - 1
            trimmed.append(round(num, digits))
    return trimmed
| 2.4375 | 2 |
state.py | Lekensteyn/wgll | 1 | 16203 | # State tracking for WireGuard protocol operations.
# Author: <NAME> <<EMAIL>>
# Licensed under the MIT license <http://opensource.org/licenses/MIT>.
import base64
import hashlib
import inspect
import socket
import traceback
from noise_wg import NoiseWG, crypto_scalarmult_base, aead_encrypt, aead_decrypt
def calc_mac1(key, data):
    """Compute the WireGuard-style mac1 over ``data``.

    The MAC key is BLAKE2s('mac1----' || key.pub); the MAC itself is a
    16-byte keyed BLAKE2s of ``data``.
    """
    label = b'mac1----' + key.pub
    subkey = hashlib.blake2s(label).digest()
    return hashlib.blake2s(data, digest_size=16, key=subkey).digest()
def is_bytes(value):
    """True for exact ``bytes`` objects or anything exposing ``__bytes__``
    (e.g. the PublicKey / PrivateKey wrappers)."""
    if type(value) == bytes:
        return True
    return hasattr(value, '__bytes__')
def to_bytes(data, length, byteorder='big'):
    """Coerce ``data`` into bytes for wire use.

    Args:
        data: falsy values are treated as the integer 0; ints are converted
            with ``int.to_bytes``; ``str`` is treated as base64; byte-like
            objects (bytes or anything with ``__bytes__``) pass through.
        length: expected byte length; 0/None means "indeterminate" (ints
            expand to their minimal width, other inputs keep their length).
        byteorder: byte order used for int conversion.

    Returns:
        The byte representation. A length mismatch for non-int input only
        prints a warning plus a stack trace — it does not raise.
    """
    if not data:
        # Covers None, 0, '' and b'' alike.
        data = 0
    if type(data) == int:
        if not length:
            # Indeterminate length, just expand it.
            length = (data.bit_length() + 7) // 8
        return data.to_bytes(length, byteorder)
    if type(data) == str:
        # Strings are assumed to be base64-encoded key material.
        data = base64.b64decode(data)
    elif not is_bytes(data):
        raise RuntimeError(f'Expected bytes, got: {data!r}')
    else:
        data = bytes(data)
    if length and len(data) != length:
        print(f'Warning: want {length}, got length {len(data)}: {data!r}')
        traceback.print_stack()
    return data
class Storage:
    """Registry of protocol objects of one type (addresses, states, messages).

    Instances are auto-named ``<name>_<index>``, de-duplicated by repr, and
    published into the shared ``variables`` dict so they can be referenced
    by name later.
    """

    def __init__(self, name, spec, variables):
        self.name = name
        # Class of the objects held by this storage.
        self.spec = spec
        self.instances = []
        # Shared namespace to publish named instances into.
        self.variables = variables

    def add(self, *args, **kwargs):
        """Construct a new ``spec`` instance and register it."""
        return self.add_object(self.spec(*args, **kwargs))

    def add_object(self, obj):
        """Register ``obj``: name it, de-duplicate by repr, publish, echo."""
        i = len(self.instances)
        obj.name = f'{self.name}_{i}'
        # De-duplicate
        for obj2 in self.instances:
            if repr(obj2) == repr(obj):
                obj = obj2
                break
        else:
            self.instances.append(obj)
        self.variables[obj.name] = obj
        print(f'{obj.name} = {obj}')
        return obj

    def resolve(self, name):
        '''Resolves an item name (or the item itself) to a matching item in this
        storage.'''
        if name == None:
            # Default to the most recently added instance.
            assert self.instances, f'No previous instance found for {self.name}'
            return self.instances[-1]
        if isinstance(name, self.spec):
            name = name.name
        assert self.instances, f'No instances found for {name}'
        # XXX maybe this could split the name and directly use it as index.
        for instance in self.instances[::-1]:
            if instance.name == name:
                return instance
        raise RuntimeError(f'Instance name {name} not found')
class Base:
    """Mixin providing a constructor-style ``__repr__``.

    The fields shown come from ``self.fields`` when set, otherwise from the
    ``__init__`` parameter names; byte values are rendered base64 (long) or
    hex-escaped (short), and all-zero bytes collapse to ``None``.
    """

    def __repr__(self):
        try:
            fields = self.fields
        except AttributeError:
            # Fall back to the __init__ signature's parameter names.
            fields = list(inspect.signature(self.__init__).parameters)
        params = []
        for field in fields:
            value = getattr(self, field)
            # XXX should repr dump the full values or refer to the state name?
            # NOTE: the `and False` deliberately disables the name-reference
            # branch below.
            if hasattr(value, 'name') and False:
                display = getattr(value, 'name')
            elif is_bytes(value):
                # Cannot just check type(value) because of PublicKey.
                value = bytes(value)
                if not value.replace(b'\0', b''):
                    # Simplify display
                    display = None
                elif len(value) > 16:
                    display = repr(base64.b64encode(value).decode('utf8'))
                else:
                    display = "b'%s'" % ''.join('\\x%02x' % x for x in value)
            else:
                display = repr(value)
            params.append(f'{field}={display}')
        params = ', '.join(params)
        return f'{self.__class__.__name__}({params})'
class Address(Base):
    """A remote endpoint; keeps host/port plus the (host, port) tuple used
    for socket sendto()."""

    def __init__(self, host, port):
        self.port = int(port)
        self.host = host
        self.address = (self.host, self.port)
class LocalAddress(Address):
    """A local UDP endpoint; its socket is created and bound lazily."""

    def __init__(self, host, port):
        super().__init__(host, port)
        self._socket = None

    @property
    def socket(self):
        # Bind lazily so merely declaring an address has no side effects.
        # self.name is assigned by Storage.add_object when registered.
        if not self._socket:
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self._socket.bind((self.host, self.port))
            print(f'{self.name}: Created socket {self._socket}')
        return self._socket
class PublicKey:
    """Wrapper around a raw 32-byte (little-endian) public key value."""

    def __init__(self, pub):
        # Accept ints, base64 strings, or byte-likes; normalize to 32 bytes.
        self.pub = to_bytes(pub, 32, byteorder='little')

    def __repr__(self):
        return repr(self.pub)

    def __bytes__(self):
        return self.pub
class PrivateKey:
    """A 32-byte (little-endian) private key; derives its public key eagerly."""

    def __init__(self, priv):
        self.priv = to_bytes(priv, 32, byteorder='little')
        # Derive the matching public key via scalar multiplication with the
        # curve base point.
        self.pub = PublicKey(crypto_scalarmult_base(self.priv))

    def __bytes__(self):
        return self.priv

    def __repr__(self):
        return repr(self.priv)
class StateI0(Base):
    """Initiator handshake state after building the initiation message.

    Runs the first (initiator -> responder) half of the Noise handshake and
    precomputes the encrypted static-key and timestamp fields that go into
    a MsgType1.
    """

    def __init__(self, SpubR, EprivI, SprivI, time, psk):
        if not SpubR:
            raise RuntimeError('Missing SpubR')
        self.SpubR = PublicKey(SpubR)     # responder's static public key
        self.EprivI = PrivateKey(EprivI)  # initiator's ephemeral private key
        self.SprivI = PrivateKey(SprivI)  # initiator's static private key
        self.time = to_bytes(time, 12)    # 12-byte timestamp field
        self.psk = to_bytes(psk, 32)      # pre-shared key
        self._compute_hs()

    @property
    def EpubI(self):
        return self.EprivI.pub

    @property
    def SpubI(self):
        return self.SprivI.pub

    def _compute_hs(self):
        """Advance the Noise handshake through the initiation message and
        keep the resulting state for the response step."""
        hs = NoiseWG()
        # pre-message
        hs.mix_hash(self.SpubR)
        # message from initiator to responder
        hs.mix_hash(self.EpubI)
        hs.mix_key(self.EpubI)
        hs.mix_dh(self.EprivI, self.SpubR)
        self.enc_SpubI = hs.encrypt_and_hash(self.SpubI)
        hs.mix_dh(self.SprivI, self.SpubR)
        self.enc_time = hs.encrypt_and_hash(self.time)
        # Saved so StateI1 can continue from a copy of this state.
        self.handshake_state = hs
class StateR0(Base):
    """Responder key material held before processing a handshake initiation."""

    def __init__(self, EprivR, SprivR, psk):
        self.EprivR = PrivateKey(EprivR)  # responder's ephemeral private key
        self.SprivR = PrivateKey(SprivR)  # responder's static private key
        self.psk = to_bytes(psk, 32)      # pre-shared key

    def EpubI(self):
        # NOTE(review): this method looks suspect — it derives the public key
        # of the responder's *ephemeral* key (i.e. EpubR, not EpubI), it is
        # not a @property like the StateI0 accessors, and it passes the
        # PrivateKey wrapper where sibling code passes raw bytes. It appears
        # unused in this file; verify before relying on it.
        return crypto_scalarmult_base(self.EprivR)
class StateI1(Base):
    """Initiator state after processing the handshake response.

    Completes the Noise handshake and derives the transport keys
    (``Tsend`` for sending, ``Trecv`` for receiving).
    """
    fields = ['Tsend', 'Trecv']

    def __init__(self, StateI0, EpubR):
        if not StateI0:
            raise RuntimeError('Missing handshake initiation state')
        if not EpubR:
            raise RuntimeError('Missing handshake initiation details')
        # Continue from a copy so the StateI0 instance stays reusable.
        self._compute_hs(StateI0, EpubR, StateI0.handshake_state.copy())

    def _compute_hs(self, StateI0, EpubR, hs):
        """Process the responder's half of the handshake and split keys."""
        hs.mix_hash(EpubR)
        hs.mix_key(EpubR)
        hs.mix_dh(StateI0.EprivI, EpubR)
        hs.mix_dh(StateI0.SprivI, EpubR)
        hs.mix_key_and_hash(StateI0.psk)
        self.enc_empty = hs.encrypt_and_hash(b'')
        self.Tsend, self.Trecv = hs.split()
class StateR1(Base):
    """Responder state after consuming an initiation and building the response.

    Derives the transport keys; note ``hs.split()`` is unpacked in mirrored
    order relative to StateI1 (``Trecv`` first), so each side's ``Tsend``
    pairs with the peer's ``Trecv``.
    """
    # SpubI and time are not really needed by the handshake, but perhaps this
    # could serve as debugging aid.
    fields = ['SpubI', 'time', 'Tsend', 'Trecv']

    def __init__(self, StateR0, EpubI, enc_SpubI, enc_time):
        if not StateR0:
            raise RuntimeError('Missing handshake response state')
        if not EpubI or not enc_SpubI or not enc_time:
            raise RuntimeError('Missing handshake response details')
        self._compute_hs(StateR0, EpubI, enc_SpubI, enc_time)

    def _compute_hs(self, StateR0, EpubI, enc_SpubI, enc_time):
        """Replay the initiation, then produce the response and split keys."""
        hs = NoiseWG()
        # pre-message
        hs.mix_hash(StateR0.SprivR.pub)
        # message from initiator to responder
        hs.mix_hash(EpubI)
        hs.mix_key(EpubI)
        hs.mix_dh(StateR0.SprivR, EpubI)
        self.SpubI = PublicKey(hs.decrypt_and_hash(enc_SpubI))
        hs.mix_dh(StateR0.SprivR, self.SpubI)
        self.time = hs.decrypt_and_hash(enc_time)
        # message from responder to initiator
        self.EpubR = StateR0.EprivR.pub
        hs.mix_hash(self.EpubR)
        hs.mix_key(self.EpubR)
        hs.mix_dh(StateR0.EprivR, EpubI)
        hs.mix_dh(StateR0.EprivR, self.SpubI)
        hs.mix_key_and_hash(StateR0.psk)
        self.enc_empty = hs.encrypt_and_hash(b'')
        self.Trecv, self.Tsend = hs.split()
class Data(Base):
    """Decrypted transport-message payload (arbitrary-length bytes)."""

    def __init__(self, data):
        # Length 0 means "indeterminate" for to_bytes.
        self.data = to_bytes(data, 0)
class Field:
    """Description of one wire-message field.

    ``size`` is the byte length (0 means variable / consume-remainder),
    ``fixed`` pins the serialized bytes (e.g. the message type tag), and
    ``constructor`` converts raw input into the field's value; it defaults
    to padding/coercing through ``to_bytes``.
    """

    def __init__(self, name, size, constructor=None, fixed=None):
        self.name = name
        self.size = size
        self.fixed = fixed
        if constructor is None:
            constructor = lambda data: to_bytes(data, size)
        self._constructor = constructor

    def parse_value(self, value):
        """Convert a raw value into this field's canonical representation."""
        return self._constructor(value)
class Message(Base):
    """Base class for wire messages described by a ``fields_desc`` tuple.

    Subclasses define ``fields_desc`` as a tuple of Field; fields with
    ``fixed`` bytes (e.g. the type tag) are not constructor parameters and
    are emitted verbatim on serialization.
    """

    def __init__(self, *args, **kwargs):
        # Do not expose fixed fields through the constructor.
        self.fields = [f.name for f in self.fields_desc if not f.fixed]
        # Map positional args onto the non-fixed field names, in order.
        for i, value in enumerate(args):
            name = self.fields[i]
            assert name not in kwargs, f'Duplicate parameter: {name}'
            kwargs[name] = value
        for f in self.fields_desc:
            val = kwargs.pop(f.name, None)
            val = f.parse_value(val)
            assert not f.size or len(bytes(val)) == f.size, \
                f'Expected size {f.size} for {f.name}, got {len(val)}: {val!r}'
            setattr(self, f.name, val)
        assert not kwargs, f'Unexpected parameters: {kwargs}'

    def __bytes__(self):
        """Serialize all fields in declaration order; fixed fields emit
        their fixed bytes regardless of stored values."""
        bs = b''
        for f in self.fields_desc:
            val = f.fixed
            if val is None:
                val = bytes(getattr(self, f.name))
            assert not f.size or len(val) == f.size, \
                f'Expected size {f.size} for {f.name}, got {len(val)}: {val!r}'
            bs += val
        return bs

    @classmethod
    def from_bytes(cls, bs):
        """Parse ``bs`` into a message instance.

        A field with size 0 consumes all remaining bytes; trailing data
        after the last field is an error.
        """
        min_size = sum(f.size for f in cls.fields_desc)
        assert len(bs) >= min_size, f'Missing data: {len(bs)} < {min_size}'
        fields = {}
        for fs in cls.fields_desc:
            if not fs.size:
                # No explicit size set, consume remaining data
                value, bs = bs, None
            else:
                value, bs = bs[:fs.size], bs[fs.size:]
            # Ignore values in fixed fields.
            if not fs.fixed:
                value = fs.parse_value(value)
            fields[fs.name] = value
        assert not bs, f'Trailing data: {bs}'
        return cls(**fields)
class MsgType1(Message):
    """Handshake Initiation message (type 1).

    ``mac1`` is kept as a zero placeholder in the field table and computed
    during serialization, keyed by the responder's static public key.
    """
    fields_desc = (
        Field('type', 4, fixed=b'\1\0\0\0'),
        Field('sender', 4, lambda x: to_bytes(x, 4, 'little')),
        Field('EpubI', 32, PublicKey),
        Field('enc_SpubI', 48),
        Field('enc_time', 28),
        Field('mac1', 16, fixed=b'\0' * 16),  # overwritten later
        Field('mac2', 16),
    )

    def __init__(self, *args, SpubR=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Responder's static public key, required to compute mac1.
        self.SpubR = PublicKey(SpubR)

    def __bytes__(self):
        msg = super().__bytes__()
        # Strip the two zeroed 16-byte macs, then append the real mac1
        # (computed over everything before the macs) and the stored mac2.
        msg = msg[:-32]
        msg += calc_mac1(self.SpubR, msg)
        msg += self.mac2
        return msg
class MsgType2(Message):
    """Handshake Response message (type 2).

    ``mac1`` is a zero placeholder in the field table and computed during
    serialization, keyed by the initiator's static public key.
    """
    fields_desc = (
        Field('type', 4, fixed=b'\2\0\0\0'),
        Field('sender', 4, lambda x: to_bytes(x, 4, 'little')),
        Field('receiver', 4, lambda x: to_bytes(x, 4, 'little')),
        Field('EpubR', 32, PublicKey),
        Field('enc_empty', 16),
        Field('mac1', 16, fixed=b'\0' * 16),  # overwritten later
        Field('mac2', 16),
    )

    def __init__(self, *args, SpubI=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Initiator's static public key, required to compute mac1.
        self.SpubI = PublicKey(SpubI)

    def __bytes__(self):
        msg = super().__bytes__()
        # Strip the zeroed macs, append the computed mac1 and stored mac2.
        msg = msg[:-32]
        msg += calc_mac1(self.SpubI, msg)
        msg += self.mac2
        return msg
class MsgType3(Message):
    """Cookie Reply message (type 3)."""
    fields_desc = (
        Field('type', 4, fixed=b'\3\0\0\0'),
        Field('receiver', 4, lambda x: to_bytes(x, 4, 'little')),
        Field('nonce', 24),
        Field('enc_cookie', 32),
    )
class MsgType4(Message):
    """Transport Data message (type 4).

    ``enc_payload`` has size 0, i.e. it consumes the remainder of the
    datagram when parsing.
    """
    fields_desc = (
        Field('type', 4, fixed=b'\4\0\0\0'),
        Field('receiver', 4, lambda x: to_bytes(x, 4, 'little')),
        Field('counter', 8, lambda x: to_bytes(x, 8, 'little')),
        Field('enc_payload', 0),
    )
class State:
    """Top-level protocol driver: registries of addresses, handshake states
    and messages, plus operations to build, send, receive and process them.

    All public methods accept either a registered instance, its name, or
    None (meaning "most recent") — resolution goes through Storage.resolve.
    """

    def __init__(self):
        variables = {}
        self.addrL = Storage('addrL', LocalAddress, variables)
        self.addrR = Storage('addrR', Address, variables)
        self.StateI0 = Storage('StateI0', StateI0, variables)
        self.StateI1 = Storage('StateI1', StateI1, variables)
        self.StateR0 = Storage('StateR0', StateR0, variables)
        self.StateR1 = Storage('StateR1', StateR1, variables)
        self.MsgType1 = Storage('MsgType1', MsgType1, variables)
        self.MsgType2 = Storage('MsgType2', MsgType2, variables)
        self.MsgType3 = Storage('MsgType3', MsgType3, variables)
        self.MsgType4 = Storage('MsgType4', MsgType4, variables)
        self.Data = Storage('Data', Data, variables)
        # NOTE(review): this assigns a fresh empty dict instead of the shared
        # ``variables`` dict the storages publish into — looks like it should
        # be ``self.variables = variables``; confirm against callers.
        self.variables = {}

    def _wait_for_message(self, what, addrL):
        """Block until a ``what.spec`` message arrives on ``addrL``; register
        the sender address and the parsed message, return (msg, addrR)."""
        addrL = self.addrL.resolve(addrL)
        msg_class = what.spec
        print(f'Wait for {msg_class.__name__} on {addrL}')
        # XXX increase this for testing data messages with higher MTU?
        data, address = addrL.socket.recvfrom(4096)
        addrR = self.addrR.add(*address)
        msg = msg_class.from_bytes(data)
        what.add_object(msg)
        return msg, addrR

    def _send_message(self, what, msg, addrR, addrL):
        """Serialize ``msg`` and send it from ``addrL`` to ``addrR``."""
        msg = what.resolve(msg)
        addrR = self.addrR.resolve(addrR)
        addrL = self.addrL.resolve(addrL)
        addrL.socket.sendto(bytes(msg), addrR.address)

    def set_local(self, host, port):
        """Register a local UDP endpoint."""
        return self.addrL.add(host, port)

    def set_remote(self, host, port):
        """Register a remote endpoint."""
        return self.addrR.add(host, port)

    def noise_init(self, SpubR=None, EprivI=None, SprivI=None, time=None, psk=None):
        """Create initiator handshake state (StateI0)."""
        return self.StateI0.add(SpubR, EprivI, SprivI, time, psk)

    def noise_resp(self, EprivR=None, SprivR=None, psk=None):
        """Create responder handshake state (StateR0)."""
        return self.StateR0.add(EprivR, SprivR, psk)

    def make_init(self, sender=None, StateI0=None):
        """Build a handshake initiation (MsgType1) from initiator state."""
        sender = to_bytes(sender, 4, 'little')
        StateI0 = self.StateI0.resolve(StateI0)
        return self.MsgType1.add(sender, StateI0.EpubI.pub, StateI0.enc_SpubI,
                                 StateI0.enc_time, SpubR=StateI0.SpubR.pub)

    def send_init(self, MsgType1=None, addrR=None, addrL=None):
        self._send_message(self.MsgType1, MsgType1, addrR, addrL)

    def wait_for_init(self, addrL=None):
        return self._wait_for_message(self.MsgType1, addrL)

    def process_init(self, MsgType1=None, StateR0=None):
        """Consume a received initiation as responder, producing StateR1."""
        MsgType1 = self.MsgType1.resolve(MsgType1)
        StateR0 = self.StateR0.resolve(StateR0)
        return self.StateR1.add(StateR0, MsgType1.EpubI, MsgType1.enc_SpubI,
                                MsgType1.enc_time)

    def make_resp(self, MsgType1=None, sender=None, StateR1=None):
        """Build a handshake response (MsgType2) to a received initiation."""
        MsgType1 = self.MsgType1.resolve(MsgType1)
        # Echo the initiator's sender index back as the receiver index.
        receiver = MsgType1.sender
        sender = to_bytes(sender, 4, 'little')
        StateR1 = self.StateR1.resolve(StateR1)
        return self.MsgType2.add(sender, receiver, StateR1.EpubR.pub,
                                 StateR1.enc_empty,
                                 SpubI=StateR1.SpubI.pub)

    def send_resp(self, MsgType2=None, addrR=None, addrL=None):
        self._send_message(self.MsgType2, MsgType2, addrR, addrL)

    def wait_for_resp(self, addrL=None):
        return self._wait_for_message(self.MsgType2, addrL)

    def process_resp(self, MsgType2=None, StateI0=None):
        """Consume a received response as initiator, producing StateI1."""
        MsgType2 = self.MsgType2.resolve(MsgType2)
        StateI0 = self.StateI0.resolve(StateI0)
        return self.StateI1.add(StateI0, MsgType2.EpubR)

    def _make_data(self, receiver=None, counter=None, Tsend=None, data=None):
        """Encrypt ``data`` under transport key ``Tsend``; the 8-byte
        little-endian counter doubles as the AEAD nonce."""
        receiver = to_bytes(receiver, 4, 'little')
        counter = to_bytes(counter, 8, 'little')
        assert len(Tsend) == 32
        data = data or b''
        nonce = int.from_bytes(counter, 'little')
        enc_data = aead_encrypt(Tsend, nonce, data, b'')
        return self.MsgType4.add(receiver, counter, enc_data)

    def make_data_as_init(self, receiver=None, counter=None, TsendI=None, data=None):
        """Build a transport message using the initiator's sending key."""
        StateI1 = self.StateI1.resolve(TsendI)
        return self._make_data(receiver, counter, StateI1.Tsend, data)

    def make_data_as_resp(self, receiver=None, counter=None, TsendR=None, data=None):
        """Build a transport message using the responder's sending key."""
        StateR1 = self.StateR1.resolve(TsendR)
        return self._make_data(receiver, counter, StateR1.Tsend, data)

    def send_data(self, MsgType4=None, addrR=None, addrL=None):
        self._send_message(self.MsgType4, MsgType4, addrR, addrL)

    def wait_for_data(self, addrL=None):
        return self._wait_for_message(self.MsgType4, addrL)

    def _process_data(self, MsgType4=None, Trecv=None):
        """Decrypt a received transport message under ``Trecv``."""
        assert len(Trecv) == 32
        MsgType4 = self.MsgType4.resolve(MsgType4)
        nonce = int.from_bytes(MsgType4.counter, 'little')
        data = aead_decrypt(Trecv, nonce, MsgType4.enc_payload, b'')
        return self.Data.add(data)

    def process_data_as_init(self, MsgType4=None, TrecvI=None):
        """Decrypt a transport message using the initiator's receiving key."""
        StateI1 = self.StateI1.resolve(TrecvI)
        return self._process_data(MsgType4, StateI1.Trecv)

    def process_data_as_resp(self, MsgType4=None, TrecvR=None):
        """Decrypt a transport message using the responder's receiving key."""
        StateR1 = self.StateR1.resolve(TrecvR)
        return self._process_data(MsgType4, StateR1.Trecv)
| 2.1875 | 2 |
BAMF_Detect/modules/dendroid.py | bwall/bamfdetect | 152 | 16204 | from common import Modules, data_strings, load_yara_rules, AndroidParseModule, ModuleMetadata
from base64 import b64decode
from string import printable
class dendroid(AndroidParseModule):
    """BAMF detection module for the Dendroid Android RAT.

    Scans file data for base64-encoded configuration strings and extracts
    the C2 URI and the password that follows it. Python 2 era code:
    iterates decoded strings per byte and uses str.encode('hex').
    """

    def __init__(self):
        md = ModuleMetadata(
            module_name="dendroid",
            bot_name="Dendroid",
            description="Android RAT",
            authors=["<NAME> (@botnet_hunter)"],
            version="1.0.0",
            date="August 18, 2014",
            references=[]
        )
        AndroidParseModule.__init__(self, md)
        self.yara_rules = None
        pass

    def _generate_yara_rules(self):
        # Lazily load and cache the compiled yara ruleset.
        if self.yara_rules is None:
            self.yara_rules = load_yara_rules("dendroid.yara")
        return self.yara_rules

    def get_bot_information(self, file_data):
        """Extract C2 configuration from raw file data.

        Returns a dict that may contain 'c2_uri' and 'password'; a
        non-UTF8 password is hex-encoded with an 'h' prefix.
        """
        results = {}
        uri = None
        password = None
        # NOTE(review): the charset below contains a stray space between
        # 'vwx' and 'yz' — verify against the standard base64 alphabet
        # used upstream before changing it.
        for s in data_strings(file_data, charset="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwx yz0123456789+/="):
            try:
                line = b64decode(s)
                if len(line) == 0:
                    continue
                # Keep only fully printable decoded strings.
                valid = True
                for c in line:
                    if c not in printable:
                        valid = False
                if not valid:
                    continue
                if line.lower().startswith("https://") or line.lower().startswith("http://"):
                    uri = line
                    continue
                if uri is not None:
                    # First printable string after the URI is the password.
                    password = line
                    break
            except TypeError:
                # Not valid base64; skip.
                continue
        if uri is not None:
            results["c2_uri"] = uri
        if password is not None:
            try:
                password.decode("utf8")
                results["password"] = password
            except UnicodeDecodeError:
                results["password"] = "h" + password.encode("hex")
        return results
Modules.list.append(dendroid()) | 2.59375 | 3 |
corehq/ex-submodules/couchforms/tests/test_analytics.py | caktus/commcare-hq | 0 | 16205 | <reponame>caktus/commcare-hq
import datetime
import uuid
from django.test import TestCase
from mock import patch
from requests import ConnectionError
from couchforms.analytics import (
app_has_been_submitted_to_in_last_30_days,
domain_has_submission_in_last_30_days,
get_all_xmlns_app_id_pairs_submitted_to_in_domain,
get_exports_by_form,
get_first_form_submission_received,
get_form_analytics_metadata,
get_last_form_submission_received,
get_number_of_forms_in_domain,
update_analytics_indexes,
)
from couchforms.models import XFormInstance, XFormError
from pillowtop.es_utils import initialize_index_and_mapping
from testapps.test_pillowtop.utils import process_pillow_changes
from corehq.apps.es.tests.utils import es_test
from corehq.elastic import get_es_new, send_to_elasticsearch
from corehq.form_processor.interfaces.processor import FormProcessorInterface
from corehq.form_processor.tests.utils import FormProcessorTestUtils
from corehq.form_processor.utils import TestFormMetadata
from corehq.pillows.mappings.xform_mapping import XFORM_INDEX_INFO
from corehq.util.elastic import ensure_index_deleted
from corehq.util.test_utils import (
DocTestMixin,
disable_quickcache,
get_form_ready_to_save,
trap_extra_setup,
)
@es_test
@disable_quickcache
class ExportsFormsAnalyticsTest(TestCase, DocTestMixin):
maxDiff = None
@classmethod
def setUpClass(cls):
super(ExportsFormsAnalyticsTest, cls).setUpClass()
from casexml.apps.case.tests.util import delete_all_xforms
from corehq.apps.app_manager.models import Application, Module, Form
delete_all_xforms()
with trap_extra_setup(ConnectionError, msg="cannot connect to elasicsearch"):
cls.es = get_es_new()
initialize_index_and_mapping(cls.es, XFORM_INDEX_INFO)
cls.domain = 'exports_forms_analytics_domain'
cls.app_id_1 = 'a' + uuid.uuid4().hex
cls.app_id_2 = 'b' + uuid.uuid4().hex
cls.xmlns_1 = 'my://crazy.xmlns/'
cls.xmlns_2 = 'my://crazy.xmlns/app'
cls.apps = [
Application(_id=cls.app_id_2, domain=cls.domain,
modules=[Module(forms=[Form(xmlns=cls.xmlns_2)])])
]
for app in cls.apps:
app.save()
cls.forms = [
XFormInstance(domain=cls.domain,
app_id=cls.app_id_1, xmlns=cls.xmlns_1),
XFormInstance(domain=cls.domain,
app_id=cls.app_id_1, xmlns=cls.xmlns_1),
XFormInstance(domain=cls.domain,
app_id=cls.app_id_2, xmlns=cls.xmlns_2),
]
cls.error_forms = [XFormError(domain=cls.domain)]
cls.all_forms = cls.forms + cls.error_forms
for form in cls.all_forms:
form.save()
send_to_elasticsearch('forms', form.to_json())
cls.es.indices.refresh(XFORM_INDEX_INFO.index)
update_analytics_indexes()
@classmethod
def tearDownClass(cls):
for form in cls.all_forms:
form.delete()
for app in cls.apps:
app.delete()
ensure_index_deleted(XFORM_INDEX_INFO.index)
super(ExportsFormsAnalyticsTest, cls).tearDownClass()
def test_get_form_analytics_metadata__no_match(self):
self.assertIsNone(
get_form_analytics_metadata(self.domain, self.app_id_1, self.xmlns_2))
def test_get_form_analytics_metadata__no_app(self):
self.assertEqual(
get_form_analytics_metadata(self.domain, self.app_id_1, self.xmlns_1),
{'submissions': 2, 'xmlns': 'my://crazy.xmlns/'}
)
def test_get_form_analytics_metadata__app(self):
self.assertEqual(get_form_analytics_metadata(self.domain, self.app_id_2, self.xmlns_2), {
'app': {'id': self.app_id_2, 'langs': [], 'name': None},
'app_deleted': False,
'form': {'id': 0, 'name': {}},
'module': {'id': 0, 'name': {}},
'submissions': 1,
'xmlns': 'my://crazy.xmlns/app'
})
    def test_get_exports_by_form(self):
        # Results are keyed by [domain, app_id, xmlns]; only the app that exists
        # in couch contributes app/module/form details.
        self.assertEqual(get_exports_by_form(self.domain), [{
            'value': {'xmlns': 'my://crazy.xmlns/', 'submissions': 2},
            'key': ['exports_forms_analytics_domain', self.app_id_1,
                    'my://crazy.xmlns/']
        }, {
            'value': {
                'xmlns': 'my://crazy.xmlns/app',
                'form': {'name': {}, 'id': 0},
                'app': {'langs': [], 'name': None, 'id': self.app_id_2},
                'module': {'name': {}, 'id': 0},
                'app_deleted': False, 'submissions': 1},
            'key': ['exports_forms_analytics_domain', self.app_id_2,
                    'my://crazy.xmlns/app']
        }])
# Minimal ES metadata mapping used to patch ES_META in the tests below, so that
# all queries resolve against the test xform index only.
TEST_ES_META = {
    XFORM_INDEX_INFO.index: XFORM_INDEX_INFO
}
@disable_quickcache
class CouchformsESAnalyticsTest(TestCase):
    """Exercises couchforms.analytics helpers against a test Elasticsearch form
    index that is populated through the real pillow pipeline."""

    domain = 'hqadmin-es-accessor'

    @classmethod
    def setUpClass(cls):
        super(CouchformsESAnalyticsTest, cls).setUpClass()

        # The patches redirect ES routing so the helper writes to the test index.
        @patch('couchforms.analytics.FormES.index', XFORM_INDEX_INFO.index)
        @patch('corehq.apps.es.es_query.ES_META', TEST_ES_META)
        @patch('corehq.elastic.ES_META', TEST_ES_META)
        def create_form_and_sync_to_es(received_on):
            # Run the save through the pillows so the form lands in ES the same
            # way it would in production.
            with process_pillow_changes('xform-pillow', {'skip_ucr': True}):
                with process_pillow_changes('DefaultChangeFeedPillow'):
                    metadata = TestFormMetadata(domain=cls.domain, app_id=cls.app_id,
                                                xmlns=cls.xmlns, received_on=received_on)
                    form = get_form_ready_to_save(metadata, is_db_test=True)
                    form_processor = FormProcessorInterface(domain=cls.domain)
                    form_processor.save_processed_models([form])
            return form

        from casexml.apps.case.tests.util import delete_all_xforms
        delete_all_xforms()
        cls.now = datetime.datetime.utcnow()
        cls._60_days = datetime.timedelta(days=60)
        cls.domain = 'my_crazy_analytics_domain'
        cls.app_id = uuid.uuid4().hex
        cls.xmlns = 'my://crazy.xmlns/'
        with trap_extra_setup(ConnectionError):
            cls.elasticsearch = get_es_new()
            initialize_index_and_mapping(cls.elasticsearch, XFORM_INDEX_INFO)
        # One recent form and one 60 days old, to exercise date-window queries.
        cls.forms = [create_form_and_sync_to_es(cls.now), create_form_and_sync_to_es(cls.now - cls._60_days)]
        cls.elasticsearch.indices.refresh(XFORM_INDEX_INFO.index)

    @classmethod
    def tearDownClass(cls):
        """Drop the test index and delete everything created for the domain."""
        ensure_index_deleted(XFORM_INDEX_INFO.index)
        FormProcessorTestUtils.delete_all_cases_forms_ledgers(cls.domain)
        super(CouchformsESAnalyticsTest, cls).tearDownClass()

    @patch('couchforms.analytics.FormES.index', XFORM_INDEX_INFO.index)
    @patch('corehq.apps.es.es_query.ES_META', TEST_ES_META)
    @patch('corehq.elastic.ES_META', TEST_ES_META)
    def test_get_number_of_cases_in_domain(self):
        # NOTE(review): despite the name, this asserts the number of *forms*.
        self.assertEqual(
            get_number_of_forms_in_domain(self.domain),
            len(self.forms)
        )

    @patch('couchforms.analytics.FormES.index', XFORM_INDEX_INFO.index)
    @patch('corehq.apps.es.es_query.ES_META', TEST_ES_META)
    @patch('corehq.elastic.ES_META', TEST_ES_META)
    def test_domain_has_submission_in_last_30_days(self):
        # The form submitted at cls.now falls inside the 30-day window.
        self.assertEqual(
            domain_has_submission_in_last_30_days(self.domain), True)

    @patch('couchforms.analytics.FormES.index', XFORM_INDEX_INFO.index)
    @patch('corehq.apps.es.es_query.ES_META', TEST_ES_META)
    @patch('corehq.elastic.ES_META', TEST_ES_META)
    def test_get_first_form_submission_received(self):
        # Oldest form is the one backdated 60 days.
        self.assertEqual(
            get_first_form_submission_received(self.domain),
            self.now - self._60_days)

    @patch('couchforms.analytics.FormES.index', XFORM_INDEX_INFO.index)
    @patch('corehq.apps.es.es_query.ES_META', TEST_ES_META)
    @patch('corehq.elastic.ES_META', TEST_ES_META)
    def test_get_last_form_submission_received(self):
        self.assertEqual(
            get_last_form_submission_received(self.domain), self.now)

    @patch('couchforms.analytics.FormES.index', XFORM_INDEX_INFO.index)
    @patch('corehq.apps.es.es_query.ES_META', TEST_ES_META)
    @patch('corehq.elastic.ES_META', TEST_ES_META)
    def test_app_has_been_submitted_to_in_last_30_days(self):
        self.assertEqual(
            app_has_been_submitted_to_in_last_30_days(self.domain, self.app_id),
            True)

    @patch('couchforms.analytics.FormES.index', XFORM_INDEX_INFO.index)
    @patch('corehq.apps.es.es_query.ES_META', TEST_ES_META)
    @patch('corehq.elastic.ES_META', TEST_ES_META)
    def test_get_all_xmlns_app_id_pairs_submitted_to_in_domain(self):
        # Both test forms share one (xmlns, app_id) pair.
        self.assertEqual(
            get_all_xmlns_app_id_pairs_submitted_to_in_domain(self.domain),
            {(self.xmlns, self.app_id)})
| 1.554688 | 2 |
src/pythae/models/svae/svae_config.py | eknag/benchmark_VAE | 1 | 16206 | <gh_stars>1-10
from pydantic.dataclasses import dataclass
from ...models import VAEConfig
@dataclass
class SVAEConfig(VAEConfig):
    r"""
    :math:`\mathcal{S}`-VAE model config class.

    Inherits all fields from :class:`VAEConfig`; no extra parameters are added.

    Parameters:
        input_dim (int): The input_data dimension
        latent_dim (int): The latent space dimension in which lives the hypersphere. Default: None.
        reconstruction_loss (str): The reconstruction loss to use ['bce', 'mse']. Default: 'mse'
    """
    pass
| 2.3125 | 2 |
amck/imat/download_data.py | aaronmckinstry706/imaterialist | 0 | 16207 | <reponame>aaronmckinstry706/imaterialist
# Parts of code taken from https://www.kaggle.com/aloisiodn/python-3-download-multi-proc-prog-bar-resume by Dourado.
# Improvements on the original script:
# * you can choose which dataset to download;
# * uses threads instead of processes;
# * unpacks data into .../label/id.jpg directory structure, which can be used easily via classes in PyTorch;
# * performance-relevant parameters are command line arguments.
# For performance parameters, the recommended values (from my machine; probably requires tweaking for others) are 100
# connection pools, 128 threads. Not all images with working URLs will be retrieved, but about 90-95% of them will. As
# a consequence, to ensure that nearly all images have been downloaded, repeat the script 3-4 times.
import argparse
import io
import json
import logging
import multiprocessing.pool as pool
import pathlib
import random
import sys
import typing
import urllib3
import PIL.Image as Image
from tqdm import tqdm
# Get command line arguments.
# Get command line arguments.
arg_parser = argparse.ArgumentParser(
    description='Downloads the data files using the links given in the JSON training, validation, and test files. '
                'Assumes that the files are stored in the directory data/metadata (relative to the current working '
                'directory). Training files will be written to data/training/label_id/image_id.jpg, validation files '
                'will be written to data/validation/label_id/image_id.jpg, and test files will be written to '
                'data/testing/image_id.jpg.')
arg_parser.add_argument(
    '--num-pools', '-p', type=int, default=10, help='Number of connection pools to cache at one time.')
arg_parser.add_argument(
    '--num-workers', '-w', type=int, default=8, help='Number of threads to perform downloads.')
arg_parser.add_argument(
    '--verbose', '-v', action='count', help='Print additional output messages. Can be passed multiple times. Once '
                                            'prints additional status information, and two or more times prints '
                                            'debugging information.', default=0)
arg_parser.add_argument(
    '--limit', '-l', type=int, default=sys.maxsize, help='Maximum number of files to download before stopping.')
arg_parser.add_argument(
    '--re-download', action='store_true', default=False, help='Whether to re-download existing files.')
arg_parser.add_argument(
    '--dataset', '-d', type=str, choices={'training', 'validation', 'testing'}, help='Which dataset to download.')
parsed_args = arg_parser.parse_args()

# Set up logging. Verbosity of the stdout handler depends on -v count.
urllib3.disable_warnings()
LOGGER = logging.getLogger(__name__)
STDOUT_HANDLER = logging.StreamHandler(sys.stdout)
if parsed_args.verbose == 1:
    STDOUT_HANDLER.setLevel(logging.INFO)
elif parsed_args.verbose >= 2:
    STDOUT_HANDLER.setLevel(logging.DEBUG)
LOGGER.addHandler(STDOUT_HANDLER)
LOGGER.setLevel(logging.DEBUG)

# Initialize globals.
# failed_downloads collects (image_id, error message) pairs from worker threads.
failed_downloads = []
http = urllib3.PoolManager(num_pools=parsed_args.num_pools)
def download_image(url: str, filepath: pathlib.Path):
    """Download the image at *url* and save it as a JPEG at *filepath*.

    Respects the global ``--re-download`` flag: an existing file is either
    removed and re-fetched, or left untouched (no-op).

    :param url: source URL of the image.
    :param filepath: destination path for the converted JPEG.
    :raises IOError: if the server does not answer with HTTP 200.
    """
    global parsed_args
    global http

    file_exists = filepath.exists()
    if parsed_args.re_download and file_exists:
        filepath.unlink()
    elif not parsed_args.re_download and file_exists:
        # Already downloaded and re-download not requested: nothing to do.
        return
    response = http.request('GET', url, timeout=urllib3.Timeout(10))
    if response.status != 200:
        # Fail fast on HTTP errors instead of handing an error page to PIL;
        # callers record the failure in `failed_downloads`.
        raise IOError('HTTP status {} for {}'.format(response.status, url))
    image_data = response.data
    pil_image = Image.open(io.BytesIO(image_data))
    # Convert to RGB so palette/grayscale/CMYK images can be saved as JPEG.
    pil_image_rgb = pil_image.convert('RGB')
    pil_image_rgb.save(str(filepath), format='JPEG', quality=90)
def download_labeled_image(info: typing.Tuple[str, int, int, pathlib.Path]):
    """Download one labeled image into ``base_dir/label_id/image_id.jpg``.

    *info* is ``(url, image_id, label_id, base_dir)``. Errors are recorded in
    the global ``failed_downloads`` list instead of propagating, so worker
    threads never die on a single bad URL.
    """
    global failed_downloads

    url, image_id, label_id, base_dir = info
    target_dir = base_dir.joinpath(str(label_id))
    target_dir.mkdir(parents=True, exist_ok=True)
    destination = target_dir.joinpath(str(image_id) + '.jpg')
    try:
        download_image(url, destination)
    except Exception as e:
        failed_downloads.append((image_id, str(e)))
def download_unlabeled_image(info: typing.Tuple[str, int, pathlib.Path]):
    """Download one unlabeled (test) image into ``base_dir/dummy-class/image_id.jpg``.

    *info* is ``(url, image_id, base_dir)``. Errors are recorded in the global
    ``failed_downloads`` list instead of propagating, so worker threads never
    die on a single bad URL.
    """
    global failed_downloads

    url, image_id, base_dir = info
    target_dir = base_dir.joinpath('dummy-class')
    target_dir.mkdir(parents=True, exist_ok=True)
    destination = target_dir.joinpath(str(image_id) + '.jpg')
    try:
        download_image(url, destination)
    except Exception as e:
        failed_downloads.append((image_id, str(e)))
training_base_dir = pathlib.Path('data/training')
validation_base_dir = pathlib.Path('data/validation')
testing_base_dir = pathlib.Path('data/testing')
metadata_base_dir = pathlib.Path('data/metadata')

# Load the URL/label metadata for each dataset split.
with metadata_base_dir.joinpath('train.json').open('r') as training_urls_file:
    training_urls_json = json.load(training_urls_file)
with metadata_base_dir.joinpath('validation.json').open('r') as validation_urls_file:
    validation_urls_json = json.load(validation_urls_file)
with metadata_base_dir.joinpath('test.json').open('r') as testing_urls_file:
    testing_urls_json = json.load(testing_urls_file)

num_training_images = len(training_urls_json['images'])
num_validation_images = len(validation_urls_json['images'])
num_testing_images = len(testing_urls_json['images'])
LOGGER.info('{} training images, {} validation images, and {} testing images.'.format(
    num_training_images, num_validation_images, num_testing_images))

thread_pool = pool.ThreadPool(processes=parsed_args.num_workers)

# Download only the split selected via --dataset. Shuffling the work list
# means repeated runs retry different failures first.
if parsed_args.dataset == 'training':
    training_image_info = []
    for image_info, annotation_info in zip(training_urls_json['images'], training_urls_json['annotations']):
        training_image_info.append((image_info['url'][0], image_info['image_id'], annotation_info['label_id'],
                                    training_base_dir))
    random.shuffle(training_image_info)
    with tqdm(total=len(training_image_info), desc='Training images') as t:
        for i, _ in enumerate(thread_pool.imap_unordered(download_labeled_image, training_image_info)):
            t.update(1)
            if i >= parsed_args.limit:
                break
elif parsed_args.dataset == 'validation':
    validation_image_info = []
    for image_info, annotation_info in zip(validation_urls_json['images'], validation_urls_json['annotations']):
        validation_image_info.append((image_info['url'][0], image_info['image_id'], annotation_info['label_id'],
                                      validation_base_dir))
    random.shuffle(validation_image_info)
    with tqdm(total=len(validation_image_info), desc='Validation images') as t:
        for i, _ in enumerate(thread_pool.imap_unordered(download_labeled_image, validation_image_info)):
            t.update(1)
            if i >= parsed_args.limit:
                break
elif parsed_args.dataset == 'testing':
    # Test images have no annotations, so only (url, id, dir) tuples are built.
    testing_image_info = []
    for image_info in testing_urls_json['images']:
        testing_image_info.append((image_info['url'][0], image_info['image_id'], testing_base_dir))
    random.shuffle(testing_image_info)
    with tqdm(total=len(testing_image_info), desc='Testing images') as t:
        for i, _ in enumerate(thread_pool.imap_unordered(download_unlabeled_image, testing_image_info)):
            t.update(1)
            if i >= parsed_args.limit:
                break

LOGGER.info('{} images could not be retrieved.'.format(len(failed_downloads)))
JTP Recap./2.Program_IO/function.py | SNP0301/Study_Python | 0 | 16208 | """
Function
def function_name(arg1, arg2, ...) :
<op 1>
<op 2>
...
Function with undefined amount of input
def fn_name(*args) --> args' elements make tuple.
kwargs = Keyword Parameter
>>> def print_kwargs(**kwargs):
... print(kwargs)
...
>>> print_kwargs(a=1)
{'a':1}
>>> print_kwargs(name='foo', age=3)
{'age':3, 'name':'foo'}
**args_name = make args_name as a dictionary
clearing & assignment : element should be added in the last part of args
Lambda : another method to make fn
lambda arg1, arg2, .. : operation_of_fn
>>> add = lambda a,b : a+b
>>> result = add(3,4)
>>> print(result)
7
lambda can return a result without using the 'return' statement
Contents Source : https://wikidocs.net/24
""" | 4.5625 | 5 |
tweet/common.py | skiwheelr/URS | 4 | 16209 | all
import tweepy, config, users, re, groupy
from tweepy import OAuthHandler
from tweepy import API
# Log the tweepy version for debugging.
print(tweepy.__version__)

# Authenticate with the Twitter API using credentials from config.
auth = OAuthHandler(config.consumer_key, config.consumer_secret)
auth.set_access_token(config.access_token,config.access_token_secret)
api = tweepy.API(auth)

from groupy.client import Client

# GroupMe client used by messenger() to post alerts.
client = Client.from_token(config.groupme_token)
def messenger(tickr):
    """Post the ticker alert *tickr* to the "COMMonMENTions" GroupMe group."""
    for grp in client.groups.list():
        if grp.name != "COMMonMENTions":
            continue
        grp.post(text="(<50 Tweets) Mentioned by @ripster47, @pharmdca and @mrzackmorris: " + str(tickr))
# Regex for cashtags: $ followed by 3-4 uppercase letters (captured group).
exp = r'\$([A-Z]{3,4})'


def _collect_mentions(user):
    """Return the unique $TICKR symbols mentioned in *user*'s last 100
    original (non-retweet) tweets, in first-seen order.

    :param user: Twitter screen name to scan.
    """
    mentioned = []
    tweets = api.user_timeline(screen_name=user, count=100, include_rts=False,
                               tweet_mode='extended')
    for info in tweets:
        for ticker in re.findall(exp, info.full_text):
            if ticker not in mentioned:
                mentioned.append(ticker)
    return mentioned


# Collect mentions for the three tracked accounts. users.list ordering is
# assumed to be [mrzackmorris, pharmdca, ripster47] -- TODO confirm.
one = _collect_mentions(users.list[0])
print(users.list[0], "mentioned", one)
two = _collect_mentions(users.list[1])
print(users.list[1], "mentioned", two)
three = _collect_mentions(users.list[2])
print(users.list[2], "mentioned", three)

# Alert only when all three accounts mentioned the same ticker(s).
all = []
common = set(one) & set(two) & set(three)
if common:
    all.append(common)
    print("All 3 mentioned ", all)
    messenger(all)
else:
    print("Nothing Notable")
crystalpy/examples/PlotData1D.py | oasys-kit/crystalpy | 0 | 16210 | """
---OK---
"""
from collections import OrderedDict
import copy
import numpy as np
from crystalpy.examples.Values import Interval
class PlotData1D(object):
    """
    Represents a 1D plot. The graph data together with related information.

    x/y hold the data series (assumed monotonic in x for smart_unwrap -- see
    method notes); *_min/*_max hold the display ranges.
    """
    def __init__(self, title, title_x_axis, title_y_axis):
        """
        Constructor.
        :param title: Plot title.
        :param title_x_axis: X axis' title.
        :param title_y_axis: Y axis' title.
        """
        # Set titles.
        self.title = title
        self.title_x_axis = title_x_axis
        self.title_y_axis = title_y_axis

        # Initialize X and Y ranges.
        self.x_min = None
        self.x_max = None
        self.y_min = None
        self.y_max = None

        # Initialize X and Y data.
        self.x = None
        self.y = None

        # Initialize plot information to empty ordered dictionary.
        self._plot_info = OrderedDict()

    def set_x_min(self, x_min):
        """
        Sets x range minimum.
        :param x_min: X range minimum.
        """
        self.x_min = x_min

    def set_x_max(self, x_max):
        """
        Sets X range maximum.
        :param x_max: X range maximum.
        """
        self.x_max = x_max

    def set_y_min(self, y_min):
        """
        Sets Y range minimum.
        :param y_min: Y range minimum.
        """
        self.y_min = y_min

    def set_y_max(self, y_max):
        """
        Sets Y range maximum.
        :param y_max: Y range maximum.
        """
        self.y_max = y_max

    def set_x(self, x):
        """
        Sets X data.
        :param x: x data.
        """
        self.x = x

    def set_y(self, y):
        """
        Sets Y data.
        :param y: y data.
        """
        self.y = y

    def _set_interval_to_zero(self, indices, lower=True, upper=True):
        """
        Sets the y's to zero in certain intervals of x's (extrema included).
        :param indices: pair with the two extrema of the x interval.
        :param lower: if True include the lower end of the interval.
        :param upper: if True include the upper end of the interval.
        """
        try:
            inf_index = indices.inf
            sup_index = indices.sup

            # adjust the indices according to the lower and upper parameters.
            if not lower:
                inf_index += 1
            if not upper:
                sup_index -= 1

            # in the index range defined by inf_index and sup_index, set the y's to zero.
            for i in range(inf_index, sup_index + 1):
                self.y[i] = 0
        except TypeError:
            # indices.inf/sup were None or non-integer; report and leave y untouched.
            print("\nERROR: could not set the values to zero in the specified intervals.\n")

    def _unwrap_interval(self, indices, deg, lower=True, upper=True):
        """
        Unwraps the y data vector in a certain interval.
        :param indices: indices determining the interval to unwrap.
        :param deg: True if values are in degrees. False if radians.
        :param lower: if True include the lower end of the interval.
        :param upper: if True include the upper end of the interval.
        """
        inf_index = indices.inf
        sup_index = indices.sup

        # adjust the indices according to the lower and upper parameters.
        if not lower:
            inf_index += 1
        if not upper:
            sup_index -= 1

        # numpy.unwrap works on data in radians, so if the data is in degrees, it needs to be converted.
        if deg:
            self.y = np.deg2rad(self.y)
            # cut out the part to unwrap and then stitch it back on.
            temp = self.y[inf_index:sup_index + 1]
            self.y[inf_index:sup_index + 1] = np.unwrap(temp)
            # convert back to degrees.
            self.y = np.rad2deg(self.y)
            return

        # radians path: cut out the part to unwrap and then stitch it back on.
        temp = self.y[inf_index:sup_index + 1]
        self.y[inf_index:sup_index + 1] = np.unwrap(temp)

    def _optimize_interval(self, indices, phase_limits):
        """
        Takes an interval and restricts it so that the extrema match the points where the phase
        becomes bigger(smaller) than some upper(lower) limit.
        :param indices: indices corresponding to the interval to be optimized.
        :param phase_limits: the limits of the phase to be used for the optimization, [min, max].
        :return: indices of the optimized interval (the original indices on error).
        """
        inf = indices.inf
        sup = indices.sup

        # check the intervals: both endpoints must lie inside the phase limits.
        if (self.y[inf] > phase_limits[1] or
                self.y[inf] < phase_limits[0]):
            print("\nERROR in PlotData1D._optimize_interval: First value in the interval exceeds limitations.")
            return indices
        if (self.y[sup] > phase_limits[1] or
                self.y[sup] < phase_limits[0]):
            print("\nERROR in PlotData1D._optimize_interval: Last value in the interval exceeds limitations.")
            return indices

        # starting from the lower end, advance while the phase stays in range.
        i = inf  # counter initialization.
        while phase_limits[0] < self.y[i] < phase_limits[1]:
            i += 1
        # if the conditions are not satisfied for index i:
        new_inf = i - 1

        # starting from the upper end, retreat while the phase stays in range.
        i = sup  # counter initialization.
        while phase_limits[0] < self.y[i] < phase_limits[1]:
            i -= 1
        # if the conditions are not satisfied for index i:
        new_sup = i + 1

        new_indices = Interval(new_inf, new_sup)

        # check that the inf is smaller than (or equal to) the sup.
        if not new_indices.check_extrema():
            print("\nERROR in PlotData1D._optimize_interval: The phase might be undersampled.")
            return indices

        return new_indices

    def smart_unwrap(self, intervals, intervals_number, phase_limits, deg):
        """
        Unwraps data correctly by avoiding discontinuities.
        :param intervals: list of pairs. Each element is a pair with the two extrema of the x interval.
        :param intervals_number: number of intervals to set to zero.
        :param phase_limits: min and max tolerable values for the phase plot, [min, max].
        :param deg: True if values are in degrees. False if radians.
        """
        if intervals_number == 0:
            # no zeroed intervals: plain unwrap of the whole series.
            if deg:
                self.y = np.deg2rad(self.y)  # unwrap works with radians.
                self.y = np.unwrap(self.y)
                self.y = np.rad2deg(self.y)  # convert back to degrees.
                return
            self.y = np.unwrap(self.y)
            return

        # transform self.x into a numpy.ndarray object.
        x = np.asarray(self.x)
        # careful! only works with monotonic sequences.
        temp_index = x.argmin()
        for interval in intervals:
            inf = interval.inf
            sup = interval.sup

            # find the indices of the y array corresponding to inf and sup.
            inf_index = abs(x - inf).argmin()
            sup_index = abs(x - sup).argmin()

            # optimize the interval.
            indices = Interval(inf_index, sup_index)
            new_indices = self._optimize_interval(indices, phase_limits)

            # unwrap the data before the interval.
            indices_to_unwrap = Interval(temp_index, new_indices.inf)
            self._unwrap_interval(indices_to_unwrap, deg, lower=True, upper=False)

            # set the interval to zero.
            indices_to_set = new_indices
            self._set_interval_to_zero(indices_to_set, lower=True, upper=False)

            temp_index = new_indices.sup

        # unwrap the tail after the last interval.
        # careful! only works with monotonic sequences.
        indices_to_unwrap = Interval(temp_index, x.argmax())
        self._unwrap_interval(indices_to_unwrap, deg, lower=True, upper=True)

    def add_xy_point(self, x_point, y_point):
        """
        Adds an x-y point. Assumes self.x and self.y are lists -- TODO confirm
        callers initialize them before use (they default to None).
        :param x_point: x coordinate.
        :param y_point: y coordinate.
        """
        self.x.append(x_point)
        self.y.append(y_point)

    def add_plot_info(self, name, info):
        """
        Adds a plot info.
        :param name: Name of the info.
        :param info: The info.
        """
        self._plot_info[name] = info

    def plot_info(self):
        """
        Returns the plot info copy.
        :return: The plot info.
        """
        return copy.deepcopy(self._plot_info)
| 2.921875 | 3 |
src/deep_dialog/usersims/__init__.py | Yuqing2018/tcbot_python3 | 0 | 16211 | from .usersim_rule import *
from .realUser import * | 1.054688 | 1 |
hs_file_types/models/geofeature.py | tommac7/hydroshare | 0 | 16212 | <filename>hs_file_types/models/geofeature.py<gh_stars>0
import os
import logging
import shutil
import zipfile
import xmltodict
from lxml import etree
from osgeo import ogr, osr
from django.core.exceptions import ValidationError
from django.db import models, transaction
from django.utils.html import strip_tags
from django.template import Template, Context
from dominate.tags import legend, table, tbody, tr, th, div
from hs_core.models import Title, CoreMetaData
from hs_core.hydroshare import utils
from hs_core.forms import CoverageTemporalForm
from hs_core.signals import post_add_geofeature_aggregation
from hs_geographic_feature_resource.models import GeographicFeatureMetaDataMixin, \
OriginalCoverage, GeometryInformation, FieldInformation
from base import AbstractFileMetaData, AbstractLogicalFile, FileTypeContext
# Placeholder value used when a metadata attribute cannot be determined.
UNKNOWN_STR = "unknown"
class GeoFeatureFileMetaData(GeographicFeatureMetaDataMixin, AbstractFileMetaData):
    """File-type metadata for a geographic feature (shapefile) aggregation."""

    # the metadata element models are from the geographic feature resource type app
    model_app_label = 'hs_geographic_feature_resource'

    def get_metadata_elements(self):
        """Return base elements plus the geofeature-specific ones."""
        elements = super(GeoFeatureFileMetaData, self).get_metadata_elements()
        elements += [self.originalcoverage, self.geometryinformation]
        elements += list(self.fieldinformations.all())
        return elements

    @classmethod
    def get_metadata_model_classes(cls):
        """Map element names to their model classes (extends the base map)."""
        metadata_model_classes = super(GeoFeatureFileMetaData, cls).get_metadata_model_classes()
        metadata_model_classes['originalcoverage'] = OriginalCoverage
        metadata_model_classes['geometryinformation'] = GeometryInformation
        metadata_model_classes['fieldinformation'] = FieldInformation
        return metadata_model_classes

    def get_html(self):
        """overrides the base class function"""
        html_string = super(GeoFeatureFileMetaData, self).get_html()
        html_string += self.geometryinformation.get_html()
        # coverages are optional -- render each only when present.
        if self.spatial_coverage:
            html_string += self.spatial_coverage.get_html()
        if self.originalcoverage:
            html_string += self.originalcoverage.get_html()
        if self.temporal_coverage:
            html_string += self.temporal_coverage.get_html()
        html_string += self._get_field_informations_html()
        template = Template(html_string)
        context = Context({})
        return template.render(context)

    def _get_field_informations_html(self):
        """Render the attribute-table field info as an HTML table (dominate)."""
        root_div = div(cls="content-block")
        with root_div:
            legend('Field Information')
            with table(style="width: 100%;"):
                with tbody():
                    with tr(cls='row'):
                        th('Name')
                        th('Type')
                        th('Width')
                        th('Precision')
                    for field_info in self.fieldinformations.all():
                        field_info.get_html(pretty=False)
        return root_div.render()

    def get_html_forms(self, datatset_name_form=True):
        """overrides the base class function to generate html needed for metadata editing

        NOTE(review): the misspelled 'datatset_name_form' parameter name is kept
        to preserve the public signature for keyword callers.
        """
        root_div = div("{% load crispy_forms_tags %}")
        with root_div:
            super(GeoFeatureFileMetaData, self).get_html_forms()
            with div(cls="content-block"):
                div("{% crispy geometry_information_form %}")
            with div(cls="content-block"):
                div("{% crispy spatial_coverage_form %}")
            with div(cls="content-block"):
                div("{% crispy original_coverage_form %}")

        template = Template(root_div.render())
        context_dict = dict()
        context_dict["geometry_information_form"] = self.get_geometry_information_form()

        # Temporal coverage is the only editable element: pick the update or
        # create endpoint depending on whether a coverage already exists.
        update_action = "/hsapi/_internal/GeoFeatureLogicalFile/{0}/{1}/{2}/update-file-metadata/"
        create_action = "/hsapi/_internal/GeoFeatureLogicalFile/{0}/{1}/add-file-metadata/"
        temp_cov_form = self.get_temporal_coverage_form()
        if self.temporal_coverage:
            form_action = update_action.format(self.logical_file.id, "coverage",
                                               self.temporal_coverage.id)
            temp_cov_form.action = form_action
        else:
            form_action = create_action.format(self.logical_file.id, "coverage")
            temp_cov_form.action = form_action

        context_dict["temp_form"] = temp_cov_form
        context_dict['original_coverage_form'] = self.get_original_coverage_form()
        context_dict['spatial_coverage_form'] = self.get_spatial_coverage_form()
        context = Context(context_dict)
        rendered_html = template.render(context)
        rendered_html += self._get_field_informations_html()
        return rendered_html

    def get_geometry_information_form(self):
        """Read-only form for the geometry information element."""
        return GeometryInformation.get_html_form(resource=None, element=self.geometryinformation,
                                                 file_type=True, allow_edit=False)

    def get_original_coverage_form(self):
        """Read-only form for the original (projected) coverage element."""
        return OriginalCoverage.get_html_form(resource=None, element=self.originalcoverage,
                                              file_type=True, allow_edit=False)

    @classmethod
    def validate_element_data(cls, request, element_name):
        """overriding the base class method"""
        # the only metadata that we are allowing for editing is the temporal coverage
        element_name = element_name.lower()
        if element_name != 'coverage' or 'start' not in request.POST:
            err_msg = 'Data for temporal coverage is missing'
            return {'is_valid': False, 'element_data_dict': None, "errors": err_msg}
        element_form = CoverageTemporalForm(data=request.POST)
        if element_form.is_valid():
            return {'is_valid': True, 'element_data_dict': element_form.cleaned_data}
        else:
            return {'is_valid': False, 'element_data_dict': None, "errors": element_form.errors}

    def get_xml(self, pretty_print=True):
        """Generates ORI+RDF xml for this aggregation metadata"""
        # get the xml root element and the xml element to which contains all other elements
        RDF_ROOT, container_to_add_to = super(GeoFeatureFileMetaData, self)._get_xml_containers()

        if self.geometryinformation:
            self.geometryinformation.add_to_xml_container(container_to_add_to)
        for fieldinfo in self.fieldinformations.all():
            fieldinfo.add_to_xml_container(container_to_add_to)
        if self.originalcoverage:
            self.originalcoverage.add_to_xml_container(container_to_add_to)

        return CoreMetaData.XML_HEADER + '\n' + etree.tostring(RDF_ROOT, encoding='UTF-8',
                                                               pretty_print=pretty_print)
class GeoFeatureLogicalFile(AbstractLogicalFile):
metadata = models.OneToOneField(GeoFeatureFileMetaData, related_name="logical_file")
data_type = "GeographicFeature"
@classmethod
def get_allowed_uploaded_file_types(cls):
"""only .zip or .shp file can be set to this logical file group"""
# See Shapefile format:
# http://resources.arcgis.com/en/help/main/10.2/index.html#//005600000003000000
return (".zip", ".shp", ".shx", ".dbf", ".prj",
".sbx", ".sbn", ".cpg", ".xml", ".fbn",
".fbx", ".ain", ".aih", ".atx", ".ixs",
".mxs")
@classmethod
def get_main_file_type(cls):
"""The main file type for this aggregation"""
return ".shp"
@classmethod
def get_allowed_storage_file_types(cls):
"""file types allowed in this logical file group are the followings"""
return [".shp", ".shx", ".dbf", ".prj",
".sbx", ".sbn", ".cpg", ".xml", ".fbn",
".fbx", ".ain", ".aih", ".atx", ".ixs",
".mxs"
]
@classmethod
def create(cls, resource):
"""this custom method MUST be used to create an instance of this class"""
feature_metadata = GeoFeatureFileMetaData.objects.create(keywords=[])
# Note we are not creating the logical file record in DB at this point
# the caller must save this to DB
return cls(metadata=feature_metadata, resource=resource)
@staticmethod
def get_aggregation_display_name():
return 'Geographic Feature Content: The multiple files that are part of a geographic ' \
'shapefile'
@staticmethod
def get_aggregation_type_name():
return "GeographicFeatureAggregation"
# used in discovery faceting to aggregate native and composite content types
@staticmethod
def get_discovery_content_type():
"""Return a human-readable content type for discovery.
This must agree between Composite Types and native types.
"""
return "Geographic Feature (ESRI Shapefiles)"
@property
def supports_resource_file_move(self):
"""resource files that are part of this logical file can't be moved"""
return False
@property
def supports_resource_file_add(self):
"""doesn't allow a resource file to be added"""
return False
@property
def supports_resource_file_rename(self):
"""resource files that are part of this logical file can't be renamed"""
return False
@property
def supports_delete_folder_on_zip(self):
"""does not allow the original folder to be deleted upon zipping of that folder"""
return False
@classmethod
def check_files_for_aggregation_type(cls, files):
"""Checks if the specified files can be used to set this aggregation type
:param files: a list of ResourceFile objects
:return If the files meet the requirements of this aggregation type, then returns this
aggregation class name, otherwise empty string.
"""
if _check_if_shape_files(files, temp_files=False):
return cls.__name__
else:
return ""
@classmethod
def set_file_type(cls, resource, user, file_id=None, folder_path=None):
""" Creates a GeoFeatureLogicalFile (aggregation) from a .shp or a .zip resource file """
log = logging.getLogger()
with FileTypeContext(aggr_cls=cls, user=user, resource=resource, file_id=file_id,
folder_path=folder_path,
post_aggr_signal=post_add_geofeature_aggregation,
is_temp_file=True) as ft_ctx:
res_file = ft_ctx.res_file
try:
meta_dict, shape_files, shp_res_files = extract_metadata_and_files(resource,
res_file)
except ValidationError as ex:
log.exception(ex.message)
raise ex
file_name = res_file.file_name
# file name without the extension
base_file_name = file_name[:-len(res_file.extension)]
xml_file = ''
for f in shape_files:
if f.lower().endswith('.shp.xml'):
xml_file = f
break
file_folder = res_file.file_folder
upload_folder = file_folder
file_type_success = False
res_files_to_delete = []
msg = "GeoFeature aggregation. Error when creating aggregation. Error:{}"
with transaction.atomic():
try:
if res_file.extension.lower() == ".zip":
files_to_upload = shape_files
res_files_for_aggr = []
res_files_to_delete.append(res_file)
else:
files_to_upload = []
res_files_for_aggr = shp_res_files
# create a GeoFeature logical file object
logical_file = cls.create_aggregation(dataset_name=base_file_name,
resource=resource,
res_files=res_files_for_aggr,
new_files_to_upload=files_to_upload,
folder_path=upload_folder)
log.info("GeoFeature aggregation - files were added to the aggregation.")
add_metadata(resource, meta_dict, xml_file, logical_file)
log.info("GeoFeature aggregation and resource level metadata updated.")
file_type_success = True
ft_ctx.logical_file = logical_file
ft_ctx.res_files_to_delete = res_files_to_delete
except Exception as ex:
msg = msg.format(ex.message)
log.exception(msg)
if not file_type_success:
raise ValidationError(msg)
@classmethod
def _validate_set_file_type_inputs(cls, resource, file_id=None, folder_path=None):
res_file, folder_path = super(GeoFeatureLogicalFile, cls)._validate_set_file_type_inputs(
resource, file_id, folder_path)
if folder_path is None and res_file.extension.lower() not in ('.zip', '.shp'):
# when a file is specified by the user for creating this file type it must be a
# zip or shp file
raise ValidationError("Not a valid geographic feature file.")
return res_file, folder_path
@classmethod
def get_primary_resouce_file(cls, resource_files):
"""Gets a resource file that has extension .shp from the list of files *resource_files* """
res_files = [f for f in resource_files if f.extension.lower() == '.shp']
return res_files[0] if res_files else None
    def create_aggregation_xml_documents(self, create_map_xml=True):
        """Generates the aggregation level xml documents via the base class, then
        clears the metadata dirty flag since the xml files are now up to date.

        :param create_map_xml: if True the resource map xml document is also
            (re)generated along with the metadata xml document
        """
        super(GeoFeatureLogicalFile, self).create_aggregation_xml_documents(create_map_xml)
        self.metadata.is_dirty = False
        self.metadata.save()
def extract_metadata_and_files(resource, res_file, file_type=True):
    """
    validates shape files and extracts metadata
    :param resource: an instance of BaseResource
    :param res_file: an instance of ResourceFile
    :param file_type: A flag to control if extraction being done for file type or resource type
    :return: a dict of extracted metadata, a list of file paths of shape related files on the
    temp directory, a list of resource files retrieved from iRODS for this processing
    :raises ValidationError: if the related files do not form a valid set of
        shapefile component files, or if metadata extraction fails
    """
    shape_files, shp_res_files = get_all_related_shp_files(resource, res_file, file_type=file_type)
    # all related files are copied into a single temp directory
    temp_dir = os.path.dirname(shape_files[0])

    def _remove_temp_dir():
        # cleanup of the scratch directory before raising an error
        if os.path.isdir(temp_dir):
            shutil.rmtree(temp_dir)

    if not _check_if_shape_files(shape_files):
        if res_file.extension.lower() == '.shp':
            err_msg = "There was a problem parsing the component files associated with " \
                      "{folder_path} as a geographic shapefile. This may be because a component " \
                      "file is corrupt or missing. The .shp, .shx, and .dbf shapefile component " \
                      "files are required. Other shapefile component files " \
                      "(.cpg, .prj, .sbn, .sbx, .xml, .fbn, .fbx, .ain, .aih, .atx, .ixs, .mxs) " \
                      "should also be added where available."
            err_msg = err_msg.format(folder_path=res_file.short_path)
        else:
            err_msg = "One or more dependent shape files are missing in the selected zip file " \
                      "or one or more files are not of shape file type."
        _remove_temp_dir()
        raise ValidationError(err_msg)

    # locate the .shp file among the related files
    shp_file = ''
    for f in shape_files:
        if f.lower().endswith('.shp'):
            shp_file = f
            break
    try:
        meta_dict = extract_metadata(shp_file_full_path=shp_file)
        return meta_dict, shape_files, shp_res_files
    except Exception as ex:
        _remove_temp_dir()
        if file_type:
            msg = "GeoFeature file type. Error when setting file type. Error:{}"
        else:
            msg = "Failed to parse the .shp file. Error:{}"
        # str(ex) instead of ex.message: BaseException.message was removed in
        # Python 3 (and deprecated since Python 2.6), so ex.message would
        # raise AttributeError and mask the real failure
        msg = msg.format(str(ex))
        raise ValidationError(msg)
def add_metadata(resource, metadata_dict, xml_file, logical_file=None):
    """
    creates/updates metadata at resource and file level
    :param resource: an instance of BaseResource
    :param metadata_dict: dict containing extracted metadata
    :param xml_file: file path (on temp directory) of the xml file that is part of the
    geo feature files
    :param logical_file: an instance of GeoFeatureLogicalFile if metadata needs to be part of the
    logical file
    :return:
    """
    # populate resource and logical file level metadata
    target_obj = logical_file if logical_file is not None else resource
    # spatial (WGS84) coverage - only present in metadata_dict when the extent
    # could be reprojected (see extract_metadata)
    if "coverage" in metadata_dict.keys():
        coverage_dict = metadata_dict["coverage"]['Coverage']
        # replace any existing box coverage with the newly extracted one
        target_obj.metadata.coverages.all().filter(type='box').delete()
        target_obj.metadata.create_element('coverage',
                                           type=coverage_dict['type'],
                                           value=coverage_dict['value'])
    # original (native projection) coverage - replace existing element, if any
    originalcoverage_dict = metadata_dict["originalcoverage"]['originalcoverage']
    if target_obj.metadata.originalcoverage is not None:
        target_obj.metadata.originalcoverage.delete()
    target_obj.metadata.create_element('originalcoverage', **originalcoverage_dict)
    # attribute field information - delete all existing entries, then recreate
    field_info_array = metadata_dict["field_info_array"]
    target_obj.metadata.fieldinformations.all().delete()
    for field_info in field_info_array:
        field_info_dict = field_info["fieldinformation"]
        target_obj.metadata.create_element('fieldinformation', **field_info_dict)
    # geometry information (feature count, geometry type) - replace existing
    geometryinformation_dict = metadata_dict["geometryinformation"]
    if target_obj.metadata.geometryinformation is not None:
        target_obj.metadata.geometryinformation.delete()
    target_obj.metadata.create_element('geometryinformation', **geometryinformation_dict)
    if xml_file:
        # pull title/abstract/keywords out of the ESRI .shp.xml metadata file
        shp_xml_metadata_list = parse_shp_xml(xml_file)
        for shp_xml_metadata in shp_xml_metadata_list:
            if 'description' in shp_xml_metadata:
                # set the abstract at the resource level only when the
                # resource has no description yet (existing one is preserved)
                if not resource.metadata.description:
                    abstract = shp_xml_metadata['description']['abstract']
                    resource.metadata.create_element('description',
                                                     abstract=abstract)
            elif 'title' in shp_xml_metadata:
                title = shp_xml_metadata['title']['value']
                title_element = resource.metadata.title
                # only replace the default placeholder title
                if title_element.value.lower() == 'untitled resource':
                    resource.metadata.update_element('title', title_element.id, value=title)
                if logical_file is not None:
                    logical_file.dataset_name = title
                    logical_file.save()
            elif 'subject' in shp_xml_metadata:
                # append new keywords to existing keywords - at the resource level
                existing_keywords = [subject.value.lower() for
                                     subject in resource.metadata.subjects.all()]
                keyword = shp_xml_metadata['subject']['value']
                if keyword.lower() not in existing_keywords:
                    resource.metadata.create_element('subject', value=keyword)
                # add keywords at the logical file level
                if logical_file is not None:
                    if keyword not in logical_file.metadata.keywords:
                        logical_file.metadata.keywords += [keyword]
                        logical_file.metadata.save()
def get_all_related_shp_files(resource, selected_resource_file, file_type):
    """
    This helper function copies all the related shape files to a temp directory
    and returns a list of those temp file paths as well as a list of existing
    related resource file objects
    :param resource: an instance of BaseResource to which the *selected_resource_file* belongs
    :param selected_resource_file: an instance of ResourceFile selected by the user to set
    GeoFeatureFile type (the file must be a .shp or a .zip file)
    :param file_type: a flag (True/False) to control resource VS file type actions
    :return: a list of temp file paths for all related shape files, and a list of corresponding
    resource file objects
    """
    def collect_shape_resource_files(res_file):
        # Collects *res_file* when its base name matches the base name of the
        # user selected .shp file. The comparison drops the file extension
        # (-4 strips '.shp'/'.shx'/... and -8 strips '.shp.xml').
        # Fixed: append the parameter res_file instead of relying on the
        # enclosing loop variable 'f' (equal at the call site, but fragile).
        if res_file.short_path.lower().endswith('.shp.xml'):
            if selected_resource_file.short_path[:-4] == res_file.short_path[:-8]:
                shape_res_files.append(res_file)
        elif selected_resource_file.short_path[:-4] == res_file.short_path[:-4]:
            shape_res_files.append(res_file)

    shape_temp_files = []
    shape_res_files = []
    temp_dir = ''
    if selected_resource_file.extension.lower() == '.shp':
        # find all resource files in the same folder that belong to the
        # selected shapefile
        for f in resource.files.all():
            if f.file_folder == selected_resource_file.file_folder:
                if f.extension.lower() == '.xml' and not f.file_name.lower().endswith('.shp.xml'):
                    # ignore xml files that are not shapefile metadata files
                    continue
                if f.extension.lower() in GeoFeatureLogicalFile.get_allowed_storage_file_types():
                    collect_shape_resource_files(f)
        # copy every related file from iRODS into one shared temp directory
        for f in shape_res_files:
            temp_file = utils.get_file_from_irods(f)
            if not temp_dir:
                # the first file's directory becomes the shared temp directory
                temp_dir = os.path.dirname(temp_file)
            else:
                # move subsequent files into the shared directory and drop
                # their individual temp directories
                file_temp_dir = os.path.dirname(temp_file)
                dst_dir = os.path.join(temp_dir, os.path.basename(temp_file))
                shutil.copy(temp_file, dst_dir)
                shutil.rmtree(file_temp_dir)
                temp_file = dst_dir
            shape_temp_files.append(temp_file)
    elif selected_resource_file.extension.lower() == '.zip':
        temp_file = utils.get_file_from_irods(selected_resource_file)
        temp_dir = os.path.dirname(temp_file)
        if not zipfile.is_zipfile(temp_file):
            if os.path.isdir(temp_dir):
                shutil.rmtree(temp_dir)
            raise ValidationError('Selected file is not a zip file')
        # extract everything next to the zip file on the temp directory
        zf = zipfile.ZipFile(temp_file, 'r')
        zf.extractall(temp_dir)
        zf.close()
        for dirpath, _, filenames in os.walk(temp_dir):
            for name in filenames:
                if name == selected_resource_file.file_name:
                    # skip the user selected zip file
                    continue
                file_path = os.path.abspath(os.path.join(dirpath, name))
                shape_temp_files.append(file_path)
        shape_res_files.append(selected_resource_file)
    return shape_temp_files, shape_res_files
def _check_if_shape_files(files, temp_files=True):
    """
    checks if the list of file temp paths in *files* are part of shape files

    A valid set must: contain at least the three mandatory component
    extensions (.shp, .shx, .dbf), have no duplicate extensions, share one
    base file name, use only allowed extensions, and - for temp files - have
    a .shp that GDAL/OGR can actually open.

    :param files: list of files located in temp directory in django if temp_files is True,
    otherwise list of resource files are from django db
    :param temp_files: a flag to treat list of files *files* as temp files or not
    :return: True/False
    """
    # Note: this is the original function (check_fn_for_shp) in geo feature resource receivers.py
    # used by is_shapefiles
    # at least needs to have 3 mandatory files: shp, shx, dbf
    if len(files) >= 3:
        # check that there are no files with same extension
        if temp_files:
            # files are on temp directory
            # note: os.path.splitext('x.shp.xml') yields '.xml', so the
            # optional .shp.xml file contributes '.xml' to this set
            file_extensions = set([os.path.splitext(os.path.basename(f).lower())[1] for f in files])
        else:
            # files are in db
            file_extensions = set([f.extension.lower() for f in files])
        if len(file_extensions) != len(files):
            # duplicate extensions - files cannot all belong to one shapefile
            return False
        # check if there is the xml file
        xml_file = ''
        for f in files:
            if temp_files:
                # files are on temp directory
                if f.lower().endswith('.shp.xml'):
                    xml_file = f
            else:
                # files are in db
                if f.file_name.lower().endswith('.shp.xml'):
                    xml_file = f
        if temp_files:
            # files are on temp directory
            file_names = set([os.path.splitext(os.path.basename(f))[0] for f in files if
                              not f.lower().endswith('.shp.xml')])
        else:
            # files are in db
            file_names = set([os.path.splitext(os.path.basename(f.file_name))[0] for f in files if
                              not f.file_name.lower().endswith('.shp.xml')])
        if len(file_names) > 1:
            # file names are not the same
            return False
        # check if xml file name matches with other file names
        if xml_file:
            # -8 for '.shp.xml'
            if temp_files:
                # files are on temp directory
                xml_file_name = os.path.basename(xml_file)
            else:
                # files are in db
                xml_file_name = xml_file.file_name
            if xml_file_name[:-8] not in file_names:
                return False
        # every extension must be one of the allowed shapefile component types
        for ext in file_extensions:
            if ext not in GeoFeatureLogicalFile.get_allowed_storage_file_types():
                return False
        # all three mandatory component extensions must be present
        for ext in ('.shp', '.shx', '.dbf'):
            if ext not in file_extensions:
                return False
    else:
        # fewer than 3 files can never form a complete shapefile set
        return False
    # test if we can open the shp file
    if temp_files:
        # files are on temp directory
        shp_file = [f for f in files if f.lower().endswith('.shp')][0]
        driver = ogr.GetDriverByName('ESRI Shapefile')
        dataset = driver.Open(shp_file)
        if dataset is None:
            # OGR could not parse the file - not a readable shapefile
            return False
        # release the OGR dataset handle
        dataset = None
    return True
def extract_metadata(shp_file_full_path):
    """
    Collects metadata from a .shp file specified by *shp_file_full_path*

    :param shp_file_full_path: full path of the .shp file to parse
    :return: returns a dict of collected metadata (keys: coverage [optional],
        originalcoverage, field_info_array, geometryinformation)
    :raises ValidationError: if parsing of the shapefile fails
    """
    try:
        metadata_dict = {}
        # wgs84 extent
        parsed_md_dict = parse_shp(shp_file_full_path)
        # a WGS84 coverage is only created when reprojection succeeded
        if parsed_md_dict["wgs84_extent_dict"]["westlimit"] != UNKNOWN_STR:
            wgs84_dict = parsed_md_dict["wgs84_extent_dict"]
            # if extent is a point, create point type coverage
            if wgs84_dict["westlimit"] == wgs84_dict["eastlimit"] \
                    and wgs84_dict["northlimit"] == wgs84_dict["southlimit"]:
                coverage_dict = {"Coverage": {"type": "point",
                                              "value": {
                                                  "east": wgs84_dict["eastlimit"],
                                                  "north": wgs84_dict["northlimit"],
                                                  "units": wgs84_dict["units"],
                                                  "projection": wgs84_dict["projection"]
                                              }}}
            else:  # otherwise, create box type coverage
                coverage_dict = {"Coverage": {"type": "box",
                                              "value": parsed_md_dict["wgs84_extent_dict"]}}
            metadata_dict["coverage"] = coverage_dict
        # original extent in the shapefile's native projection
        origin_extent = parsed_md_dict["origin_extent_dict"]
        original_coverage_dict = {
            "originalcoverage": {
                "northlimit": origin_extent["northlimit"],
                "southlimit": origin_extent["southlimit"],
                "westlimit": origin_extent["westlimit"],
                "eastlimit": origin_extent["eastlimit"],
                "projection_string": parsed_md_dict["origin_projection_string"],
                "projection_name": parsed_md_dict["origin_projection_name"],
                "datum": parsed_md_dict["origin_datum"],
                "unit": parsed_md_dict["origin_unit"],
            }
        }
        metadata_dict["originalcoverage"] = original_coverage_dict
        # attribute field metadata
        field_info_array = []
        field_name_list = parsed_md_dict["field_meta_dict"]['field_list']
        for field_name in field_name_list:
            field_info_dict_item = {}
            field_info_dict_item['fieldinformation'] = \
                parsed_md_dict["field_meta_dict"]["field_attr_dict"][field_name]
            field_info_array.append(field_info_dict_item)
        metadata_dict['field_info_array'] = field_info_array
        # geometry
        geometryinformation = {"featureCount": parsed_md_dict["feature_count"],
                               "geometryType": parsed_md_dict["geometry_type"]}
        metadata_dict["geometryinformation"] = geometryinformation
        return metadata_dict
    except Exception:
        # narrowed from a bare 'except:' so that SystemExit/KeyboardInterrupt
        # are not swallowed; any parsing failure surfaces as a ValidationError
        raise ValidationError("Parsing of shapefiles failed!")
def parse_shp(shp_file_path):
    """
    Parses a shapefile with GDAL/OGR and collects projection, attribute,
    extent and geometry information.

    :param shp_file_path: full file path of the .shp file
    output dictionary format
    shp_metadata_dict["origin_projection_string"]: original projection string
    shp_metadata_dict["origin_projection_name"]: origin_projection_name
    shp_metadata_dict["origin_datum"]: origin_datum
    shp_metadata_dict["origin_unit"]: origin_unit
    shp_metadata_dict["field_meta_dict"]["field_list"]: list [fieldname1, fieldname2...]
    shp_metadata_dict["field_meta_dict"]["field_attr_dict"]:
        dict {"fieldname": dict {
                            "fieldName": fieldName,
                            "fieldTypeCode": fieldTypeCode,
                            "fieldType": fieldType,
                            "fieldWidth": fieldWidth,
                            "fieldPrecision": fieldPrecision
                            }
             }
    shp_metadata_dict["feature_count"]: feature count
    shp_metadata_dict["geometry_type"]: geometry_type
    shp_metadata_dict["origin_extent_dict"]:
        dict{"westlimit": w, "northlimit": n, "eastlimit": e, "southlimit": s}
    shp_metadata_dict["wgs84_extent_dict"]:
        dict with the same limit keys plus "projection" and "units"
        (all values are UNKNOWN_STR when the source projection is unknown)
    """
    shp_metadata_dict = {}
    # read shapefile
    driver = ogr.GetDriverByName('ESRI Shapefile')
    dataset = driver.Open(shp_file_path)
    # get layer
    layer = dataset.GetLayer()
    # get spatialRef from layer
    spatialRef_from_layer = layer.GetSpatialRef()
    if spatialRef_from_layer is not None:
        shp_metadata_dict["origin_projection_string"] = str(spatialRef_from_layer)
        prj_name = spatialRef_from_layer.GetAttrValue('projcs')
        if prj_name is None:
            # not a projected CRS - fall back to the geographic CRS name
            prj_name = spatialRef_from_layer.GetAttrValue('geogcs')
        shp_metadata_dict["origin_projection_name"] = prj_name
        shp_metadata_dict["origin_datum"] = spatialRef_from_layer.GetAttrValue('datum')
        shp_metadata_dict["origin_unit"] = spatialRef_from_layer.GetAttrValue('unit')
    else:
        # no spatial reference (.prj) available - projection metadata unknown
        shp_metadata_dict["origin_projection_string"] = UNKNOWN_STR
        shp_metadata_dict["origin_projection_name"] = UNKNOWN_STR
        shp_metadata_dict["origin_datum"] = UNKNOWN_STR
        shp_metadata_dict["origin_unit"] = UNKNOWN_STR
    field_list = []
    filed_attr_dic = {}
    field_meta_dict = {"field_list": field_list, "field_attr_dict": filed_attr_dic}
    shp_metadata_dict["field_meta_dict"] = field_meta_dict
    # get Attributes
    layerDefinition = layer.GetLayerDefn()
    for i in range(layerDefinition.GetFieldCount()):
        fieldName = layerDefinition.GetFieldDefn(i).GetName()
        field_list.append(fieldName)
        attr_dict = {}
        field_meta_dict["field_attr_dict"][fieldName] = attr_dict
        attr_dict["fieldName"] = fieldName
        fieldTypeCode = layerDefinition.GetFieldDefn(i).GetType()
        attr_dict["fieldTypeCode"] = fieldTypeCode
        fieldType = layerDefinition.GetFieldDefn(i).GetFieldTypeName(fieldTypeCode)
        attr_dict["fieldType"] = fieldType
        fieldWidth = layerDefinition.GetFieldDefn(i).GetWidth()
        attr_dict["fieldWidth"] = fieldWidth
        fieldPrecision = layerDefinition.GetFieldDefn(i).GetPrecision()
        attr_dict["fieldPrecision"] = fieldPrecision
    # get layer extent
    layer_extent = layer.GetExtent()
    # get feature count
    featureCount = layer.GetFeatureCount()
    shp_metadata_dict["feature_count"] = featureCount
    # get a feature from layer
    feature = layer.GetNextFeature()
    # get geometry from feature
    # NOTE(review): GetNextFeature() returns None for a layer with zero
    # features, which would make the call below fail - presumably upstream
    # validation guarantees at least one feature; confirm
    geom = feature.GetGeometryRef()
    # get geometry name
    shp_metadata_dict["geometry_type"] = geom.GetGeometryName()
    # reproject layer extent
    # source SpatialReference
    source = spatialRef_from_layer
    # target SpatialReference
    target = osr.SpatialReference()
    target.ImportFromEPSG(4326)
    # create two key points from layer extent
    left_upper_point = ogr.Geometry(ogr.wkbPoint)
    left_upper_point.AddPoint(layer_extent[0], layer_extent[3])  # left-upper
    right_lower_point = ogr.Geometry(ogr.wkbPoint)
    right_lower_point.AddPoint(layer_extent[1], layer_extent[2])  # right-lower
    # source map always has extent, even projection is unknown
    shp_metadata_dict["origin_extent_dict"] = {}
    shp_metadata_dict["origin_extent_dict"]["westlimit"] = layer_extent[0]
    shp_metadata_dict["origin_extent_dict"]["northlimit"] = layer_extent[3]
    shp_metadata_dict["origin_extent_dict"]["eastlimit"] = layer_extent[1]
    shp_metadata_dict["origin_extent_dict"]["southlimit"] = layer_extent[2]
    # reproject to WGS84
    shp_metadata_dict["wgs84_extent_dict"] = {}
    if source is not None:
        # define CoordinateTransformation obj
        transform = osr.CoordinateTransformation(source, target)
        # project two key points
        left_upper_point.Transform(transform)
        right_lower_point.Transform(transform)
        shp_metadata_dict["wgs84_extent_dict"]["westlimit"] = left_upper_point.GetX()
        shp_metadata_dict["wgs84_extent_dict"]["northlimit"] = left_upper_point.GetY()
        shp_metadata_dict["wgs84_extent_dict"]["eastlimit"] = right_lower_point.GetX()
        shp_metadata_dict["wgs84_extent_dict"]["southlimit"] = right_lower_point.GetY()
        shp_metadata_dict["wgs84_extent_dict"]["projection"] = "WGS 84 EPSG:4326"
        shp_metadata_dict["wgs84_extent_dict"]["units"] = "Decimal degrees"
    else:
        # cannot reproject without a source projection - flag values unknown
        shp_metadata_dict["wgs84_extent_dict"]["westlimit"] = UNKNOWN_STR
        shp_metadata_dict["wgs84_extent_dict"]["northlimit"] = UNKNOWN_STR
        shp_metadata_dict["wgs84_extent_dict"]["eastlimit"] = UNKNOWN_STR
        shp_metadata_dict["wgs84_extent_dict"]["southlimit"] = UNKNOWN_STR
        shp_metadata_dict["wgs84_extent_dict"]["projection"] = UNKNOWN_STR
        shp_metadata_dict["wgs84_extent_dict"]["units"] = UNKNOWN_STR
    return shp_metadata_dict
def parse_shp_xml(shp_xml_full_path):
    """
    Parse ArcGIS 10.X ESRI Shapefile Metadata XML. file to extract metadata for the following
    elements:
        title
        abstract
        keywords
    :param shp_xml_full_path: Expected fullpath to the .shp.xml file
    :return: a list of metadata dicts (each with a single 'title',
        'description' or 'subject' key); empty list when the file is missing
        or cannot be parsed
    """
    metadata = []
    try:
        if os.path.isfile(shp_xml_full_path):
            with open(shp_xml_full_path) as fd:
                xml_dict = xmltodict.parse(fd.read())
            dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']
            if 'idCitation' in dataIdInfo_dict:
                if 'resTitle' in dataIdInfo_dict['idCitation']:
                    if '#text' in dataIdInfo_dict['idCitation']['resTitle']:
                        title_value = dataIdInfo_dict['idCitation']['resTitle']['#text']
                    else:
                        title_value = dataIdInfo_dict['idCitation']['resTitle']
                    # truncate the title so it fits the Title model field
                    title_max_length = Title._meta.get_field('value').max_length
                    if len(title_value) > title_max_length:
                        title_value = title_value[:title_max_length - 1]
                    title = {'title': {'value': title_value}}
                    metadata.append(title)
            if 'idAbs' in dataIdInfo_dict:
                # the abstract may contain html markup - strip it
                description_value = strip_tags(dataIdInfo_dict['idAbs'])
                description = {'description': {'abstract': description_value}}
                metadata.append(description)
            if 'searchKeys' in dataIdInfo_dict:
                searchKeys_dict = dataIdInfo_dict['searchKeys']
                if 'keyword' in searchKeys_dict:
                    keyword_list = []
                    # xmltodict yields a list for repeated elements, a scalar
                    # for a single one - normalize to a list
                    if isinstance(searchKeys_dict["keyword"], list):
                        keyword_list += searchKeys_dict["keyword"]
                    else:
                        keyword_list.append(searchKeys_dict["keyword"])
                    for k in keyword_list:
                        metadata.append({'subject': {'value': k}})
    except Exception:
        # Catch any exception silently and return an empty list
        # Due to the variant format of ESRI Shapefile Metadata XML
        # among different ArcGIS versions, an empty list will be returned
        # if any exception occurs
        metadata = []
    # the return was previously inside a 'finally' block, which would also
    # swallow exceptions raised outside the try suite (e.g. KeyboardInterrupt)
    return metadata
| 1.890625 | 2 |
tests/test_node.py | mjholtkamp/py-iptree | 0 | 16213 | import unittest
from iptree import IPNode
class TestIPNode(unittest.TestCase):
    """Unit tests for the IPNode tree node."""

    def test_node_ipv4(self):
        """An added IPv4 child is found; unrelated networks are not."""
        root = IPNode('0.0.0.0/0')
        root.add(IPNode('127.0.0.1/32'))
        assert '127.0.0.1/32' in root
        assert '192.0.2.1/32' not in root

    def test_node_ipv6(self):
        """An added IPv6 child is found; unrelated networks are not."""
        root = IPNode('::/0')
        root.add(IPNode('::1/128'))
        assert '::1/128' in root
        assert '2001:db8::1/128' not in root

    def test_node_aggregate(self):
        """Aggregating yields the leafs and detaches intermediate nodes."""
        root = IPNode('::/0')
        intermediate = IPNode('2001:db8::/32')
        intermediate.add(IPNode('2001:db8:cafe::1'))
        intermediate.add(IPNode('2001:db8:cafe::2'))
        root.add(intermediate)
        leafs = list(root.aggregate())
        assert root.children == {}
        assert intermediate.parent is None
        assert intermediate.children == {}
        assert len(leafs) == 2

    def test_node_iter_does_not_empty(self):
        """Iterating a node must not consume its children."""
        root = IPNode('::/0')
        root.add(IPNode('2001:db8::1'))
        expected = ['2001:db8::1']
        assert [child.network for child in root] == expected
        # repeat to show that __iter__ does not empty children
        assert [child.network for child in root] == expected

    def test_user_data(self):
        """User supplied data is stored on the node."""
        root = IPNode('::/0', data={'user': 'data'})
        assert root.data['user'] == 'data'
| 3.046875 | 3 |
Codewars/8kyu/invert-values/Python/test.py | RevansChen/online-judge | 7 | 16214 | <reponame>RevansChen/online-judge<gh_stars>1-10
# Python - 3.4.3
# Codewars framework tests for the "invert values" kata:
# invert() flips the sign of every element in the list.
Test.it('Basic Tests')
Test.assert_equals(invert([1, 2, 3, 4, 5]), [-1, -2, -3, -4, -5])
Test.assert_equals(invert([1, -2, 3, -4, 5]), [-1, 2, -3, 4, -5])
# inverting an empty list yields an empty list
Test.assert_equals(invert([]), [])
| 3.0625 | 3 |
locustfile_create_order.py | Ashutosh-Kaushik/ss-load-test-locust | 1 | 16215 | <filename>locustfile_create_order.py<gh_stars>1-10
import csv
import random
import warnings
import os
from locust import HttpUser, task, between
# canned request payload sent with every create-order call
body = {
    "campaignid":"5kXk20gGDISJdM5el5IT",
    "walletamount":"0"
}
# captured device/session headers replayed verbatim with each request
# (includes a fixed Accesstoken/Userid - these must belong to a valid
# test account on the target environment)
header = {
    "Host": "fkhapi.sastasundar.com",
    "Apptype": "N",
    "Appversion": "4.0.4",
    "Appversioncode": "109",
    "Deviceid": "81653dce-0dd2-4201-8916-4aecbdd89269",
    "Devicedensity": "320",
    "Devicedensitytype": "xhdpi",
    "Deviceheight": "1184",
    "Devicewidth": "768",
    "Devicename": "Unknown Google Nexus 4",
    "Deviceosinfo": "5.1",
    "Networkinfo": "Wifi",
    "Accesstoken": "<PASSWORD>",
    "Refdeviceid": "4dd29c0f2f8d1842",
    "Userid": "4937724",
    "Pincode": "700120",
    "Is_panindia": "0",
    "Warehouse_id": "1",
    "Content-Type": "application/json",
    "Content-Length": "56",
    "Accept-Encoding": "gzip, deflate",
    "User-Agent": "okhttp/5.0.0-alpha.2"
}
class SastaSundarCheckout(HttpUser):
    """Locust user that load-tests the SastaSundar create-order endpoint."""

    # target host can be overridden through the TARGET_URL environment variable
    host = os.getenv('TARGET_URL', 'https://fkhapi.sastasundar.com')

    def on_start(self):
        # the test target uses a certificate that fails verification, so
        # disable TLS verification and silence the resulting warnings
        warnings.filterwarnings("ignore")
        self.client.verify = False

    @task
    def sasta_sundar_search_query(self):
        """Submits a create-order request with the canned payload."""
        # removed the dataset artifact that was fused onto this line and the
        # unused 'response' local; the request itself is unchanged
        self.client.post("/orderinfo/createorder", headers=header, json=body)
compy/plot/grid.py | tilleyd/compy | 0 | 16216 | <gh_stars>0
"""Contains the grid class to create multiple figures."""
from typing import Optional, Tuple
from .figure import Figure
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
class Grid:
    """A grid of subfigures laid out in rows and columns."""

    def __init__(
        self, rows: int, cols: int, size: Optional[Tuple[float, float]] = None
    ):
        """Creates a grid containing multiple subfigures.

        Args:
            rows: Number of figure rows.
            cols: Number of figure columns.
            size: Optional size in inches, (width, height).
        """
        self.rows = rows
        self.cols = cols
        self.grid = gridspec.GridSpec(rows, cols)
        self.figure = plt.figure(figsize=size)
        # one wrapped Figure per grid cell, indexed as figures[row][col]
        self.figures = [
            [Figure(ax=plt.subplot(self.grid[r, c])) for c in range(cols)]
            for r in range(rows)
        ]

    def get_figure(self, row: int, col: int) -> Figure:
        """Return the figure at a specified row and column."""
        return self.figures[row][col]

    def show(self):
        """Show the figure when in interactive mode."""
        self.figure.show()

    def save(self, path):
        """Save the figure to a image or pdf file path."""
        self.figure.savefig(path, bbox_inches="tight")
| 3.484375 | 3 |
parallel_accel/shared/parallel_accel/shared/schemas/external.py | google/parallel_accel | 1 | 16217 | # Copyright 2021 The ParallelAccel Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module provides types definitions."""
import dataclasses
import enum
import json
import time
from typing import Any, Dict, List, Optional, Union
import uuid
import linear_algebra
import marshmallow
import marshmallow_dataclass
import marshmallow_enum
#####################################
# Utility functions #
#####################################
def decode(
    schema: marshmallow.Schema, data: str, **kwargs
) -> dataclasses.dataclass:
    """Decodes input string using provided schema.

    Args:
        schema: Schema to be used for deserialization.
        data: JSON-encoded data to be deserialized.
        **kwargs: Extra keyworded arguments to be passed to
            `marshmallow.Schemas.loads` method.

    Returns:
        Deserialized `dataclasses.dataclass` object.
    """
    deserialized = schema.loads(data, **kwargs)
    return deserialized
def encode(
    schema: marshmallow.Schema, data: dataclasses.dataclass, **kwargs
) -> str:
    """Encodes input data using provided schema.

    Args:
        schema: Schema to be used for serialization.
        data: Dataclass object to be serialized.
        **kwargs: Extra keyworded arguments to be passed to
            `marshmallow.Schemas.dumps` method.

    Returns:
        JSON-encoded serialized data.
    """
    # compact separators keep the payload free of insignificant whitespace
    compact_separators = (",", ":")
    return schema.dumps(data, separators=compact_separators, **kwargs)
#####################################
# Types aliases #
#####################################
OperatorsType = List[linear_algebra.ops.ProbBasisAxisSum]
#####################################
# marshmallow helpers #
#####################################
_SerializedLinearAlgebraObject = Dict[str, Any]
_SerializedProbBasisAxisSums = List[List[Dict[str, Any]]]
# `linear_algebra` offers only functions to dump and load objects from the JSON encoded
# string, and does not support builtin dict objects. When we call json.dumps()
# over already JSON encoded string, all quotation marks and brackets are
# prefixed with the backslash. Instead, we can convert JSON object to the dict
# type and reduce serialized object size.
def _deserialize_linear_algebra_object(data: _SerializedLinearAlgebraObject) -> Any:
    """Deserializes linear_algebra object from dict type.

    Since `linear_algebra` only loads objects from JSON-encoded strings (not
    builtin dicts), the dict is first dumped back into a JSON string and then
    parsed into the `linear_algebra` object.

    Args:
        data: Dict encoded linear_algebra object.

    Returns:
        Deserialized linear_algebra object.
    """
    encoded = json.dumps(data)
    return linear_algebra.read_json(json_text=encoded)
def _serialize_linear_algebra_object(obj: Any) -> _SerializedLinearAlgebraObject:
    """Serializes linear_algebra object to dict type.

    Since `linear_algebra` only dumps objects to JSON-encoded strings (not
    builtin dicts), the object is first dumped to a JSON string and then
    parsed into a dict object.

    Args:
        obj: linear_algebra object to be encoded.

    Returns:
        Serialized linear_algebra object.
    """
    encoded = linear_algebra.to_json(obj)
    return json.loads(encoded)
class _LinearAlgebraField(marshmallow.fields.Field):
    """`marshmallow.fields.Field` that serializes and deserializes `linear_algebra` type
    object."""

    def _serialize(
        self, value: Any, *_args, **_kwargs
    ) -> _SerializedLinearAlgebraObject:
        """See base class documentation."""
        # delegate to the shared dict-based serializer
        return _serialize_linear_algebra_object(value)

    def _deserialize(
        self, value: _SerializedLinearAlgebraObject, *_args, **_kwargs
    ) -> Any:
        """See base class documentation."""
        try:
            deserialized = _deserialize_linear_algebra_object(value)
        except json.JSONDecodeError as ex:
            # surface malformed payloads as schema validation failures
            raise marshmallow.ValidationError("Not a JSON object") from ex
        return deserialized
class _OperatorsField(marshmallow.fields.Field):
    """`marshmallow.fields.Field` that serializes and deserializes
    `linear_algebra.ProbBasisAxisSum` operators."""

    def _serialize(
        self, value: OperatorsType, _attr, _obj, **kwargs
    ) -> _SerializedProbBasisAxisSums:
        """See base class documentation."""
        # normalize a single operator into a one-element list
        operators = value if isinstance(value, list) else [value]
        serialized = []
        for operator in operators:
            # each operator is serialized as a list of its terms
            serialized.append(
                [_serialize_linear_algebra_object(term) for term in operator]
            )
        return serialized

    def _deserialize(
        self, value: _SerializedProbBasisAxisSums, _attr, _obj, **kwargs
    ) -> OperatorsType:
        """See base class documentation."""
        try:
            operators = []
            for op in value:
                # rebuild each operator by summing its deserialized terms
                terms = [_deserialize_linear_algebra_object(term) for term in op]
                operators.append(sum(terms))
            return operators
        except json.JSONDecodeError as ex:
            raise marshmallow.ValidationError("Not a JSON object") from ex
# marshmallow-aware type aliases: each pairs a runtime type with the custom
# field class that knows how to (de)serialize it in the dataclass schemas below
Graph = marshmallow_dataclass.NewType(
    "Graph", linear_algebra.Graph, field=_LinearAlgebraField
)
Operators = marshmallow_dataclass.NewType(
    "Operators", OperatorsType, field=_OperatorsField
)
ParamResolver = marshmallow_dataclass.NewType(
    "ParamResolver", linear_algebra.ParamResolver, field=_LinearAlgebraField
)
Result = marshmallow_dataclass.NewType("Result", linear_algebra.Result, field=_LinearAlgebraField)
Sweepable = marshmallow_dataclass.NewType(
    "Sweepable", linear_algebra.study.Sweepable, field=_LinearAlgebraField
)
#####################################
# Server side events #
#####################################
@dataclasses.dataclass
class ServerSideEvent:
    """Base class for server side event.
    Both `event` and `timestamp` fields are auto-populated if using default
    values:
    - `event` is set to the class name
    - `timestamp` is set to the current time
    Attributes:
        id: Event unique id.
        data: Event payload.
        event: Event name.
        timestamp: Event timestamp (in UNIX seconds).
    """
    id: uuid.UUID  # pylint: disable=invalid-name
    data: Any
    event: str = dataclasses.field(default="")
    timestamp: int = dataclasses.field(default=0)
    def __post_init__(self) -> None:
        # default event name is the concrete subclass name
        if self.event == "":
            self.event = self.__class__.__name__
        # default timestamp is "now" in UNIX seconds
        if self.timestamp == 0:
            self.timestamp = int(time.time())
@dataclasses.dataclass
class StreamTimeoutEvent(ServerSideEvent):
    """Server side event that indicates the stream connection reached the
    maximum timeout (10 minutes)."""
    # this event carries no payload, hence data defaults to None
    data: Optional[Any] = dataclasses.field(default=None)
#####################################
# API relevant types #
#####################################
@dataclasses.dataclass
class APIError:
    """API error response.
    Attributes:
        code: HTTP error code.
        message: Error details.
    """
    # HTTP status code of the failed request
    code: int
    # human-readable description of the failure
    message: str
#####################################
# Jobs relevant types #
#####################################
@dataclasses.dataclass
class BatchJobContext:
    """Simulation batch job context.
    Attributes:
        acyclic_graphs (List[linear_algebra.Graph]): List of acyclic_graphs to be run as a batch.
        params (List[linear_algebra.study.Sweepable]): List of parameters to be used
            with acyclic_graphs, same size as list of acyclic_graphs.
    Raises:
        ValueError: if the two lists have different lengths.
    """
    acyclic_graphs: List[Graph]
    params: List[Sweepable]
    def __post_init__(self) -> None:
        # each acyclic_graph needs its own sweep parameters: lengths must agree
        if len(self.acyclic_graphs) != len(self.params):
            raise ValueError(
                "Number of sweeps parameters has to match number of acyclic_graphs"
            )
@dataclasses.dataclass
class JobContext:
    """Simulation job context for a single acyclic_graph run.
    Attributes:
        acyclic_graph (linear_algebra.Graph): Graph to be run.
        param_resolver (linear_algebra.ParamResolver): ParamResolver to be used with the
            acyclic_graph.
    """
    acyclic_graph: Graph
    param_resolver: ParamResolver
@dataclasses.dataclass
class SweepJobContext:
    """Simulation sweep job context: one acyclic_graph run over a parameter sweep.
    Attributes:
        acyclic_graph (linear_algebra.Graph): Graph to be run.
        params (linear_algebra.study.Sweepable): Parameters to be used with the
            acyclic_graph.
    """
    acyclic_graph: Graph
    params: Sweepable
class JobStatus(enum.IntEnum):
    """Current job status.
    Attributes:
        NOT_STARTED: The job was added to the queue.
        IN_PROGRESS: The job is being processed.
        COMPLETE: Simulation has been completed successfully.
        ERROR: Simulation has failed.
    """
    NOT_STARTED = 0
    IN_PROGRESS = 1
    # COMPLETE and ERROR are the terminal states (see JobResult validation)
    COMPLETE = 2
    ERROR = 3
@dataclasses.dataclass
class JobProgress:
    """Job computation progress.
    Attributes:
        completed: Number of completed work units.
        total: Total number of work units.
    """
    completed: int = dataclasses.field(default=0)
    total: int = dataclasses.field(default=1)
    def __post_init__(self) -> None:
        # enforce the invariant 0 <= completed <= total and total >= 1
        if self.completed < 0:
            raise ValueError("Current work unit cannot be less than zero")
        if self.total < 1:
            raise ValueError("Total number of work units cannot be less than 1")
        if self.completed > self.total:
            raise ValueError(
                "Current work unit cannot be greater than total work units"
            )
@dataclasses.dataclass
class JobResult:
    """Simulation job result.
    Attributes:
        id: Unique job id.
        status: Current job status.
        error_message: Optional error message explaining why the computation
            failed, only set if the `status` is
            :attr:`parallel_accel.client.schemas.JobStatus.ERROR`.
        progress: Optional computation progress, only set if the `status` is
            :attr:`parallel_accel.client.schemas.JobStatus.IN_PROGRESS`.
        result: Optional simulation job result, only set if the `status` is
            :attr:`parallel_accel.client.schemas.JobStatus.COMPLETE`.
    """
    id: uuid.UUID  # pylint: disable=invalid-name
    # serialized by value through a marshmallow enum field
    status: JobStatus = dataclasses.field(
        metadata={
            "marshmallow_field": marshmallow_enum.EnumField(
                JobStatus, by_value=True
            )
        }
    )
    error_message: Optional[str] = dataclasses.field(default=None)
    progress: Optional[JobProgress] = dataclasses.field(default=None)
    result: Optional[Any] = dataclasses.field(default=None)
    def __post_init__(self) -> None:
        # cross-field validation: each status permits a fixed combination of
        # the optional fields
        if self.status == JobStatus.IN_PROGRESS and self.progress is None:
            raise ValueError("Missing job progress")
        if self.status == JobStatus.ERROR:
            if not self.error_message:
                # fixed typo in the error message ("messsage" -> "message")
                raise ValueError("Missing error message")
            if self.result:
                raise ValueError("Failed job cannot have result field")
        if self.status == JobStatus.COMPLETE:
            if not self.result:
                raise ValueError("Missing job result")
            if self.error_message:
                raise ValueError(
                    "Completed job cannot have error_message field"
                )
            if (
                self.progress is not None
                and self.progress.total != self.progress.completed
            ):
                raise ValueError("Not all work units are marked as completed")
@dataclasses.dataclass
class JobStatusEvent(ServerSideEvent):
    """Job status changed event.
    Attributes:
        data: Simulation job result.
    """
    # Envelope fields (event/id/timestamp) come from the ServerSideEvent base.
    data: JobResult
@dataclasses.dataclass
class JobSubmitted:
    """Submitted job.
    Attributes:
        id: Unique job id.
    """
    # `id` intentionally shadows the builtin; it is the serialized field name.
    id: uuid.UUID  # pylint: disable=invalid-name
#####################################
# Expectation job relevant types #
#####################################
@dataclasses.dataclass
class ExpectationBatchJobContext(BatchJobContext):
    """Expectation values batch job context.
    Attributes:
        operators (List[List[linear_algebra.ops.ProbBasisAxisSum]]): List of list of
        `linear_algebra.ops.ProbBasisAxisSum` operators, same size as list of acyclic_graphs.
    """
    operators: List[Operators]
    def __post_init__(self) -> None:
        # Base class checks graphs vs. params; additionally each graph needs
        # its own operator list.
        super().__post_init__()
        if len(self.operators) != len(self.acyclic_graphs):
            raise ValueError(
                "Number of operators has to match number of acyclic_graphs"
            )
@dataclasses.dataclass
class ExpectationBatchJobResult(JobResult):
    """Expectation values batch job result.
    Attributes:
        result: List of expectation values list, same size as number of
        acyclic_graphs. Each element has the outer size of input sweep parameters
        and the inner size of input operators size.
    """
    # Overrides JobResult.result with a concrete 3-level nesting:
    # [graph][sweep][operator] -> float.
    result: Optional[List[List[List[float]]]] = dataclasses.field(default=None)
@dataclasses.dataclass
class ExpectationJobContext(JobContext):
    """Expectation values job context.
    Attributes:
        operators (linear_algebra.ops.ProbBasisAxisSum): List of `linear_algebra.ops.ProbBasisAxisSum` operators.
    """
    operators: Operators
@dataclasses.dataclass
class ExpectationJobResult(JobResult):
    """Expectation values job result.
    Attributes:
        result: List of floats, same size as input operators size.
    """
    result: Optional[List[float]] = dataclasses.field(default=None)
@dataclasses.dataclass
class ExpectationSweepJobContext(SweepJobContext):
    """Expectation values sweep job context.
    Attributes:
        operators (List[linear_algebra.ops.ProbBasisAxisSum]): List of `linear_algebra.ops.ProbBasisAxisSum`
        operators evaluated against the acyclic_graph for every sweep point.
    """
    # NOTE(review): the original docstring said "same size as list of
    # acyclic_graphs", wording apparently copied from the batch variant; a
    # sweep context holds a single graph.
    operators: Operators
@dataclasses.dataclass
class ExpectationSweepJobResult(JobResult):
    """Expectation values sweep job result.
    Attributes:
        result: List of expectation values list. The outer size is the same as
        input sweep size, the inner size is the same size as input operators
        size.
    """
    result: Optional[List[List[float]]] = dataclasses.field(default=None)
@dataclasses.dataclass
class ExpectationJobStatusEvent(JobStatusEvent):
    """Expectation job status changed event.
    Attributes:
        data: Expectation job result.
    """
    # The payload may be any of the three expectation result flavors
    # (single, batch, or sweep).
    data: Union[
        ExpectationJobResult,
        ExpectationBatchJobResult,
        ExpectationSweepJobResult,
    ]
########################################
# Noisy expectation job relevant types #
########################################
@dataclasses.dataclass
class NoisyExpectationJobContext(ExpectationJobContext):
    """Noisy expectation job context.
    Attributes:
        num_samples: Number of times the operators will run. Can be specified as
        a single value or list of same size as input operators.
    """
    # We cannot set default field value for Union type
    num_samples: Union[int, List[int]]
    def __post_init__(self) -> None:
        # A scalar num_samples applies to every operator; a list must pair
        # one sample count with each operator.
        if isinstance(self.num_samples, list) and (
            len(self.num_samples) != len(self.operators)
        ):
            raise ValueError(
                "Number of num_samples has to match number of operators"
            )
@dataclasses.dataclass
class NoisyExpectationJobResult(ExpectationJobResult):
    """Noisy expectation job result.

    Identical payload to :class:`ExpectationJobResult`; the distinct type
    allows a dedicated marshmallow schema per job type.
    """
@dataclasses.dataclass
class NoisyExpectationJobStatusEvent(JobStatusEvent):
    """Noisy expectation job status changed event.
    Attributes:
        data: Noisy expectation job result.
    """
    data: NoisyExpectationJobResult
#####################################
# Sample job relevant types #
#####################################
@dataclasses.dataclass
class SampleBatchJobContext(BatchJobContext):
    """Sample batch job context.
    Attributes:
        repetitions: Number of times the acyclic_graphs will run. Can be specified as
        a single value or list of same size as input acyclic_graphs.
    """
    class RepetitionsValidator(
        marshmallow.validate.Validator
    ):  # pylint: disable=too-few-public-methods
        """A Helper class for validating repetitions field value."""
        def __call__(
            self, value: Union[int, List[int]]
        ) -> Union[int, List[int]]:
            # Both the scalar form and every element of the list form must be
            # at least 1 (ints, so > 0 is equivalent to >= 1).
            if isinstance(value, list) and not all(x > 0 for x in value):
                raise marshmallow.ValidationError(
                    "All elements must be greater than or equal to 1"
                )
            if isinstance(value, int) and not value > 0:
                raise marshmallow.ValidationError(
                    "Must be greater than or equal to 1"
                )
            return value
    # We cannot set default field value for Union type
    repetitions: Union[int, List[int]] = dataclasses.field(
        metadata={"validate": RepetitionsValidator()}
    )
    def __post_init__(self) -> None:
        # Base class checks graphs vs. params; a list-valued repetitions must
        # additionally pair one count with each graph.
        super().__post_init__()
        if isinstance(self.repetitions, list) and (
            len(self.repetitions) != len(self.acyclic_graphs)
        ):
            raise ValueError(
                "Number of repetitions has to match number of acyclic_graphs"
            )
@dataclasses.dataclass
class SampleBatchJobResult(JobResult):
    """Sample batch job result.
    Attributes:
        result (Optional[List[List[linear_algebra.Result]]]): Output from running the
        acyclic_graph.
    """
    result: Optional[List[List[Result]]] = dataclasses.field(default=None)
@dataclasses.dataclass
class SampleJobContext(JobContext):
    """Sample job context.
    Attributes:
        repetitions: Number of times the acyclic_graph will run.
    """
    # Schema-level validation enforces repetitions >= 1.
    repetitions: int = dataclasses.field(
        default=1, metadata={"validate": marshmallow.validate.Range(min=1)}
    )
@dataclasses.dataclass
class SampleJobResult(JobResult):
    """Sample job result.
    Attributes:
        result: Output from running the acyclic_graph.
    """
    result: Optional[Result] = dataclasses.field(default=None)
@dataclasses.dataclass
class SampleSweepJobContext(SweepJobContext):
    """Sample sweep job context.
    Attributes:
        repetitions: Number of times the acyclic_graph will run.
    """
    # Schema-level validation enforces repetitions >= 1.
    repetitions: int = dataclasses.field(
        default=1, metadata={"validate": marshmallow.validate.Range(min=1)}
    )
@dataclasses.dataclass
class SampleSweepJobResult(JobResult):
    """Sample sweep job result.
    Attributes:
        result: Output from running the acyclic_graph, one entry per sweep point.
    """
    result: Optional[List[Result]] = dataclasses.field(default=None)
@dataclasses.dataclass
class SampleJobStatusEvent(JobStatusEvent):
    """Sample job status changed event.
    Attributes:
        data: Sample job result (single, batch, or sweep flavor).
    """
    data: Union[SampleJobResult, SampleBatchJobResult, SampleSweepJobResult]
#####################################
# Jobs queue relevant types #
#####################################
class JobType(enum.IntEnum):
    """Simulation job type.
    Attributes:
        SAMPLE: Sampling.
        EXPECTATION: Expectation values.
        NOISY_EXPECTATION: Noisy expectation values.
    """
    # Serialized by integer value (see PendingJob below) — do not renumber.
    SAMPLE = 0
    EXPECTATION = 1
    NOISY_EXPECTATION = 2
@dataclasses.dataclass
class JobsQueue:
    """Current status of jobs queue.

    Attributes:
        ids: List of pending jobs ids.
    """

    # default_factory must be a zero-argument callable.  The original passed a
    # list *instance* ([]), which made ``JobsQueue()`` raise
    # "TypeError: 'list' object is not callable" at construction time.
    ids: List[uuid.UUID] = dataclasses.field(default_factory=list)
@dataclasses.dataclass
class PendingJob:
    """Queued job details.
    Attributes:
        id: Unique job id.
        status: Current job status.
        type: Job type.
    """
    id: uuid.UUID  # pylint: disable=invalid-name
    status: JobStatus = dataclasses.field(
        metadata={
            "marshmallow_field": marshmallow_enum.EnumField(
                JobStatus,
                by_value=True,
            )
        }
    )
    # `type` shadows the builtin on purpose: it is the serialized field name.
    type: JobType = dataclasses.field(
        metadata={
            "marshmallow_field": marshmallow_enum.EnumField(
                JobType, by_value=True
            )
        }
    )
    def __post_init__(self) -> None:
        # By definition a *pending* job has not finished yet.
        if self.status in (JobStatus.COMPLETE, JobStatus.ERROR):
            raise ValueError(
                f"PendingJob cannot have {self.status.name} status"
            )
#####################################
# Tasks relevant types #
#####################################
class TaskState(enum.IntEnum):
    """Current task state.
    Attributes:
        PENDING: Task is scheduled for execution.
        RUNNING: Task is running.
        DONE: Task is finished.
    """
    # Serialized by integer value (see TaskStatus below) — do not renumber.
    PENDING = 0
    RUNNING = 1
    DONE = 2
@dataclasses.dataclass
class TaskStatus:
    """Current task status.
    Attributes:
        state: Current task state.
        error: Optional error message explaining why the task failed, only set
        if the state is :attr:`parallel_accel.client.schemas.TaskState.DONE` and the
        `success` flag is False.
        success: Optional flag indicating whether task finished successfully,
        only set if the task state is
        :attr:`parallel_accel.client.schemas.TaskState.DONE`.
    """
    state: TaskState = dataclasses.field(
        metadata={
            "marshmallow_field": marshmallow_enum.EnumField(
                TaskState, by_value=True
            )
        }
    )
    error: Optional[str] = dataclasses.field(default=None)
    success: Optional[bool] = dataclasses.field(default=None)
    def __post_init__(self) -> None:
        """See base class documentation."""
        # error/success are outcome fields; they only make sense once the
        # task has reached the DONE state.
        if self.state != TaskState.DONE and (
            (self.error is not None) or (self.success is not None)
        ):
            field = "error" if self.error is not None else "success"
            raise ValueError(f"Unfinished task cannot have {field} field.")
@dataclasses.dataclass
class TaskSubmitted:
    """Submitted task.
    Attributes:
        id: Unique task id.
    """
    id: uuid.UUID  # pylint: disable=invalid-name
@dataclasses.dataclass
class TaskStatusEvent(ServerSideEvent):
    """Task status changed event.
    Attributes:
        data: Task status.
    """
    data: TaskStatus
#####################################
# Worker relevant types #
#####################################
class WorkerState(enum.IntEnum):
    """ASIC worker state.
    Attributes:
        BOOTING: Worker is booting.
        ERROR: Worker encountered an error.
        IDLE: Worker is idling.
        OFFLINE: Worker is offline.
        PROCESSING_JOB: Worker is processing a job.
        SHUTTING_DOWN: Worker is shutting down.
    """
    # Serialized by integer value (see Worker below) — do not renumber.
    OFFLINE = 0
    BOOTING = 1
    SHUTTING_DOWN = 2
    IDLE = 3
    PROCESSING_JOB = 4
    ERROR = 5
@dataclasses.dataclass
class Worker:
    """Current status of the ASIC worker.

    Attributes:
        state: Current worker state.
        error: Optional error message explaining problem with the worker, only
            set when the `state` is
            :attr:`parallel_accel.client.schemas.WorkerState.ERROR`.
        job_id: Currently processed job id, only set when the `state` is
            :obj:`parallel_accel.client.schemas.WorkerState.PROCESSING_JOB`.
    """

    state: WorkerState = dataclasses.field(
        metadata={
            "marshmallow_field": marshmallow_enum.EnumField(
                WorkerState, by_value=True
            )
        }
    )
    error: Optional[str] = dataclasses.field(default=None)
    job_id: Optional[uuid.UUID] = dataclasses.field(default=None)

    def __post_init__(self) -> None:
        """Validates that `error` and `job_id` are consistent with `state`.

        Raises:
            ValueError: If a field required by the current state is missing or
                a field that is invalid for the state is set.
        """
        # Only PROCESSING_JOB and ERROR states may carry extra fields at all.
        if (
            self.state
            not in (
                WorkerState.PROCESSING_JOB,
                WorkerState.ERROR,
            )
            and ((self.error is not None) or (self.job_id is not None))
        ):
            raise ValueError(
                "Cannot have extra properties for the worker status "
                f"{self.state.name}"
            )
        if self.state == WorkerState.ERROR:
            if not self.error:
                # Fixed typo in the original message ("messsage").
                raise ValueError("Missing error message")
            if self.job_id:
                raise ValueError("Cannot have job_id field for the ERROR state")
        if self.state == WorkerState.PROCESSING_JOB:
            if not self.job_id:
                raise ValueError("Missing job id")
            if self.error:
                # The original message said "IDLE state" here, which was wrong:
                # this branch rejects an error message on a PROCESSING_JOB worker.
                raise ValueError(
                    "Cannot have error field for the PROCESSING_JOB state"
                )
#####################################
# marshmallow schemas #
#####################################
class _SSERenderer:
    """A helper class for serializing and deserializing objects to server side
    events message format.

    The server side event message is UTF-8 text data separated by a pair of
    newline characters.
    """

    @staticmethod
    def dumps(obj: Dict[str, Any], *_args, **_kwargs) -> str:
        r"""Encodes input object into text string.

        Args:
            obj: Object to be serialized.

        Returns:
            Text string in format:

                {key}: {value}\n
                ...
                \n
        """
        result = ""
        # Emit the SSE fields in a fixed order so the output is deterministic;
        # falsy values (None, "", empty dict) are skipped entirely.
        for key in ("event", "id", "timestamp", "data"):
            value = obj.get(key, None)
            if not value:
                continue
            if key == "data":
                # Compact JSON keeps the payload on a single line.
                value = json.dumps(value, separators=(",", ":"))
            result += f"{key}: {value}\n"
        result += "\n"
        return result

    @staticmethod
    def loads(  # pylint: disable=invalid-name
        s: str, *_args, **_kwargs
    ) -> Dict[str, Any]:
        """Decodes input text string into dict object.

        Args:
            s: Text string to be decoded.

        Returns:
            Dict object.
        """
        obj = {}
        for line in s.split("\n"):
            line = line.strip()
            if not line:
                continue
            # Split on the FIRST ": " only.  The serialized JSON payload (or
            # any other value) may itself contain ": ", which previously made
            # the 2-tuple unpacking raise ValueError.
            key, value = line.split(": ", 1)
            if key == "data":
                value = json.loads(value)
            obj[key] = value
        return obj
class _BaseSchema(marshmallow.Schema):
    """Base `marshmallow.schema.Schema` for ParallelAccel related schemas.
    This is a helper schema that provides a custom `marshmallow.post_dump` hook
    that excludes all None fields from the final serialization result.
    """
    @marshmallow.post_dump
    def remove_empty_fields(  # pylint: disable=no-self-use
        self, data: Dict, **_kwargs
    ) -> Dict[str, Any]:
        """Removes all None fields from the input data.
        Args:
            data: Input data dictionary object.
        Returns:
            Filtered dictionary object.
        """
        return {k: v for k, v in data.items() if v is not None}
class _SSEBaseSchema(_BaseSchema):
    """Base `marshmallow.schema.Schema` for ParallelAccel service server side events."""
    class Meta:  # pylint: disable=too-few-public-methods
        """Metadata passed to the `marshmallow.schemas.Schema` constructor."""
        # Dump/load via the SSE text renderer instead of the default JSON.
        render_module = _SSERenderer
# Instantiate one ready-to-use marshmallow schema per dataclass defined above.
# Each (dataclass, base_schema) pair below yields one schema instance;
# _BaseSchema strips None fields on dump, and _SSEBaseSchema additionally
# renders server-side-event text instead of JSON.  The tuple order here must
# match the order of the names being unpacked on the left.
(
    APIErrorSchema,
    ExpectationBatchJobContextSchema,
    ExpectationBatchJobResultSchema,
    ExpectationJobContextSchema,
    ExpectationJobResultSchema,
    ExpectationJobStatusEventSchema,
    ExpectationSweepJobContextSchema,
    ExpectationSweepJobResultSchema,
    JobProgressSchema,
    JobResultSchema,
    JobStatusEventSchema,
    JobSubmittedSchema,
    JobsQueueSchema,
    NoisyExpectationJobContextSchema,
    NoisyExpectationJobResultSchema,
    NoisyExpectationJobStatusEventSchema,
    PendingJobSchema,
    SampleBatchJobContextSchema,
    SampleBatchJobResultSchema,
    SampleJobContextSchema,
    SampleJobResultSchema,
    SampleJobStatusEventSchema,
    SampleSweepJobContextSchema,
    SampleSweepJobResultSchema,
    ServerSideEventSchema,
    StreamTimeoutEventSchema,
    TaskStatusEventSchema,
    TaskStatusSchema,
    TaskSubmittedSchema,
    WorkerSchema,
) = tuple(
    marshmallow_dataclass.class_schema(x, base_schema=y)()
    for x, y in (
        (APIError, None),
        (ExpectationBatchJobContext, None),
        (ExpectationBatchJobResult, _BaseSchema),
        (ExpectationJobContext, None),
        (ExpectationJobResult, _BaseSchema),
        (ExpectationJobStatusEvent, _SSEBaseSchema),
        (ExpectationSweepJobContext, None),
        (ExpectationSweepJobResult, _BaseSchema),
        (JobProgress, None),
        (JobResult, _BaseSchema),
        (JobStatusEvent, _SSEBaseSchema),
        (JobSubmitted, None),
        (JobsQueue, None),
        (NoisyExpectationJobContext, None),
        (NoisyExpectationJobResult, _BaseSchema),
        (NoisyExpectationJobStatusEvent, _SSEBaseSchema),
        (PendingJob, None),
        (SampleBatchJobContext, None),
        (SampleBatchJobResult, _BaseSchema),
        (SampleJobContext, None),
        (SampleJobResult, _BaseSchema),
        (SampleJobStatusEvent, _SSEBaseSchema),
        (SampleSweepJobContext, None),
        (SampleSweepJobResult, _BaseSchema),
        (ServerSideEvent, _SSEBaseSchema),
        (StreamTimeoutEvent, _SSEBaseSchema),
        (TaskStatusEvent, _SSEBaseSchema),
        (TaskStatus, _BaseSchema),
        (TaskSubmitted, None),
        (Worker, _BaseSchema),
    )
)
| 2.078125 | 2 |
godot-toolkit/godot_config_file.py | WiggleWizard/godot-toolkit | 0 | 16218 | try:
from configparser import RawConfigParser
except ImportError:
from ConfigParser import RawConfigParser
class GodotConfigFile(RawConfigParser):
    """Config parser that writes Godot-style ``key = value`` files.

    Godot's .ini-like format differs from stock ConfigParser output in that the
    delimiter is always `` = `` and empty values are serialized as an explicit
    pair of double quotes (``key = ""``).
    """

    def write(self, fp):
        """Write an .ini-format representation of the configuration state."""
        # Resolve DEFAULTSECT here (mirroring the module's py2/py3 import
        # fallback).  The original referenced a bare DEFAULTSECT that was
        # never imported, so writing any defaults raised NameError.
        try:
            from configparser import DEFAULTSECT
        except ImportError:
            from ConfigParser import DEFAULTSECT
        if self._defaults:
            fp.write("[%s]\n" % DEFAULTSECT)
            for (key, value) in self._defaults.items():
                fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
            fp.write("\n")
        for section in self._sections:
            fp.write("[%s]\n" % section)
            for (key, value) in self._sections[section].items():
                if key == "__name__":
                    continue
                if (value == ""):
                    # Godot expects empty values as an explicit "".
                    key = " = ".join((key, str("\"\"").replace('\n', '\n\t')))
                elif (value is not None) or (self._optcre == self.OPTCRE):
                    key = " = ".join((key, str(value).replace('\n', '\n\t')))
                fp.write("%s\n" % (key))
            fp.write("\n")
DMOJ/CCC/escape room.py | eddiegz/Personal-C | 3 | 16219 | import collections
def cal(num):
    """Caches the grid-compatible divisors of ``num`` and returns ``num``.

    Every divisor i (up to sqrt(num)) with i <= max(n, m) and
    num // i <= max(n, m) is appended to factor[num]; the complementary
    divisor is recovered later in ``dfs`` as num // i.  Note that merely
    accessing factor[num] (a defaultdict) also records ``num`` as seen,
    which ``dfs`` relies on as its visited check.
    """
    i=1
    f=factor[num]
    while i*i<=num:
        # Keep only factor pairs whose 1-based (row, col) targets fit the grid.
        if num%i==0 and i<=max(n,m) and num//i<=max(n,m):
            f.append(i)
        i+=1
    return num
def dfs(i, j):
    """Returns True iff the exit cell (m-1, n-1) is reachable from (i, j).

    A cell holding value v lets the player jump to any cell (r, c) with
    (r + 1) * (c + 1) == v, so for each cached factor pair (p, v // p) both
    orientations are tried.
    """
    if i == m - 1 and j == n - 1:
        return True
    # Reject out-of-bounds cells and values that were already fully explored
    # (membership in `factor` doubles as the visited set — see cal()).
    # The original used `i >= m and j >= n`, which indexed grid[i][j] out of
    # range (IndexError) whenever only one coordinate overflowed — possible
    # for non-square grids since cal() bounds factors by max(n, m).
    if i >= m or j >= n or grid[i][j] in factor:
        return False
    num = cal(grid[i][j])
    for p in factor[num]:
        nj = num // p
        if dfs(p - 1, nj - 1) or dfs(nj - 1, p - 1):
            return True
    return False
# Read an m-row by n-column grid of jump values from stdin.
m=int(input())
n=int(input())
grid=[]
for i in range(m):
    grid.append(list(map(int,input().split())))
# `factor` doubles as the factor cache and the visited-value set for dfs.
factor=collections.defaultdict(list)
print('yes' if dfs(0, 0) else 'no')
| 3.328125 | 3 |
volume_loader.py | xeTaiz/deep-volumetric-ambient-occlusion | 9 | 16220 | <gh_stars>1-10
import os
import pydicom
import numpy as np
import dicom_numpy
from utils import hidden_errors
from tf_utils import *
from pathlib import Path
def read_dicom_folder(dicom_folder, rescale=None):
    ''' Reads all .dcm files in `dicom_folder` and merges them to one volume.
    Returns:
        A (volume, dataset) tuple: the merged voxel volume and the first
        slice's pydicom Dataset (used by callers for PixelSpacing /
        SliceThickness metadata).  Note the affine matrix produced by
        dicom_numpy.combine_slices is discarded here.
    '''
    dss = [pydicom.dcmread(str(dicom_folder/dcm)) for dcm in os.listdir(dicom_folder) if dcm.endswith('.dcm')]
    vol, mat = dicom_numpy.combine_slices(dss, rescale)
    return vol, dss[0]
def get_largest_dir(dirs, minsize=100):
    """Return the directory in ``dirs`` with the most entries.

    Non-directory entries count as zero.  Returns None when the winning
    directory holds fewer than ``minsize`` entries.
    """
    def entry_count(candidate):
        # Treat anything that is not a directory as empty.
        return len(os.listdir(candidate)) if os.path.isdir(candidate) else 0

    best = max(dirs, key=entry_count)
    if len(os.listdir(best)) >= minsize:
        return best
    return None
def get_volume_dirs(path):
    """Collects the highest-resolution volume directory for each scan under ``path``.

    Walks every subdirectory of ``path`` (expected layout:
    <scan>/<single child>/Unknown Study/<series dirs>), picks the series dir
    with the most files via get_largest_dir, and drops scans where no series
    meets the minimum file count.
    """
    path = Path(path)
    return list(
        filter(lambda p: p is not None,
            map( get_largest_dir,  # extract subdir with most files in it (highest res volume)
                map( lambda p: list(p.iterdir()),  # get list of actual volume directorie
                    map( lambda p: next(p.iterdir())/'Unknown Study',  # cd into subfolders CQ500-CT-XX/Unknown Study/
                        filter(lambda p: p.is_dir(),  # Get all dirs, no files
                            path.iterdir())))))  # Iterate over path directory
    )
def get_volume_gen(volume_dirs, rescale=None, tf_pts=None):
    ''' Make a generator that loads volumes from a list of volume directories, `volume_dirs`.

    Args:
        volume_dirs: Iterable of DICOM series directories.
        rescale: Passed through to read_dicom_folder / dicom_numpy.
        tf_pts: Optional transfer-function points applied to every volume; when
            None, points are derived per-volume from the histogram peaks.
    Yields: (volume, tf_points, voxel_scale, volume_name) '''
    def vol_gen():
        for vol_dir in volume_dirs:
            with hidden_errors():
                try:
                    vol, dcm = read_dicom_folder(vol_dir, rescale)
                    # Original referenced an undefined name `dicom`; the
                    # dataset variable is `dcm`.
                    vox_scl = np.array([dcm.PixelSpacing[0], dcm.PixelSpacing[1], dcm.SliceThickness]).astype(np.float32)
                    vox_scl /= vox_scl.min()
                    vol_name = str(vol_dir.parent.parent.parent.name)
                    # Use a separate local: rebinding `tf_pts` directly made it
                    # a local of vol_gen and raised UnboundLocalError on read.
                    pts = tf_pts
                    if pts is None:
                        # Original referenced undefined `normalized_vol`;
                        # assumed to mean the loaded volume — TODO confirm
                        # whether normalization should happen first.
                        peaks = get_histogram_peaks(vol)
                        pts = get_trapezoid_tf_points_from_peaks(peaks)
                except dicom_numpy.DicomImportException:
                    print(f'Could not load {vol_dir}')
                    continue
            yield vol, pts, vox_scl, vol_name
    return vol_gen()
# Public names re-exported by ``from volume_loader import *``.
__all__ = ['read_dicom_folder', 'get_largest_dir', 'get_volume_gen', 'get_volume_dirs']
| 2.515625 | 3 |
sudoku_solver/gui.py | andrewhalle/sudoku_solver | 0 | 16221 | <gh_stars>0
import sys
from PyQt5.QtCore import Qt, QSize, QPoint
from PyQt5.QtWidgets import QApplication, QDialog, QWidget, QLabel, QPushButton, QVBoxLayout, QHBoxLayout
from PyQt5.QtGui import QPainter, QColor, QPen, QFont
from .sudoku import Sudoku
class SudokuWidget(QWidget):
    """Fixed-size (500x500) widget that renders a 9x9 sudoku grid.

    The user moves a highlighted focus square with the arrow keys and types
    digits 1-9 into it (backspace clears the cell).
    """
    def __init__(self, parent=None):
        super(SudokuWidget, self).__init__(parent)
        self.sudoku = Sudoku()
        # (x, y) of the focused cell; x is the column, y the row, both 0..8.
        self.focus_square = (0, 0)
        self.setFixedSize(500, 500)
        self.setFocusPolicy(Qt.ClickFocus)
    def solve(self):
        """Solve the current puzzle in place and repaint."""
        self.sudoku.solve()
        self.update()
    def clear(self):
        """Replace the puzzle with an empty one and repaint."""
        self.sudoku = Sudoku()
        self.update()
    def enter(self, value):
        """Write ``value`` (1-9, or 0 to erase) into the focused cell."""
        i = self.focus_square[0]
        j = self.focus_square[1]
        if value < 0 or value > 9:
            raise ValueError("that's not a valid sudoku value")
        # NOTE(review): data is indexed [x][y] here, matching paintEvent below
        # — confirm the Sudoku class stores its grid column-major.
        self.sudoku.data[i][j] = value
    def moveFocusSquare(self, new_focus_square):
        """Move the focus to ``new_focus_square``, an (x, y) pair in 0..8.

        Raises:
            ValueError: If the argument is not a 2-tuple or is out of range.
        """
        if not isinstance(new_focus_square, tuple) or len(new_focus_square) != 2:
            raise ValueError("new focus square must be 2x2 tuple")
        if new_focus_square[0] < 0 or new_focus_square[0] > 8 or new_focus_square[1] < 0 or new_focus_square[1] > 8:
            raise ValueError("index out of bounds")
        self.focus_square = new_focus_square
    def keyPressEvent(self, event):
        """Arrow keys move the focus; digits set the cell; backspace clears it."""
        if event.key() == Qt.Key_Right:
            if self.focus_square[0] == 8:
                return
            self.moveFocusSquare((self.focus_square[0] + 1, self.focus_square[1]))
            self.update()
        elif event.key() == Qt.Key_Left:
            if self.focus_square[0] == 0:
                return
            self.moveFocusSquare((self.focus_square[0] - 1, self.focus_square[1]))
            self.update()
        elif event.key() == Qt.Key_Up:
            if self.focus_square[1] == 0:
                return
            self.moveFocusSquare((self.focus_square[0], self.focus_square[1] - 1))
            self.update()
        elif event.key() == Qt.Key_Down:
            if self.focus_square[1] == 8:
                return
            self.moveFocusSquare((self.focus_square[0], self.focus_square[1] + 1))
            self.update()
        elif event.text() in ["1", "2", "3", "4", "5", "6", "7", "8", "9"]:
            num = int(event.text())
            self.enter(num)
            self.update()
        elif event.key() == Qt.Key_Backspace:
            # 0 represents an empty cell.
            self.enter(0)
            self.update()
    def paintEvent(self, event):
        """Draws the grid lines, the focus highlight, and the cell digits."""
        row_width = self.width() / 9
        white = QColor(255, 255, 255)
        black = QColor(0, 0, 0)
        blue = QColor(0, 0, 255)
        linePen = QPen(black)
        thickPen = QPen(black)
        thickPen.setWidth(2)
        bluePen = QPen(blue)
        bluePen.setWidth(2)
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)
        painter.translate(0, 0)
        # White background with a thick outer border.
        painter.setPen(thickPen)
        painter.setBrush(QColor(255, 255, 255))
        painter.drawConvexPolygon(QPoint(0, 0), QPoint(0, self.height()), QPoint(self.width(), self.height()), QPoint(self.width(), 0))
        painter.setPen(linePen)
        # Interior grid lines; every third line (3x3 box boundary) is thick.
        for i in range(8):
            x = (i + 1) * row_width
            y = (i + 1) * row_width
            if i in [2, 5]:
                painter.setPen(thickPen)
            painter.drawLine(x, 0, x, self.height())
            painter.drawLine(0, y, self.width(), y)
            if i in [2, 5]:
                painter.setPen(linePen)
        # Blue rectangle around the focused cell.
        painter.setPen(bluePen)
        x1 = (row_width * self.focus_square[0])
        x2 = (row_width * (self.focus_square[0] + 1))
        y1 = (row_width * self.focus_square[1])
        y2 = (row_width * (self.focus_square[1] + 1))
        painter.drawConvexPolygon(QPoint(x1, y1), QPoint(x1, y2), QPoint(x2, y2), QPoint(x2, y1))
        painter.setPen(linePen)
        painter.setFont(QFont("Arial", pointSize=20, weight=QFont.Normal))
        # Draw non-empty cell values centered in their squares.
        for i in range(9):
            for j in range(9):
                if self.sudoku.data[i][j] != 0:
                    painter.drawText(row_width * i, row_width * j, row_width, row_width, Qt.AlignCenter, str(self.sudoku.data[i][j]))
class SudokuDialog(QDialog):
    """Top-level dialog hosting the puzzle widget plus solve/clear buttons."""
    def __init__(self, parent=None):
        super(SudokuDialog, self).__init__(parent)
        layout = QHBoxLayout()
        self.puzzle = SudokuWidget()
        layout.addWidget(self.puzzle)
        buttonLayout = QVBoxLayout()
        self.solve_button = QPushButton("solve")
        self.clear_button = QPushButton("clear")
        # Buttons delegate straight to the puzzle widget's slots.
        self.solve_button.clicked.connect(self.puzzle.solve)
        self.clear_button.clicked.connect(self.puzzle.clear)
        buttonLayout.addWidget(self.solve_button)
        buttonLayout.addWidget(self.clear_button)
        layout.addLayout(buttonLayout)
        self.setLayout(layout)
        self.setFixedSize(650, 600)
        # Give the puzzle keyboard focus so arrow keys work immediately.
        self.puzzle.setFocus()
        self.setWindowTitle("Sudoku Solver")
        self.show()
def main():
    """Launch the Qt application and block until the dialog is closed."""
    app = QApplication([])
    gui = SudokuDialog()
    sys.exit(app.exec_())
| 3.15625 | 3 |
swarmlib/util/functions.py | nkoutsov/swarmlib | 0 | 16222 | <filename>swarmlib/util/functions.py
# ------------------------------------------------------------------------------------------------------
# Copyright (c) <NAME>. All rights reserved.
# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------
#pylint: disable=invalid-name
import inspect
from functools import wraps
import landscapes.single_objective
import numpy as np
# Wrapper for landscapes.single_objective functions for inputs > 1d
def wrap_landscapes_func(landscapes_func):
    """Adapt a scalar 1-D objective so it accepts inputs of more than one dimension.

    The returned callable applies ``landscapes_func`` independently to every
    1-D slice taken along axis 0 of its input, preserving the wrapped
    function's metadata via ``functools.wraps``.
    """
    @wraps(landscapes_func)
    def vectorized(x):
        # One evaluation per column (axis-0 slice) of ``x``.
        return np.apply_along_axis(func1d=landscapes_func, axis=0, arr=x)
    return vectorized
# Add all functions from landscapes.single_objective
# Registry mapping objective-function name -> wrapper that accepts >1-D input.
# Every function in landscapes.single_objective is included except the fixed
# 3D/4D ones, which don't fit the generic column-wise evaluation.
FUNCTIONS = {
    name: wrap_landscapes_func(func)
    for (name, func) in inspect.getmembers(
        landscapes.single_objective, inspect.isfunction
    )
    if name not in ['colville', 'wolfe'] # Don't include 3D and 4D functions
}
| 2.25 | 2 |
nps/network_entity.py | Dry8r3aD/penta-nps | 6 | 16223 | <reponame>Dry8r3aD/penta-nps<filename>nps/network_entity.py
# -*- coding: UTF-8 -*-
from collections import deque
class NetworkEntity(object):
    """A simulated network peer (client or server side).

    Holds a FIFO of send/recv packet buffers for the simulation plus the
    network interface that the scapy sniffer should attach to.
    """

    def __init__(self, name):
        # Role of this peer: "client" or "server".
        self.name = name
        # FIFO queue of send/recv PacketBuff objects.
        self._packet_list = deque()
        # Interface for scapy sniffing, e.g. "tp0" or "eth0".
        self._interface_name = ""
        self._interface_mac_addr = "00:00:00:00:00:00"
        # NOTE: the NAT port-randomization helpers that used to live here
        # (_nat_port / _nat_magic_number / _use_nat_port and their accessors)
        # are currently disabled; see version control history.

    def get_name(self):
        """Return the entity role name ("client" or "server")."""
        return self.name

    def append_packet_list(self, packet_buff):
        """Enqueue a packet buffer at the tail of the FIFO."""
        self._packet_list.append(packet_buff)

    def pop_packet_list(self):
        """Dequeue and return the packet buffer at the head of the FIFO."""
        return self._packet_list.popleft()

    def get_packet_list(self):
        """Return the underlying deque of queued packet buffers."""
        return self._packet_list

    def is_empty_packet_list(self):
        """Return True when no packet buffers are queued."""
        return len(self._packet_list) == 0

    def set_interface(self, iface_name, iface_mac):
        """Record the capture interface name and its MAC address."""
        self._interface_name = iface_name
        self._interface_mac_addr = iface_mac

    def get_interface_name(self):
        """Return the capture interface name (e.g. "eth0")."""
        return self._interface_name

    def get_interface_mac_addr(self):
        """Return the MAC address of the capture interface."""
        return self._interface_mac_addr
source/_sample/scipy/interp_spline_interest.py | showa-yojyo/notebook | 14 | 16224 | <filename>source/_sample/scipy/interp_spline_interest.py
#!/usr/bin/env python
"""interp_spline_interest.py: Demonstrate spline interpolation.
"""
from scipy.interpolate import splrep, splev
import numpy as np
import matplotlib.pyplot as plt

# pylint: disable=invalid-name
# Interest rates of Jan, Feb, Mar, Jun, Dec.
x = np.array([1, 2, 3, 6, 12])
y = np.array([0.080, 0.100, 0.112, 0.144, 0.266])

# Interpolate the rates (splrep fits a cubic B-spline by default).
tck = splrep(x, y)

# Print the spline curve: tck = (knots, coefficients, degree).
np.set_printoptions(formatter={'float': '{:.3f}'.format})
print("knot vector:\n", tck[0])
print("control points:\n", tck[1])
print("degree:\n", tck[2])

# Evaluate interest rates for each month.
for i in range(1, 13):
    print(f"month[{i:02d}]: {float(splev(i, tck)):.3f}%")

# Plot the interest curve.
time = np.linspace(1, 12, 1000, endpoint=True)
rate = splev(time, tck)
plt.figure()
plt.plot(time, rate, color='deeppink')
plt.xlabel("Month")
plt.ylabel("Rate (%)")
plt.show()
Pytorch/Scratch CNN and Pytorch/part1-convnet/tests/test_sgd.py | Kuga23/Deep-Learning | 3 | 16225 | <filename>Pytorch/Scratch CNN and Pytorch/part1-convnet/tests/test_sgd.py
import unittest
import numpy as np
from optimizer import SGD
from modules import ConvNet
from .utils import *
class TestSGD(unittest.TestCase):
    """Test cases for the SGD optimizer update rule."""

    def setUp(self):
        """No shared fixtures; each test builds its own model."""
        pass

    def test_sgd(self):
        """Runs two seeded forward/backward/update cycles on a single linear
        layer and compares the weights against stored reference arrays."""
        model_list = [dict(type='Linear', in_dim=128, out_dim=10)]
        criterion = dict(type='SoftmaxCrossEntropy')
        model = ConvNet(model_list, criterion)
        optimizer = SGD(model)

        # First step: seeds make the reference weights reproducible.
        np.random.seed(1024)
        x = np.random.randn(32, 128)
        np.random.seed(1024)
        y = np.random.randint(10, size=32)
        tmp = model.forward(x, y)
        model.backward()
        optimizer.update(model)

        # Second step with a different seed.
        np.random.seed(512)
        x = np.random.randn(32, 128)
        np.random.seed(512)
        y = np.random.randint(10, size=32)
        tmp = model.forward(x, y)
        model.backward()
        optimizer.update(model)

        expected_weights = np.load('tests/sgd_weights/w.npy')
        expected_bias = np.load('tests/sgd_weights/b.npy')
        # assertAlmostEquals is a deprecated alias that was removed in
        # Python 3.12 — use assertAlmostEqual.
        self.assertAlmostEqual(np.sum(np.abs(expected_weights - model.modules[0].weight)), 0, places=6)
        self.assertAlmostEqual(np.sum(np.abs(expected_bias - model.modules[0].bias)), 0)
| 3.078125 | 3 |
Scripts/simulation/tunable_utils/create_object.py | velocist/TS4CheatsInfo | 0 | 16226 | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\tunable_utils\create_object.py
# Compiled at: 2020-05-07 00:26:47
# Size of source mod 2**32: 4106 bytes
from crafting.crafting_tunable import CraftingTuning
from objects.components.state import TunableStateValueReference, CommodityBasedObjectStateValue
from objects.system import create_object
from sims4.random import weighted_random_item
from sims4.tuning.tunable import TunableReference, TunableTuple, TunableList, TunableRange, AutoFactoryInit, HasTunableSingletonFactory, TunableFactory
import crafting, services, sims4
logger = sims4.log.Logger('CreateObject')
class ObjectCreator(HasTunableSingletonFactory, AutoFactoryInit):
    """Tunable factory that spawns a game object from a catalog definition."""
    @TunableFactory.factory_option
    def get_definition(pack_safe):
        # Factory option allowing callers to request a pack-safe definition
        # reference (tolerates missing content packs).
        return {'definition': TunableReference(description='\n The definition of the object to be created.\n ',
          manager=(services.definition_manager()),
          pack_safe=pack_safe)}
    FACTORY_TUNABLES = {'definition': TunableReference(description='\n The definition of the object to be created.\n ',
      manager=(services.definition_manager()))}
    def __call__(self, **kwargs):
        # Instantiate the object; extra kwargs are forwarded to create_object.
        return create_object((self.definition), **kwargs)
    def get_object_definition(self):
        return self.definition
    def get_footprint(self):
        return self.definition.get_footprint()
    @property
    def id(self):
        # Catalog definition id of the object this factory creates.
        return self.definition.id
def _verify_tunable_quality_value_callback(instance_class, tunable_name, source, quality, weight):
    # Tuning-load-time guard: log an error when a RecipeCreator entry names a
    # quality that is not registered in CraftingTuning.QUALITY_STATE.
    if quality not in CraftingTuning.QUALITY_STATE.values:
        logger.error('A TunableRecipeCreator {} specifies an invalid quality {}.', source, quality)
class RecipeCreator(HasTunableSingletonFactory, AutoFactoryInit):
    """Tunable factory that produces a craftable object from a recipe,
    optionally at a weighted-random quality level."""
    FACTORY_TUNABLES = {'recipe':TunableReference(description='\n Recipe to produce an object with.\n ',
      manager=services.get_instance_manager(sims4.resources.Types.RECIPE)),
     'weighted_quality':TunableList(description='\n A list of weighted quality in which the object will be created.\n \n If empty, it will apply a default quality.\n ',
      tunable=TunableTuple(description='\n A possible level of quality for this item that will be generated.\n This will be randomly chosen based off weight against other items in the list.\n ',
      weight=TunableRange(tunable_type=int,
      default=1,
      minimum=1),
      quality=TunableStateValueReference(class_restrictions=CommodityBasedObjectStateValue),
      verify_tunable_callback=_verify_tunable_quality_value_callback))}
    def __call__(self, crafter_sim=None, post_add=None, **kwargs):
        # Pick a quality by weighted random draw; None means engine default.
        choices = [(quality.weight, quality.quality) for quality in self.weighted_quality]
        quality = weighted_random_item(choices) if choices else None
        return crafting.crafting_interactions.create_craftable((self.recipe), crafter_sim, quality=quality, post_add=post_add)
    def get_object_definition(self):
        return self.recipe.final_product.definition
题源分类/LeetCode/LeetCode日刷/python/47.全排列-ii.py | ZhengyangXu/Algorithm-Daily-Practice | 0 | 16227 | <reponame>ZhengyangXu/Algorithm-Daily-Practice
#
# @lc app=leetcode.cn id=47 lang=python3
#
# [47] 全排列 II
#
# https://leetcode-cn.com/problems/permutations-ii/description/
#
# algorithms
# Medium (59.58%)
# Likes: 371
# Dislikes: 0
# Total Accepted: 78.7K
# Total Submissions: 132.1K
# Testcase Example: '[1,1,2]'
#
# 给定一个可包含重复数字的序列,返回所有不重复的全排列。
#
# 示例:
#
# 输入: [1,1,2]
# 输出:
# [
# [1,1,2],
# [1,2,1],
# [2,1,1]
# ]
#
#
# @lc code=start
class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
def backtrack(nums,track,visited):
if len(nums) == len(track):
track = track[:]
res.append(track)
for i in range(len(nums)):
if visited[i]:
continue
if i > 0 and nums[i] == nums[i-1] and visited[i-1]:
break
track.append(nums[i])
visited[i] = True
backtrack(nums,track,visited)
track.pop()
visited[i] = False
nums.sort()
visited = [False]*len(nums)
res = []
track = []
backtrack(nums,track,visited)
return res
# @lc code=end
# def permuteUnique(self, nums: List[int]) -> List[List[int]]:
# def helper(nums,res,path):
# if not nums and path not in res:
# res.append(path)
# for i in range(len(nums)):
# helper(nums[:i]+nums[i+1:],res,path+[nums[i]])
# res = []
# helper(nums,res,[])
# return res
| 3.515625 | 4 |
testing/run-tests.py | 8enmann/blobfile | 21 | 16228 | <gh_stars>10-100
import subprocess as sp
import sys
sp.run(["pip", "install", "-e", "."], check=True)
sp.run(["pytest", "blobfile"] + sys.argv[1:], check=True)
| 1.320313 | 1 |
examples/Components/collision/PrimitiveCreation.py | sofa-framework/issofa | 0 | 16229 | import Sofa
import random
from cmath import *
############################################################################################
# this is a PythonScriptController example script
############################################################################################
############################################################################################
# following defs are used later in the script
############################################################################################
# utility methods
falling_speed = 0
capsule_height = 5
capsule_chain_height = 5
def createRigidCapsule(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
radius=0
if len(args)==0:
radius = random.uniform(1,3)
else:
radius = args[0]
meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
mass = node.createObject('UniformMass',name='mass',totalMass=1)
x_rand=random.uniform(-0.5,0.5)
y_rand=random.uniform(-0.5,0.5)
z_rand=random.uniform(-0.5,0.5)
SurfNode = node.createChild('Surf')
SurfNode.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=str(x_rand)+' '+str(y_rand)+' '+str(capsule_height/2)+' '+str(-x_rand)+' '+str(-y_rand)+' '+str(- capsule_height/2))
SurfNode.createObject('MeshTopology', name='meshTopology34',edges='0 1',drawEdges='1')
SurfNode.createObject('TCapsuleModel',template='Vec3d',name='capsule_model',defaultRadius=str(radius))
SurfNode.createObject('RigidMapping',template='Rigid,Vec3d',name='rigid_mapping',input='@../rigidDOF',output='@falling_particle')
return node
def createFlexCapsule(parentNode,name,x,y,z,*args):
radius=0
if len(args)==0:
radius = random.uniform(1,3)
else:
radius = args[0]
node = parentNode.createChild(name)
x_rand=random.uniform(-0.5,0.5)
y_rand=random.uniform(-0.5,0.5)
z_rand=random.uniform(-0.5,0.5)
node = node.createChild('Surf')
node.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=str(x + x_rand)+' '+str(y + y_rand)+' '+str(z + z_rand + capsule_height)+' '+str(x - x_rand)+' '+str(y - y_rand)+' '+str(z - z_rand),velocity='0 0 '+str(falling_speed))
mass = node.createObject('UniformMass',name='mass')
node.createObject('MeshTopology', name='meshTopology34',edges='0 1',drawEdges='1')
node.createObject('TCapsuleModel',template='Vec3d',name='capsule_model',defaultRadius=str(radius))
return node
def createCapsuleChain(parentNode,name,length,x,y,z):
node = parentNode.createChild(name)
#radius=random.uniform(1,3)
radius=0.5
height=5
x_rand=random.uniform(-0.5,0.5)
y_rand=random.uniform(-0.5,0.5)
z_rand=random.uniform(-0.5,0.5)
node = node.createChild('Surf')
ray = 3.0
t = 0.0
delta_t = 0.7
topo_edges=''
particles=''
velocities = ''
springs=''
for i in range(0,length):
particles += str(x + (ray * cos(t)).real)+' '+str(y + (ray * sin(t)).real)+' '+str(z + i*capsule_chain_height)+' '
t += delta_t
if i < length -1:
topo_edges += str(i)+' '+str(i + 1)+' '
springs += str(i)+' '+str(i + 1)+' 10 1 '+str(capsule_chain_height)+' '
velocities+='0 0 '+str(falling_speed)+' '
topo_edges += str(length - 2)+' '+str(length -1)
springs += str(length - 2)+' '+str(length -1)+' 10 1 '+str(capsule_chain_height)
node.createObject('MechanicalObject',template='Vec3d',name='falling_particles',position=particles,velocity=velocities)
node.createObject('StiffSpringForceField',template='Vec3d',name='springforcefield',stiffness='100',damping='1',spring=springs)
mass = node.createObject('UniformMass',name='mass')
node.createObject('MeshTopology', name='meshTopology34',edges=topo_edges,drawEdges='1')
node.createObject('TCapsuleModel',template='Vec3d',name='capsule_model',defaultRadius=str(radius))
return node
def createOBB(parentNode,name,x,y,z,*args):
a=0
b=0
c=0
if len(args)==0:
a=random.uniform(0.5,1.5)
b=random.uniform(0.5,1.5)
c=random.uniform(0.5,1.5)
else:
a=args[0]
b=args[1]
c=args[2]
node = parentNode.createChild(name)
meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
mass = node.createObject('UniformMass',name='mass',totalMass=1)
node.createObject('TOBBModel',template='Rigid',name='OBB_model',extents=str(a)+' '+str(b)+' '+str(c))
return node
def createCapsule(parentNode,name,x,y,z):
if random.randint(0,1) == 0:
return createRigidCapsule(parentNode,name,x,y,z)
else:
return createFlexCapsule(parentNode,name,x,y,z)
def createCapsule(parentNode,name,x,y,z):
if random.randint(0,1) == 0:
return createRigidCapsule(parentNode,name,x,y,z)
else:
return createFlexCapsule(parentNode,name,x,y,z)
def createSphere(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
r = 0
if len(args) == 0:
r=random.uniform(1,4)
else:
r = args[0]
#meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+
# str(z)+' 0 0 0 1')
#SurfNode = node.createChild('Surf')
node.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=str(x)+' '+str(y)+' '+str(z),velocity='0 0 '+str(falling_speed))
node.createObject('TSphereModel',template='Vec3d',name='sphere_model',radius=str(r))
node.createObject('UniformMass',name='mass',totalMass=1)
#SurfNode.createObject('RigidMapping',template='Rigid,Vec3d',name='rigid_mapping',input='@../rigidDOF',output='@falling_particle')
return node
def createRigidSphere(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
r = 0
if len(args) == 0:
r=random.uniform(1,4)
else:
r = args[0]
#meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+
# str(z)+' 0 0 0 1')
#SurfNode = node.createChild('Surf')
node.createObject('MechanicalObject',template='Rigid',name='falling_particle',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
node.createObject('TSphereModel',template='Rigid',name='sphere_model',radius=str(r))
node.createObject('UniformMass',name='mass',totalMass=1)
#SurfNode.createObject('RigidMapping',template='Rigid,Vec3d',name='rigid_mapping',input='@../rigidDOF',output='@falling_particle')
return node
| 2.578125 | 3 |
examples/tensorboard/nested.py | dwolfschlaeger/guildai | 694 | 16230 | <filename>examples/tensorboard/nested.py
import tensorboardX
with tensorboardX.SummaryWriter("foo") as w:
w.add_scalar("a", 1.0, 1)
w.add_scalar("a", 2.0, 2)
with tensorboardX.SummaryWriter("foo/bar") as w:
w.add_scalar("a", 3.0, 3)
w.add_scalar("a", 4.0, 4)
with tensorboardX.SummaryWriter("foo/bar/baz") as w:
w.add_scalar("a", 5.0, 5)
w.add_scalar("a", 6.0, 6)
| 2.328125 | 2 |
cobalt/__init__.py | NicolasDenoyelle/cobalt | 0 | 16231 | ###############################################################################
# Copyright 2020 UChicago Argonne, LLC.
# (c.f. AUTHORS, LICENSE)
# For more info, see https://xgitlab.cels.anl.gov/argo/cobalt-python-wrapper
# SPDX-License-Identifier: BSD-3-Clause
##############################################################################
import subprocess
from cobalt.cobalt import Cobalt, UserPolicy
__all__ = [ 'Cobalt', 'UserPolicy' ]
| 1.734375 | 2 |
anand.py | kyclark/py-grepper | 0 | 16232 | <gh_stars>0
#!/usr/bin/env python3
import os
orderNumbers = open("orders.txt", "r") #Order numbers to match
#Network path to a directory of files that has full details of the order
directoryEntries = os.scandir("")
outputFile = open("matchedData.dat", "w")
for entry in directoryEntries:
print("Currently parsing file ", entry.path)
fullOrderData = open(entry.path, "r")
#loop through each order from the ordernumber file
for orderNo in OrderNumbers:
for row in fullOrderData:
if orderNo.strip() in row:
outputFile.write(row)
#go back to start of orderdetails data to match on next order number
fullOrderData.seek(0)
#go back to order numbers again to match on the next order details file
orderNumbers.seek(0)
fullOrderData.close()
OrderNumbers.close()
outputFile.close()
print("done")
| 3.234375 | 3 |
tests/test_manager.py | Vizzuality/cog_worker | 24 | 16233 | import pytest
import rasterio as rio
from rasterio.io import DatasetWriter
from cog_worker import Manager
from rasterio import MemoryFile, crs
TEST_COG = "tests/roads_cog.tif"
@pytest.fixture
def molleweide_manager():
return Manager(
proj="+proj=moll",
scale=50000,
)
@pytest.fixture
def sample_function():
def myfunc(worker):
return worker.read(TEST_COG)
return myfunc
def test_preview(molleweide_manager, sample_function):
arr, bbox = molleweide_manager.preview(sample_function, max_size=123)
assert max(arr.shape) == 123, "Expected maximum array dimension to be 123px"
def test_tile(molleweide_manager, sample_function):
arr, bbox = molleweide_manager.tile(sample_function, x=1, y=2, z=3)
assert arr.shape == (1, 256, 256), "Expected 256x256 tile"
def test_chunk_execute(molleweide_manager, sample_function):
chunks = list(molleweide_manager.chunk_execute(sample_function, chunksize=123))
for arr, bbox in chunks:
assert max(arr.shape) <= 123, "Max chunk size should be 123px"
def test_chunk_params(molleweide_manager):
chunks = list(molleweide_manager.chunk_params(chunksize=123))
assert len(chunks) == 18, "Expected ~18 chunks for 123px tiles at 50km scale"
def test__open_writer(molleweide_manager):
with MemoryFile() as memfile:
with molleweide_manager._open_writer(memfile, 1, rio.ubyte) as writer:
assert isinstance(writer, DatasetWriter)
def test_chunk_save(molleweide_manager, sample_function):
full_arr = molleweide_manager.execute(sample_function)[0]
with MemoryFile() as memfile:
molleweide_manager.chunk_save(memfile, sample_function)
memfile.seek(0)
with rio.open(memfile) as src:
assert src.profile["crs"] == crs.CRS.from_string("+proj=moll")
assert src.profile["transform"][0] == 50000
arr = src.read()
assert arr.shape == full_arr.shape
assert (
abs(arr.sum() / full_arr.data.sum() - 1) < 0.002
), "Error should be less than 0.2%"
def test__write_chunk(molleweide_manager, sample_function):
with MemoryFile() as memfile:
arr, bbox = molleweide_manager.execute(sample_function)
print(arr.mask.sum())
with molleweide_manager._open_writer(memfile, 1, rio.ubyte) as writer:
molleweide_manager._write_chunk(writer, arr, bbox)
memfile.seek(0)
with rio.open(memfile) as src:
written = src.read(masked=True)
assert (written == arr).all()
assert (written.mask == arr.mask).all()
def test__chunk_bounds(molleweide_manager):
chunk = molleweide_manager._chunk_bounds(0, 0, 123)
assert chunk == (
-18040095.696147293,
2674978.852256801,
-11890095.696147293,
8824978.852256801,
)
def test__num_chunks(molleweide_manager):
assert molleweide_manager._num_chunks(123) == (6, 3)
| 1.882813 | 2 |
CLIP/experiments/tagger/main_binary.py | ASAPP-H/clip2 | 0 | 16234 | from train import train_model
from utils import *
import os
import sys
pwd = os.environ.get('CLIP_DIR')
DATA_DIR = "%s/data/processed/" % pwd
exp_name = "non_multilabel"
run_name = "sentence_structurel_with_crf"
train_file_name = "MIMIC_train_binary.csv"
dev_file_name = "MIMIC_val_binary.csv"
test_file_name = "test_binary.csv"
exp_name = "outputs_binary"
train = read_sentence_structure(os.path.join(DATA_DIR, train_file_name))
dev = read_sentence_structure(os.path.join(DATA_DIR, dev_file_name))
test = read_sentence_structure(os.path.join(DATA_DIR, test_file_name))
run_name = "binary"
def main(args):
train_model(
train,
dev,
test,
args[0],
exp_name,
use_crf=True,
learning_rate=float(args[1]),
epochs=int(args[2]),
writer_preds_freq=10,
embeddings_type="BioWord",
list_of_possible_tags=["followup"],
embeddings_path="%s/CLIP/experiments/tagger/embeddings" % pwd,
)
if __name__ == "__main__":
main(sys.argv[1:])
| 2.4375 | 2 |
persons/urls.py | nhieckqo/lei | 0 | 16235 | from django.urls import path
from . import views
app_name = 'persons'
urlpatterns = [
path('', views.PersonsTableView.as_view(),name='persons_list'),
path('persons_details/<int:pk>',views.PersonsUpdateView.as_view(),name='persons_details_edit'),
path('persons_details/create',views.PersonsCreateView.as_view(),name='persons_details_add'),
path('persons_details/<int:pk>/delete',views.PersonsDeleteView.as_view(),name="persons_details_delete"),
path('persons_details/sort',views.event_gate, name='sort'),
]
| 1.8125 | 2 |
logxs/__version__.py | minlaxz/logxs | 0 | 16236 | <filename>logxs/__version__.py
__title__ = 'logxs'
__description__ = 'Replacing with build-in `print` with nice formatting.'
__url__ = 'https://github.com/minlaxz/logxs'
__version__ = '0.3.2'
__author__ = '<NAME>'
__author_email__ = '<EMAIL>'
__license__ = 'MIT' | 1.023438 | 1 |
src/PyMud/Systems/system.py | NichCritic/pymud | 0 | 16237 | <reponame>NichCritic/pymud
import time
class System(object):
manditory = []
optional = []
handles = []
def __init__(self, node_factory):
self.node_factory = node_factory
def process(self):
for node in self.get_nodes():
# print(f"{self.__class__.__name__} system got message from
# {node.id}")
self.handle(node)
self.clean(node)
def handle(self, node):
pass
def clean(self, node):
[node.remove_component(c) for c in self.handles]
def get_nodes(self):
return self.node_factory.create_node_list(self.manditory,
self.optional)
class TimedSystem(System):
def is_timed_out(self, lt, ct, timeout):
if lt is None:
return False
return ct - lt > timeout
def process(self):
t = time.time()
for node in self.get_nodes():
self.handle(node, t)
self.clean(node)
| 2.671875 | 3 |
hytra/plugins/transition_feature_vector_construction/transition_feature_subtraction.py | m-novikov/hytra | 0 | 16238 | from hytra.pluginsystem import transition_feature_vector_construction_plugin
import numpy as np
from compiler.ast import flatten
class TransitionFeaturesSubtraction(
transition_feature_vector_construction_plugin.TransitionFeatureVectorConstructionPlugin
):
"""
Computes the subtraction of features in the feature vector
"""
def constructFeatureVector(
self, featureDictObjectA, featureDictObjectB, selectedFeatures
):
assert "Global<Maximum >" not in selectedFeatures
assert "Global<Minimum >" not in selectedFeatures
assert "Histrogram" not in selectedFeatures
assert "Polygon" not in selectedFeatures
features = []
for key in selectedFeatures:
if key == "RegionCenter":
continue
else:
if (
not isinstance(featureDictObjectA[key], np.ndarray)
or featureDictObjectA[key].size == 1
):
features.append(
float(featureDictObjectA[key]) - float(featureDictObjectB[key])
)
else:
features.extend(
flatten(
(
featureDictObjectA[key].astype("float32")
- featureDictObjectB[key].astype("float32")
).tolist()
)
)
# there should be no nans or infs
assert np.all(np.isfinite(np.array(features)))
return features
def getFeatureNames(self, featureDictObjectA, featureDictObjectB, selectedFeatures):
assert "Global<Maximum >" not in selectedFeatures
assert "Global<Minimum >" not in selectedFeatures
assert "Histrogram" not in selectedFeatures
assert "Polygon" not in selectedFeatures
featuresNames = []
for key in selectedFeatures:
if key == "RegionCenter":
continue
else:
if (
not isinstance(featureDictObjectA[key], np.ndarray)
or featureDictObjectA[key].size == 1
):
featuresNames.append("A[{key}]-B[{key}]".format(key=key))
else:
featuresNames.extend(
[
"A[{key}][{i}]-B[{key}][{i}]".format(key=key, i=i)
for i in range(
len(
(
featureDictObjectA[key]
- featureDictObjectB[key]
).tolist()
)
)
]
)
return featuresNames
| 2.484375 | 2 |
sprites/player.py | hectorpadin1/FICGames | 0 | 16239 | <filename>sprites/player.py
from matplotlib.style import available
import pygame as pg
from sprites.character import Character
from pygame.math import Vector2
from settings import *
from math import cos, pi
from control import Controler
from sprites.gun import MachineGun, Pistol, Rifle
from managers.resourcemanager import ResourceManager as GR
from utils.observable import Observable
class Player(Character, Observable):
def __init__(self, x, y, bullets, collide_groups, observers, level):
Character.__init__(self, None, GR.PLAYER, PLAYER_HIT_RECT, x, y, PLAYER_HEALTH, collide_groups, GR.HERO_POSITIONS, 5, [8, 8, 8, 8, 3])
Observable.__init__(self, observers)
self.last_shot = 0
pg.mouse.set_pos((x+10) * SPRITE_BOX, y * SPRITE_BOX)
self.mouse = pg.mouse.get_pos()
self.controler = Controler()
self.guns = [Pistol(bullets), Rifle(bullets), MachineGun(bullets)][0:level]
self.gunSelector = 0
self.shooting = False
self.reloading = False
self.last_change = pg.time.get_ticks()
#Notificamos a observadores inicialización
self.notify("health", self.health)
if self.guns != []:
self.notify("gun", self.gunSelector)
self.notify("ammo", self.guns[self.gunSelector].current_mag)
self.notify("bullets", self.guns[self.gunSelector].bullets)
# Acciones según la configuración del controlador
def __callControler(self):
if self.health <= 0 :
if (self.numImagenPostura < 2) and (pg.time.get_ticks() - self.last_change > ANIM_DELAY*4):
self.numImagenPostura += 1
return
# Dinámicas del jugador
self.rot_speed = 0
self.vel = Vector2(0, 0)
speed = self.vel.copy()
# Movimiento de ejes
if self.controler.left():
self.vel.x = -PLAYER_SPEED
if self.controler.right():
self.vel.x = PLAYER_SPEED
if self.controler.up():
self.vel.y = -PLAYER_SPEED
if self.controler.down():
self.vel.y = PLAYER_SPEED
# Movimientos opuestos los cancelamos
if self.controler.left() and self.controler.right():
self.vel.x = 0
if self.controler.up() and self.controler.down():
self.vel.y = 0
# Movimientos diagonales
if self.vel.x!=0 and self.vel.y!=0:
self.vel *= cos(pi/4)
# Animaciones suaves
if pg.time.get_ticks() - self.last_change > ANIM_DELAY:
if speed != self.vel:
self.numImagenPostura = (self.numImagenPostura + 1)%8
else:
self.numImagenPostura = 0
self.last_change = pg.time.get_ticks()
# Comprobamos is hay que cambiar de pistola (y si podemos)
pistol = self.controler.switchPistol()
if self.guns != []:
if (pistol > 0) and (pistol <= len(self.guns)):
self.guns[self.gunSelector].cancel_reload()
self.gunSelector = pistol -1
self.notify("gun",pistol -1)
self.notify("ammo", self.guns[self.gunSelector].current_mag)
self.notify("bullets",self.guns[self.gunSelector].bullets)
else:
self.reloading = True
return
# Recargar
if (self.controler.reload()) and (self.guns[self.gunSelector].bullets > 0):
self.guns[self.gunSelector].do_reload()
self.reloading = True
self.notify("ammo",-1)
# Disparar
if self.controler.isShooting():
self.guns[self.gunSelector].shoot(self.pos, self.rot)
self.notify("ammo",self.guns[self.gunSelector].current_mag)
self.notify("bullets",self.guns[self.gunSelector].bullets)
def update_health(self, health):
if health <= 0:
self.health = 0
self.numPostura = 4
self.numImagenPostura = 0
else:
self.health = health
self.notify("health", self.health)
# Actualizamos la munición del jugador
def update_ammo(self):
for gun in self.guns:
gun.bullets = gun.MAG_SIZE
self.notify("bullets", self.guns[self.gunSelector].bullets)
def update(self, camera_pos, dt):
self.__callControler()
# Miramos a donde nos tenemos que mover y a donde mirar
direction = pg.mouse.get_pos() - Vector2(camera_pos) - self.pos
self.rot = direction.angle_to(Vector2(1, 0))
self.pos += self.vel * (dt/1000)
if self.guns != []:
self.guns[self.gunSelector].update()
if self.health <= 0:
super().update()
return
# Según si estamos recargando, o con un arma, seleccionamos una fila de la hoja u otra
if self.reloading:
self.numPostura = 3
if self.guns != [] and self.guns[self.gunSelector].reload == False:
self.notify("ammo",self.guns[self.gunSelector].current_mag)
self.notify("bullets",self.guns[self.gunSelector].bullets)
self.reloading = False
elif self.gunSelector == 0:
self.numPostura = 1
elif self.gunSelector == 1:
self.numPostura = 0
elif self.gunSelector == 2:
self.numPostura = 2
super().update()
| 2.921875 | 3 |
yocto/poky/bitbake/lib/bb/ui/crumbs/__init__.py | jxtxinbing/ops-build | 16 | 16240 | #
# Gtk+ UI pieces for BitBake
#
# Copyright (C) 2006-2007 <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
| 0.839844 | 1 |
mytardisbf/migrations/0001_initial_data.py | keithschulze/mytardisbf | 2 | 16241 | <filename>mytardisbf/migrations/0001_initial_data.py
# -*- coding: utf-8 -*-
from django.db import migrations
from tardis.tardis_portal.models import (
Schema,
ParameterName,
DatafileParameter,
DatafileParameterSet
)
from mytardisbf.apps import (
OMESCHEMA,
BFSCHEMA
)
def forward_func(apps, schema_editor):
"""Create mytardisbf schemas and parameternames"""
db_alias = schema_editor.connection.alias
ome_schema, _ = Schema.objects\
.using(db_alias)\
.update_or_create(
name="OME Metadata",
namespace="http://tardis.edu.au/schemas/bioformats/1",
subtype=None,
hidden=True,
type=3,
immutable=True,
defaults={
'namespace': OMESCHEMA
}
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="ome",
data_type=5,
is_searchable=False,
choices="",
comparison_type=1,
full_name="OME Metadata",
units="xml",
order=1,
immutable=True,
schema=ome_schema,
defaults={
"full_name": "OMEXML Metadata"
}
)
series_schema, _ = Schema.objects\
.using(db_alias)\
.update_or_create(
name="Series Metadata",
namespace=BFSCHEMA,
subtype="",
hidden=False,
type=3,
immutable=True
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="id",
data_type=2,
is_searchable=True,
choices="",
comparison_type=8,
full_name="ID",
units="",
order=9999,
immutable=True,
schema=series_schema,
defaults={
"is_searchable": False
}
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="name",
data_type=2,
is_searchable=True,
choices="",
comparison_type=8,
full_name="Name",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="type",
data_type=2,
is_searchable=True,
choices="",
comparison_type=8,
full_name="Pixel Type",
units="",
order=9999,
immutable=True,
schema=series_schema,
defaults={
"name": "pixel_type"
}
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="dimensionorder",
data_type=2,
is_searchable=True,
choices="",
comparison_type=8,
full_name="Dimension Order",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="sizex",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="SizeX",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="sizey",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="SizeY",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="sizeZ",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="SizeZ",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="sizec",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="SizeC",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="sizet",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="SizeT",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="physicalsizex",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Voxel Size X",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="physicalsizey",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Voxel Size Y",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="physicalsizez",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Voxel Size Z",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="timeincrement",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Time Increment",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="excitationwavelength",
data_type=2,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Excitation Wavelength",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="samplesperpixel",
data_type=2,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Samples per Pixel",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="emissionwavelength",
data_type=2,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Emission Wavelength",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="pinholesize",
data_type=2,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Pinhole Size",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="previewImage",
data_type=5,
is_searchable=False,
choices="",
comparison_type=1,
full_name="Preview",
units="image",
order=1,
immutable=True,
schema=series_schema,
defaults={
"name": "preview_image"
}
)
def reverse_func(apps, schema_editor):
db_alias = schema_editor.connection.alias
ome_schema = Schema.objects\
.using(db_alias)\
.get(namespace=OMESCHEMA)
ome_pn = ParameterName.objects\
.using(db_alias)\
.get(schema=ome_schema)
DatafileParameter.objects\
.using(db_alias)\
.filter(name=ome_pn)\
.delete()
DatafileParameterSet.objects\
.using(db_alias)\
.filter(schema=ome_schema)\
.delete()
ome_pn.delete()
ome_schema.delete()
bf_schema = Schema.objects\
.using(db_alias)\
.get(namespace=BFSCHEMA)
bf_param_names = [
"id", "name", "pixel_type", "dimensionorder", "sizex", "sizey", "sizez",
"sizec", "sizet", "physicalsizex", "physicalsizey", "physicalsizez",
"timeincrement", "excitationwavelength", "samplesperpixel",
"emissionwavelength", "pinholesize", "preview_image"
]
def delete_param_names(param_name_str):
pn = ParameterName.objects\
.using(db_alias)\
.get(schema=bf_schema, name=param_name_str)
DatafileParameter.objects\
.using(db_alias)\
.filter(name=pn)\
.delete()
pn.delete()
[delete_param_names(pn) for pn in bf_param_names]
DatafileParameterSet.objects\
.using(db_alias)\
.filter(schema=bf_schema)\
.delete()
bf_schema.delete()
class Migration(migrations.Migration):
"""MyTardis Schema and ParameterName migrations"""
dependencies = [
("tardis_portal", "0001_initial"),
]
operations = [
migrations.RunPython(forward_func, reverse_func),
]
| 2.171875 | 2 |
ipmanagement/models.py | smilelhong/ip_manage | 0 | 16242 | # -*- coding: utf-8 -*-
from django.db import models
from datetime import datetime
# Create your models here.
class IP_Address(models.Model):
ip = models.GenericIPAddressField(verbose_name=u"IP地址")
gateway = models.GenericIPAddressField(verbose_name=u"网关")
network = models.GenericIPAddressField(verbose_name=u"网络号")
netmask = models.CharField(max_length=20,default='',null=True,blank='',verbose_name=u"掩码")
system = models.CharField(max_length=64,default='',null=True,blank='',verbose_name=u"应用系统")
apply_person = models.CharField(max_length=64,default='',null=True,blank='',verbose_name=u"申请人")
state = models.CharField(max_length=20,choices=((u"已分配",u"已分配"),(u"未分配",u"未分配")),verbose_name=u"状态")
apply_time = models.DateField(default=datetime.now(),verbose_name=u"申请时间")
class IP_Range(models.Model):
start_ip = models.GenericIPAddressField(verbose_name=u"开始IP")
end_ip = models.GenericIPAddressField(verbose_name=u"结束IP")
network = models.GenericIPAddressField(verbose_name=u"网络号")
netmask = models.CharField(max_length=20,default='',verbose_name=u"掩码")
use_ip = models.CharField(max_length=20,default='',null=True,blank='',verbose_name=u"已使用IP数")
left_ip = models.CharField(max_length=20,default='',null=True,blank='',verbose_name=u"未使用IP数")
create_time = models.DateField(default=datetime.now(),verbose_name=u"创建时间")
des = models.CharField(max_length=20,default='',null=True,blank='',verbose_name=u"描述") | 2.125 | 2 |
tests/scripts/thread-cert/border_router/MATN_05_ReregistrationToSameMulticastGroup.py | kkasperczyk-no/sdk-openthread | 0 | 16243 | <filename>tests/scripts/thread-cert/border_router/MATN_05_ReregistrationToSameMulticastGroup.py
#!/usr/bin/env python3
#
# Copyright (c) 2021, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import unittest
import pktverify
from pktverify import packet_verifier, packet_filter, consts
from pktverify.consts import MA1, PBBR_ALOC
import config
import thread_cert
# Test description:
# The purpose of this test case is to verify that a Primary BBR (DUT) can manage
# a re-registration of a device on its network to remain receiving multicasts.
# The test also verifies the usage of UDP multicast packets across backbone and
# internal Thread network.
#
# Topology:
# ----------------(eth)------------------
# | | |
# BR_1 (Leader) ---- BR_2 HOST
# | |
# | |
# Router_1 -----------+
#
# Node identifiers (keys into TOPOLOGY and self.nodes).
BR_1 = 1  # Border Router 1: forms the network (Leader) and acts as Primary BBR (DUT).
BR_2 = 2  # Border Router 2: secondary border router on the same backbone link.
ROUTER_1 = 3  # Thread router that registers/unregisters for multicast group MA1.
HOST = 4  # Non-Thread host on the backbone link that sends the multicast traffic.
REG_DELAY = 10  # Backbone router registration delay in seconds (see set_backbone_router).
UDP_HEADER_LENGTH = 8  # Fixed UDP header size in bytes; used to check the 'PING' payload length.
class MATN_05_ReregistrationToSameMulticastGroup(thread_cert.TestCase):
    """Thread 1.2 certification test MATN-05.

    Verifies that the Primary BBR (BR_1, the DUT) keeps forwarding backbone
    multicasts for a group (MA1) while a device (Router_1) re-registers for
    it, and stops forwarding once the registration is dropped and expires.
    """
    USE_MESSAGE_FACTORY = False
    TOPOLOGY = {
        BR_1: {
            'name': 'BR_1',
            'is_otbr': True,
            'allowlist': [BR_2, ROUTER_1],
            'version': '1.2',
            'router_selection_jitter': 2,
        },
        BR_2: {
            'name': 'BR_2',
            'allowlist': [BR_1, ROUTER_1],
            'is_otbr': True,
            'version': '1.2',
            'router_selection_jitter': 2,
        },
        ROUTER_1: {
            'name': 'Router_1',
            'allowlist': [BR_1, BR_2],
            'version': '1.2',
            'router_selection_jitter': 2,
        },
        HOST: {
            'name': 'Host',
            'is_host': True
        },
    }
    def test(self):
        """Drive the scenario: form the topology, register Router_1 for MA1,
        send pings/UDP multicasts from the Host, then drop the registration
        and let it expire before the final (expected-to-fail) ping."""
        br1 = self.nodes[BR_1]
        br2 = self.nodes[BR_2]
        router1 = self.nodes[ROUTER_1]
        host = self.nodes[HOST]
        br1.set_backbone_router(reg_delay=REG_DELAY, mlr_timeout=consts.MLR_TIMEOUT_MIN)
        br1.start()
        self.simulator.go(10)
        self.assertEqual('leader', br1.get_state())
        self.assertTrue(br1.is_primary_backbone_router)
        router1.start()
        self.simulator.go(10)
        self.assertEqual('router', router1.get_state())
        br2.start()
        self.simulator.go(10)
        self.assertEqual('router', br2.get_state())
        self.assertFalse(br2.is_primary_backbone_router)
        host.start(start_radvd=False)
        self.simulator.go(10)
        # Router_1 registers for multicast address, MA1, at BR_1.
        router1.add_ipmaddr(MA1)
        self.simulator.go(5)
        # 1. Host sends a ping packet to the multicast address, MA1.
        self.assertTrue(
            host.ping(MA1, backbone=True, ttl=10, interface=host.get_ip6_address(config.ADDRESS_TYPE.ONLINK_ULA)[0]))
        # Ensure Router_1 renews its multicast registration
        self.simulator.go(consts.MLR_TIMEOUT_MIN - 10)
        # 4. Within MLR_TIMEOUT_MIN seconds, Host sends a ping packet to the
        # multicast address, MA1. The destination port 5683 is used for the UDP
        # Multicast packet transmission.
        host.udp_send_host(data='PING', ipaddr=MA1, port=5683)
        self.simulator.go(5)
        # 6a. By internal means, Router_1 stops listening to the multicast
        # address, MA1.
        router1.del_ipmaddr(MA1)
        # 7. After (MLR_TIMEOUT_MIN+2) seconds, Host multicasts a ping packet to
        # multicast address, MA1, on the backbone link.
        self.simulator.go(consts.MLR_TIMEOUT_MIN + 2)
        self.assertFalse(
            host.ping(MA1, backbone=True, ttl=10, interface=host.get_ip6_address(config.ADDRESS_TYPE.ONLINK_ULA)[0]))
        self.collect_ipaddrs()
        self.collect_rloc16s()
        self.collect_rlocs()
        self.collect_leader_aloc(BR_1)
        self.collect_extra_vars()
    def verify(self, pv: pktverify.packet_verifier.PacketVerifier):
        """Check the packet capture: MLR registration and re-registration by
        Router_1, MPL-forwarded multicasts by BR_1, and absence of forwarding
        after the registration has expired.

        NOTE: the filter chains consume `pkts` in capture order, so the order
        of the must_next() checks below is significant.
        """
        pkts = pv.pkts
        vars = pv.vars
        pv.summary.show()
        logging.info(f'vars = {vars}')
        # Ensure the topology is formed correctly
        pv.verify_attached('Router_1', 'BR_1')
        pv.verify_attached('BR_2')
        # Initial registration
        # Router_1 registers for multicast address, MA1, at BR_1.
        # Router_1 unicasts an MLR.req CoAP request to BR_1 as
        # "coap://[<BR_1 RLOC or PBBR ALOC>]:MM/n/mr".
        # The payload contains "IPv6Address TLV: MA1".
        initial_registration_pkt = pkts.filter_wpan_src64(vars['Router_1']) \
            .filter_ipv6_2dsts(vars['BR_1_RLOC'], PBBR_ALOC) \
            .filter_coap_request('/n/mr') \
            .filter(lambda p: p.thread_meshcop.tlv.ipv6_addr == [MA1]) \
            .must_next()
        # 1. Host sends a ping packet to the multicast address, MA1.
        _pkt = pkts.filter_eth_src(vars['Host_ETH']) \
            .filter_ipv6_dst(MA1) \
            .filter_ping_request() \
            .must_next()
        # 2. BR_1 forwards the ping packet with multicast address, MA1, to its
        # Thread Network encapsulated in an MPL packet.
        pkts.filter_wpan_src64(vars['BR_1']) \
            .filter_AMPLFMA(mpl_seed_id=vars['BR_1_RLOC']) \
            .filter_ping_request(identifier=_pkt.icmpv6.echo.identifier) \
            .must_next()
        # 3. Router_1 receives the MPL packet containing an encapsulated ping
        # packet to MA1, sent by Host, and unicasts a ping response packet back
        # to Host.
        pkts.filter_wpan_src64(vars['Router_1']) \
            .filter_ipv6_dst(_pkt.ipv6.src) \
            .filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier) \
            .must_next()
        # 3a. Within MLR_TIMEOUT_MIN seconds of initial registration, Router_1
        # re-registers for multicast address, MA1, at BR_1.
        # Router_1 unicasts an MLR.req CoAP request to BR_1 as
        # "coap://[<BR_1 RLOC or PBBR ALOC>]:MM/n/mr".
        # The payload contains "IPv6Address TLV: MA1".
        pkts.copy() \
            .filter_wpan_src64(vars['Router_1']) \
            .filter_ipv6_2dsts(vars['BR_1_RLOC'], PBBR_ALOC) \
            .filter_coap_request('/n/mr') \
            .filter(lambda p: p.thread_meshcop.tlv.ipv6_addr == [MA1] and
                    p.sniff_timestamp <= initial_registration_pkt.sniff_timestamp + consts.MLR_TIMEOUT_MIN) \
            .must_next()
        # 4. Within MLR_TIMEOUT_MIN seconds, Host sends a ping packet to the
        # multicast address, MA1. The destination port 5683 is used for the UDP
        # Multicast packet transmission.
        _pkt = pkts.filter_eth_src(vars['Host_ETH']) \
            .filter_ipv6_dst(MA1) \
            .filter(lambda p: p.udp.length == UDP_HEADER_LENGTH + len('PING')
                    and p.udp.dstport == 5683) \
            .must_next()
        # 5. BR_1 forwards the UDP ping packet with multicast address, MA1, to
        # its Thread Network encapsulated in an MPL packet.
        pkts.filter_wpan_src64(vars['BR_1']) \
            .filter_AMPLFMA(mpl_seed_id=vars['BR_1_RLOC']) \
            .filter(lambda p: p.udp.length == _pkt.udp.length) \
            .must_next()
        # 6. Router_1 receives the ping packet.
        # Use the port 5683 (CoAP port) to verify that the
        # UDP Multicast packet is received.
        pkts.filter_wpan_src64(vars['Router_1']) \
            .filter(
                lambda p: p.udp.length == _pkt.udp.length and p.udp.dstport == 5683) \
            .must_next()
        # 7. After (MLR_TIMEOUT_MIN+2) seconds, Host multicasts a ping packet to
        # multicast address, MA1, on the backbone link.
        _pkt = pkts.filter_eth_src(vars['Host_ETH']) \
            .filter_ipv6_dst(MA1) \
            .filter_ping_request() \
            .must_next()
        # 8. BR_1 does not forward the ping packet with multicast address, MA1,
        # to its Thread Network.
        pkts.filter_wpan_src64(vars['BR_1']) \
            .filter_AMPLFMA(mpl_seed_id=vars['BR_1_RLOC']) \
            .filter_ping_request(identifier=_pkt.icmpv6.echo.identifier) \
            .must_not_next()
if __name__ == '__main__':
    unittest.main()
| 1.5 | 2 |
senseye_cameras/input/camera_pylon.py | senseye-inc/senseye-cameras | 5 | 16244 | import time
import logging
try:
from pypylon import pylon
except:
pylon = None
from . input import Input
log = logging.getLogger(__name__)
def encode_framenumber(np_image, n):
    """Embed the frame number *n* into bytes 7..10 of the image's first row,
    least-significant octet first (octet i of n lands at index 7 + i)."""
    for offset in range(4):
        np_image[0][7 + offset] = (n >> (8 * offset)) & 0xFF
def encode_timestamp(np_image, timestamp):
    """Embed *timestamp* (seconds, float) as integer microseconds into bytes
    0..6 of the image's first row, least-significant octet first."""
    micros = int(timestamp * 1e6)
    for offset in range(7):
        np_image[0][offset] = (micros >> (8 * offset)) & 0xFF
class CameraPylon(Input):
    '''
    Camera that interfaces with pylon/basler cameras.
    Args:
        id (int): Id of the pylon camera.
        config (dict): Configuration dictionary. Accepted keywords:
            pfs (str): path to a pfs file.
            encode_metadata (bool): whether to bake in timestamps/frame number into the frame.
    '''
    def __init__(self, id=0, config=None):
        if pylon is None:
            raise ImportError('Pylon failed to import. Pylon camera initialization failed.')
        defaults = {
            'pfs': None,
            'encode_metadata': False,
            'format': 'rawvideo',
        }
        # config=None instead of a mutable default dict: a shared {} default
        # would be aliased across every CameraPylon instance.
        Input.__init__(self, id=id, config=config if config is not None else {}, defaults=defaults)
        self.read_count = 0
    def configure(self):
        '''
        Pylon camera configuration. Requires the pylon camera to have been opened already.
        The order of these statements is important.
        Populates self.config with set values.
        Logs camera start.
        '''
        if self.config.get('pfs', None):
            # Load persisted camera features (exposure, gain, ...) from the .pfs file.
            pylon.FeaturePersistence.Load(self.config.get('pfs'), self.input.GetNodeMap())
        # Mirror the camera's effective settings back into the config dict.
        self.config['pixel_format'] = self.input.PixelFormat.Value
        self.config['gain'] = self.input.Gain.Value
        self.config['exposure_time'] = self.input.ExposureTime.Value
        self.config['res'] = (self.input.Width.Value, self.input.Height.Value)
        self.config['width'] = self.input.Width.Value
        self.config['height'] = self.input.Height.Value
        self.config['fps'] = self.input.ResultingFrameRate.GetValue()
    def open(self):
        '''Opens the pylon device selected by self.id, configures it and starts grabbing.'''
        self.read_count = 0
        devices = pylon.TlFactory.GetInstance().EnumerateDevices()
        self.input = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateDevice(devices[self.id]))
        self.input.Open()
        self.configure()
        self.input.StopGrabbing()
        self.input.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
    def read(self):
        '''
        Grabs a single frame.
        Returns:
            (frame, timestamp): frame array and host-clock grab time,
            or (None, None) when no valid frame was retrieved.
        '''
        frame = None
        now = None
        if self.input:
            ret = None
            try:
                ret = self.input.RetrieveResult(100, pylon.TimeoutHandling_ThrowException)
                if ret.IsValid():
                    frame = ret.GetArray()
                    now = time.time()
                    if self.config.get('encode_metadata'):
                        encode_timestamp(frame, now)
                        encode_framenumber(frame, self.read_count)
                    self.read_count += 1
            except TypeError as e:
                log.error(f"{str(self)} read error: {e}")
                raise
            finally:
                # ret stays None when RetrieveResult itself raised; guard so
                # the original exception is not masked by an unbound-name
                # error from ret.Release() here.
                if ret is not None:
                    ret.Release()
        return frame, now
    def close(self):
        '''Stops grabbing, closes the camera and drops the handle.'''
        self.read_count = 0
        if self.input and self.input.IsOpen():
            self.input.Close()
        self.input = None
| 2.5625 | 3 |
vise/tests/util/phonopy/test_phonopy_input.py | kumagai-group/vise | 16 | 16245 | # -*- coding: utf-8 -*-
# Copyright (c) 2021. Distributed under the terms of the MIT License.
from phonopy.interface.calculator import read_crystal_structure
from phonopy.structure.atoms import PhonopyAtoms
from vise.util.phonopy.phonopy_input import structure_to_phonopy_atoms
import numpy as np
def assert_same_phonopy_atoms(actual: PhonopyAtoms,
                              expected: PhonopyAtoms):
    """Assert that two PhonopyAtoms agree on cell, scaled positions and symbols."""
    same_cell = actual.get_cell() == expected.get_cell()
    assert same_cell.all()
    same_positions = actual.get_scaled_positions() == expected.get_scaled_positions()
    assert same_positions.all()
    assert actual.symbols == expected.symbols
def test_phonopy_atoms_behavior(sc_structure, tmpdir):
    """Sanity-check phonopy's own round-trip: a PhonopyAtoms built from an
    object read back from a POSCAR file must equal that object."""
    print(tmpdir)
    tmpdir.chdir()
    # actual = structure_to_phonopy_atoms(sc_structure)
    sc_structure.to(fmt="poscar", filename="POSCAR")
    a, _ = read_crystal_structure("POSCAR")
    b = PhonopyAtoms(atoms=a)
    # Debug output; pytest only shows it when the test fails.
    print(type(a.get_cell()))
    print(a.get_atomic_numbers())
    assert_same_phonopy_atoms(a, b)
def test_structure_to_phonopy_atoms(sc_structure):
    """The converted structure must match a hand-built unit-cell reference."""
    converted = structure_to_phonopy_atoms(sc_structure)
    identity_cell = np.eye(3)
    origin = np.zeros((1, 3))
    reference = PhonopyAtoms(symbols=["H"],
                             cell=identity_cell,
                             scaled_positions=origin)
    assert_same_phonopy_atoms(converted, reference)
#
# def test_make_phonopy_input(mc_structure, mc_structure_conv):
# actual = make_phonopy_input(unitcell=mc_structure,
# supercell_matrix=np.eye(3).tolist(),
# conventional_base=True)
# supercell_matrix = [[ 1., 1., 0.],
# [-1., 1., 0.],
# [ 0., 0., 1.]]
# supercell = mc_structure * supercell_matrix
# expected = PhonopyInput(unitcell=mc_structure,
# supercell=supercell,
# supercell_matrix=supercell_matrix)
# assert actual == expected
#
#
# def test_make_phonopy_input_default(mc_structure, mc_structure_conv):
# actual = make_phonopy_input(unitcell=mc_structure)
# supercell_matrix = [[ 2., 2., 0.],
# [-2., 2., 0.],
# [ 0., 0., 2.]]
# supercell = mc_structure * supercell_matrix
# expected = PhonopyInput(unitcell=mc_structure,
# supercell=supercell,
# supercell_matrix=supercell_matrix)
# assert actual == expected
#
#
# def test_make_phonopy_input_default_hexa():
# structure = Structure(Lattice.hexagonal(1.0, 2.0), species=["H"],
# coords=[[0.0]*3])
# actual = make_phonopy_input(unitcell=structure)
# supercell_matrix = [[2, -1, 0], [2, 1, 0], [0, 0, 2]]
# supercell = structure * supercell_matrix
# expected = PhonopyInput(unitcell=structure,
# supercell=supercell,
# supercell_matrix=supercell_matrix)
# assert actual == expected
| 2.375 | 2 |
2020/day15.py | andypymont/adventofcode | 0 | 16246 | """
2020 Day 15
https://adventofcode.com/2020/day/15
"""
from collections import deque
from typing import Dict, Iterable, Optional
import aocd # type: ignore
class ElfMemoryGame:
    """
    Plays the elves' memory game (AoC 2020 day 15): each new number is the
    gap between the two most recent turns on which the previous number was
    spoken (0 when it was new).
    """

    def __init__(self, starting_numbers: Iterable[int]):
        # Maps each spoken number to its two most recent turn indices.
        self.appearances: Dict[int, deque[int]] = {}
        self.length = 0
        for number in starting_numbers:
            self.add(number)

    def __len__(self) -> int:
        return self.length

    def next_number(self, previous: Optional[int] = None) -> int:
        """Return the number spoken after ``previous`` (default: the latest)."""
        # 0 is a legitimate number in this game, so test against None
        # explicitly: ``previous or self.latest`` would wrongly discard 0.
        if previous is None:
            previous = self.latest
        appeared = self.appearances[previous]
        return abs(appeared[1] - appeared[0])

    def extend(self, length: int) -> None:
        """Play turns until the game is ``length`` numbers long."""
        while self.length < length:
            self.add(self.next_number())

    def add(self, number: int) -> None:
        """Record ``number`` as spoken on the current turn."""
        if number in self.appearances:
            self.appearances[number].append(self.length)
        else:
            # A first appearance is stored twice so the gap computes to 0.
            self.appearances[number] = deque([self.length, self.length], maxlen=2)
        self.length += 1
        self.latest = number
def main() -> None:
    """
    Calculate and output the solutions based on the real puzzle input.
    """
    puzzle_input = aocd.get_data(year=2020, day=15)
    game = ElfMemoryGame(int(item) for item in puzzle_input.split(","))
    game.extend(2020)
    print(f"Part 1: {game.latest}")
    game.extend(30_000_000)
    print(f"Part 2: {game.latest}")


if __name__ == "__main__":
    main()
| 3.734375 | 4 |
src/spring-cloud/azext_spring_cloud/_validators_enterprise.py | SanyaKochhar/azure-cli-extensions | 2 | 16247 | <gh_stars>1-10
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-few-public-methods, unused-argument, redefined-builtin
from azure.cli.core.azclierror import ClientRequestError
from ._util_enterprise import is_enterprise_tier
def only_support_enterprise(cmd, namespace):
    """Raise when the targeted Spring instance is NOT on the Enterprise tier.

    No-op when resource group / service are missing or the tier check passes.
    """
    if not (namespace.resource_group and namespace.service):
        return
    if is_enterprise_tier(cmd, namespace.resource_group, namespace.service):
        return
    raise ClientRequestError(
        "'{}' only supports for Enterprise tier Spring instance.".format(namespace.command))
def not_support_enterprise(cmd, namespace):
    """Raise when the targeted Spring instance IS on the Enterprise tier.

    No-op when resource group / service are missing or the instance is not
    Enterprise tier.
    """
    if not (namespace.resource_group and namespace.service):
        return
    if not is_enterprise_tier(cmd, namespace.resource_group, namespace.service):
        return
    raise ClientRequestError(
        "'{}' doesn't support for Enterprise tier Spring instance.".format(namespace.command))
| 2 | 2 |
fluentql/function.py | RaduG/fluentql | 4 | 16248 | <reponame>RaduG/fluentql
from typing import Any, TypeVar, Union
from types import MethodType, FunctionType
from .base_types import BooleanType, Constant, StringType, Collection, Referenceable
from .type_checking import TypeChecker
AnyArgs = TypeVar("AnyArgs")
NoArgs = TypeVar("NoArgs")
VarArgs = TypeVar("VarArgs")
T = TypeVar("T")
class WithOperatorSupport:
    """
    Implements operator support.

    Each dunder builds the matching F subclass (defined below in this
    module) from its operands, so Python expressions assemble a query
    expression tree instead of evaluating immediately.  Reflected variants
    (__radd__ etc.) preserve left/right operand order.
    """
    # Comparison operators.
    def __gt__(self, other):
        return GreaterThan(self, other)
    def __ge__(self, other):
        return GreaterThanOrEqual(self, other)
    def __lt__(self, other):
        return LessThan(self, other)
    def __le__(self, other):
        return LessThanOrEqual(self, other)
    def __eq__(self, other):
        # NOTE: returns an expression object, not a bool; per Python rules,
        # defining __eq__ also makes the class unhashable unless __hash__
        # is restored elsewhere.
        return Equals(self, other)
    def __ne__(self, other):
        return NotEqual(self, other)
    # Arithmetic operators (plus reflected forms).
    def __add__(self, other):
        return Add(self, other)
    def __radd__(self, other):
        return Add(other, self)
    def __sub__(self, other):
        return Subtract(self, other)
    def __rsub__(self, other):
        return Subtract(other, self)
    def __mul__(self, other):
        return Multiply(self, other)
    def __rmul__(self, other):
        return Multiply(other, self)
    def __truediv__(self, other):
        return Divide(self, other)
    def __rtruediv__(self, other):
        return Divide(other, self)
    def __mod__(self, other):
        return Modulo(self, other)
    def __rmod__(self, other):
        return Modulo(other, self)
    # Bitwise operators double as boolean combinators.
    def __and__(self, other):
        return BitwiseAnd(self, other)
    def __rand__(self, other):
        return BitwiseAnd(other, self)
    def __or__(self, other):
        return BitwiseOr(self, other)
    def __ror__(self, other):
        return BitwiseOr(other, self)
    def __xor__(self, other):
        return BitwiseXor(self, other)
    def __rxor__(self, other):
        return BitwiseXor(other, self)
    def __invert__(self):
        return Not(self)
class F(Referenceable):
    """
    Base class for fluentql functions.

    Subclasses declare their argument signature via class-level type
    annotations; ``__init_subclass__`` turns those annotations into
    ``__args__`` (expected argument types) and ``__returns__`` (return
    type), which ``__init__`` then validates actual arguments against.
    """
    def __init_subclass__(cls, **kwargs):
        """
        Use init_subclass to map the arguments / return value based on type
        annotations, instead of going hard at it with a metaclass.
        Args:
            cls (type):
            **kwargs (dict):
        """
        cls._process_annotations()
    @classmethod
    def _process_annotations(cls):
        """
        Set __args__ and __returns__ attributes to cls. Those will be set to
        the user annotations, if any, or will default to:
        AnyArgs - for __args__
        Any - for __returns__
        Args:
            cls (object):
        """
        try:
            # Copy so popping "returns" below does not mutate the class's
            # real __annotations__ mapping.
            annotations = {**cls.__annotations__}
        except AttributeError:
            annotations = {}
        # Check for "returns"
        if "returns" in annotations:
            cls.__returns__ = annotations.pop("returns")
        elif hasattr(cls, "returns"):
            # A plain "returns" attribute (often a classmethod) may also
            # supply the return-type computation.
            cls.__returns__ = cls.returns
        else:
            cls.__returns__ = Any
        if len(annotations) == 0:
            cls.__args__ = AnyArgs
        elif len(annotations) == 1 and list(annotations.values())[0] is NoArgs:
            cls.__args__ = NoArgs
        else:
            cls.__args__ = tuple(annotations.values())
    def __init__(self, *args):
        self._validate_args(args)
        self.__values__ = args
        # Shadow the class-level __returns__ with the concrete return type
        # resolved for this particular argument combination.
        self.__returns__ = self._get_return_type()
    def _get_return_type(self):
        # If __returns__ is a function, the result of it called
        # on args is the actual return type
        if isinstance(self.__returns__, (FunctionType, MethodType)):
            # Replace F arg types with their return values
            return self.__returns__(
                tuple(self.__type_checker__._matched_types),
                self.__type_checker__._type_var_mapping,
            )
        return self.__returns__
    @property
    def values(self):
        # The raw argument values this function instance was built with.
        return self.__values__
    @classmethod
    def new(cls, name):
        """
        Returns a new subclass of cls, with the given name.
        Args:
            name (str):
        Returns:
            type
        """
        return type(name, (cls,), {})
    def _validate_args(self, args):
        """Type-check ``args`` against the declared ``__args__`` signature,
        storing the resulting TypeChecker on the instance."""
        if self.__args__ is AnyArgs:
            if len(args) == 0:
                raise TypeError(f"{type(self).__name__} takes at least one argument")
            # All expected args are Any
            arg_types = [Any] * len(args)
        elif self.__args__ is NoArgs:
            if len(args) > 0:
                raise TypeError(f"{type(self).__name__} takes no arguments")
            return
        elif len(self.__args__) != len(args):
            raise TypeError(
                f"{type(self).__name__} takes {len(self.__args__)} arguments, {len(args)} given"
            )
        else:
            # Replace F arg types with their return values
            arg_types = [
                arg.__returns__ if issubclass(type(arg), F) else type(arg)
                for arg in args
            ]
        self.__type_checker__ = TypeChecker(arg_types, self.__args__)
        self.__type_checker__.validate()
class ArithmeticF(WithOperatorSupport, F):
    """Base class for arithmetic functions (Add, Subtract, ...)."""
    @classmethod
    def returns(cls, matched_types, type_var_mapping):
        """
        Compute the return type: the bound constant type itself when every
        argument is scalar, otherwise a Collection of that constant type.
        """
        bound_constant = type_var_mapping[Constant][1]
        has_collection_arg = any(
            Collection in t.__mro__
            for t in matched_types
            if hasattr(t, "__mro__")
        )
        if has_collection_arg:
            return Collection[bound_constant]
        return bound_constant
class BooleanF(F):
    @classmethod
    def returns(cls, matched_types, type_var_mapping):
        """
        Boolean-valued functions always produce Collection[BooleanType].

        NOTE: the previous implementation branched on whether any argument
        was a Collection, but both branches returned the same type, so the
        check was dead code and has been removed.  (Its docstring claimed a
        scalar BooleanType result for scalar arguments, which the code never
        produced.)
        """
        return Collection[BooleanType]
class AggregateF(WithOperatorSupport, F):
    """Base class for aggregate functions (Max, Min, Sum)."""
    @classmethod
    def returns(cls, matched_types, type_var_mapping):
        """Return the concrete type bound to Constant, or Any when the
        type-variable mapping does not constrain it."""
        try:
            bound = type_var_mapping[Constant][1]
        except KeyError:
            bound = Any
        return bound
class ComparisonF(F):
    """Marker base class for comparison functions (no shared behaviour)."""
    pass
class OrderF(F):
    """Marker base class for ordering functions such as Asc and Desc."""
    pass
# Concrete arithmetic functions; their class-level annotations declare the
# accepted argument types (see F._process_annotations) and the result type
# is inferred by ArithmeticF.returns.
class Add(ArithmeticF):
    a: Union[Constant, Collection[Constant]]
    b: Union[Constant, Collection[Constant]]
class Subtract(ArithmeticF):
    a: Union[Constant, Collection[Any]]
    b: Union[Constant, Collection[Any]]
class Multiply(ArithmeticF):
    a: Union[Constant, Collection[Any]]
    b: Union[Constant, Collection[Any]]
class Divide(ArithmeticF):
    a: Union[Constant, Collection[Any]]
    b: Union[Constant, Collection[Any]]
class Modulo(ArithmeticF):
    a: Union[Constant, Collection[Any]]
    b: Union[Constant, Collection[Any]]
# Bitwise/boolean combinators and comparisons; all yield boolean results
# via BooleanF.returns.
class BitwiseOr(BooleanF):
    a: Union[Collection[BooleanType], BooleanType]
    b: Union[Collection[BooleanType], BooleanType]
class BitwiseAnd(BooleanF):
    a: Union[Collection[BooleanType], BooleanType]
    b: Union[Collection[BooleanType], BooleanType]
class BitwiseXor(BooleanF):
    a: Union[Collection[BooleanType], BooleanType]
    b: Union[Collection[BooleanType], BooleanType]
class Equals(BooleanF):
    a: Union[Constant, Collection[Constant]]
    b: Union[Constant, Collection[Constant]]
class LessThan(BooleanF):
    a: Union[Constant, Collection[Any]]
    b: Union[Constant, Collection[Any]]
class LessThanOrEqual(BooleanF):
    a: Union[Constant, Collection[Any]]
    b: Union[Constant, Collection[Any]]
class GreaterThan(BooleanF):
    a: Union[Constant, Collection[Any]]
    b: Union[Constant, Collection[Any]]
class GreaterThanOrEqual(BooleanF):
    a: Union[Constant, Collection[Any]]
    b: Union[Constant, Collection[Any]]
class NotEqual(BooleanF):
    a: Union[Constant, Collection[Any]]
    b: Union[Constant, Collection[Any]]
class Not(BooleanF):
    a: Union[BooleanType, Collection[BooleanType]]
# Miscellaneous functions: aliasing, star-selects, predicates, aggregates
# and ordering directions.  A "returns" annotation fixes the return type
# directly (see F._process_annotations).
class As(F):
    a: T
    b: str
    returns: T
class TableStar(F):
    a: Referenceable
    returns: Any
class Star(F):
    a: NoArgs
    returns: Any
class Like(BooleanF):
    a: Collection[StringType]
    b: str
class In(BooleanF):
    a: Collection[Any]
    b: Any
class Max(AggregateF):
    a: Collection[Constant]
class Min(AggregateF):
    a: Collection[Constant]
class Sum(AggregateF):
    a: Collection[Constant]
class Asc(OrderF):
    a: Collection[Any]
    returns: Collection[Any]
class Desc(OrderF):
    a: Collection[Any]
    returns: Collection[Any]
| 2.46875 | 2 |
gqn_v2/gqn_predictor.py | goodmattg/tf-gqn | 0 | 16249 | <reponame>goodmattg/tf-gqn<filename>gqn_v2/gqn_predictor.py
"""
Contains a canned predictor for a GQN.
"""
import os
import json
import numpy as np
import tensorflow as tf
from .gqn_graph import gqn_draw
from .gqn_params import create_gqn_config
def _normalize_pose(pose):
"""
Converts a camera pose into the GQN format.
Args:
pose: [x, y, z, yaw, pitch]; x, y, z in [-1, 1]; yaw, pitch in euler degree
Returns:
[x, y, z, cos(yaw), sin(yaw), cos(pitch), sin(pitch)]
"""
norm_pose = np.zeros((7, ))
norm_pose[0:3] = pose[0:3]
norm_pose[3] = np.cos(np.deg2rad(pose[3]))
norm_pose[4] = np.sin(np.deg2rad(pose[3]))
norm_pose[5] = np.cos(np.deg2rad(pose[4]))
norm_pose[6] = np.sin(np.deg2rad(pose[4]))
# print("Normalized pose: %s -> %s" % (pose, norm_pose)) # DEBUG
return norm_pose
class GqnViewPredictor(object):
    """
    GQN-based view predictor: renders novel camera views of a scene from a
    small set of (frame, pose) context observations.
    """
    def __init__(self, model_dir):
        """
        Instantiates a GqnViewPredictor from a saved checkpoint.
        Args:
            model_dir: Path to a GQN model. Must contain 'gqn_config.json', 'checkpoint'
                and 'model.ckpt-nnnnnn'.
        Returns:
            GqnViewPredictor
        """
        # load gqn_config.json
        with open(os.path.join(model_dir, 'gqn_config.json'), 'r') as f:
            gqn_config_dict = json.load(f)
        self._cfg = create_gqn_config(gqn_config_dict)
        self._ctx_size = self._cfg.CONTEXT_SIZE
        self._dim_pose = self._cfg.POSE_CHANNELS
        self._dim_img_h = self._cfg.IMG_HEIGHT
        self._dim_img_w = self._cfg.IMG_WIDTH
        self._dim_img_c = self._cfg.IMG_CHANNELS
        # create input placeholders
        self._ph_ctx_poses = tf.compat.v1.placeholder(
            shape=[1, self._ctx_size, self._dim_pose],
            dtype=tf.float32)
        self._ph_ctx_frames = tf.compat.v1.placeholder(
            shape=[1, self._ctx_size, self._dim_img_h, self._dim_img_w, self._dim_img_c],
            dtype=tf.float32)
        self._ph_query_pose = tf.compat.v1.placeholder(
            shape=[1, self._dim_pose], dtype=tf.float32)
        self._ph_tgt_frame = tf.compat.v1.placeholder(  # just used for graph construction
            shape=[1, self._dim_img_h, self._dim_img_w, self._dim_img_c],
            dtype=tf.float32)
        # re-create gqn graph
        self._net, self._ep = gqn_draw(
            query_pose=self._ph_query_pose,
            context_frames=self._ph_ctx_frames,
            context_poses=self._ph_ctx_poses,
            target_frame=self._ph_tgt_frame,
            model_params=self._cfg,
            is_training=False)
        print(">>> Instantiated GQN:")  # DEBUG
        for name, ep in self._ep.items():
            print("\t%s\t%s" % (name, ep))
        # create session
        self._sess = tf.compat.v1.InteractiveSession()
        # load snapshot
        saver = tf.compat.v1.train.Saver()
        ckpt_path = tf.train.latest_checkpoint(model_dir)
        saver.restore(self._sess, save_path=ckpt_path)
        print(">>> Restored parameters from: %s" % (ckpt_path, ))  # DEBUG
        # create data placeholders
        self._context_frames = []  # list of RGB frames [H, W, C]
        self._context_poses = []  # list of normalized poses [x, y, z, cos(yaw), sin(yaw), cos(pitch), sin(pitch)]
    @property
    def sess(self):
        """Expose the underlying tensorflow session."""
        return self._sess
    @property
    def frame_resolution(self):
        """Returns the video resolution as (H, W, C)."""
        return (self._dim_img_h, self._dim_img_w, self._dim_img_c)
    def add_context_view(self, frame: np.ndarray, pose: np.ndarray):
        """
        Adds a (frame, pose) tuple as context point for view interpolation.
        Args:
            frame: [H, W, C], in [0, 1]
            pose: [x, y, z, yaw, pitch]; x, y, z in [-1, 1]; yaw, pitch in euler degree
        """
        assert (frame >= 0.0).all() and (frame <= 1.0).all(), \
            "The context frame is not normalized in [0.0, 1.0] (float)."
        assert frame.shape == self.frame_resolution, \
            "The context frame's shape %s does not fit the model's shape %s." % \
            (frame.shape, self.frame_resolution)
        assert pose.shape == (self._dim_pose, ) or pose.shape == (5, ), \
            "The pose's shape %s does not match the specification (either %s or %s)." % \
            (pose.shape, self._dim_pose, (5, ))
        if pose.shape == (5, ):  # assume un-normalized pose
            pose = _normalize_pose(pose)
        # add frame-pose pair to context
        self._context_frames.append(frame)
        self._context_poses.append(pose)
    def clear_context(self):
        """Clears the current context."""
        self._context_frames.clear()
        self._context_poses.clear()
    def render_query_view(self, pose: np.ndarray):
        """
        Renders the scene from the given camera pose.
        Args:
            pose: [x, y, z, yaw, pitch]; x, y, z in [-1, 1]; yaw, pitch in euler degree
        """
        # BUGFIX: use builtin min here -- np.min(a, b) would interpret the
        # second length as the `axis` argument and raise instead of
        # producing the smaller count for the assertion message.
        assert len(self._context_frames) >= self._ctx_size \
            and len(self._context_poses) >= self._ctx_size, \
            "Not enough context points available. Required %d. Given: %d" % \
            (self._ctx_size, min(len(self._context_frames), len(self._context_poses)))
        assert pose.shape == (self._dim_pose, ) or pose.shape == (5, ), \
            "The pose's shape %s does not match the specification (either %s or %s)." % \
            (pose.shape, self._dim_pose, (5, ))
        if pose.shape == (5, ):  # assume un-normalized pose
            pose = _normalize_pose(pose)
        # use the most recent ctx_size context points, batched with size 1
        ctx_frames = np.expand_dims(
            np.stack(self._context_frames[-self._ctx_size:]), axis=0)
        ctx_poses = np.expand_dims(
            np.stack(self._context_poses[-self._ctx_size:]), axis=0)
        query_pose = np.expand_dims(pose, axis=0)
        feed_dict = {
            self._ph_query_pose : query_pose,
            self._ph_ctx_frames : ctx_frames,
            self._ph_ctx_poses : ctx_poses
        }
        [pred_frame] = self._sess.run([self._net], feed_dict=feed_dict)
        pred_frame = np.clip(pred_frame, a_min=0.0, a_max=1.0)
        return pred_frame
| 2.359375 | 2 |
mamba/post_solve_handling.py | xhochy/mamba | 0 | 16250 | # -*- coding: utf-8 -*-
# Copyright (C) 2019, QuantStack
# SPDX-License-Identifier: BSD-3-Clause
from conda.base.constants import DepsModifier, UpdateModifier
from conda._vendor.boltons.setutils import IndexedSet
from conda.core.prefix_data import PrefixData
from conda.models.prefix_graph import PrefixGraph
from conda._vendor.toolz import concatv
from conda.models.match_spec import MatchSpec
def post_solve_handling(context, prefix_data, final_precs, specs_to_add, specs_to_remove):
    """
    Apply DepsModifier-specific post-processing to a conda solver result.

    Args:
        context: conda context carrying the active deps_modifier/update_modifier.
        prefix_data: PrefixData of the target environment.
        final_precs: package records produced by the solver.
        specs_to_add: specs the user asked to install.
        specs_to_remove: specs the user asked to remove.
    Returns:
        (solution_precs, specs_to_add, specs_to_remove), possibly adjusted.
        With no special flag active, final_precs is passed through unchanged.
    """
    # Special case handling for various DepsModifier flags.
    if context.deps_modifier == DepsModifier.NO_DEPS:
        # In the NO_DEPS case, we need to start with the original list of packages in the
        # environment, and then only modify packages that match specs_to_add or
        # specs_to_remove.
        #
        # Help information notes that use of NO_DEPS is expected to lead to broken
        # environments.
        _no_deps_solution = IndexedSet(prefix_data.iter_records())
        only_remove_these = set(prec
                                for spec in specs_to_remove
                                for prec in _no_deps_solution
                                if spec.match(prec))
        _no_deps_solution -= only_remove_these
        only_add_these = set(prec
                             for spec in specs_to_add
                             for prec in final_precs
                             if spec.match(prec))
        remove_before_adding_back = set(prec.name for prec in only_add_these)
        _no_deps_solution = IndexedSet(prec for prec in _no_deps_solution
                                       if prec.name not in remove_before_adding_back)
        _no_deps_solution |= only_add_these
        # ssc.solution_precs = _no_deps_solution
        solution_precs = _no_deps_solution
        return solution_precs, specs_to_add, specs_to_remove
    # TODO: check if solution is satisfiable, and emit warning if it's not
    elif (context.deps_modifier == DepsModifier.ONLY_DEPS
            and context.update_modifier != UpdateModifier.UPDATE_DEPS):
        # Using a special instance of PrefixGraph to remove youngest child nodes that match
        # the original specs_to_add. It's important to remove only the *youngest* child nodes,
        # because a typical use might be `conda install --only-deps python=2 flask`, and in
        # that case we'd want to keep python.
        #
        # What are we supposed to do if flask was already in the environment?
        # We can't be removing stuff here that's already in the environment.
        #
        # What should be recorded for the user-requested specs in this case? Probably all
        # direct dependencies of flask.
        graph = PrefixGraph(final_precs, specs_to_add)
        removed_nodes = graph.remove_youngest_descendant_nodes_with_specs()
        specs_to_add = set(specs_to_add)
        specs_to_add_names = set((s.name for s in specs_to_add))
        # Record the direct dependencies of the removed (user-requested)
        # packages as if the user had requested them directly.
        for prec in removed_nodes:
            for dep in prec.depends:
                dep = MatchSpec(dep)
                if dep.name not in specs_to_add_names:
                    specs_to_add.add(dep)
        # unfreeze
        specs_to_add = frozenset(specs_to_add)
        # Add back packages that are already in the prefix.
        specs_to_remove_names = set(spec.name for spec in specs_to_remove)
        add_back = tuple(prefix_data.get(node.name, None) for node in removed_nodes
                         if node.name not in specs_to_remove_names)
        solution_precs = tuple(
            PrefixGraph(concatv(graph.graph, filter(None, add_back))).graph
        )
        return solution_precs, specs_to_add, specs_to_remove
    return final_precs, specs_to_add, specs_to_remove
# # TODO: check if solution is satisfiable, and emit warning if it's not
# elif ssc.update_modifier == UpdateModifier.UPDATE_DEPS:
# # Here we have to SAT solve again :( It's only now that we know the dependency
# # chain of specs_to_add.
# #
# # UPDATE_DEPS is effectively making each spec in the dependency chain a user-requested
# # spec. We don't modify pinned_specs, track_features_specs, or specs_to_add. For
# # all other specs, we drop all information but name, drop target, and add them to
# # the specs_to_add that gets recorded in the history file.
# #
# # It's like UPDATE_ALL, but only for certain dependency chains.
# graph = PrefixGraph(ssc.solution_precs)
# update_names = set()
# for spec in specs_to_add:
# node = graph.get_node_by_name(spec.name)
# update_names.update(ancest_rec.name for ancest_rec in graph.all_ancestors(node))
# specs_map = {name: MatchSpec(name) for name in update_names}
# # Remove pinned_specs and any python spec (due to major-minor pinning business rule).
# # Add in the original specs_to_add on top.
# for spec in ssc.pinned_specs:
# specs_map.pop(spec.name, None)
# if "python" in specs_map:
# python_rec = prefix_data.get("python")
# py_ver = ".".join(python_rec.version.split(".")[:2]) + ".*"
# specs_map["python"] = MatchSpec(name="python", version=py_ver)
# specs_map.update({spec.name: spec for spec in specs_to_add})
# new_specs_to_add = tuple(itervalues(specs_map))
# # It feels wrong/unsafe to modify this instance, but I guess let's go with it for now.
# specs_to_add = new_specs_to_add
# ssc.solution_precs = self.solve_final_state(
# update_modifier=UpdateModifier.UPDATE_SPECS,
# deps_modifier=ssc.deps_modifier,
# prune=ssc.prune,
# ignore_pinned=ssc.ignore_pinned,
# force_remove=ssc.force_remove
# )
# ssc.prune = False
# if ssc.prune:
# graph = PrefixGraph(ssc.solution_precs, final_environment_specs)
# graph.prune()
# ssc.solution_precs = tuple(graph.graph)
# return ssc
| 1.882813 | 2 |
Young Physicist.py | techonair/Codeforces | 0 | 16251 | num = input()
lucky = 0
for i in num:
if i == '4' or i == '7':
lucky += 1
counter = 0
for c in str(lucky):
if c == '4' or c == '7':
counter += 1
if counter == len(str(lucky)):
print("YES")
else:
print("NO")
| 3.65625 | 4 |
snakebids/utils/__init__.py | tkkuehn/snakebids | 0 | 16252 | from snakebids.utils.output import (
Mode,
get_time_hash,
prepare_output,
retrofit_output,
write_config_file,
write_output_mode,
)
from snakebids.utils.snakemake_io import (
glob_wildcards,
regex,
update_wildcard_constraints,
)
# Explicit public API of snakebids.utils: the names re-exported from the
# output and snakemake_io submodules above.
__all__ = [
    "Mode",
    "get_time_hash",
    "glob_wildcards",
    "prepare_output",
    "regex",
    "retrofit_output",
    "update_wildcard_constraints",
    "write_config_file",
    "write_output_mode",
]
| 1.226563 | 1 |
examples/custom_renderer/custom_renderer.py | victorbenichoux/vizno | 5 | 16253 | <reponame>victorbenichoux/vizno<gh_stars>1-10
import pydantic
from vizno.renderers import ContentConfiguration, render
from vizno.report import Report
class CustomObject(pydantic.BaseModel):
    """Example domain object to be displayed with a custom renderer."""

    parameter: int
class CustomRenderConfiguration(ContentConfiguration):
    """Render configuration passed to the front-end custom component."""

    parameter: int
@render.register
def _(obj: CustomObject):
    # Dispatch hook: tells vizno to render CustomObject instances with the
    # JS component "MyCustomComponent" loaded from ./my_renderer.js.
    return CustomRenderConfiguration(
        component="MyCustomComponent",
        component_module="./my_renderer.js",
        parameter=obj.parameter,
    )
# Build a demo report containing two custom-rendered widgets.
r = Report()

r.widget(CustomObject(parameter=10))
# NOTE(review): render() is called twice; the first call appears redundant
# since the second render below writes the full report — confirm intent.
r.render("./output")

r.widget(
    CustomObject(parameter=1000),
    name="It works with a name",
    description="and a description",
)
r.render("./output")
| 2.375 | 2 |
coregent/net/core.py | landoffire/coregent | 1 | 16254 | import abc
import collections.abc
import socket
__all__ = ['get_socket_type', 'get_server_socket', 'get_client_socket',
'SocketReader', 'SocketWriter', 'JSONReader', 'JSONWriter']
def get_socket_type(host=None, ip_type=None):
    """Return the socket address family to use for *host*.

    An explicit *ip_type* always wins; otherwise a host containing a
    colon is treated as IPv6 and everything else as IPv4.
    """
    if ip_type is not None:
        return ip_type
    looks_like_ipv6 = bool(host) and ':' in host
    return socket.AF_INET6 if looks_like_ipv6 else socket.AF_INET
def get_server_socket(host, port, ip_type=None):
    """Create a socket of the right family and bind it to (host, port).

    The caller is responsible for calling listen() and closing the socket.
    """
    sock = socket.socket(get_socket_type(host, ip_type))
    sock.bind((host, port))
    return sock
def get_client_socket(host, port, ip_type=None):
    """Create a socket of the right family and connect it to (host, port).

    The caller is responsible for closing the returned socket.
    """
    sock = socket.socket(get_socket_type(host, ip_type))
    sock.connect((host, port))
    return sock
class SocketReader(collections.abc.Iterator):
    """Abstract iterator over messages received from a socket.

    Subclasses implement __next__ (from Iterator) plus close().
    """
    @abc.abstractmethod
    def close(self):
        """Release the underlying socket/resources."""
        ...
class SocketWriter(abc.ABC):
    """Abstract writer that sends messages over a socket."""
    @abc.abstractmethod
    def send(self, message):
        """Send a single message over the underlying socket."""
        ...

    @abc.abstractmethod
    def close(self):
        """Release the underlying socket/resources."""
        ...
| 2.921875 | 3 |
hackdayproject/urls.py | alstn2468/Naver_Campus_Hackday_Project | 1 | 16255 | <gh_stars>1-10
from django.urls import path, include
from django.contrib import admin
import hackdayproject.main.urls as main_urls
import hackdayproject.repo.urls as repo_urls
# URL routing: Django admin, social-auth OAuth endpoints, then the main
# app at the site root and the repo app under /repo/.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('oauth/', include('social_django.urls', namespace='social')),
    path('', include(main_urls)),
    path('repo/', include(repo_urls))
]
| 1.609375 | 2 |
tests/test_ciftify_recon_all.py | lgrennan/ciftify | 0 | 16256 | #!/usr/bin/env python
import unittest
import logging
import importlib
import copy
import os
from mock import patch
from nose.tools import raises
logging.disable(logging.CRITICAL)  # silence all ciftify logging during tests

# Imported via importlib because the module lives under ciftify/bin/
ciftify_recon_all = importlib.import_module('ciftify.bin.ciftify_recon_all')
class ConvertFreesurferSurface(unittest.TestCase):
    """Tests for the wb_command calls made by convert_freesurfer_surface."""

    meshes = ciftify_recon_all.define_meshes('/somewhere/hcp/subject_1',
            "164", ["32"], '/tmp/temp_dir', False)

    @staticmethod
    def _calls_containing(mock_run, *tokens):
        """Return the argument list of every mocked run() call whose
        arguments contain all of *tokens*."""
        matches = []
        for call in mock_run.call_args_list:
            args = call[0][0]
            if all(token in args for token in tokens):
                matches.append(args)
        return matches

    @patch('ciftify.bin.ciftify_recon_all.run')
    def test_secondary_type_option_adds_to_set_structure_command(self, mock_run):
        secondary_type = 'GRAY_WHITE'
        ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
                '/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
                surface_secondary_type=secondary_type)

        assert mock_run.call_count >= 1
        set_structure_calls = self._calls_containing(mock_run, '-set-structure')
        # If this is empty the wb_command -set-structure call is not being made
        # at all. It is expected at least once regardless of secondary-type option
        assert set_structure_calls
        for args in set_structure_calls:
            assert '-surface-secondary-type' in args
            assert secondary_type in args

    @patch('ciftify.bin.ciftify_recon_all.run')
    def test_secondary_type_not_set_if_option_not_used(self, mock_run):
        ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
                '/somewhere/freesurfer/subject_1', self.meshes['T1wNative'])

        assert mock_run.call_count >= 1
        set_structure_calls = self._calls_containing(mock_run, '-set-structure')
        # If this is empty the wb_command -set-structure call is not being made
        # at all. It is expected at least once regardless of secondary-type option
        assert set_structure_calls
        for args in set_structure_calls:
            assert '-surface-secondary-type' not in args

    @patch('ciftify.bin.ciftify_recon_all.run')
    def test_wbcommand_surface_apply_affine_called_when_cras_option_set(self,
            mock_run):
        cras_file = '/somewhere/cras.mat'
        ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
                '/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
                cras_mat=cras_file)

        assert mock_run.call_count >= 1
        # The wb_command -surface-apply-affine command should be run once for
        # each hemisphere
        affine_calls = self._calls_containing(mock_run, '-surface-apply-affine',
                cras_file)
        assert len(affine_calls) == 2

    @patch('ciftify.bin.ciftify_recon_all.run')
    def test_no_wbcommand_added_when_cras_option_not_set(self, mock_run):
        ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
                '/somewhere/freesurfer/subject_1', self.meshes['T1wNative'])

        assert mock_run.call_count >= 1
        assert len(self._calls_containing(mock_run, '-surface-apply-affine')) == 0

    @patch('ciftify.bin.ciftify_recon_all.run')
    def test_add_to_spec_option_adds_wbcommand_call(self, mock_run):
        ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
                '/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
                add_to_spec=True)

        assert mock_run.call_count >= 1
        # Should add one call for each hemisphere
        assert len(self._calls_containing(mock_run, '-add-to-spec-file')) == 2

    @patch('ciftify.bin.ciftify_recon_all.run')
    def test_add_to_spec_option_not_present_when_option_not_set(self, mock_run):
        ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
                '/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
                add_to_spec=False)

        assert mock_run.call_count >= 1
        assert len(self._calls_containing(mock_run, '-add-to-spec-file')) == 0
class CreateRegSphere(unittest.TestCase):
    """Tests for ciftify_recon_all.create_reg_sphere."""

    @patch('ciftify.bin.ciftify_recon_all.run_MSMSulc_registration')
    @patch('ciftify.bin.ciftify_recon_all.run_fs_reg_LR')
    def test_reg_sphere_is_not_set_to_none_for_any_mode(self, mock_fs_reg,
            mock_msm_reg):
        """
        Should fail if MSMSulc registration is implemented without supplying a
        value for reg_sphere
        """
        # settings stub, to allow tests to be written.
        class Settings(object):
            def __init__(self, name):
                self.high_res = 999
                self.reg_name = name
                self.ciftify_data_dir = '/somedir/'
                self.msm_config = None

        # Test reg_sphere set when in FS mode
        settings = Settings('FS')
        meshes = {'AtlasSpaceNative' : ''}
        subject_id = 'some_id'

        reg_sphere = ciftify_recon_all.create_reg_sphere(settings, subject_id, meshes)
        assert reg_sphere is not None

        # Test reg_sphere set when in MSMSulc mode
        settings = Settings('MSMSulc')
        reg_sphere = ciftify_recon_all.create_reg_sphere(settings, subject_id, meshes)
        assert reg_sphere is not None
class CopyAtlasRoiFromTemplate(unittest.TestCase):
    """Tests for ciftify_recon_all.copy_atlas_roi_from_template."""

    @patch('ciftify.bin.ciftify_recon_all.link_to_template_file')
    def test_does_nothing_when_roi_src_does_not_exist(self, mock_link):
        hcp_dir = '/somepath/hcp'
        hcp_templates_dir = '/someotherpath/ciftify/data'
        mesh_settings = {'meshname' : 'some_mesh'}
        subject_id = 'some_id'

        ciftify_recon_all.copy_atlas_roi_from_template(hcp_dir, hcp_templates_dir,
                subject_id, mesh_settings)

        # No template ROI exists at the fake paths, so no link should be made.
        assert mock_link.call_count == 0
class DilateAndMaskMetric(unittest.TestCase):
    """Tests for ciftify_recon_all.dilate_and_mask_metric."""

    @patch('ciftify.bin.ciftify_recon_all.run')
    def test_does_nothing_when_dscalars_map_doesnt_mask_medial_wall(self,
            mock_run):
        # Stubs to allow testing
        dscalars = {'some_map' : {'mask_medialwall' : False}}
        mesh = {'tmpdir' : '/tmp/temp_dir',
                'meshname' : 'some_mesh'}

        ciftify_recon_all.dilate_and_mask_metric('some_id', mesh, dscalars)

        # No map requires medial-wall masking, so no wb_command is run.
        assert mock_run.call_count == 0
class TestSettings(unittest.TestCase):
    """Tests for ciftify_recon_all.Settings construction and validation."""

    arguments = {'--hcp-data-dir' : '/somepath/pipelines/hcp',
                 '--fs-subjects-dir' : '/somepath/pipelines/freesurfer',
                 '--resample-LowRestoNative' : False,
                 '<Subject>' : 'STUDY_SITE_ID_01',
                 '--settings-yaml' : None,
                 '--T2': False,
                 '--MSMSulc': False,
                 '--MSM-config': None}

    yaml_config = {'high_res' : "164",
                   'low_res' : ["32"],
                   'grayord_res' : [2],
                   'dscalars' : {},
                   'registration' : {'src_dir' : 'T1w',
                                     'dest_dir' : 'MNINonLinear',
                                     'xfms_dir' : 'MNINonLinear/xfms'},
                   'FSL_fnirt' : {'2mm' : {'FNIRTConfig' : 'etc/flirtsch/T1_2_MNI152_2mm.cnf'}}}

    @staticmethod
    def _set_up_environment(mock_ciftify, mock_fsl, mock_exists):
        """Give the mocked environment lookups sane defaults.

        Points the ciftify data dir and FSL at fake paths (to avoid test
        failure if the shell environment changes) and makes every path
        appear to exist (to avoid sys.exit calls due to the mock
        directories not existing). Tests override these as needed.
        """
        mock_ciftify.return_value = '/somepath/ciftify/data'
        mock_fsl.return_value = '/somepath/FSL'
        mock_exists.return_value = True

    @patch('os.path.exists')
    @patch('ciftify.config.find_fsl')
    @patch('ciftify.config.find_ciftify_global')
    def test_fs_root_dir_set_to_user_value_when_given(self, mock_ciftify,
            mock_fsl, mock_exists):
        self._set_up_environment(mock_ciftify, mock_fsl, mock_exists)

        settings = ciftify_recon_all.Settings(self.arguments)

        assert settings.fs_root_dir == self.arguments['--fs-subjects-dir']

    @raises(SystemExit)
    @patch('ciftify.config.find_freesurfer_data')
    @patch('os.path.exists')
    @patch('ciftify.config.find_fsl')
    @patch('ciftify.config.find_ciftify_global')
    def test_exits_when_no_fs_dir_given_and_cannot_find_shell_value(self,
            mock_ciftify, mock_fsl, mock_exists, mock_fs):
        self._set_up_environment(mock_ciftify, mock_fsl, mock_exists)

        # work with a deep copy of arguments to avoid modifications having any
        # effect on later tests
        args_copy = copy.deepcopy(self.arguments)
        args_copy['--fs-subjects-dir'] = None
        # Just in case the shell environment has the variable set...
        mock_fs.return_value = None

        settings = ciftify_recon_all.Settings(args_copy)
        # Should never reach this line
        assert False

    @raises(SystemExit)
    @patch('os.path.exists')
    @patch('ciftify.config.find_fsl')
    @patch('ciftify.config.find_ciftify_global')
    def test_exits_gracefully_when_fsl_dir_cannot_be_found(self, mock_ciftify,
            mock_fsl, mock_exists):
        self._set_up_environment(mock_ciftify, mock_fsl, mock_exists)
        mock_fsl.return_value = None

        settings = ciftify_recon_all.Settings(self.arguments)
        # Should never reach this line
        assert False

    @raises(SystemExit)
    @patch('os.path.exists')
    @patch('ciftify.config.find_fsl')
    @patch('ciftify.config.find_ciftify_global')
    def test_exits_gracefully_when_ciftify_data_dir_not_found(self, mock_ciftify,
            mock_fsl, mock_exists):
        self._set_up_environment(mock_ciftify, mock_fsl, mock_exists)
        mock_ciftify.return_value = None

        settings = ciftify_recon_all.Settings(self.arguments)
        assert False

    @raises(SystemExit)
    @patch('os.path.exists')
    @patch('ciftify.config.find_fsl')
    @patch('ciftify.config.find_ciftify_global')
    def test_exits_gracefully_when_ciftify_data_dir_doesnt_exist(self,
            mock_ciftify, mock_fsl, mock_exists):
        ciftify_data = '/somepath/ciftify/data'
        self._set_up_environment(mock_ciftify, mock_fsl, mock_exists)
        # Only the ciftify data dir is reported missing.
        mock_exists.side_effect = lambda path : False if path == ciftify_data else True

        settings = ciftify_recon_all.Settings(self.arguments)
        assert False

    @patch('os.path.exists')
    @patch('ciftify.config.find_fsl')
    @patch('ciftify.config.find_ciftify_global')
    def test_default_config_read_when_no_config_yaml_given(self,
            mock_ciftify, mock_fsl, mock_exists):
        self._set_up_environment(mock_ciftify, mock_fsl, mock_exists)

        settings = ciftify_recon_all.Settings(self.arguments)
        config = settings._Settings__config

        assert config is not None

    @raises(SystemExit)
    @patch('os.path.exists')
    @patch('ciftify.config.find_fsl')
    @patch('ciftify.config.find_ciftify_global')
    def test_exits_gracefully_when_yaml_config_file_doesnt_exist(self,
            mock_ciftify, mock_fsl, mock_exists):
        self._set_up_environment(mock_ciftify, mock_fsl, mock_exists)
        yaml_file = '/somepath/fake_config.yaml'
        mock_exists.side_effect = lambda path: False if path == yaml_file else True

        # work with a deep copy of arguments to avoid modifications having any
        # effect on later tests
        args_copy = copy.deepcopy(self.arguments)
        args_copy['--settings-yaml'] = yaml_file

        settings = ciftify_recon_all.Settings(args_copy)
        assert False

    @patch('os.path.exists')
    @patch('ciftify.config.find_fsl')
    @patch('ciftify.config.find_ciftify_global')
    def test_dscalars_doesnt_contain_msmsulc_settings_when_reg_name_is_FS(
            self, mock_ciftify, mock_fsl, mock_exists):
        self._set_up_environment(mock_ciftify, mock_fsl, mock_exists)

        settings = ciftify_recon_all.Settings(self.arguments)

        if settings.reg_name == 'FS':
            assert 'ArealDistortion_MSMSulc' not in settings.dscalars.keys()
        else:
            assert True

    @patch('os.path.exists')
    @patch('ciftify.config.find_fsl')
    @patch('ciftify.config.find_ciftify_global')
    def test_msm_config_set_to_none_in_fs_mode(self, mock_ciftify, mock_fsl,
            mock_exists):
        self._set_up_environment(mock_ciftify, mock_fsl, mock_exists)

        settings = ciftify_recon_all.Settings(self.arguments)

        assert settings.msm_config is None

    @patch('os.path.exists')
    @patch('ciftify.config.find_fsl')
    @patch('ciftify.config.find_ciftify_global')
    def test_msm_config_set_to_default_when_user_config_not_given(self,
            mock_ciftify, mock_fsl, mock_exists):
        self._set_up_environment(mock_ciftify, mock_fsl, mock_exists)

        # Modify copy of arguments, so changes dont effect other tests
        args = copy.deepcopy(self.arguments)
        args['--MSMSulc'] = True
        args['--MSM-config'] = None

        settings = ciftify_recon_all.Settings(args)

        assert settings.msm_config is not None

    @raises(SystemExit)
    @patch('os.path.exists')
    @patch('ciftify.config.find_fsl')
    @patch('ciftify.config.find_ciftify_global')
    def test_sys_exit_raised_when_user_msm_config_doesnt_exist(self, mock_ciftify,
            mock_fsl, mock_exists):
        self._set_up_environment(mock_ciftify, mock_fsl, mock_exists)
        user_config = "/some/path/nonexistent_config"
        mock_exists.side_effect = lambda path: False if path == user_config else True

        args = copy.deepcopy(self.arguments)
        args['--MSMSulc'] = True
        args['--MSM-config'] = user_config

        settings = ciftify_recon_all.Settings(args)
        # Test should never reach this line
        assert False

    @raises(SystemExit)
    @patch('ciftify.bin.ciftify_recon_all.Settings._Settings__read_settings')
    @patch('os.path.exists')
    @patch('ciftify.config.find_fsl')
    @patch('ciftify.config.find_ciftify_global')
    def test_exits_gracefully_when_expected_registration_path_missing(self,
            mock_ciftify, mock_fsl, mock_exists, mock_yaml_settings):
        self._set_up_environment(mock_ciftify, mock_fsl, mock_exists)

        # Use copy to avoid side effects in other tests
        yaml_copy = copy.deepcopy(self.yaml_config)
        del yaml_copy['registration']['src_dir']
        mock_yaml_settings.return_value = yaml_copy

        settings = ciftify_recon_all.Settings(self.arguments)
        assert False

    @raises(SystemExit)
    @patch('ciftify.bin.ciftify_recon_all.Settings._Settings__read_settings')
    @patch('os.path.exists')
    @patch('ciftify.config.find_fsl')
    @patch('ciftify.config.find_ciftify_global')
    def test_exits_gracefully_when_resolution_not_defined_for_given_method(self,
            mock_ciftify, mock_fsl, mock_exists, mock_yaml_settings):
        self._set_up_environment(mock_ciftify, mock_fsl, mock_exists)

        # Use copy to avoid side effects in other tests
        yaml_copy = copy.deepcopy(self.yaml_config)
        del yaml_copy['FSL_fnirt']['2mm']
        mock_yaml_settings.return_value = yaml_copy

        settings = ciftify_recon_all.Settings(self.arguments)
        assert False

    @raises(SystemExit)
    @patch('ciftify.bin.ciftify_recon_all.Settings._Settings__read_settings')
    @patch('os.path.exists')
    @patch('ciftify.config.find_fsl')
    @patch('ciftify.config.find_ciftify_global')
    def test_exits_gracefully_when_registration_resolution_file_doesnt_exist(self,
            mock_ciftify, mock_fsl, mock_exists, mock_yaml_settings):
        fsl_dir = '/somepath/FSL'
        self._set_up_environment(mock_ciftify, mock_fsl, mock_exists)
        mock_fsl.return_value = fsl_dir
        mock_yaml_settings.return_value = self.yaml_config

        required_file = os.path.join(os.path.dirname(fsl_dir),
                self.yaml_config['FSL_fnirt']['2mm']['FNIRTConfig'])
        mock_exists.side_effect = lambda x: False if x == required_file else True

        settings = ciftify_recon_all.Settings(self.arguments)
        assert False
| 2.15625 | 2 |
Wrangle OSM Dataset.py | Boykai/Project-3-Wrangle-OpenStreetMap-Dataset | 1 | 16257 | <reponame>Boykai/Project-3-Wrangle-OpenStreetMap-Dataset
# -*- coding: utf-8 -*-
'''
Created on Tue Jan 17 16:19:36 2017
@author: Boykai
'''
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.cElementTree as ET # Use cElementTree or lxml if too slow
from collections import defaultdict
import re
import pprint
import string
import codecs
import json
import os
from pymongo import MongoClient
class OSMFile(object):
    '''
    OSM File handler
    From Udacity

    Reads a full OpenStreetMap export and writes a smaller sample file
    containing every k-th top-level element, to speed up auditing/cleaning.

    NOTE(review): createSampleFile opens the output 'wb' but writes str
    literals -- this only works on Python 2; confirm target interpreter.
    '''
    def __init__(self, osm_file, sample_file, sample_size):
        '''
        Initialize a OSM File instance, saves all sampled top level tags
        into sample_file.osm, saves all parameters as attributes of instance.

        osm_file: Original OSM input file, downloaded from
                  OSM website, OSM file path. (a string)

        sample_file: Sampled OSM output file, created in given sample_file
                     path (a string)

        sample_size: A sample size that takes every sample_size-th
                     top level element (a non-zero, positive integer)
        '''
        self.osm_file = osm_file
        self.sample_file = sample_file
        self.sample_size = sample_size

    def getSampleFile(self):
        '''
        @return sample file name and/or directory. (a string)
        '''
        return self.sample_file

    def getOsmFile(self):
        '''
        @return OSM file name and/or directory. (a string)
        '''
        return self.osm_file

    def getSampleSize(self):
        '''
        @return sample size. (a non-zero, positive integer)
        '''
        return self.sample_size

    def getElement(self, tags=('node', 'way', 'relation')):
        '''
        XML tag element generator

        tags: tag elements to search for in OSM file (a tuple of strings)

        @yield element if it is the right type of tag

        Reference:
        http://stackoverflow.com/questions/3095434/inserting-newlines-in-xml-file-generated-via-xml-etree-elementtree-in-python
        '''
        context = iter(ET.iterparse(self.getOsmFile(), events=('start', 'end')))
        _, root = next(context)
        for event, elem in context:
            if event == 'end' and elem.tag in tags:
                yield elem
                # Clear the root after each yielded element to keep memory
                # usage flat while streaming a potentially huge file.
                root.clear()

    def createSampleFile(self):
        '''
        Creates and writes to sample file, a new OSM file to work with
        while cleaning. By created a sample file, the time it takes to
        analysis, audit, clean, and write the clean data is greatly reduced.
        '''
        print('Creating sample XML file...')

        with open(self.getSampleFile(), 'wb') as f:
            f.write("<?xml version='1.0' encoding='UTF-8'?>\n")
            f.write('<osm>\n ')

            k = self.getSampleSize()
            # Write every kth top level element
            for i, element in enumerate(self.getElement()):
                if i % k == 0:
                    f.write(ET.tostring(element, encoding='utf-8'))

            f.write('</osm>')
class CleanStreets(object):
    '''
    Clean Streets of OSM File
    From Udacity

    Audits street-name suffixes and zip codes in a sampled OSM file,
    builds a dirty->clean street-name mapping, and writes a cleaned
    copy of the XML to 'output.osm'.
    '''
    def __init__(self, sample_file):
        '''
        Initialize a Clean Streets instance, saves all parameters as attributes
        of the instance. Finds and returns all instances of unexpected
        street suffixes.

        sample_file: Sampled OSM output file, created in given sample_file
                     path (a string)

        street_type_re: Regex created to find the street suffix for
                        tag attributes. (a regex)

        expected: Expected street names, street names which are deemed as
                  acceptable format (a list of strings)

        mapping: Keys that are found as street suffix for tag attributes are
                 to be replaced by key's value (a string dictonary of strings)

        clean_streets_dict: Dictionary mapping dirty street names to clean
                            street names (a dictionary of strings)

        expected_zip: List of valid Brooklyn zip codes (a list of strings)
        '''
        self.sample_file = sample_file
        # Matches the last whitespace-delimited token of a street name,
        # i.e. the street-type suffix ("Street", "Ave", ...).
        self.street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
        self.expected = ['Alley',
                         'Americas',
                         'Atrium',
                         'Avenue',
                         'Bayside',
                         'Boulevard',
                         'Bowery',
                         'Broadway',
                         'Bushwick',
                         'Center',
                         'Circle',
                         'Clinton',
                         'Close',
                         'Commons',
                         'Court',
                         'Crescent',
                         'Drive',
                         'East',
                         'Expressway',
                         'Extension',
                         'Finest',
                         'Fulton',
                         'Gardens',
                         'Gratta',
                         'Hamilton',
                         'Heights',
                         'Highway',
                         'Island',
                         'Lafayette',
                         'Lane',
                         'Loop',
                         'Macdougal',
                         'Mall',
                         'MetroTech',
                         'Mews',
                         'North',
                         'Oval',
                         'Park',
                         'Parkway',
                         'Path',
                         'Piers',
                         'Place',
                         'Plaza',
                         'Promenade',
                         'Remsen',
                         'Reservation',
                         'Rico',
                         'Road',
                         'Roadbed',
                         'Rockaways',
                         'Row',
                         'Slip',
                         'South',
                         'Southwest',
                         'Square',
                         'Street',
                         'Terrace',
                         'Trail',
                         'Turnpike',
                         'Vanderbilt',
                         'Village',
                         'Warren',
                         'Walk',
                         'West',
                         'WestBayside',
                         'Willoughby']

        # Single uppercase letters (e.g. "Avenue J") are also acceptable.
        # NOTE(review): list comprehension used purely for its side effect.
        [self.expected.append(letter) for letter in string.ascii_uppercase]

        # Suffix-level fixes: bad suffix token -> canonical suffix.
        # NOTE(review): keys with trailing spaces ('St ', 'St. ') can never
        # match a suffix token captured by street_type_re -- confirm intent.
        self.dirty_to_clean_streets = {'Ave' : 'Avenue',
                                       'Ave.' : 'Avenue',
                                       'Avene' : 'Avenue',
                                       'Avenue,' : 'Avenue',
                                       'avenue' : 'Avenue',
                                       'ave' : 'Avenue',
                                       'Blvd' : 'Boulevard',
                                       'Crt' : 'Court',
                                       'Ctr' : 'Court',
                                       'Dr' : 'Drive',
                                       'Plz' : 'Plaza',
                                       'Rd' : 'Road',
                                       'ST' : 'Street',
                                       'St': 'Street',
                                       'St.': 'Street',
                                       'st' : 'Street',
                                       'St ' : 'Street',
                                       'St. ' : 'Street',
                                       'Steet' : 'Street',
                                       'street' : 'Street',
                                       'Streeet' : 'Street'}

        # Whole-name fixes for one-off dirty values found during auditing.
        self.clean_streets_dict = {'Graham Avenue #1' : 'Graham Avenue',
                                   'Nostrand Avenue, #107' : 'Nostrand Avenue',
                                   '305 Schermerhorn St., Brooklyn, NY 11217' : 'Schermerhorn Street',
                                   '1st' : '1st Avenue',
                                   'Coney Island Avenue, Ste 200' : 'Coney Island Avenue',
                                   'Broadway #205' : 'Broadway',
                                   '218650358': 'NaN',
                                   '16th Street # 3' : '16th Street',
                                   'Hanover Square #3' : 'Hanover Square',
                                   'Union Avenue 4B' : 'Union Avenue',
                                   'Joralemon Street, #4CF' : 'Joralemon Street',
                                   'Main St., Suite 500' : 'Main Street',
                                   'Broadway #506' : 'Broadway',
                                   'Mott St #507' : 'Mott Street',
                                   '32nd street with 7th' : '32nd Street',
                                   '861' : 'NaN',
                                   'wyckoff ave unit A28' : 'Wyckoff Avenue',
                                   'Dekalb Ave, 2nd Floor' : 'Dekalb Avenue',
                                   'Wall Street 12th Floor' : 'Wall Street',
                                   'Manhattan Avenue (2nd Floor)' : 'Manhattan Avenue',
                                   # NOTE(review): leading space in the value
                                   # below looks unintended -- confirm.
                                   'University Plz' : ' University Plaza',
                                   'Linden Boulevard Outer Eb Rb' : 'Linden Boulevard',
                                   'bus_stop' : 'NaN',
                                   'DeKalb Avenue 4 floor' : 'Dekalb Avenue'}

        self.expected_zip = ['11201',
                             '11203',
                             '11204',
                             '11205',
                             '11206',
                             '11207',
                             '11208',
                             '11209',
                             '11210',
                             '11211',
                             '11212',
                             '11213',
                             '11214',
                             '11215',
                             '11216',
                             '11217',
                             '11218',
                             '11219',
                             '11220',
                             '11221',
                             '11222',
                             '11223',
                             '11224',
                             '11225',
                             '11226',
                             '11228',
                             '11229',
                             '11230',
                             '11231',
                             '11232',
                             '11233',
                             '11234',
                             '11235',
                             '11236',
                             '11237',
                             '11238',
                             '11239']

    def getSampleFile(self):
        '''
        @return sample file name and/or directory. (a string)
        '''
        return self.sample_file

    def getStreetTypeRegex(self):
        '''
        @return street name type regex. (a string regex)
        '''
        return self.street_type_re

    def getExpected(self):
        '''
        @return street suffixes. (a list of strings)
        '''
        return self.expected

    def getDirtyToCleanStreets(self):
        '''
        @return dirty to clean streets mapping dict. (a dictionary of strings)
        '''
        return self.dirty_to_clean_streets

    def getCleanStreetsDict(self):
        '''
        @return clean street dict. (a dictionary of strings)
        '''
        return self.clean_streets_dict

    def getExpectedZip(self):
        '''
        @return list of expected zip codes for Brooklyn. (a list of strings)
        '''
        return self.expected_zip

    def auditStreetType(self, street_types, street_name):
        '''
        Audits street type by checking if the street type is in the list
        of expected street type values.

        Searches street_type aganist regex to find street suffix. If the street
        type is not in defaultdict set, it is added to street_types defaultdict.
        The string of street_name is the value set to the street_type key
        in street_types defaultdict.

        street_types: Street type is a dictionary set, which is mutated within
                      the function, passed from audit function.
                      (a string defaultdict set of strings)

        street_name: Street name string value found in tag attribute. (a string)
        '''
        m = self.getStreetTypeRegex().search(street_name)
        if m:
            street_type = m.group()
            if street_type not in self.getExpected():
                street_types[street_type].add(street_name)

    def auditZipType(self, zip_types, zip_name):
        '''
        Audits zip code type by checking if the zip type is in the list
        of expected zip type values.

        The string of zip_name is the value set to the zip_type key
        in zip_types defaultdict.

        zip_types: Zip type is a dictionary set, which is mutated within
                   the function, passed from audit function.
                   (a string defaultdict set of strings)

        zip_name: Zip name string value found in tag attribute. (a string)
        '''
        # Each invalid zip becomes a key mapped to {'NaN'} -- only the keys
        # are meaningful to callers.
        if zip_name not in self.getExpectedZip():
            zip_types[zip_name].add('NaN')

    def isStreetName(self, elem):
        '''
        Evaluates if tag attribute is equal to a address of type street.

        elem: XML tag element object (a object)

        @return: Bool if the tag attribute is equal to a address of type street.
        '''
        return (elem.attrib['k'] == 'addr:street')

    def isZipCode(self, elem):
        '''
        Evaluates if tag attribute is equal to a address of type postcode.

        elem: XML tag element object (a object)

        @return: Bool if the tag attribute is equal to a address of type postcode.
        '''
        return (elem.attrib['k'] == 'addr:postcode')

    def audit(self, audit_file):
        '''
        Iterates over XML tag elements in order to find all of the addresses
        of type street.

        Evaluates the tag 'v' attributes to determine if the street suffixes
        are within the expected street suffix list.

        @return: Defaultdict of unexpected street suffixes as keys,
                 the full street names as values. (a defaultdict of strings)
        '''
        with open(audit_file, 'r') as f:
            street_types = defaultdict(set)
            zip_types = defaultdict(set)
            f.seek(0)

            for event, elem in ET.iterparse(f, events=('start',)):
                if elem.tag == 'node' or elem.tag == 'way':
                    for tag in elem.iter('tag'):
                        if self.isStreetName(tag):
                            self.auditStreetType(street_types, tag.attrib['v'])
                        if self.isZipCode(tag):
                            self.auditZipType(zip_types, tag.attrib['v'])
                elem.clear()

        street_types = self.sortStreets(street_types)

        return [street_types, zip_types]

    def sortStreets(self, unsorted_streets):
        '''
        Sorts street types defaultdict by key, with proper values.

        unsorted_streets: Unsorted defaultdict of street types with values
                          equal to the instances of street type
                          (a defaultdict of strings)

        @return: Sorted defaultdict of unexpected street suffixes as keys,
                 the full street names as values. (a defaultdict of strings)

        NOTE(review): insertion order is only preserved by dicts on
        CPython 3.6+/Python 3.7+ -- on older interpreters this sort
        has no effect. Confirm target interpreter.
        '''
        sorted_streets = {}
        sorted_keys = sorted(unsorted_streets.keys())

        for key in sorted_keys:
            sorted_streets[key] = unsorted_streets[key]

        return sorted_streets

    def clean(self, unexpected_dirty_streets):
        '''
        Get unexpected street suffixes and replace with acceptable street
        suffixes when determined that the data is unacceptably dirty.

        Assumes that every key given by self.audit() is of type string.
        Assumes that every assigned to a key value given by self.adult() is of
        type string.
        Assumes that every key given by self.audit() has valid string value.

        @return: Clean sorted defaultdict of street names with correct suffixes
                 (a defaultdict of strings)
        '''
        unexpected_streets = unexpected_dirty_streets.copy()

        #Iterate over unexpected street types found
        for key in unexpected_streets.keys():
            # Determine if unexpected street type is not acceptable
            if key in self.dirty_to_clean_streets.keys():
                list_of_streets = list(unexpected_streets[key])

                # Iterate over streets of unacceptable street type
                for i, street in enumerate(list_of_streets):
                    # Strip the bad suffix, then append the canonical one.
                    street_name = street[ : -len(key)]
                    good_street = (street_name + self.dirty_to_clean_streets[key])
                    bad_street = str(list(unexpected_streets[key])[i])

                    # Save each unacceptabled street as [key] to
                    # acceptable street as [value] in clean_streets_dict
                    self.clean_streets_dict[bad_street] = good_street

        return self.clean_streets_dict

    def writeClean(self, cleaned_streets):
        '''
        Get cleaned streets mapping dictionary and use that dictionary to find
        and replace all bad street name tag attributes within XML file.

        Iterate through XML file to find all bad instances of tag attribute
        street names, and replace with correct mapping value from cleaned_streets
        mapping dictionary.

        Stores new cleaned XML file in 'output.osm'

        celaned_streets: Clean sorted defaultdict of street names with correct suffixes
                         (a defaultdict of strings)

        NOTE(review): ET.tostring(..., encoding='utf-8') returns bytes on
        Python 3 while 'output.osm' is opened in text mode -- this method
        appears to target Python 2; confirm.
        '''
        with open('output.osm', 'w') as output:
            output.write("<?xml version='1.0' encoding='UTF-8'?>\n")
            output.write('<osm>\n ')
            osm_file = open(self.getSampleFile(), 'r')

            for event, elem in ET.iterparse(osm_file, events=('start', 'end')):
                # Begin processing when the end of the element is reached
                # Include all elements, except 'osm', for processing (so that your files are identical)
                if event == 'end' and (elem.tag in ['node', 'way', 'relation', 'bounds','meta','note'] ):
                    for tag in elem.iter('tag'):
                        # Check if tag is a street name tag, set street name to street
                        if self.isStreetName(tag):
                            street = tag.attrib['v']

                            # If street name is in clean streets dict, replace
                            # dirty street with clean street value
                            if street in cleaned_streets.keys():
                                tag.attrib['v'] = cleaned_streets[street]

                        # Check if tag is a zip code tag, set zip code to 'NaN' if not valid
                        if self.isZipCode(tag):
                            zip_code = tag.attrib['v']

                            if zip_code not in self.getExpectedZip():
                                tag.attrib['v'] = 'NaN'

                    # Move the write function inside the condition, so that it only writes
                    # tags that you specify (i.e. everything apart from the root <osm> element)
                    output.write(ET.tostring(elem, encoding='utf-8'))
                    elem.clear()

            output.write('</osm>')
            osm_file.close()
class JsonFile(object):
    """Converts a cleaned XML OSM file into a JSON file of shaped node dicts."""

    def __init__(self, output_file):
        '''
        Initialize a JsonFile instance, saving all parameters as attributes
        of the instance. Takes in an XML file and returns a JSON file.
        lower: Regex matching all-lowercase tag keys (a regex)
        lower_colon: Regex matching lowercase tag keys that contain a
                     single colon (a regex)
        problemchars: Regex matching special characters that make a tag
                      key unusable as a JSON field name (a regex)
        created_tags: Attribute names that are grouped under the 'created'
                      field of the output document (a list of strings)
        output_file: Path of the XML OSM input file to convert (a string)
        '''
        self.lower = re.compile(r'^([a-z]|_)*$')
        self.lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
        self.problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
        self.created_tags = ['version', 'changeset', 'timestamp', 'user', 'uid']
        self.output_file = output_file

    def getElement(self, file_in, tags=('node', 'way', 'relation')):
        '''
        XML tag element generator.
        file_in: path of the OSM file to parse (a string)
        tags: tag elements to search for in the OSM file (a tuple of strings)
        @yield: element if it is one of the requested tag types
        Reference:
        http://stackoverflow.com/questions/3095434/inserting-newlines-in-xml-file-generated-via-xml-etree-elementtree-in-python
        '''
        context = iter(ET.iterparse(file_in, events=('start', 'end')))
        _, root = next(context)
        for event, elem in context:
            if event == 'end' and elem.tag in tags:
                yield elem
                # Clear already-processed elements so memory stays bounded.
                root.clear()

    def shapeElement(self, element):
        '''
        Takes in an XML element and shapes it into a JSON node dictionary.
        element: XML ElementTree element to shape (an ET object)
        @return: node for JSON file creation (a dictionary), or None for
                 element types other than 'node' and 'way'
        '''
        if element.tag not in ('node', 'way'):
            return None
        node = {'type': element.tag}
        # Get and store GPS coordinates as a [lat, lon] pair.
        pos = []
        if 'lat' in element.attrib and 'lon' in element.attrib:
            try:
                pos = [float(element.attrib['lat']),
                       float(element.attrib['lon'])]
            except (TypeError, ValueError):
                pass
        # Sort the remaining attributes into 'created' metadata or plain
        # top-level fields.
        created = {}
        for k, m in element.attrib.items():
            if k in ('lat', 'lon'):
                # Already captured in 'pos'.  (The original compared keys
                # against the float coordinate list -- which never matched --
                # and then popped 'lat'/'lon' afterwards; skipping here gives
                # the same final dictionary.)
                continue
            if k in self.created_tags:
                created[k] = m
            else:
                node[k] = m
        if created:
            node['created'] = created
        if pos:
            node['pos'] = pos
        # Iterate over subtags in the element, setting attribs when valid.
        for child in element:
            if child.tag == 'nd':
                # setdefault replaces the original bare try/except KeyError
                # dance with identical behaviour.
                node.setdefault('node_refs', []).append(child.attrib['ref'])
            elif child.tag == 'tag':
                if self.problemchars.search(child.attrib['k']):
                    # Keys containing problematic characters are dropped.
                    pass
                elif child.attrib['k'].startswith('addr:'):
                    key = re.sub('addr:', '', child.attrib['k']).strip()
                    if self.lower_colon.match(key):
                        # NOTE(review): 'break' stops processing *all*
                        # remaining child tags (original behaviour, kept
                        # as-is) -- 'continue' may have been intended.
                        break
                    node.setdefault('address', {})[key] = child.attrib['v']
                else:
                    # Already-clean attribute: copy through directly.
                    node[child.attrib['k']] = child.attrib['v']
        return node

    def processMap(self, pretty=False):
        '''
        Takes an XML file, maps and creates a JSON file of the same
        information, structure, and element nodes as the input XML file.
        pretty: If True, creates a human readable JSON file (a bool)
        @return: List of JSON dictionary shaped node elements (a list)
        '''
        file_in = self.output_file
        file_out = '{0}.json'.format(file_in)
        data = []
        # Create the JSON output file, shaping and mapping each XML element.
        with codecs.open(file_out, 'w') as fo:
            for element in self.getElement(file_in):
                el = self.shapeElement(element)
                if el:
                    data.append(el)
                    if pretty:
                        fo.write(json.dumps(el, indent=2) + '\n')
                    else:
                        fo.write(json.dumps(el) + '\n')
        return data
def mongoAggregate(cursor):
    '''
    Takes in a pymongo aggregate cursor object and materializes it.
    cursor: pymongo aggregate cursor (any iterable of result documents)
    @return: List of aggregation result documents (a list)
    '''
    # list() consumes the cursor directly; the original built the result via
    # a side-effecting list comprehension, which is an anti-pattern.
    return list(cursor)
if __name__ == '__main__':
    # Get OSM File, which is Brooklyn OpenStreetMap
    # https://mapzen.com/data/metro-extracts/metro/brooklyn_new-york/
    xml_original_file = 'brooklyn_new-york.osm'  # Original OSM File input name
    xml_sample_file = 'sample.osm'  # Sample OSM File output name
    xml_cleaned_file = 'output.osm'  # Cleaned OSM File output name
    sample_size = 1  # 1 == process the full file; otherwise sample the input
    # Initialize and create OSM original file and sample file
    if sample_size == 1:
        xml_sample_file = xml_original_file
    osm = OSMFile(xml_original_file, xml_sample_file, sample_size)
    if sample_size != 1:
        osm.createSampleFile()
    # Initialize and clean street type tag attributes
    print('\nInitialzing and getting street type tag attributes...')
    cleanSt = CleanStreets(xml_sample_file)
    # Audit street tag attributes and store values in unexpected_street dict;
    # returns a dict of street-type keys with street-name values
    print('\nPerforming audit on street types...')
    audit_results = cleanSt.audit(xml_sample_file)
    unexpected_streets = audit_results[0]
    unexpected_zips = audit_results[1]
    print('There are ' + str(len(unexpected_streets.values())) + ' unique unexpected streets.')
    print('Dictionary of unexpected street name types with street names: ')
    pprint.pprint(unexpected_streets)
    print('\nThere are ' + str(len(unexpected_zips.values())) + ' unique unexpected zip codes.')
    print('Dictionary of unexpected zip code types with street names: ')
    pprint.pprint(unexpected_zips)
    # Clean street values and store cleaned streets in clean_streets_dict
    print('\nCleaning street type values...')
    clean_streets_dict = cleanSt.clean(unexpected_streets)
    print('There are ' + str(len(cleanSt.getCleanStreetsDict().values())) + ' street names to be replaced.')
    print('Dictionary of dirty street keys and clean street values: ')
    pprint.pprint(clean_streets_dict)
    # Find and write clean street names to XML file, save updated XML file
    print('\nCreating new output.osm file with cleaned street types...')
    cleanSt.writeClean(clean_streets_dict)
    clean_audit_results = cleanSt.audit(xml_sample_file)
    clean_unexpected_streets = clean_audit_results[0]
    print('There are ' + str(len(clean_unexpected_streets.values())) + ' unique unexpected streets.')
    print('New audit after street names have been replaced with clean street names: ')
    pprint.pprint(clean_unexpected_streets)
    if sample_size != 1:
        print('\nDeleting XML sample file...')
        # NOTE(review): deletion intentionally disabled -- confirm before
        # re-enabling.
        #os.remove(xml_sample_file)
    # Initialize and create JSON file from cleaned XML output.osm file
    print('\nCreating new JSON file from cleaned XML file...')
    js = JsonFile(xml_cleaned_file)
    data = js.processMap()
    print('\nDeleting XML cleaned file...')
    os.remove(xml_cleaned_file)
    # Initialize and create MongoDB database from JSON document list 'data'
    print('\nCreating new MongoDB database \'brooklyn\' from cleaned JSON file...')
    client = MongoClient('mongodb://localhost:27017')
    db = client.osm_results
    db.brooklyn.insert_many(data, bypass_document_validation=True)
    del data[:]
    # Run and output MongoDB queries and results
    print('\nRunning MongoDB queries...')
    print('\nTotal number of documents: ')
    print('db.brooklyn.find().count()')
    print(str(db.brooklyn.find().count()))
    print('\nNumber of \'way\' type documents: ')
    print('db.brooklyn.find({\'type\' :\'way\'}).count()')
    print(str(db.brooklyn.find({'type' :'way'}).count()))
    print('\nNumber of \'node\' type documents: ')
    print('db.brooklyn.find({\'type\' :\'node\'}).count()')
    print(str(db.brooklyn.find({'type' :'node'}).count()))
    print('\nNumber of unique users: ')
    print('len(db.brooklyn.distinct(\'created.user\'))')
    print(str(len(db.brooklyn.distinct('created.user'))))
    print('\nTop 1 contributing user: ')
    # BUG FIX: sort descending (-1) so the *top* contributor is returned;
    # the original ascending sort returned the least active user.
    top_contributor_pipeline = [{'$group':
                                 {'_id': '$created.user',
                                  'count': {'$sum': 1}}},
                                {'$sort':
                                 {'count': -1}},
                                {'$limit': 1}]
    print('db.brooklyn.aggregate(' + str(top_contributor_pipeline) + ')')
    top_contributor = mongoAggregate(db.brooklyn.aggregate(top_contributor_pipeline))
    print(str(top_contributor[0]))
    print('\nNumber of users appearing only once (having 1 post): ')
    # Ascending sort on '_id' (the post count) is correct here: the first
    # group is the smallest count.
    unique_user_count_pipeline = [{'$group':
                                   {'_id': '$created.user',
                                    'count': {'$sum': 1}}},
                                  {'$group':
                                   {'_id': '$count',
                                    'num_users': {'$sum': 1}}},
                                  {'$sort':
                                   {'_id': 1}},
                                  {'$limit': 1}]
    print('db.brooklyn.aggregate(' + str(unique_user_count_pipeline) + ')')
    unique_user_count = mongoAggregate(db.brooklyn.aggregate(unique_user_count_pipeline))
    print(str(unique_user_count[0]))
    print('\nTop 10 appearing amenities: ')
    # BUG FIX: sort descending (-1) to get the ten *most* common amenities.
    top_10_amenities_pipeline = [{'$match':
                                  {'amenity': {'$exists': 1}}},
                                 {'$group':
                                  {'_id': '$amenity',
                                   'count': {'$sum': 1}}},
                                 {'$sort':
                                  {'count': -1}},
                                 {'$limit': 10}]
    print('db.brooklyn.aggregate(' + str(top_10_amenities_pipeline) + ')')
    top_10_amenities = mongoAggregate(db.brooklyn.aggregate(top_10_amenities_pipeline))
    print(str(top_10_amenities))
    print('\nHighest population religion: ')
    # BUG FIX: descending sort; also removed the duplicate 'amenity' key --
    # in a dict literal the second value silently overwrote the first.
    most_pop_religion_pipeline = [{'$match':
                                   {'amenity': 'place_of_worship'}},
                                  {'$group':
                                   {'_id': '$religion',
                                    'count': {'$sum': 1}}},
                                  {'$sort':
                                   {'count': -1}},
                                  {'$limit': 1}]
    print('db.brooklyn.aggregate(' + str(most_pop_religion_pipeline) + ')')
    most_pop_religion = mongoAggregate(db.brooklyn.aggregate(most_pop_religion_pipeline))
    print(str(most_pop_religion[0]))
    print('\nMost popular cuisines: ')
    # BUG FIX: descending sort; duplicate 'amenity' key removed as above.
    most_pop_cuisine_pipeline = [{'$match':
                                  {'amenity': 'restaurant'}},
                                 {'$group':
                                  {'_id': '$cuisine',
                                   'count': {'$sum': 1}}},
                                 {'$sort':
                                  {'count': -1}},
                                 {'$limit': 2}]
    print('db.brooklyn.aggregate(' + str(most_pop_cuisine_pipeline) + ')')
    most_pop_cuisine = mongoAggregate(db.brooklyn.aggregate(most_pop_cuisine_pipeline))
    print(str(most_pop_cuisine[0]))
    print('\nPostal Codes: ')
    # NOTE(review): the original dict literal repeated the
    # 'address.postcode' key, so effectively only documents whose postcode is
    # 'NaN' (flagged invalid during cleaning) were matched.  That effective
    # behaviour is kept here -- confirm whether all postcodes were intended.
    postal_codes_pipeline = [{'$match':
                              {'address.postcode': 'NaN'}},
                             {'$group':
                              {'_id': '$address.postcode',
                               'count': {'$sum': 1}}},
                             {'$sort': {'count': 1}}]
    print('db.brooklyn.aggregate(' + str(postal_codes_pipeline) + ')')
    postal_codes = mongoAggregate(db.brooklyn.aggregate(postal_codes_pipeline))
print(str(postal_codes[0])) | 2.34375 | 2 |
Wrapping/Python/vtkmodules/__init__.py | cads-build/VTK | 1 | 16258 | <reponame>cads-build/VTK
r"""
Currently, this package is experimental and may change in the future.
"""
from __future__ import absolute_import
#------------------------------------------------------------------------------
# this little trick is for static builds of VTK. In such builds, if
# the user imports this Python package in a non-statically linked Python
# interpreter i.e. not of the of the VTK-python executables, then we import the
# static components importer module.
try:
from . import vtkCommonCore
except ImportError:
from . import _vtkpythonmodules_importer
#------------------------------------------------------------------------------
| 1.671875 | 2 |
Financely/basic_app/models.py | Frostday/Financely | 8 | 16259 | <filename>Financely/basic_app/models.py
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Client(models.Model):
    """Profile record linked one-to-one with a Django auth ``User``."""
    # Deleting the linked auth user cascades to this client.
    user = models.OneToOneField(User,null=True,blank= True,on_delete=models.CASCADE)
    # Display name; nullable, so __str__ below was likely disabled to avoid
    # rendering None -- TODO confirm before re-enabling.
    name = models.CharField(max_length=100, null=True)
    # def __str__(self):
    #     return self.name
class Portfolio(models.Model):
    """A client's portfolio; Stock rows attach to it via parent_portfolio."""
    # One portfolio per client; deleting the client deletes the portfolio.
    client = models.OneToOneField(Client,on_delete=models.CASCADE,blank=True,null=True)
    # def __str__(self):
    #     return self.client.name + "'s Portfolio"
class Stock(models.Model):
    """A stock position held inside a Portfolio."""
    id = models.BigAutoField(primary_key=True)
    # Reverse accessor: portfolio.stocks
    parent_portfolio = models.ForeignKey(Portfolio,related_name="stocks",on_delete=models.CASCADE,null=True,blank=True)
    stock_symbol = models.CharField(max_length=100,null=True)
    # NOTE(review): price and sector performance are stored as text, not
    # numeric fields -- confirm whether DecimalField was intended before
    # doing arithmetic on them.
    stock_price = models.CharField(max_length=100,null=True,blank=True)
    stock_sector_performance = models.CharField(max_length=100,null=True,blank=True)
    stock_name = models.CharField(max_length=100,null=True)
    quantity = models.IntegerField(default=0,null=True,blank=True)
    # Set once when the row is created.
    date_added = models.DateTimeField(auto_now_add=True)
    # def __str__(self):
    #     return self.stock_name
| 2.546875 | 3 |
lims/models/shipping.py | razorlabs/BRIMS-backend | 1 | 16260 | from django.db import models
"""
ShipmentModels have a one to many relationship with boxes and aliquot
Aliquot and Box foreign keys to a ShipmentModel determine manifest contents
for shipping purposes (resolved in schema return for manifest view)
"""
class ShipmentModel(models.Model):
    """A shipment record; per the module note above, boxes and aliquots
    reference a shipment via foreign keys to form its manifest."""
    # SET_NULL keeps the shipment row if the carrier is deleted.
    carrier = models.ForeignKey('CarrierModel',
                                on_delete=models.SET_NULL,
                                blank=True,
                                null=True)
    shipment_number = models.CharField(max_length=255, blank=True, null=True)
    # TODO What should we do if a destination is removed?
    destination = models.ForeignKey('DestinationModel',
                                    on_delete=models.SET_NULL,
                                    blank=True,
                                    null=True)
    sent_date = models.DateTimeField(blank=True, null=True)
    received_date = models.DateTimeField(blank=True, null=True)
    notes = models.CharField(max_length=255, blank=True, null=True)
class DestinationModel(models.Model):
    """A shipping destination, identified by name."""
    name = models.CharField(max_length=255)

    def __str__(self):
        return self.name
class CarrierModel(models.Model):
    """A shipping carrier, identified by name."""
    name = models.CharField(max_length=255)

    def __str__(self):
        return self.name
| 2.53125 | 3 |
part-2/2-iterators/Example-consuming_iterators_manually.py | boconlonton/python-deep-dive | 0 | 16261 | """
Consuming Iterator manually
"""
from collections import namedtuple
def cast(data_type, value):
    """Cast *value* to the Python type named by *data_type*.

    data_type: one of 'DOUBLE', 'STRING' or 'INT' (a string)
    value: the raw value to convert
    @return: the converted value
    @raise ValueError: if *data_type* is not a recognized type name.
        (The original implementation silently fell through and returned
        None here, which hid malformed type-declaration rows.)
    """
    converters = {'DOUBLE': float, 'STRING': str, 'INT': int}
    try:
        converter = converters[data_type]
    except KeyError:
        raise ValueError('Unknown data type: {0!r}'.format(data_type))
    return converter(value)


def cast_row(data_types1, data_row):
    """Cast every cell of *data_row* using the matching entry of *data_types1*.

    data_types1: per-column type names, parallel to data_row (a sequence)
    data_row: raw cell values for one CSV row (a sequence)
    @return: list of converted cell values
    """
    return [cast(data_type, value)
            for data_type, value in zip(data_types1, data_row)]
# cars = []
# with open('cars.csv') as file:
# row_index = 0
# for line in file:
# if row_index == 0:
# # Header row
# headers = line.strip('\n').split(';')
# Car = namedtuple('Car', headers)
# elif row_index == 1:
# data_types = line.strip('\n').split(';')
# # print('types', data_types)
# else:
# # data row
# data = line.strip('\n').split(';')
# data = cast_row(data_types, data)
# car = Car(*data)
# cars.append(car)
# # print(data)
# row_index += 1
# with open('cars.csv') as file:
# file_iter = iter(file)
# headers = next(file_iter).strip('\n').split(';')
# Car = namedtuple('Car', headers)
# data_types = next(file_iter).strip('\n').split(';')
# for line in file_iter:
# data = line.strip('\n').split(';')
# data = cast_row(data_types, data)
# car = Car(*data)
# cars.append(car)
# Parse cars.csv in a single pass: row 1 is the header, row 2 declares each
# column's type, and the remaining rows are data cast via cast_row.
with open('cars.csv') as file:
    file_iter = iter(file)
    headers = next(file_iter).strip('\n').split(';')
    # Named-tuple class whose fields are the CSV header names.
    Car = namedtuple('Car', headers)
    data_types = next(file_iter).strip('\n').split(';')
    # Every remaining line becomes a Car with properly typed values.
    cars = [Car(*cast_row(
        data_types,
        line.strip('\n').split(';')
    ))
        for line in file_iter]
print(cars)
| 3.71875 | 4 |
instance_server/services/startpage.py | Geierhaas/developer-observatory | 4 | 16262 | <filename>instance_server/services/startpage.py
#! Copyright (C) 2017 <NAME>
#!
#! This software may be modified and distributed under the terms
#! of the MIT license. See the LICENSE file for details.
from flask import Flask, redirect, request, make_response
from shutil import copyfile
import json
import requests
import os.path
import uuid
import urllib
app = Flask(__name__)
# '%landingURL%' is presumably a placeholder substituted at deploy time --
# TODO confirm; the user id and token are appended when fetching (see init).
remote_task_file = "%landingURL%/get_ipynb/"
# Where the fetched notebook is stored on this instance.
target_file = "/home/jupyter/tasks.ipynb"
# Local cache of the user's id/token (used e.g. when cookies are disabled).
user_data_file = "/home/jupyter/.instanceInfo"
@app.route('/')
def init():
    """Entry point hit from the landing page: fetches this user's notebook
    (once per instance), stores the user's id/token, and redirects into
    Jupyter.  Expects 'userId' and 'token' query parameters.
    """
    user_id = request.args.get('userId')
    token = request.args.get('token')
    user_data = {}
    user_data["user_id"] = user_id
    user_data["token"] = token
    # Check if a task file already exists on this instance
    if not os.path.isfile(target_file):
        # If not, then request data for this user from the landing page.
        # NOTE(review): urllib.request.URLopener is deprecated -- consider
        # urllib.request.urlretrieve instead.
        task_file = urllib.request.URLopener()
        task_file.retrieve(remote_task_file+user_id+"/"+token, target_file)
    # Prepare the response to the client -> redirect + set cookies for uid and token
    response = make_response(redirect('/nb/notebooks/tasks.ipynb'))
    response.set_cookie('userId', user_id)
    response.set_cookie('token', token)
    # Check if we already stored user data on this instance
    if not os.path.isfile(user_data_file):
        with open(user_data_file, "w") as f:
            # Writing the data allows us to retrieve it anytime, e.g. if the
            # user has cookies disabled.
            json.dump(user_data, f)
    return response
if __name__ == '__main__':
    # app.debug = True
    # Bound to localhost only; presumably fronted by a reverse proxy --
    # TODO confirm.
    app.run(host='127.0.0.1', port=60000)
| 2.46875 | 2 |
utils/linalg.py | cimat-ris/TrajectoryInference | 6 | 16263 | import numpy as np
import math
import logging
from termcolor import colored
# Check a matrix for: negative eigenvalues, asymmetry and negative diagonal values
def positive_definite(M, epsilon=0.000001, verbose=False):
    """Return 1 if the symmetrized matrix is positive definite, else 0.

    M: square matrix (array-like); it is symmetrized as (M + M^T) / 2
       before testing, so slightly asymmetric inputs are tolerated.
    epsilon: eigenvalues must exceed this threshold to count as positive.
    verbose: when True, log the reason for rejection.
    """
    # Symmetrization: guarantees real eigenvalues.
    Mt = np.transpose(M)
    M = (M + Mt) / 2
    # eigvalsh is specialized for symmetric matrices and always returns a
    # real array, avoiding the complex results np.linalg.eigvals can
    # produce numerically (which would break the <= comparison).
    eigenvalues = np.linalg.eigvalsh(M)
    if np.any(eigenvalues <= epsilon):
        if verbose:
            logging.error("Negative eigenvalues")
        return 0
    # Defensive check kept from the original implementation; a positive
    # definite matrix cannot have negative diagonal entries anyway.
    if np.any(np.diag(M) < 0):
        if verbose:
            logging.error("Negative value in diagonal")
        return 0
    return 1
| 3.28125 | 3 |
problemsets/Codeforces/Python/A1020.py | juarezpaulino/coderemite | 0 | 16264 | """
*
* Author: <NAME>(coderemite)
* Email: <EMAIL>
*
"""
# Codeforces 1020A-style query solver.  First line: n, h (unused), a, b, k.
# Each of the k queries moves from (p, q) to (u, v); switching p -> u is
# only possible while the second coordinate lies inside [a, b].
I=lambda:map(int,input().split())  # read one line of space-separated ints
f=abs
n,_,a,b,k=I()
while k:
    p,q,u,v=I()
    # Candidate crossing levels: the range endpoints a and b, plus q and v
    # themselves when they already lie inside [a, b].
    P=[a,b]
    if a<=q<=b:P+=[q]
    if a<=v<=b:P+=[v]
    # Same first coordinate (p == u): answer is simply |q - v|.  Otherwise:
    # choose the crossing level x minimising |q-x| + |v-x|, plus |p-u| steps.
    print([min(f(q-x)+f(v-x)for x in P)+f(p-u),f(q-v)][p==u])
k-=1 | 2.71875 | 3 |
get_tweet.py | Na27i/tweet_generator | 0 | 16265 | import json
import sys
import pandas

# With no CLI argument the 'main' settings module is used; any argument
# switches to the 'sub' settings module (presumably a secondary account --
# TODO confirm).
args = sys.argv
if len(args) == 1 :
    import main as settings
else :
    import sub as settings
from requests_oauthlib import OAuth1Session

# Twitter API credentials taken from the selected settings module.
CK = settings.CONSUMER_KEY
CS = settings.CONSUMER_SECRET
AT = settings.ACCESS_TOKEN
ATS = settings.ACCESS_TOKEN_SECRET
# OAuth1 session used for all API requests below.
twitter = OAuth1Session(CK, CS, AT, ATS)
# Collect tweet texts from the authenticated user's timeline.
tweetlist = []
url = "https://api.twitter.com/1.1/statuses/user_timeline.json"
params = {"count": 200}
# BUG FIX: the original read "for i range(5):" (missing 'in'), which is a
# SyntaxError.  The index is unused, so it is named '_'.
# NOTE(review): without a max_id cursor every iteration fetches the same
# 200 newest tweets -- confirm whether pagination was intended.
for _ in range(5):
    res = twitter.get(url, params=params)
    if res.status_code == 200:
        timelines = json.loads(res.text)
        for tweet in timelines:
            tweetlist.append(tweet["text"])
    else:
        # Message is Japanese for "fetch failed (<status code>)".
        print("取得失敗(%d)" % res.status_code)
datafile = pandas.DataFrame(tweetlist)
datafile.to_csv("tweetlist.csv", encoding='utf_8_sig') | 2.953125 | 3 |
idact/detail/config/validation/validate_scratch.py | intdata-bsc/idact | 5 | 16266 | """This module contains a function for validating a scratch config entry."""
import re
from idact.detail.config.validation.validation_error_message import \
validation_error_message
VALID_SCRATCH_DESCRIPTION = 'Non-empty absolute path, or environment' \
' variable name.'
VALID_SCRATCH_REGEX = r"^(/.*)|(\$[A-Za-z][A-Za-z0-9]*)$" # noqa, pylint: disable=line-too-long
__COMPILED = re.compile(pattern=VALID_SCRATCH_REGEX)
def validate_scratch(scratch) -> str:
"""Returns the parameter if it's a valid scratch config entry, otherwise
raises an exception.
Key path is optional, non-empty string.
:param scratch: Object to validate.
:raises TypeError: On wrong type.
:raises ValueError: On regex mismatch.
"""
if not isinstance(scratch, str):
raise TypeError(validation_error_message(
label='scratch',
value=scratch,
expected=VALID_SCRATCH_DESCRIPTION,
regex=VALID_SCRATCH_REGEX))
if not __COMPILED.match(scratch):
raise ValueError(validation_error_message(
label='scratch',
value=scratch,
expected=VALID_SCRATCH_DESCRIPTION,
regex=VALID_SCRATCH_REGEX))
return scratch
| 2.875 | 3 |
paramak/parametric_components/blanket_fp.py | zmarkan/paramak | 0 | 16267 |
import warnings
from typing import Callable, List, Optional, Union
import mpmath
import numpy as np
import paramak
import sympy as sp
from paramak import RotateMixedShape, diff_between_angles
from paramak.parametric_components.tokamak_plasma_plasmaboundaries import \
PlasmaBoundaries
from scipy.interpolate import interp1d
class BlanketFP(RotateMixedShape):
    """A blanket volume created from plasma parameters.

    Args:
        thickness (float or [float] or callable or [(float), (float)]):
            the thickness of the blanket (cm). If the thickness is a float then
            this produces a blanket of constant thickness. If the thickness is
            a tuple of floats, blanket thickness will vary linearly between the
            two values. If thickness is callable, then the blanket thickness
            will be a function of poloidal angle (in degrees). If thickness is
            a list of two lists (thicknesses and angles) then these will be
            used together with linear interpolation.
        start_angle: the angle in degrees to start the blanket, measured anti
            clockwise from 3 o'clock.
        stop_angle: the angle in degrees to stop the blanket, measured anti
            clockwise from 3 o'clock.
        plasma: If not None, the parameters of the plasma Object will be used.
        minor_radius: the minor radius of the plasma (cm).
        major_radius: the major radius of the plasma (cm).
        triangularity: the triangularity of the plasma.
        elongation: the elongation of the plasma.
        vertical_displacement: the vertical_displacement of the plasma (cm).
        offset_from_plasma: the distance between the plasma and the blanket
            (cm). If float, constant offset. If list of floats, offset will
            vary linearly between the values. If callable, offset will be a
            function of poloidal angle (in degrees). If a list of two lists
            (angles and offsets) then these will be used together with linear
            interpolation.
        num_points: number of points that will describe the shape.
    """

    def __init__(self,
                 thickness,
                 start_angle: float,
                 stop_angle: float,
                 plasma: Optional[Union[paramak.Plasma,
                                        paramak.PlasmaBoundaries,
                                        paramak.PlasmaFromPoints]] = None,
                 minor_radius: Optional[float] = 150.0,
                 major_radius: Optional[float] = 450.0,
                 triangularity: Optional[float] = 0.55,
                 elongation: Optional[float] = 2.0,
                 vertical_displacement: Optional[float] = 0.0,
                 offset_from_plasma: Optional[float] = 0.0,
                 num_points: Optional[int] = 50,
                 **kwargs):
        super().__init__(**kwargs)
        self.thickness = thickness
        self.start_angle, self.stop_angle = None, None
        self.start_angle = start_angle
        self.stop_angle = stop_angle
        self.plasma = plasma
        self.vertical_displacement = vertical_displacement
        if plasma is None:
            self.minor_radius = minor_radius
            self.major_radius = major_radius
            self.triangularity = triangularity
            self.elongation = elongation
        else:  # if plasma object is given, use its parameters
            self.minor_radius = plasma.minor_radius
            self.major_radius = plasma.major_radius
            self.triangularity = plasma.triangularity
            self.elongation = plasma.elongation
        self.offset_from_plasma = offset_from_plasma
        self.num_points = num_points

    @property
    def start_angle(self):
        # Poloidal angle (degrees) at which the blanket begins.
        return self._start_angle

    @start_angle.setter
    def start_angle(self, value):
        self._start_angle = value

    @property
    def stop_angle(self):
        # Poloidal angle (degrees) at which the blanket ends.
        return self._stop_angle

    @stop_angle.setter
    def stop_angle(self, value):
        self._stop_angle = value

    @property
    def minor_radius(self):
        return self._minor_radius

    @minor_radius.setter
    def minor_radius(self, minor_radius):
        self._minor_radius = minor_radius

    @property
    def thickness(self):
        return self._thickness

    @thickness.setter
    def thickness(self, thickness):
        self._thickness = thickness

    @property
    def inner_points(self):
        # NOTE: recomputed (via find_points) on every access.
        self.find_points()
        return self._inner_points

    @inner_points.setter
    def inner_points(self, value):
        self._inner_points = value

    @property
    def outer_points(self):
        # NOTE: recomputed (via find_points) on every access.
        self.find_points()
        return self._outer_points

    @outer_points.setter
    def outer_points(self, value):
        self._outer_points = value

    def make_callable(self, attribute):
        """This function transforms an attribute (thickness or offset) into a
        callable function of theta (poloidal angle in degrees).
        """
        # if the attribute is a list, create an interpolated object of the
        # values
        if isinstance(attribute, (tuple, list)):
            if isinstance(attribute[0], (tuple, list)) and \
                    isinstance(attribute[1], (tuple, list)) and \
                    len(attribute) == 2:
                # attribute is a list of 2 lists: (angles, values)
                if len(attribute[0]) != len(attribute[1]):
                    raise ValueError('The length of angles list must equal \
                        the length of values list')
                list_of_angles = np.array(attribute[0])
                offset_values = attribute[1]
            else:
                # no list of angles is given: spread the values evenly over
                # [start_angle, stop_angle]
                offset_values = attribute
                list_of_angles = np.linspace(
                    self.start_angle,
                    self.stop_angle,
                    len(offset_values),
                    endpoint=True)
            interpolated_values = interp1d(list_of_angles, offset_values)

        def fun(theta):
            # Dispatch on the original attribute type; the interpolator is
            # only referenced (and only defined) in the list/tuple case.
            if callable(attribute):
                return attribute(theta)
            elif isinstance(attribute, (tuple, list)):
                return interpolated_values(theta)
            else:
                return attribute
        return fun

    def find_points(self, angles=None):
        """Compute the blanket contour: an inner curve offset from the plasma
        boundary plus an outer curve displaced further by the (possibly
        angle-dependent) thickness, joined into self.points.

        angles: optional explicit array of poloidal angles (degrees); when
            None, num_points angles spanning [start_angle, stop_angle] are
            used.
        """
        self._overlapping_shape = False
        # create array of angles theta
        if angles is None:
            thetas = np.linspace(
                self.start_angle,
                self.stop_angle,
                num=self.num_points,
                endpoint=True,
            )
        else:
            thetas = angles

        # create inner points
        inner_offset = self.make_callable(self.offset_from_plasma)
        inner_points = self.create_offset_points(thetas, inner_offset)
        inner_points[-1][2] = "straight"
        self.inner_points = inner_points

        # create outer points (inner offset plus the blanket thickness);
        # angles are flipped so the two curves join into one closed loop
        thickness = self.make_callable(self.thickness)

        def outer_offset(theta):
            return inner_offset(theta) + thickness(theta)
        outer_points = self.create_offset_points(np.flip(thetas), outer_offset)
        outer_points[-1][2] = "straight"
        self.outer_points = outer_points

        # assemble
        points = inner_points + outer_points
        if self._overlapping_shape:
            msg = ("BlanketFP: Some points with negative R coordinate have "
                   "been ignored.")
            warnings.warn(msg)
        self.points = points
        return points

    def create_offset_points(self, thetas, offset):
        """generates a list of points following parametric equations with an
        offset
        Args:
            thetas (np.array): the angles in degrees.
            offset (callable): offset value (cm). offset=0 will follow the
                parametric equations.
        Returns:
            list: list of points [[R1, Z1, connection1], [R2, Z2, connection2],
            ...]
        """
        # create sympy objects and derivatives of the boundary curve,
        # used below to compute the outward normal at each angle
        theta_sp = sp.Symbol("theta")
        R_sp, Z_sp = self.distribution(theta_sp, pkg=sp)
        R_derivative = sp.diff(R_sp, theta_sp)
        Z_derivative = sp.diff(Z_sp, theta_sp)
        points = []
        for theta in thetas:
            # get local value of derivatives
            val_R_derivative = float(R_derivative.subs("theta", theta))
            val_Z_derivative = float(Z_derivative.subs("theta", theta))
            # get normal vector components (perpendicular to the tangent)
            nx = val_Z_derivative
            ny = -val_R_derivative
            # normalise normal vector
            normal_vector_norm = (nx ** 2 + ny ** 2) ** 0.5
            nx /= normal_vector_norm
            ny /= normal_vector_norm
            # calculate outer points by stepping along the normal
            val_R_outer = self.distribution(theta)[0] + offset(theta) * nx
            val_Z_outer = self.distribution(theta)[1] + offset(theta) * ny
            if float(val_R_outer) > 0:
                points.append(
                    [float(val_R_outer), float(val_Z_outer), "spline"])
            else:
                # points behind the rotation axis are dropped; flag so
                # find_points can emit a warning
                self._overlapping_shape = True
        return points

    def distribution(self, theta, pkg=np):
        """Plasma distribution theta in degrees
        Args:
            theta (float or np.array or sp.Symbol): the angle(s) in degrees.
            pkg (module, optional): Module to use in the function. If sp, a
                sympy object will be returned. If np, a np.array or a float
                will be returned. Defaults to np.
        Returns:
            (float, float) or (sympy.Add, sympy.Mul) or
            (numpy.array, numpy.array): The R and Z coordinates of the
            point with angle theta
        """
        if pkg == np:
            theta = np.radians(theta)
        else:
            theta = mpmath.radians(theta)
        # Standard D-shaped plasma boundary parameterisation using
        # triangularity and elongation.
        R = self.major_radius + self.minor_radius * pkg.cos(
            theta + self.triangularity * pkg.sin(theta)
        )
        Z = (
            self.elongation * self.minor_radius * pkg.sin(theta)
            + self.vertical_displacement
        )
        return R, Z
| 2.609375 | 3 |
3.7.1/solution.py | luxnlex/stepic-python | 1 | 16268 | s=str(input())
a=[]
for i in range(len(s)):
si=s[i]
a.append(si)
b=[]
n=str(input())
for j in range(len(n)):
sj=n[j]
b.append(sj)
p={}
for pi in range(len(s)):
key=s[pi]
p[key]=0
j1=0
for i in range(0,len(a)):
key=a[i]
while j1<len(b):
bj=b[0]
if key in p:
p[key]=bj
b.remove(bj)
break
c=[]
si=str(input())
for si1 in range(0,len(si)):
ci=si[si1]
c.append(ci)
co=[]
for ci in range(0,len(c)):
if c[ci] in p:
cco=c[ci]
pco=p[cco]
co.append(pco)
d=[]
di=str(input())
for sj1 in range(0,len(di)):
dj=di[sj1]
d.append(dj)
do=[]
for di in range(0,len(d)):
for key in p:
pkey=key
if p.get(key) == d[di]:
ddo=pkey
do.append(ddo)
for i in range (0,len(co)):
print(co[i],end='')
print()
for j in range (0,len(do)):
print(do[j],end='') | 2.984375 | 3 |
airbyte-integrations/connectors/source-plaid/source_plaid/source.py | OTRI-Unipd/OTRI-airbyte | 2 | 16269 | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import datetime
import json
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple, Union
import plaid
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from plaid.api import plaid_api
from plaid.model.accounts_balance_get_request import AccountsBalanceGetRequest
from plaid.model.transactions_get_request import TransactionsGetRequest
# Maps the connector's 'plaid_env' config value to the Plaid SDK host.
SPEC_ENV_TO_PLAID_ENV = {
    "production": plaid.Environment.Production,
    "development": plaid.Environment.Development,
    "sandbox": plaid.Environment.Sandbox,
}
class PlaidStream(Stream):
    """Base class for Plaid streams: builds the Plaid API client from the
    connector config and stores the item access token."""

    def __init__(self, config: Mapping[str, Any]):
        plaid_config = plaid.Configuration(
            host=SPEC_ENV_TO_PLAID_ENV[config["plaid_env"]], api_key={"clientId": config["client_id"], "secret": config["api_key"]}
        )
        api_client = plaid.ApiClient(plaid_config)
        self.client = plaid_api.PlaidApi(api_client)
        self.access_token = config["access_token"]
class BalanceStream(PlaidStream):
    """Stream of current account balances, one record per account,
    keyed by account_id."""

    @property
    def name(self):
        return "balance"

    @property
    def primary_key(self) -> Optional[Union[str, List[str], List[List[str]]]]:
        return "account_id"

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Yield one balances dict per account, tagged with its account_id."""
        balance_response = self.client.accounts_balance_get(AccountsBalanceGetRequest(access_token=self.access_token))
        for balance in balance_response["accounts"]:
            message_dict = balance["balances"].to_dict()
            message_dict["account_id"] = balance["account_id"]
            yield message_dict
class IncrementalTransactionStream(PlaidStream):
    """Incremental stream of transactions, cursored on the transaction date."""

    @property
    def primary_key(self) -> Optional[Union[str, List[str], List[List[str]]]]:
        return "transaction_id"

    @property
    def name(self):
        return "transaction"

    @property
    def source_defined_cursor(self) -> bool:
        # The connector (not the user) chooses the cursor field below.
        return True

    @property
    def cursor_field(self) -> Union[str, List[str]]:
        return "date"

    def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]):
        # Records are yielded sorted ascending by date, so the latest record
        # carries the newest cursor value.
        return {"date": latest_record.get("date")}

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Yield transactions from the saved cursor date up to today,
        sorted ascending by date."""
        stream_state = stream_state or {}
        date = stream_state.get("date")
        if not date:
            # No saved state: start from the Unix epoch.
            # NOTE(review): fromtimestamp(0) is the epoch in *local* time, so
            # the exact start date is platform-dependent -- confirm.
            date = datetime.date.fromtimestamp(0)
        else:
            date = datetime.date.fromisoformat(date)
        if date >= datetime.datetime.utcnow().date():
            # Cursor is already at (or past) today; nothing new to fetch.
            return
        transaction_response = self.client.transactions_get(
            TransactionsGetRequest(access_token=self.access_token, start_date=date, end_date=datetime.datetime.utcnow().date())
        )
        # NOTE(review): only a single response page is read here -- confirm
        # whether Plaid pagination (count/offset) should be handled.
        yield from map(lambda x: x.to_dict(), sorted(transaction_response["transactions"], key=lambda t: t["date"]))
class SourcePlaid(AbstractSource):
    """Airbyte source connector for the Plaid API."""

    def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Optional[Any]]:
        """Probe the configured credentials by requesting account balances.

        Returns (True, None) on success; (False, details) on any failure.
        """
        try:
            plaid_config = plaid.Configuration(
                host=SPEC_ENV_TO_PLAID_ENV[config["plaid_env"]], api_key={"clientId": config["client_id"], "secret": config["api_key"]}
            )
            api_client = plaid.ApiClient(plaid_config)
            client = plaid_api.PlaidApi(api_client)
            try:
                request = AccountsBalanceGetRequest(access_token=config["access_token"])
                client.accounts_balance_get(request)
                return True, None
            except plaid.ApiException as e:
                # The API rejected the call: surface Plaid's error payload.
                response = json.loads(e.body)
                return False, response
        except Exception as error:
            return False, error

    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
        """Return the streams this source exposes."""
        return [BalanceStream(config), IncrementalTransactionStream(config)]
| 2.046875 | 2 |
demo/demo.py | taewhankim/DeepHRnet | 0 | 16270 | <reponame>taewhankim/DeepHRnet
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import os
import shutil
from PIL import Image
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision
import cv2
import numpy as np
import time
import math
import _init_paths
import models
from config import cfg
from config import update_config
from core.function import get_final_preds
from utils.transforms import get_affine_transform
# COCO keypoint index -> human-readable joint name (17 joints).
COCO_KEYPOINT_INDEXES = {
    0: 'nose',
    1: 'left_eye',
    2: 'right_eye',
    3: 'left_ear',
    4: 'right_ear',
    5: 'left_shoulder',
    6: 'right_shoulder',
    7: 'left_elbow',
    8: 'right_elbow',
    9: 'left_wrist',
    10: 'right_wrist',
    11: 'left_hip',
    12: 'right_hip',
    13: 'left_knee',
    14: 'right_knee',
    15: 'left_ankle',
    16: 'right_ankle'
}
# COCO detection class labels in torchvision's label-index order
# ('N/A' entries are gaps in the original COCO category IDs).
COCO_INSTANCE_CATEGORY_NAMES = [
    '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
    'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
    'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
    'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
    'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
    'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
    'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
    'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
    'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
# Limbs drawn by draw_pose, as pairs of keypoint indices.
# Only the arm/shoulder limbs are kept (shoulders, elbows, wrists).
SKELETON = [
    [5, 7], [7, 9],[5, 6],[6, 8], [8, 10]
]
## edited: the full-body COCO skeleton below was commented out in favour of
## the arms-only skeleton above.
# SKELETON = [
#     [1, 3], [1, 0], [2, 4], [2, 0], [0, 5], [0, 6], [5, 7], [7, 9], [6, 8], [8, 10], [5, 11], [6, 12], [11, 12],
#     [11, 13], [13, 15], [12, 14], [14, 16]
#]
# One BGR color per limb (indexed in parallel with SKELETON).
CocoColors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
# Number of COCO keypoints produced by the pose model.
NUM_KPTS = 17
# Device used for both the detector and the pose model.
CTX = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def draw_pose(keypoints, img):
    """Draw the configured skeleton limbs and their endpoint joints on img.

    :param keypoints: per-joint (x, y) coordinates in COCO keypoint order
    :param img: BGR image, modified in place
    """
    for limb_index, (joint_a, joint_b) in enumerate(SKELETON):
        x_a, y_a = keypoints[joint_a][0], keypoints[joint_a][1]
        x_b, y_b = keypoints[joint_b][0], keypoints[joint_b][1]
        color = CocoColors[limb_index]
        cv2.circle(img, (int(x_a), int(y_a)), 10, color, -1)
        cv2.circle(img, (int(x_b), int(y_b)), 10, color, -1)
        cv2.line(img, (int(x_a), int(y_a)), (int(x_b), int(y_b)), color, 7)
def draw_bbox(box, img):
    """Draw a green detection bounding box on img (in place).

    :param box: two (x, y) corner points of the box
    :param img: BGR image, modified in place
    """
    top_left, bottom_right = box
    cv2.rectangle(img, top_left, bottom_right, color=(0, 255, 0), thickness=3)
def get_person_detection_boxes(model, img, threshold=0.5):
    """Run the object detector and return the boxes classified as 'person'.

    :param model: torchvision detection model (e.g. Faster R-CNN)
    :param img: list of image tensors as expected by the model
    :param threshold: minimum detection score for a box to be kept
    :return: list of boxes, each as [(x0, y0), (x1, y1)]; empty if nothing
        scores above the threshold
    """
    pred = model(img)
    pred_classes = [COCO_INSTANCE_CATEGORY_NAMES[i]
                    for i in list(pred[0]['labels'].cpu().numpy())]  # class names
    pred_boxes = [[(int(i[0]), int(i[1])), (int(i[2]), int(i[3]))]
                  for i in list(pred[0]['boxes'].detach().cpu().numpy())]  # corner points
    pred_score = list(pred[0]['scores'].detach().cpu().numpy())
    # Indices of predictions above threshold. Using enumerate fixes two bugs
    # of the old code: list.index() returned the *first* occurrence of a
    # duplicated score, and a top score exactly equal to the threshold passed
    # the max() guard but then crashed indexing an empty list.
    keep = [i for i, score in enumerate(pred_score) if score > threshold]
    if not keep:
        return []
    last_kept = keep[-1]
    pred_boxes = pred_boxes[:last_kept + 1]
    pred_classes = pred_classes[:last_kept + 1]
    person_boxes = [box for idx, box in enumerate(pred_boxes)
                    if pred_classes[idx] == 'person']
    return person_boxes
def get_pose_estimation_prediction(pose_model, image, center, scale):
    """Warp the detected person region to the model input size and run the
    pose network on it.

    :param pose_model: pose network in eval mode
    :param image: full input image (color order per cfg.DATASET.COLOR_RGB)
    :param center: box center from box_to_center_scale
    :param scale: box scale from box_to_center_scale
    :return: predicted keypoint coordinates mapped back to image space
    """
    rotation = 0
    # Affine-warp the person box to the fixed model input size.
    trans = get_affine_transform(center, scale, rotation, cfg.MODEL.IMAGE_SIZE)
    warped = cv2.warpAffine(
        image,
        trans,
        (int(cfg.MODEL.IMAGE_SIZE[0]), int(cfg.MODEL.IMAGE_SIZE[1])),
        flags=cv2.INTER_LINEAR)
    # ImageNet normalisation, matching the pose network's training setup.
    normalize = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    batch = normalize(warped).unsqueeze(0)
    # switch to evaluate mode
    pose_model.eval()
    with torch.no_grad():
        # compute output heatmap and decode it to coordinates
        heatmaps = pose_model(batch)
        preds, _ = get_final_preds(
            cfg,
            heatmaps.clone().cpu().numpy(),
            np.asarray([center]),
            np.asarray([scale]))
    return preds
def box_to_center_scale(box, model_image_width, model_image_height):
    """Convert a detection box into the (center, scale) pair required for the
    pose transformation.

    Parameters
    ----------
    box : list of tuple
        Two (x, y) corner points of the box.
    model_image_width : int
    model_image_height : int

    Returns
    -------
    (numpy array, numpy array)
        Coordinates of the box center and the box scale (in units of 200 px,
        inflated by 25%).
    """
    (x0, y0), (x1, y1) = box[0], box[1]
    box_width = x1 - x0
    box_height = y1 - y0
    center = np.array([x0 + box_width * 0.5, y0 + box_height * 0.5],
                      dtype=np.float32)
    aspect_ratio = model_image_width * 1.0 / model_image_height
    pixel_std = 200
    # Pad the shorter side so the box matches the model's aspect ratio.
    if box_width > aspect_ratio * box_height:
        box_height = box_width * 1.0 / aspect_ratio
    elif box_width < aspect_ratio * box_height:
        box_width = box_height * aspect_ratio
    scale = np.array(
        [box_width * 1.0 / pixel_std, box_height * 1.0 / pixel_std],
        dtype=np.float32)
    if center[0] != -1:
        scale = scale * 1.25
    return center, scale
def parse_args():
    """Parse the demo's command-line arguments.

    Also fills in the extra empty-string attributes (modelDir, logDir, ...)
    that the shared config codebase (update_config) expects to find on args.
    """
    parser = argparse.ArgumentParser(description='Train keypoints network')
    # general
    parser.add_argument('--cfg', type=str, default='./inference-config.yaml')
    parser.add_argument('--video', type=str)
    parser.add_argument('--webcam', action='store_true')
    # parser.add_argument('--image', type=str)
    parser.add_argument('--folder', type=str)
    parser.add_argument('--write', action='store_true')
    parser.add_argument('--showFps', action='store_true')
    parser.add_argument('--outputDir', type=str, default='/output/', help='output path')
    parser.add_argument('opts',
                        help='Modify config options using the command-line',
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # args expected by supporting codebase
    for extra_attr in ('modelDir', 'logDir', 'dataDir', 'prevModelDir'):
        setattr(args, extra_attr, '')
    return args
def getAngle(a, b, c):
    """Return the absolute angle ABC (at vertex b) in degrees, in [0, 180].

    :param a: (x, y) of the first point
    :param b: (x, y) of the vertex
    :param c: (x, y) of the last point
    """
    raw = math.degrees(
        math.atan2(c[1] - b[1], c[0] - b[0]) - math.atan2(a[1] - b[1], a[0] - b[0])
    )
    magnitude = abs(raw)
    # Fold reflex angles back into [0, 180].
    return 360 - magnitude if magnitude >= 180 else magnitude
def main():
    """Run person detection + pose estimation over a video/webcam stream or an
    image folder, draw the detected poses, save annotated frames, and export
    per-frame arm keypoint coordinates and joint angles to a CSV file under
    args.outputDir.
    """
    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
    args = parse_args()
    update_config(cfg, args)
    # Person detector (Faster R-CNN pretrained on COCO).
    box_model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
    box_model.to(CTX)
    box_model.eval()
    pose_model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(
        cfg, is_train=False
    )
    if cfg.TEST.MODEL_FILE:
        print('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        pose_model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    else:
        print('expected model defined in config at TEST.MODEL_FILE')
    pose_model = torch.nn.DataParallel(pose_model, device_ids=cfg.GPUS)
    pose_model.to(CTX)
    pose_model.eval()
    # Loading a video, the webcam, or an image folder.
    if args.webcam:
        vidcap = cv2.VideoCapture(-1)
    elif args.video:
        vidcap = cv2.VideoCapture(args.video)
    elif args.folder:
        image_list = os.listdir(args.folder)
    else:
        print('please use --video or --webcam or --image to define the input.')
        return
    csv_output_rows = []
    c = 0
    if args.webcam or args.video:
        # BUGFIX: compute the clip name once, outside the loop; the old
        # per-frame os.path.basename(args.video) crashed for --webcam,
        # where args.video is None.
        video_name = os.path.splitext(os.path.basename(args.video))[0] if args.video else 'webcam'
        if args.write:
            save_path = '/mnt/dms/prac/output.avi'
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            out = cv2.VideoWriter(save_path, fourcc, 24.0, (int(vidcap.get(3)), int(vidcap.get(4))))
        while True:
            ret, image_bgr = vidcap.read()
            if ret:
                last_time = time.time()
                image = image_bgr[:, :, [2, 1, 0]]
                input = []
                img = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
                img_tensor = torch.from_numpy(img / 255.).permute(2, 0, 1).float().to(CTX)
                input.append(img_tensor)
                # object detection box
                pred_boxes = get_person_detection_boxes(box_model, input, threshold=0.5)
                pred_boxes = [pred_boxes[0]]  # keep only the top-scoring person
                for box in pred_boxes:
                    cv2.rectangle(image_bgr, box[0], box[1], color=(0, 255, 0),
                                  thickness=3)
                new_csv_row = []
                # pose estimation
                if len(pred_boxes) >= 1:
                    for box in pred_boxes:
                        center, scale = box_to_center_scale(box, cfg.MODEL.IMAGE_SIZE[0], cfg.MODEL.IMAGE_SIZE[1])
                        image_pose = image.copy() if cfg.DATASET.COLOR_RGB else image_bgr.copy()
                        pose_preds = get_pose_estimation_prediction(pose_model, image_pose, center, scale)
                        if len(pose_preds) >= 1:
                            for kpt in pose_preds:
                                draw_pose(kpt, image_bgr)  # draw the poses
                                # keypoints 5..10: shoulders, elbows, wrists
                                for coord in kpt[5:11]:
                                    x_coord, y_coord = int(coord[0]), int(coord[1])
                                    new_csv_row.extend([x_coord, y_coord])
                            # Joint angles between consecutive arm keypoints.
                            new_coord = list(zip(new_csv_row[0::2], new_csv_row[1::2]))
                            ang1 = new_coord[4::-2]
                            ang2 = [new_coord[2], new_coord[0], new_coord[1]]
                            ang3 = [new_coord[0], new_coord[1], new_coord[3]]
                            ang4 = [new_coord[1], new_coord[3], new_coord[5]]
                            angles = [ang1, ang2, ang3, ang4]
                            for i in angles:
                                new_csv_row.append(getAngle(i[0], i[1], i[2]))
                if args.showFps:
                    fps = 1 / (time.time() - last_time)
                    cv2.putText(image_bgr, 'fps: ' + "%.2f" % (fps), (25, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.2,
                                (0, 255, 0), 2)
                img_file_name = video_name+'_frame_'+str(c)+'.jpg'
                new_csv_row.insert(0, img_file_name)
                csv_output_rows.append(new_csv_row)
                img_path = os.path.join(args.outputDir, 'frame_img')
                if not os.path.isdir(img_path):
                    os.mkdir(img_path)
                cv2.imwrite(os.path.join(img_path, img_file_name), image_bgr)
                c += 1
                if args.write:
                    out.write(image_bgr)
                print('{}_finish'.format(img_file_name))
                # cv2.imshow('demo', image_bgr)
                # if cv2.waitKey(1) & 0XFF == ord('q'):
                #     break
            else:
                print('cannot load the video.')
                break
        csv_headers = ['Frame']
        # BUGFIX: COCO_KEYPOINT_INDEXES is a module-level constant; the old
        # `self.COCO_KEYPOINT_INDEXES` raised NameError (no `self` in a function).
        for keypoint in COCO_KEYPOINT_INDEXES.values():
            csv_headers.extend([keypoint+'_x', keypoint+'_y'])
        # Keep only the arm keypoint columns (indices 5..10) plus the frame name.
        new_csv_headers = [i for i in csv_headers[11:23]]
        new_csv_headers.insert(0, csv_headers[0])
        new_csv_headers.extend(["LW_LL_LS", "LL_LS_RS", "LS_RS_RL", "RS_RL_RW"])
        csv_output_filename = os.path.join(args.outputDir, f'{video_name}_coord_data.csv')
        with open(csv_output_filename, 'w', newline='') as csvfile:
            csvwriter = csv.writer(csvfile)
            csvwriter.writerow(new_csv_headers)
            csvwriter.writerows(csv_output_rows)
        cv2.destroyAllWindows()
        vidcap.release()
        if args.write:
            print('video has been saved as {}'.format(save_path))
            out.release()
        return csv_output_rows
    else:
        image_list.sort()
        # Remove common filesystem junk entries.
        for junk in ("Thumbs.db", "@eaDir", ".DS_Store"):
            if junk in image_list:
                image_list.remove(junk)
        for imgs in image_list:
            img_ori_path = os.path.join(args.folder, imgs)
            image_bgr = cv2.imread(img_ori_path)
            last_time = time.time()
            image = image_bgr[:, :, [2, 1, 0]]
            input = []
            img = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
            img_tensor = torch.from_numpy(img / 255.).permute(2, 0, 1).float().to(CTX)
            input.append(img_tensor)
            # object detection box
            pred_boxes = get_person_detection_boxes(box_model, input, threshold=0.9)
            pred_boxes = [pred_boxes[0]]  # keep only the top-scoring person
            new_csv_row = []
            # pose estimation
            if len(pred_boxes) >= 1:
                for box in pred_boxes:
                    center, scale = box_to_center_scale(box, cfg.MODEL.IMAGE_SIZE[0], cfg.MODEL.IMAGE_SIZE[1])
                    image_pose = image.copy() if cfg.DATASET.COLOR_RGB else image_bgr.copy()
                    pose_preds = get_pose_estimation_prediction(pose_model, image_pose, center, scale)
                    if len(pose_preds) >= 1:
                        for kpt in pose_preds:
                            draw_pose(kpt, image_bgr)  # draw the poses
                            for coord in kpt[5:11]:
                                x_coord, y_coord = int(coord[0]), int(coord[1])
                                new_csv_row.extend([x_coord, y_coord])
                        new_coord = list(zip(new_csv_row[0::2], new_csv_row[1::2]))
                        ang1 = new_coord[4::-2]
                        ang2 = [new_coord[2], new_coord[0], new_coord[1]]
                        ang3 = [new_coord[0], new_coord[1], new_coord[3]]
                        ang4 = [new_coord[1], new_coord[3], new_coord[5]]
                        angles = [ang1, ang2, ang3, ang4]
                        for i in angles:
                            new_csv_row.append(getAngle(i[0], i[1], i[2]))
            if args.showFps:
                fps = 1 / (time.time() - last_time)
                cv2.putText(image_bgr, 'fps: ' + "%.2f" % (fps), (25, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0),
                            2)
            kpt_img_file = 'kpt' + '_' + str(c) + '_' + imgs
            new_csv_row.insert(0, imgs)
            csv_output_rows.append(new_csv_row)
            img_path = os.path.join(args.outputDir, 'kpt_img')
            if not os.path.isdir(img_path):
                os.mkdir(img_path)
            cv2.imwrite(os.path.join(img_path, kpt_img_file), image_bgr)
            c += 1
            print('the result image has been saved as {}'.format(imgs))
        csv_headers = ['Image']
        for keypoint in COCO_KEYPOINT_INDEXES.values():
            csv_headers.extend([keypoint + '_x', keypoint + '_y'])
        new_csv_headers = [i for i in csv_headers[11:23]]
        new_csv_headers.insert(0, csv_headers[0])
        new_csv_headers.extend(["LW_LL_LS", "LL_LS_RS", "LS_RS_RL", "RS_RL_RW"])
        for_csv = os.path.basename(os.path.dirname(args.outputDir))
        csv_output_filename = os.path.join(args.outputDir, f'{for_csv}_coord_data.csv')
        with open(csv_output_filename, 'w', newline='') as csvfile:
            csvwriter = csv.writer(csvfile)
            csvwriter.writerow(new_csv_headers)
            csvwriter.writerows(csv_output_rows)
        return csv_output_rows
#
# cv2.imshow('demo', image_bgr)
# if cv2.waitKey(0) & 0XFF == ord('q'):
#     cv2.destroyAllWindows()
# Script entry point: run the detection + pose-estimation pipeline.
if __name__ == '__main__':
    main()
feemodeldata/plotting/plotwaits.py | bitcoinfees/bitcoin-feemodel-data | 2 | 16271 | <reponame>bitcoinfees/bitcoin-feemodel-data
from __future__ import division
import sqlite3
from bisect import bisect_left
import plotly.plotly as py
from plotly.graph_objs import Scatter, Figure, Layout, Data, YAxis, XAxis
from feemodel.util import DataSample
from feemodel.app.predict import PVALS_DBFILE
from feemodeldata.plotting.plotrrd import BASEDIR
def get_waits(dbfile=PVALS_DBFILE):
    """Fetch the (feerate, waittime) pairs and the block height range from the
    p-values database.

    :param dbfile: path to the sqlite database
    :return: (txs, minheight, maxheight); if the txs table is empty, the SQL
        aggregates yield (None, None) for the height range.
    """
    db = None
    try:
        db = sqlite3.connect(dbfile)
        txs = db.execute("select feerate, waittime from txs").fetchall()
        # Let SQLite compute the height range instead of fetching every
        # blockheight row into Python just to run min()/max() over it.
        minheight, maxheight = db.execute(
            "select min(blockheight), max(blockheight) from txs").fetchone()
        return txs, minheight, maxheight
    finally:
        if db is not None:
            db.close()
def get_txgroups(txs, feerates=(10000, 15000, 20000, 50000)):
    """Sort txs in place by feerate and partition them at the given feerate
    boundaries.

    Note: txs with feerate >= the last boundary fall outside every group,
    mirroring the slicing behaviour of the original implementation.
    """
    txs.sort()
    txfeerates, _dum = zip(*txs)
    boundaries = [0] + [bisect_left(txfeerates, feerate) for feerate in feerates]
    print("idxs are {}.".format(boundaries))
    return [txs[lo:hi] for lo, hi in zip(boundaries, boundaries[1:])]
def get_traces(txgroups):
    """Build one plotly Scatter trace per feerate group, each tracing the
    empirical CDF of that group's wait times."""
    traces = []
    for group in txgroups:
        feerates, waits = zip(*group)
        sample = DataSample(waits)
        cdf_points = [i / 100 for i in range(1, 99)]
        wait_values = [sample.get_percentile(p) for p in cdf_points]
        # Anchor the curve at the origin.
        cdf_points.insert(0, 0)
        wait_values.insert(0, 0)
        traces.append(Scatter(
            x=wait_values,
            y=cdf_points,
            name="{} <= feerate <= {}".format(min(feerates), max(feerates))
        ))
    return traces
def plotwaits(traces, minheight, maxheight, basedir=BASEDIR):
    """Upload the wait-time CDF figure to plotly and return the plot URL."""
    layout = Layout(
        title=("Empirical CDF of waittimes from blocks {}-{}".
               format(minheight, maxheight)),
        yaxis=YAxis(
            title="Empirical CDF",
            range=[0, 1]
        ),
        xaxis=XAxis(
            title="Wait time (s)",
            rangemode="tozero",
            type="log"
        ),
        hovermode="closest"
    )
    fig = Figure(data=Data(traces), layout=layout)
    if not basedir.endswith('/'):
        basedir = basedir + '/'
    return py.plot(fig, filename=basedir + "waits_cdf", auto_open=False)
def main(basedir=BASEDIR):
    """Fetch wait data, group it by feerate, build CDF traces, publish the
    plot, and print the resulting URL."""
    transactions, lo_height, hi_height = get_waits(PVALS_DBFILE)
    print("Got {} txs.".format(len(transactions)))
    groups = get_txgroups(transactions)
    print("Got txgroups.")
    cdf_traces = get_traces(groups)
    print("Got traces.")
    print(plotwaits(cdf_traces, lo_height, hi_height, basedir=basedir))
| 2.46875 | 2 |
utils/pytorch_utils.py | shoegazerstella/BTC-ISMIR19 | 1 | 16272 |
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import os
import math
from utils import logger
use_cuda = torch.cuda.is_available()
# utility
def to_var(x, dtype=None):
    """Wrap a list/ndarray/tensor input in a torch Variable, moving it to the
    GPU when available.

    :param x: list, numpy ndarray, or torch tensor
    :param dtype: numpy dtype used only when converting a list
    """
    # Identity checks (not isinstance) are kept deliberately so subclasses
    # fall through unchanged, as in the original behaviour.
    if type(x) is list:
        x = torch.from_numpy(np.array(x, dtype=dtype))
    elif type(x) is np.ndarray:
        x = torch.from_numpy(x)
    tensor = x.cuda() if use_cuda else x
    return Variable(tensor)
# optimization
# reference: http://pytorch.org/docs/master/_modules/torch/optim/lr_scheduler.html#ReduceLROnPlateau
def adjusting_learning_rate(optimizer, factor=.5, min_lr=0.00001):
    """Multiply every param group's learning rate by `factor`, clamped below
    at `min_lr`, logging each adjustment."""
    for group in optimizer.param_groups:
        previous = float(group['lr'])
        reduced = max(previous * factor, min_lr)
        group['lr'] = reduced
        logger.info('adjusting learning rate from %.6f to %.6f' % (previous, reduced))
def lr_annealing_function(step, start=0, end=1, r=0.9999, type="exp"):
    """Exponentially anneal a value from `start` toward `end`.

    lr = start - (start - end) * (1 - r**step): equals `start` at step 0 and
    approaches `end` as step grows (for 0 < r < 1).

    :param step: current global step
    :param start: initial value
    :param end: asymptotic final value
    :param r: per-step decay base
    :param type: schedule name; only "exp" is implemented
    :raises ValueError: for an unknown `type` (the old code printed a message
        and then crashed with UnboundLocalError)
    """
    if type != "exp":
        raise ValueError("not available %s annealing" % type)
    return start - (start - end) * (1 - math.pow(r, step))
def update_lr(optimizer, new_lr):
    """Set the learning rate of every parameter group to `new_lr`.

    :param optimizer: torch optimizer (anything exposing `param_groups`)
    :param new_lr: learning rate to apply to all groups
    """
    # The old version read the previous lr and enumerated the groups without
    # using either value; both unused locals are removed.
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr
def transformer_learning_rate(optimizer, model_dim, step_num, warmup_steps=4000):
    """Apply the Transformer ("Attention Is All You Need") learning-rate
    schedule: lr = d_model^-0.5 * min(step^-0.5, step * warmup^-1.5).

    :param optimizer: torch optimizer
    :param model_dim: model (embedding) dimension d_model
    :param step_num: current step number (must be >= 1)
    :param warmup_steps: linear warmup length
    """
    # The schedule depends only on the step, not on the group, so compute it
    # once instead of once per param group as the old code did.
    new_lr = model_dim ** (-0.5) * min(step_num ** (-0.5), step_num * warmup_steps ** (-1.5))
    for param_group in optimizer.param_groups:
        old_lr = float(param_group['lr'])
        param_group['lr'] = new_lr
        logger.info('adjusting learning rate from %.6f to %.6f' % (old_lr, new_lr))
# model save and loading
def load_model(asset_path, model, optimizer, restore_epoch=0):
    """Restore model and optimizer state from a saved checkpoint, if present.

    :param asset_path: experiment directory containing a 'model' subfolder
    :param restore_epoch: epoch number embedded in the checkpoint file name
    :return: (model, optimizer, current_step); current_step is 0 when no
        checkpoint exists for the requested epoch
    """
    checkpoint_path = os.path.join(
        asset_path, 'model', 'checkpoint_%d.pth.tar' % restore_epoch)
    if os.path.isfile(checkpoint_path):
        checkpoint = torch.load(checkpoint_path)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        current_step = checkpoint['current_step']
        logger.info("restore model with %d epoch" % restore_epoch)
    else:
        logger.info("no checkpoint with %d epoch" % restore_epoch)
        current_step = 0
    return model, optimizer, current_step
# class weighted_BCELoss(Module):
# def __init__(self, mode):
# self.mode = mode
#
# def forward(self, input, target, weight=10):
# if not (input.size() == target.size()):
# raise ValueError("Target and input must have the same size. target size ({}) "
# "!= input size ({})".format(target.size(), input.size()))
# loss_matrix = - (torch.mul(target, input.log()) + torch.mul(1 - target, (1 - input).log()))
# one_matrix = Variable(torch.ones(input.size()))
# if use_cuda:
# one_matrix = one_matrix.cuda()
# if self.mode == 'one':
# weight_matrix = (weight - 1) * target + one_matrix
# elif self.mode == 'pitch':
#
# weighted_loss_matrix = torch.mul(loss_matrix, weight_matrix)
# return torch.mean(weighted_loss_matrix)
# loss
def weighted_binary_cross_entropy(output, target, weights=None, eps=1e-12):
    """Binary cross-entropy with optional per-class weights.

    :param output: predicted probabilities in (0, 1)
    :param target: binary ground-truth labels
    :param weights: optional pair (negative_weight, positive_weight)
    :param eps: numerical-stability offset inside the logs
    :return: scalar mean loss (negated log-likelihood)
    """
    positive_term = target * torch.log(output + eps)
    negative_term = (1 - target) * torch.log(1 - output + eps)
    if weights is None:
        loss = positive_term + negative_term
    else:
        assert len(weights) == 2
        # weights[1] scales the positive class, weights[0] the negative class.
        loss = weights[1] * positive_term + weights[0] * negative_term
    return torch.neg(torch.mean(loss))
def kl_divergence(mu, sig, num_latent_group=0, freebits_ratio=2., p_mu=None, p_sigma=None, eps=1e-8):
    """KL divergence between N(mu, sig) and a normal prior.

    The prior is the standard normal when p_mu is None, otherwise
    N(p_mu, p_sigma). With num_latent_group > 0, the per-dimension KL is
    averaged over the batch, summed within each latent group, clamped below
    at freebits_ratio ("free bits"), and summed over groups; otherwise the
    total KL is summed and divided by the batch size.

    :param mu, sig: batch_size x latent_size posterior parameters
    """
    batch_size = mu.size(0)
    latent_size = mu.size(1)
    var = sig * sig
    if p_mu is None:
        # Closed form against the standard normal prior.
        kl = 0.5 * (mu * mu + var - torch.log(var + eps) - 1)
    else:
        prior_var = p_sigma * p_sigma
        mean_diff_sq = (mu - p_mu) * (mu - p_mu)
        kl = (var + mean_diff_sq) / (2 * prior_var) + torch.log(p_sigma / sig + eps) - 0.5
    if num_latent_group == 0:
        return torch.sum(kl) / batch_size
    group_size = latent_size // num_latent_group
    per_dim = kl.mean(0)                       # mean along batch dimension
    per_group = per_dim.view(-1, group_size).sum(1)  # sum within each group
    return torch.clamp(per_group, min=freebits_ratio).sum()
def vae_loss(target, prediction, mu, sig,
             num_latent_group=0, freebits_ratio=2., kl_ratio=1., p_mu=None, p_sigma=None):
    """VAE objective: reconstruction BCE plus weighted KL term.

    :return: (total_loss, reconstruction_loss, kl_loss)
    """
    reconstruction = F.binary_cross_entropy(prediction, target)
    kl = kl_divergence(mu, sig, num_latent_group, freebits_ratio, p_mu, p_sigma)
    return reconstruction + kl_ratio * kl, reconstruction, kl
| 2.515625 | 3 |
ecosante/users/schemas/__init__.py | betagouv/recosante-api | 3 | 16273 | from dataclasses import field
from marshmallow import Schema, ValidationError, post_load, schema
from marshmallow.validate import OneOf, Length
from marshmallow.fields import Bool, Str, List, Nested, Email
from flask_rebar import ResponseSchema, RequestSchema, errors
from ecosante.inscription.models import Inscription
from ecosante.utils.custom_fields import TempList
from ecosante.api.schemas.commune import CommuneSchema
from ecosante.extensions import celery
from indice_pollution.history.models import Commune as CommuneModel
from flask import request
def list_str(choices, max_length=None, temp=False, **kwargs):
    """Build an optional, nullable list field of strings restricted to
    `choices`.

    :param choices: allowed string values for each element
    :param max_length: optional upper bound on the list length
    :param temp: use the project's TempList field instead of a plain List
    :param kwargs: extra keyword arguments forwarded to the field
    """
    field_cls = TempList if temp else List
    length_check = Length(min=0, max=max_length) if max_length else None
    return field_cls(
        Str(validate=OneOf(choices=choices)),
        required=False,
        allow_none=True,
        validate=length_check,
        **kwargs
    )
class User(Schema):
    """Base marshmallow schema for a subscriber profile.

    Most fields are optional multi-choice string lists built by list_str;
    choice values are French, matching the stored Inscription model.
    """
    commune = Nested(CommuneSchema, required=False, allow_none=True)
    uid = Str(dump_only=True)
    mail = Email(required=True)
    # Lifestyle / exposure questions.
    deplacement = list_str(["velo", "tec", "voiture", "aucun"])
    activites = list_str(["jardinage", "bricolage", "menage", "sport", "aucun"])
    enfants = list_str(["oui", "non", "aucun"], temp=True)
    chauffage = list_str(["bois", "chaudiere", "appoint", "aucun"])
    animaux_domestiques = list_str(["chat", "chien", "aucun"])
    connaissance_produit = list_str(["medecin", "association", "reseaux_sociaux", "publicite", "ami", "autrement"])
    population = list_str(["pathologie_respiratoire", "allergie_pollens", "aucun"])
    # Subscription preferences: which indicators, how often, on which media.
    indicateurs = list_str(["indice_atmo", "raep", "indice_uv", "vigilance_meteorologique"])
    indicateurs_frequence = list_str(["quotidien", "hebdomadaire", "alerte"], 1)
    indicateurs_media = list_str(["mail", "notifications_web"])
    # Loaded into the model attribute 'recommandations_actives'.
    recommandations = list_str(["oui", "non"], 1, attribute='recommandations_actives')
    recommandations_frequence = list_str(["quotidien", "hebdomadaire", "pollution"], 1)
    recommandations_media = list_str(["mail", "notifications_web"])
    # Accepted on input only; never serialized back to clients.
    webpush_subscriptions_info = Str(required=False, allow_none=True, load_only=True)
class Response(User, ResponseSchema):
    """User payload returned by the API; adds the account activation flag."""
    # Exposes the model's is_active attribute.
    is_active = Bool(attribute='is_active')
class RequestPOST(User, RequestSchema):
    """Payload for creating a new subscription (Inscription)."""
    @post_load
    def make_inscription(self, data, **kwargs):
        """Build a new Inscription from the validated payload.

        Raises a field-level ValidationError when the mail address is
        already registered (case-insensitive match via ilike).
        """
        inscription = Inscription.query.filter(Inscription.mail.ilike(data['mail'])).first()
        if inscription:
            raise ValidationError('mail already used', field_name='mail')
        inscription = Inscription(**data)
        return inscription
class RequestPOSTID(User, RequestSchema):
    """Payload for updating an existing subscription, addressed by uid."""
    def __init__(self, **kwargs):
        # Default to partial validation of 'mail' so updates need not
        # resend it; an explicit 'partial' kwarg overrides this.
        super_kwargs = dict(kwargs)
        partial_arg = super_kwargs.pop('partial', ['mail'])
        super(RequestPOSTID, self).__init__(partial=partial_arg, **super_kwargs)
    @post_load
    def make_inscription(self, data, **kwargs):
        """Apply the validated payload to the Inscription matching the
        'uid' URL parameter.

        Raises NotFound for an unknown uid and Conflict when the new mail
        is already used by a different subscription.
        """
        uid = request.view_args.get('uid')
        if not uid:
            raise ValidationError('uid is required')
        inscription = Inscription.query.filter_by(uid=uid).first()
        if not inscription:
            raise errors.NotFound('uid unknown')
        if 'mail' in data:
            # Reject a mail change that collides with another subscription.
            inscription_same_mail = Inscription.query.filter(
                Inscription.uid != uid,
                Inscription.mail == data['mail']
            ).first()
            if inscription_same_mail:
                raise errors.Conflict('user with this mail already exists')
        for k, v in data.items():
            setattr(inscription, k, v)
        return inscription
class RequestUpdateProfile(Schema):
    """Minimal payload for profile updates: only the mail address."""
    mail = Email(required=True)
Course 01 - Getting Started with Python/Extra Studies/Basics/ex022.py | marcoshsq/python_practical_exercises | 9 | 16274 | <reponame>marcoshsq/python_practical_exercises
import math
# Exercise 017: Right Triangle
"""Write a program that reads the length of the opposite side and the adjacent side of a right triangle.
Calculate and display the length of the hypotenuse."""
# All three methods below apply the Pythagorean theorem: a^2 = b^2 + c^2.
# Read the two legs (catheti) of the right triangle.
cathetus_a = float(input("Enter the value of leg a: "))
cathetus_b = float(input("Enter the value of leg b: "))
# Method 01: plain arithmetic, no math module (square root via ** 0.5).
hyp_plain = ((cathetus_a ** 2) + (cathetus_b ** 2)) ** 0.5
print(f"The triangle hypotenuse measures {hyp_plain:.2f} m.u. ")
# Method 02: math.sqrt combined with math.pow.
hyp_pow = math.sqrt(math.pow(cathetus_a, 2) + math.pow(cathetus_b, 2))
print(f"The triangle hypotenuse measures {hyp_pow:.2f} m.u. ")
# Method 03: math.hypot does the whole computation in one call.
hyp_direct = math.hypot(cathetus_a, cathetus_b)
print(f"The triangle hypotenuse measures {hyp_direct:.2f} m.u. ")
| 4.34375 | 4 |
annuaire/commands/__init__.py | djacomy/layer-annuaire | 0 | 16275 | <reponame>djacomy/layer-annuaire<filename>annuaire/commands/__init__.py
"""Package groups the different commands modules."""
from annuaire.commands import download, import_lawyers
__all__ = [download, import_lawyers]
| 1.65625 | 2 |
eventsourcing/application/actors.py | vladimirnani/eventsourcing | 1 | 16276 | import logging
from thespian.actors import *
from eventsourcing.application.process import ProcessApplication, Prompt
from eventsourcing.application.system import System, SystemRunner
from eventsourcing.domain.model.events import subscribe, unsubscribe
from eventsourcing.interface.notificationlog import RecordManagerNotificationLog
# Module-level root logger.
logger = logging.getLogger()
# Todo: Send timer message to run slave every so often (in master or slave?).
# Default thespian logging configuration. The file handler is left commented
# out, so actor logging is effectively disabled unless callers pass their own
# logcfg to start_actor_system().
DEFAULT_ACTORS_LOGCFG = {
    'version': 1,
    'formatters': {
        'normal': {
            'format': '%(levelname)-8s %(message)s'
        }
    },
    'handlers': {
        # 'h': {
        #     'class': 'logging.FileHandler',
        #     'filename': 'hello.log',
        #     'formatter': 'normal',
        #     'level': logging.INFO
        # }
    },
    'loggers': {
        # '': {'handlers': ['h'], 'level': logging.DEBUG}
    }
}
def start_actor_system(system_base=None, logcfg=DEFAULT_ACTORS_LOGCFG):
    """Start (or attach to) the thespian actor system singleton.

    :param system_base: thespian system base name, e.g. 'multiprocTCPBase';
        None selects thespian's default system base.
    :param logcfg: logging configuration dict passed to thespian.
    """
    # Instantiating ActorSystem boots the singleton; the instance itself
    # does not need to be retained.
    ActorSystem(systemBase=system_base, logDefs=logcfg)
def shutdown_actor_system():
    """Shut down the global thespian actor system singleton."""
    ActorSystem().shutdown()
def start_multiproc_tcp_base_system():
    """Start the actor system using the multiprocess TCP system base."""
    start_actor_system(system_base='multiprocTCPBase')
# def start_multiproc_udp_base_system():
#     start_actor_system(system_base='multiprocUDPBase')
#
#
# def start_multiproc_queue_base_system():
#     start_actor_system(system_base='multiprocQueueBase')
class ActorModelRunner(SystemRunner):
    """
    Uses actor model framework to run a system of process applications.

    Creates a singleton system actor, asks it to initialise one pipeline
    actor per configured pipeline ID, and forwards prompts published in this
    operating system process to the corresponding pipeline actor.
    """
    def __init__(self, system: System, pipeline_ids, system_actor_name='system', shutdown_on_close=False, **kwargs):
        super(ActorModelRunner, self).__init__(system=system, **kwargs)
        self.pipeline_ids = list(pipeline_ids)
        self.pipeline_actors = {}
        self.system_actor_name = system_actor_name
        # Create the system actor (singleton).
        self.system_actor = self.actor_system.createActor(
            actorClass=SystemActor,
            globalName=self.system_actor_name
        )
        self.shutdown_on_close = shutdown_on_close
    @property
    def actor_system(self):
        # Global thespian actor system singleton.
        return ActorSystem()
    def start(self):
        """
        Starts all the actors to run a system of process applications.
        """
        # Subscribe to broadcast prompts published by a process
        # application in the parent operating system process.
        subscribe(handler=self.forward_prompt, predicate=self.is_prompt)
        # Initialise the system actor.
        msg = SystemInitRequest(
            self.system.process_classes,
            self.infrastructure_class,
            self.system.followings,
            self.pipeline_ids
        )
        response = self.actor_system.ask(self.system_actor, msg)
        # Keep the pipeline actor addresses, to send prompts directly.
        assert isinstance(response, SystemInitResponse), type(response)
        assert list(response.pipeline_actors.keys()) == self.pipeline_ids, (
            "Configured pipeline IDs mismatch initialised system {} {}").format(
            list(self.pipeline_actors.keys()), self.pipeline_ids
        )
        self.pipeline_actors = response.pipeline_actors
        # Todo: Somehow know when to get a new address from the system actor.
        # Todo: Command and response messages to system actor to get new pipeline address.
    @staticmethod
    def is_prompt(event):
        # Predicate used when subscribing to published events.
        return isinstance(event, Prompt)
    def forward_prompt(self, prompt):
        # Relay a locally published prompt to the actor for its pipeline;
        # prompts for unknown pipelines are silently dropped.
        if prompt.pipeline_id in self.pipeline_actors:
            pipeline_actor = self.pipeline_actors[prompt.pipeline_id]
            self.actor_system.tell(pipeline_actor, prompt)
        # else:
        #     msg = "Pipeline {} is not running.".format(prompt.pipeline_id)
        #     raise ValueError(msg)
    def close(self):
        """Stops all the actors running a system of process applications."""
        super(ActorModelRunner, self).close()
        unsubscribe(handler=self.forward_prompt, predicate=self.is_prompt)
        if self.shutdown_on_close:
            self.shutdown()
    def shutdown(self):
        # Recursively terminate the system actor and all of its children.
        msg = ActorExitRequest(recursive=True)
        self.actor_system.tell(self.system_actor, msg)
class SystemActor(Actor):
    """Singleton root actor: creates one PipelineActor per pipeline ID and
    replies to init requests with the pipeline actor addresses."""
    def __init__(self):
        super(SystemActor, self).__init__()
        self.pipeline_actors = {}
        self.is_initialised = False
    def receiveMessage(self, msg, sender):
        # Only SystemInitRequest is handled. Pipelines are created at most
        # once, but every requester gets a response with the addresses.
        if isinstance(msg, SystemInitRequest):
            if not self.is_initialised:
                self.init_pipelines(msg)
                self.is_initialised = True
            msg = SystemInitResponse(self.pipeline_actors.copy())
            self.send(sender, msg)
    def init_pipelines(self, msg):
        """Create and initialise one pipeline actor per requested pipeline ID."""
        self.process_classes = msg.process_classes
        self.infrastructure_class = msg.infrastructure_class
        self.system_followings = msg.system_followings
        for pipeline_id in msg.pipeline_ids:
            pipeline_actor = self.createActor(PipelineActor)
            self.pipeline_actors[pipeline_id] = pipeline_actor
            msg = PipelineInitRequest(
                self.process_classes,
                self.infrastructure_class,
                self.system_followings,
                pipeline_id
            )
            self.send(pipeline_actor, msg)
class PipelineActor(Actor):
def __init__(self):
super(PipelineActor, self).__init__()
self.system = None
self.process_actors = {}
self.pipeline_id = None
def receiveMessage(self, msg, sender):
if isinstance(msg, PipelineInitRequest):
# logger.info("pipeline received init: {}".format(msg))
self.init_pipeline(msg)
elif isinstance(msg, Prompt):
# logger.info("pipeline received prompt: {}".format(msg))
self.forward_prompt(msg)
def init_pipeline(self, msg):
self.pipeline_id = msg.pipeline_id
self.process_classes = msg.process_classes
self.infrastructure_class = msg.infrastructure_class
self.system_followings = msg.system_followings
self.followers = {}
for process_class_name, upstream_class_names in self.system_followings.items():
for upstream_class_name in upstream_class_names:
process_name = upstream_class_name.lower()
if process_name not in self.followers:
self.followers[process_name] = []
downstream_class_names = self.followers[process_name]
if process_class_name not in downstream_class_names:
downstream_class_names.append(process_class_name)
process_class_names = self.system_followings.keys()
for process_class_name in process_class_names:
process_actor = self.createActor(ProcessMaster)
process_name = process_class_name.lower()
self.process_actors[process_name] = process_actor
for process_class_name in process_class_names:
process_name = process_class_name.lower()
upstream_application_names = [c.lower() for c in self.system_followings[process_class_name]]
downstream_actors = {}
for downstream_class_name in self.followers[process_name]:
downstream_name = downstream_class_name.lower()
# logger.warning("sending prompt to process application {}".format(downstream_name))
process_actor = self.process_actors[downstream_name]
downstream_actors[downstream_name] = process_actor
process_class = self.process_classes[process_class_name]
msg = ProcessInitRequest(
process_class,
self.infrastructure_class,
self.pipeline_id,
upstream_application_names,
downstream_actors,
self.myAddress
)
self.send(self.process_actors[process_name], msg)
    def forward_prompt(self, msg):
        """Relay a Prompt to every process actor that follows its source.

        NOTE(review): assumes ``msg.process_name`` is a key of
        ``self.followers``; a prompt from a process nothing follows would
        raise KeyError — confirm upstream guarantees.
        """
        for downstream_class_name in self.followers[msg.process_name]:
            downstream_name = downstream_class_name.lower()
            process_actor = self.process_actors[downstream_name]
            self.send(process_actor, msg)
class ProcessMaster(Actor):
    """Supervisor actor for one process application.

    Owns a ProcessSlave actor and throttles it: prompts that arrive while
    the slave is busy are coalesced (latest prompt per upstream) into
    ``last_prompts`` and delivered in one batch when the slave reports it
    has finished its run.
    """
    def __init__(self):
        super(ProcessMaster, self).__init__()
        # True while a SlaveRunRequest is outstanding.
        self.is_slave_running = False
        # Latest Prompt per upstream process name, awaiting dispatch.
        self.last_prompts = {}
        self.slave_actor = None
    def receiveMessage(self, msg, sender):
        """Thespian entry point: dispatch incoming messages by type."""
        if isinstance(msg, ProcessInitRequest):
            self.init_process(msg)
        elif isinstance(msg, Prompt):
            # logger.warning("{} master received prompt: {}".format(self.process_application_class.__name__, msg))
            self.consume_prompt(prompt=msg)
        elif isinstance(msg, SlaveRunResponse):
            # logger.info("process application master received slave finished run: {}".format(msg))
            self.handle_slave_run_response()
    def init_process(self, msg):
        """Create the slave actor, forward it the init request, start it."""
        self.process_application_class = msg.process_application_class
        self.infrastructure_class = msg.infrastructure_class
        self.slave_actor = self.createActor(ProcessSlave)
        self.send(self.slave_actor, msg)
        self.run_slave()
    def consume_prompt(self, prompt):
        """Record the latest prompt from this upstream; (re)start the slave."""
        self.last_prompts[prompt.process_name] = prompt
        self.run_slave()
    def handle_slave_run_response(self):
        """Slave finished; rerun it if prompts were queued in the meantime."""
        self.is_slave_running = False
        if self.last_prompts:
            self.run_slave()
    def run_slave(self):
        # Don't send to slave if we think it's running, or we'll
        # probably get blocked while sending the message and have
        # to wait until the slave runs its loop (thespian design).
        if self.slave_actor and not self.is_slave_running:
            self.send(self.slave_actor, SlaveRunRequest(self.last_prompts, self.myAddress))
            self.is_slave_running = True
            # Queued prompts were handed off; start a fresh batch.
            self.last_prompts = {}
class ProcessSlave(Actor):
    """Worker actor that hosts the actual ProcessApplication instance.

    Runs the application's notification-processing loop on request from
    its ProcessMaster and forwards prompts the application generates to
    the downstream actors it was wired to at init time.
    """
    def __init__(self):
        super(ProcessSlave, self).__init__()
        # The ProcessApplication instance, created on ProcessInitRequest.
        self.process = None
    def receiveMessage(self, msg, sender):
        """Thespian entry point: dispatch incoming messages by type."""
        if isinstance(msg, ProcessInitRequest):
            # logger.info("process application slave received init: {}".format(msg))
            self.init_process(msg)
        elif isinstance(msg, SlaveRunRequest):
            # logger.info("{} process application slave received last prompts: {}".format(self.process.name, msg))
            self.run_process(msg)
        elif isinstance(msg, ActorExitRequest):
            # logger.info("{} process application slave received exit request: {}".format(self.process.name, msg))
            self.close()
    def init_process(self, msg):
        """Construct the process application and follow its upstream logs."""
        self.pipeline_actor = msg.pipeline_actor
        self.downstream_actors = msg.downstream_actors
        self.pipeline_id = msg.pipeline_id
        self.upstream_application_names = msg.upstream_application_names
        # Construct the process application class.
        process_class = msg.process_application_class
        if msg.infrastructure_class:
            process_class = process_class.mixin(msg.infrastructure_class)
        # Reset the database connection (for Django).
        process_class.reset_connection_after_forking()
        # Construct the process application.
        self.process = process_class(
            pipeline_id=self.pipeline_id,
        )
        assert isinstance(self.process, ProcessApplication)
        # Subscribe the slave actor's send_prompt() method.
        # - the process application will call publish_prompt()
        # and the actor will receive the prompt and send it
        # as a message.
        subscribe(
            predicate=self.is_my_prompt,
            handler=self.send_prompt
        )
        # Close the process application persistence policy.
        # - slave actor process application doesn't publish
        # events, so we don't need this
        self.process.persistence_policy.close()
        # Unsubscribe process application's publish_prompt().
        # - slave actor process application doesn't publish
        # events, so we don't need this
        unsubscribe(
            predicate=self.process.persistence_policy.is_event,
            handler=self.process.publish_prompt
        )
        # Construct and follow upstream notification logs.
        for upstream_application_name in self.upstream_application_names:
            record_manager = self.process.event_store.record_manager
            # assert isinstance(record_manager, ACIDRecordManager), type(record_manager)
            notification_log = RecordManagerNotificationLog(
                record_manager=record_manager.clone(
                    application_name=upstream_application_name,
                    pipeline_id=self.pipeline_id
                ),
                section_size=self.process.notification_log_section_size
            )
            self.process.follow(upstream_application_name, notification_log)
    def run_process(self, msg):
        """Process one notification per prompt, then reschedule or report."""
        notification_count = 0
        # Just process one notification so prompts are dispatched promptly, sent
        # messages only dispatched from actor after receive_message() returns.
        advance_by = 1
        if msg.last_prompts:
            for prompt in msg.last_prompts.values():
                notification_count += self.process.run(prompt, advance_by=advance_by)
        else:
            notification_count += self.process.run(advance_by=advance_by)
        if notification_count:
            # Run again, until nothing was done.
            self.send(self.myAddress, SlaveRunRequest(last_prompts={}, master=msg.master))
        else:
            # Report back to master.
            self.send(msg.master, SlaveRunResponse())
    def close(self):
        """Detach the prompt handler and close the process application."""
        unsubscribe(
            predicate=self.is_my_prompt,
            handler=self.send_prompt
        )
        self.process.close()
    def is_my_prompt(self, prompt):
        """True for Prompt events from our own process in our pipeline."""
        return (
            isinstance(prompt, Prompt)
            and prompt.process_name == self.process.name
            and prompt.pipeline_id == self.pipeline_id
        )
    def send_prompt(self, prompt):
        """Forward a prompt to every downstream actor."""
        for downstream_name, downstream_actor in self.downstream_actors.items():
            self.send(downstream_actor, prompt)
class SystemInitRequest(object):
    """Message: ask the system actor to set up one pipeline per id.

    Carries the process classes, the infrastructure mixin, the
    followings topology and the pipeline ids to create.
    """
    def __init__(self, process_classes, infrastructure_class, system_followings, pipeline_ids):
        self.pipeline_ids = pipeline_ids
        self.system_followings = system_followings
        self.infrastructure_class = infrastructure_class
        self.process_classes = process_classes
class SystemInitResponse(object):
    """Reply carrying the addresses of the created pipeline actors."""
    def __init__(self, pipeline_actors):
        self.pipeline_actors = pipeline_actors
class PipelineInitRequest(object):
    """Message: ask a PipelineActor to build the topology for one pipeline."""
    def __init__(self, process_classes, infrastructure_class, system_followings, pipeline_id):
        self.pipeline_id = pipeline_id
        self.system_followings = system_followings
        self.infrastructure_class = infrastructure_class
        self.process_classes = process_classes
class ProcessInitRequest(object):
    """Message: wire one process application into a pipeline.

    Carries the application class, its infrastructure mixin, the pipeline
    id, upstream application names, downstream actor addresses and the
    address of the owning pipeline actor.
    """
    def __init__(self, process_application_class, infrastructure_class, pipeline_id,
                 upstream_application_names,
                 downstream_actors,
                 pipeline_actor):
        self.pipeline_actor = pipeline_actor
        self.downstream_actors = downstream_actors
        self.upstream_application_names = upstream_application_names
        self.pipeline_id = pipeline_id
        self.infrastructure_class = infrastructure_class
        self.process_application_class = process_application_class
class SlaveRunRequest(object):
    """Message from master to slave: run once with these queued prompts."""
    def __init__(self, last_prompts, master):
        self.master = master
        self.last_prompts = last_prompts
class SlaveRunResponse(object):
    # Empty acknowledgement sent from slave to master after a run completes.
    pass
| 2.1875 | 2 |
sudoku/board.py | DariaMinieieva/sudoku_project | 5 | 16277 | <reponame>DariaMinieieva/sudoku_project
"""This module implements backtracking algorithm to solve sudoku."""
class Board:
"""
Class for sudoku board representation.
"""
NUMBERS = [1, 2, 3, 4, 5, 6, 7, 8, 9]
def __init__(self, board):
"""
Create a new board.
"""
self.board = board
def __str__(self) -> str:
"""
Return string reprentation of a board.
"""
result = ''
for line in self.board:
result += str(line) + '\n'
return result.strip()
@staticmethod
def check_rows(board) -> bool:
"""
Check if rows are filled correctly and don't have empty cells.
"""
for row in board:
numbers = list(range(1,10))
for cell in row:
if cell in numbers:
numbers.remove(cell)
else:
return False
return True
def check_colums(self) -> bool:
"""
Check if colums are filled correctly and don't have empty cells.
"""
board_1 = [[self.board[i][j] for i in range(9)] for j in range(9)]
return self.check_rows(board_1)
def check_subgrids(self) -> bool:
"""
Check if subgrids are filled correctly and don't have empty cells.
"""
board_2 = [[self.board[i][j], self.board[i][j+1], self.board[i][j+2],
self.board[i+1][j], self.board[i+1][j+1], self.board[i+1][j+2],
self.board[i+2][j], self.board[i+2][j+1], self.board[i+2][j+2]] \
for i in range(0, 9, 3) for j in range(0, 9, 3)]
return self.check_rows(board_2)
def check_board(self) -> bool:
"""
Check if board if filled correctly and doesn't have empty words.
"""
return self.check_rows(self.board) and self.check_colums() and self.check_subgrids()
def get_cell(self) -> tuple or None:
"""
Return coordinates of a first empty cell.
"""
for row in range(9):
for column in range(9):
if self.board[row][column] == 0:
return row, column
@staticmethod
def filter_values(values, used) -> set:
"""
Return set of valid numbers from values that do not appear in used
"""
return set([number for number in values if number not in used])
def filter_row(self, row) -> set:
"""
Return set of numbers that can be placed into a certain row.
"""
in_row = [number for number in self.board[row] if number != 0]
options = self.filter_values(self.NUMBERS, in_row)
return options
def filter_column(self, column) -> set:
"""
Return set of numbers that can be placed into a certain column.
"""
in_column = [self.board[i][column] for i in range(9)]
options = self.filter_values(self.NUMBERS, in_column)
return options
def filter_subgrid(self, row: int, column: int) -> set:
"""
Return set of numbers that can be placed into a certain subgrid.
"""
row_start = int(row / 3) * 3
column_start = int(column / 3) * 3
in_subgrid = []
for i in range(3):
for j in range(3):
in_subgrid.append(self.board[row_start+i][column_start+j])
options = self.filter_values(self.NUMBERS, in_subgrid)
return options
def available_options(self, row: int, column: int) -> list:
"""
Return a list of possible numbers that can be placed into a cell.
"""
for_row = self.filter_row(row)
for_column = self.filter_column(column)
for_subgrid = self.filter_subgrid(row, column)
result = for_row.intersection(for_column, for_subgrid)
return list(result)
def backtracking(self) -> list or None:
"""
Main function that implements backtracking algorithm to solve sudoku.
"""
if self.check_board():
return self.board
# get first empty cell
row, column = self.get_cell()
# get viable options
options = self.available_options(row, column)
for option in options:
self.board[row][column] = option # try viable option
# recursively fill in the board
if self.backtracking():
return self.board # return board if success
self.board[row][column] = 0 # otherwise backtracks
| 4.0625 | 4 |
openfl/component/ca/ca.py | saransh09/openfl-1 | 0 | 16278 | <reponame>saransh09/openfl-1<filename>openfl/component/ca/ca.py<gh_stars>0
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Aggregator module."""
import base64
import json
import os
import platform
import shutil
import signal
import subprocess
import time
import urllib.request
from logging import getLogger
from pathlib import Path
from subprocess import call
import requests
from click import confirm
logger = getLogger(__name__)
TOKEN_DELIMITER = '.'
CA_STEP_CONFIG_DIR = Path('step_config')
CA_PKI_DIR = Path('cert')
CA_PASSWORD_FILE = Path('pass_file')
CA_CONFIG_JSON = Path('config/ca.json')
def get_system_and_architecture():
    """Return (system, architecture) of this machine, normalised.

    Both values come from ``platform.uname()`` lower-cased; machine names
    are mapped to the release-asset aliases used by smallstep downloads
    (``amd64``, ``armv6``, ``armv7``, ``arm64``); unknown machines are
    returned unchanged.
    """
    uname_res = platform.uname()
    system = uname_res.system.lower()
    architecture_aliases = {
        'x86_64': 'amd64',
        'armv6l': 'armv6',
        'armv7l': 'armv7',
        'aarch64': 'arm64'
    }
    architecture = uname_res.machine.lower()
    # Direct dict lookup replaces the original linear scan over the keys.
    architecture = architecture_aliases.get(architecture, architecture)
    return system, architecture
def download_step_bin(url, grep_name, architecture, prefix='.', confirmation=True):
    """
    Download step binaries from github.
    Args:
        url: address of latest release
        grep_name: name to grep over github assets
        architecture: architecture type to grep
        prefix: folder path to download
        confirmation: request user confirmation or not
    """
    if confirmation:
        confirm('CA binaries from github will be downloaded now', default=True, abort=True)
    result = requests.get(url)
    if result.status_code != 200:
        logger.warning('Can\'t download binaries from github. Please try lately.')
        return
    assets = result.json().get('assets', [])
    # Keep only gzip assets whose name matches both the tool and the arch.
    archive_urls = [
        a['browser_download_url']
        for a in assets
        if (grep_name in a['name'] and architecture in a['name']
            and 'application/gzip' in a['content_type'])
    ]
    if len(archive_urls) == 0:
        raise Exception('Applicable CA binaries from github were not found '
                        f'(name: {grep_name}, architecture: {architecture})')
    archive_url = archive_urls[-1]
    # NOTE(review): downgrading https -> http fetches the CA binary over an
    # unencrypted channel with no integrity check — security risk, verify
    # whether this workaround is still needed.
    archive_url = archive_url.replace('https', 'http')
    name = archive_url.split('/')[-1]
    logger.info(f'Downloading {name}')
    urllib.request.urlretrieve(archive_url, f'{prefix}/{name}')
    # Unpack into <prefix>/step, where get_ca_bin_paths() looks for it.
    shutil.unpack_archive(f'{prefix}/{name}', f'{prefix}/step')
def get_token(name, ca_url, ca_path='.'):
    """
    Create authentication token.

    Returns a single string of the form
    ``<base64 token>.<base64 root certificate>`` (joined by
    TOKEN_DELIMITER), or None when the `step` subprocess fails.
    Args:
        name: common name for following certificate
            (aggregator fqdn or collaborator name)
        ca_url: full url of CA server
        ca_path: path to ca folder
    """
    ca_path = Path(ca_path)
    step_config_dir = ca_path / CA_STEP_CONFIG_DIR
    pki_dir = ca_path / CA_PKI_DIR
    step_path, _ = get_ca_bin_paths(ca_path)
    if not step_path:
        raise Exception('Step-CA is not installed!\nRun `fx pki install` first')
    priv_json = step_config_dir / 'secrets' / 'priv.json'
    pass_file = pki_dir / CA_PASSWORD_FILE
    root_crt = step_config_dir / 'certs' / 'root_ca.crt'
    try:
        # Ask the step CLI for a one-time token signed with the CA key.
        token = subprocess.check_output(
            f'{step_path} ca token {name} '
            f'--key {priv_json} --root {root_crt} '
            f'--password-file {pass_file} 'f'--ca-url {ca_url}', shell=True)
    except subprocess.CalledProcessError as exc:
        logger.error(f'Error code {exc.returncode}: {exc.output}')
        return
    token = token.strip()
    token_b64 = base64.b64encode(token)
    # Bundle the root certificate so the receiver can trust the CA.
    with open(root_crt, mode='rb') as file:
        root_certificate_b = file.read()
    root_ca_b64 = base64.b64encode(root_certificate_b)
    return TOKEN_DELIMITER.join([
        token_b64.decode('utf-8'),
        root_ca_b64.decode('utf-8'),
    ])
def get_ca_bin_paths(ca_path):
    """Locate the unpacked `step` and `step-ca` binaries under ``ca_path``.

    Returns a ``(step, step_ca)`` pair of Paths pointing at
    ``<ca_path>/step/<release>/bin/<tool>``; either entry is None when no
    matching release directory exists. If several versions are unpacked,
    the last one in directory-listing order wins.
    """
    base = Path(ca_path) / 'step'
    step_bin, step_ca_bin = None, None
    if base.exists():
        for entry in os.listdir(base):
            bin_dir = base / entry / 'bin'
            if 'step_' in entry:
                step_bin = bin_dir / 'step'
            if 'step-ca' in entry:
                step_ca_bin = bin_dir / 'step-ca'
    return step_bin, step_ca_bin
def certify(name, cert_path: Path, token_with_cert, ca_path: Path):
    """Obtain a certificate for ``name`` from the CA using a one-time token.

    ``token_with_cert`` is the ``<token>.<root cert>`` bundle produced by
    get_token(); the root certificate and the new key pair are written
    into ``cert_path``. Downloads the step CLI first if it is missing.
    """
    os.makedirs(cert_path, exist_ok=True)
    token, root_certificate = token_with_cert.split(TOKEN_DELIMITER)
    token = base64.b64decode(token).decode('utf-8')
    root_certificate = base64.b64decode(root_certificate)
    step_path, _ = get_ca_bin_paths(ca_path)
    if not step_path:
        url = 'http://api.github.com/repos/smallstep/cli/releases/latest'
        system, arch = get_system_and_architecture()
        download_step_bin(url, f'step_{system}', arch, prefix=ca_path)
        step_path, _ = get_ca_bin_paths(ca_path)
        if not step_path:
            raise Exception('Step-CA is not installed!\nRun `fx pki install` first')
    with open(f'{cert_path}/root_ca.crt', mode='wb') as file:
        file.write(root_certificate)
    # Request the certificate and private key via the step CLI.
    call(f'{step_path} ca certificate {name} {cert_path}/{name}.crt '
         f'{cert_path}/{name}.key --kty EC --curve P-384 -f --token {token}', shell=True)
def remove_ca(ca_path):
    """Kill step-ca process and rm ca directory.

    Best-effort teardown: missing directory is ignored.
    """
    _check_kill_process('step-ca')
    shutil.rmtree(ca_path, ignore_errors=True)
def install(ca_path, ca_url, password):
    """
    Create certificate authority for federation.
    Args:
        ca_path: path to ca directory
        ca_url: url for ca server like: 'host:port'
        password: passphrase protecting the root private keys
    """
    logger.info('Creating CA')
    ca_path = Path(ca_path)
    ca_path.mkdir(parents=True, exist_ok=True)
    step_config_dir = ca_path / CA_STEP_CONFIG_DIR
    # step tools read their config location from the STEPPATH env var.
    os.environ['STEPPATH'] = str(step_config_dir)
    step_path, step_ca_path = get_ca_bin_paths(ca_path)
    # Download the step binaries when either tool is missing.
    if not (step_path and step_ca_path and step_path.exists() and step_ca_path.exists()):
        confirm('CA binaries from github will be downloaded now', default=True, abort=True)
        system, arch = get_system_and_architecture()
        url = 'http://api.github.com/repos/smallstep/certificates/releases/latest'
        download_step_bin(url, f'step-ca_{system}', arch, prefix=ca_path, confirmation=False)
        url = 'http://api.github.com/repos/smallstep/cli/releases/latest'
        download_step_bin(url, f'step_{system}', arch, prefix=ca_path, confirmation=False)
    step_config_dir = ca_path / CA_STEP_CONFIG_DIR
    # (Re)create the CA workspace, asking before clobbering an existing one.
    if (not step_config_dir.exists()
            or confirm('CA exists, do you want to recreate it?', default=True)):
        _create_ca(ca_path, ca_url, password)
    _configure(step_config_dir)
def run_ca(step_ca, pass_file, ca_json):
    """Run CA server (blocking) unless the user keeps an existing one."""
    if _check_kill_process('step-ca', confirmation=True):
        logger.info('Up CA server')
        # Blocks for the lifetime of the step-ca process.
        call(f'{step_ca} --password-file {pass_file} {ca_json}', shell=True)
def _check_kill_process(pstring, confirmation=False):
    """Kill every process whose `ps` line matches ``pstring``.

    Returns False only when matches were found and the user declined the
    confirmation prompt; returns True otherwise (including "no matches").
    NOTE(review): ``pstring`` is interpolated into a shell pipeline and
    matched as a substring, so unrelated processes whose command line
    contains the string are SIGKILLed too — only call with fixed names.
    """
    pids = []
    proc = subprocess.Popen(f'ps ax | grep {pstring} | grep -v grep',
                            shell=True, stdout=subprocess.PIPE)
    text = proc.communicate()[0].decode('utf-8')
    for line in text.splitlines():
        fields = line.split()
        pids.append(fields[0])
    if len(pids):
        if confirmation and not confirm('CA server is already running. Stop him?', default=True):
            return False
        for pid in pids:
            os.kill(int(pid), signal.SIGKILL)
        # Give the killed processes time to release their ports/files.
        time.sleep(2)
    return True
def _create_ca(ca_path: Path, ca_url: str, password: str):
    """Create a CA workspace: password file, step config dir and root keys.

    Args:
        ca_path: path to the CA directory (binaries must be unpacked here)
        ca_url: CA address as 'host:port'; the host part becomes the CA
            name and DNS entry
        password: passphrase protecting the root private keys
    """
    pki_dir = ca_path / CA_PKI_DIR
    step_config_dir = ca_path / CA_STEP_CONFIG_DIR
    pki_dir.mkdir(parents=True, exist_ok=True)
    step_config_dir.mkdir(parents=True, exist_ok=True)
    with open(f'{pki_dir}/pass_file', 'w') as f:
        f.write(password)
    # The password file must be readable by the owner only.
    os.chmod(f'{pki_dir}/pass_file', 0o600)
    step_path, step_ca_path = get_ca_bin_paths(ca_path)
    assert (step_path and step_ca_path and step_path.exists() and step_ca_path.exists())
    logger.info('Create CA Config')
    os.environ['STEPPATH'] = str(step_config_dir)
    # Wipe any stale config before `step ca init` recreates it.
    shutil.rmtree(step_config_dir, ignore_errors=True)
    name = ca_url.split(':')[0]
    # BUG FIX: '--name name' previously passed the literal string "name" as
    # the CA name; use the host from ca_url, matching the --dns argument.
    call(f'{step_path} ca init --name {name} --dns {name} '
         f'--address {ca_url} --provisioner prov '
         f'--password-file {pki_dir}/pass_file', shell=True)
    # Replace the default provisioner with a JWK pair we control.
    call(f'{step_path} ca provisioner remove prov --all', shell=True)
    call(f'{step_path} crypto jwk create {step_config_dir}/certs/pub.json '
         f'{step_config_dir}/secrets/priv.json --password-file={pki_dir}/pass_file', shell=True)
    call(
        f'{step_path} ca provisioner add provisioner {step_config_dir}/certs/pub.json',
        shell=True
    )
def _configure(step_config_dir):
    """Relax certificate lifetime claims in the step-ca JSON config.

    TLS certificates get a one-year max/default lifetime; SSH user
    certificates get 24 hours. The file is rewritten in place.
    """
    config_path = step_config_dir / CA_CONFIG_JSON
    with open(config_path, 'r+') as config:
        settings = json.load(config)
        claims = settings.setdefault('authority', {}).setdefault('claims', {})
        one_year = f'{365 * 24}h'
        claims['maxTLSCertDuration'] = one_year
        claims['defaultTLSCertDuration'] = one_year
        claims['maxUserSSHCertDuration'] = '24h'
        claims['defaultUserSSHCertDuration'] = '24h'
        # Rewrite the file in place and drop any leftover trailing bytes.
        config.seek(0)
        json.dump(settings, config, indent=4)
        config.truncate()
| 2.09375 | 2 |
bin/optimization/cosmo_optimizer_hod_only.py | mclaughlin6464/pearce | 0 | 16279 | <reponame>mclaughlin6464/pearce
from pearce.emulator import OriginalRecipe, ExtraCrispy
import numpy as np
# Training data and emulator configuration (Python 2 script: note the
# statement-form prints below).
training_file = '/home/users/swmclau2/scratch/PearceRedMagicWpCosmo.hdf5'
em_method = 'gp'
split_method = 'random'
# Scale factor a=1.0 corresponds to redshift z=0.
a = 1.0
z = 1.0/a - 1.0
fixed_params = {'z':z, 'cosmo': 1}#, 'r':0.18477483}
n_leaves, n_overlap = 5, 2
emu = ExtraCrispy(training_file,n_leaves, n_overlap, split_method, method = em_method, fixed_params=fixed_params,\
                  custom_mean_function = None)
# Optimise the emulator hyperparameters and report them; the exp() maps the
# optimiser's log-space result back to parameter space.
results = emu.train_metric()
print results
print
print dict(zip(emu.get_param_names(), np.exp(results.x)))
| 2 | 2 |
tests/test_xmllint_map_html.py | sthagen/python-xmllint_map_html | 0 | 16280 | <reponame>sthagen/python-xmllint_map_html<filename>tests/test_xmllint_map_html.py
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,unused-import,reimported
import json
import pytest # type: ignore
import xmllint_map_html.xmllint_map_html as xmh
def test_parse_ok_minimal():
    """Parsing a minimal empty-JSON job yields NotImplemented first."""
    job = ['[]']
    parser = xmh.parse(job)
    assert next(parser) == NotImplemented
| 1.9375 | 2 |
apps/transmissions/views/transmissions.py | felipebarraza6/amamaule | 0 | 16281 | from rest_framework import mixins, viewsets, status
from rest_framework.permissions import (
AllowAny,
IsAuthenticated
)
from apps.transmissions.models import Transmission
from apps.transmissions.serializers import TransmissionModelSerializer, CommentModelserializer
from django_filters import rest_framework as filters
class TransmissionsViewSet(mixins.RetrieveModelMixin,
                           mixins.ListModelMixin,
                           mixins.UpdateModelMixin,
                           viewsets.GenericViewSet):
    """Retrieve/list/update API for Transmission objects, looked up by uuid.

    Listing is ordered by ``is_yt_stream``; reads are public while updates
    require authentication. Querystring filtering is configured by the
    nested TransmissionFilter.
    """
    queryset = Transmission.objects.all().order_by('is_yt_stream')
    serializer_class = TransmissionModelSerializer
    filter_backends = (filters.DjangoFilterBackend,)
    lookup_field = 'uuid'
    def get_permissions(self):
        """Allow anyone to read; require authentication for other actions."""
        if self.action in ['retrieve', 'list']:
            permissions = [AllowAny]
        else:
            permissions = [IsAuthenticated]
        return [p() for p in permissions]
    class TransmissionFilter(filters.FilterSet):
        # Declarative django-filter configuration for querystring filters.
        class Meta:
            model = Transmission
            fields = {
                'category':['exact'],
                'is_live': ['exact'] ,
                'required_auth': ['exact'],
                'broadcast_date': ['exact', 'contains']
            }
    filterset_class = TransmissionFilter
| 2.015625 | 2 |
amstramdam/events/game.py | felix-martel/multigeo | 3 | 16282 | <gh_stars>1-10
from amstramdam import app, socketio, timers, manager
from flask import session
from flask_socketio import emit
from .types import GameEndNotification, GameEndPayload
from .utils import safe_cancel, wait_and_run
from ..game.types import GameName, Coordinates
def terminate_game(game_name: GameName) -> None:
    """Finalise a finished game: broadcast final results and relaunch it.

    No-op when the game is unknown or not yet done (guards against stale
    timer callbacks).
    """
    game = manager.get_game(game_name)
    if game is None or not game.done:
        return
    game.terminate()
    payload = GameEndPayload(
        leaderboard=game.get_current_leaderboard(),
        full=game.get_final_results(),  # TODO: remove useless data
    )
    with app.test_request_context("/"):
        status = game.status
        print(
            f"Ending game <{game_name}> (emitting <event:status-update> "
            f"with status={status})"
        )
        socketio.emit(
            "status-update",
            GameEndNotification(status=status, payload=payload),
            json=True,
            broadcast=True,
            room=game_name,
        )
    # Reset the lobby so the same room can play again.
    manager.relaunch_game(game_name)
def end_game(game_name: GameName, run_id: int) -> None:
    """Close run ``run_id``: broadcast the answer/results, schedule next step.

    No-op when the game is unknown, the run id is stale, or the game is
    already done (guards against duplicate timer callbacks).
    """
    # global game
    game = manager.get_game(game_name)
    if game is None or game.curr_run_id != run_id or game.done:
        return
    print(f"Ending run {game.curr_run_id+1}")
    with app.test_request_context("/"):
        # 1: get current place
        (city_name, hint), (lon, lat) = game.current.place
        answer = dict(name=city_name, lon=lon, lat=lat)
        # 2: end game
        records = game.current.records
        results, done = game.end()
        payload = dict(
            results=records,
            answer=answer,
            leaderboard=game.get_current_leaderboard(),
            done=done,
        )
        socketio.emit(
            "status-update",
            dict(status=game.status, payload=payload),
            json=True,
            broadcast=True,
            room=game_name,
        )
        # 3: continue? After the wait time either finish the game or launch
        # the next run.
        if done:
            timers[game_name] = wait_and_run(game.wait_time, terminate_game, game_name)
        else:
            timers[game_name] = wait_and_run(
                game.wait_time, launch_run, game_name, game.curr_run_id
            )
def launch_run(game_name: GameName, run_id: int) -> None:
    """Start run ``run_id``: broadcast the hint and arm the end-of-run timer.

    No-op when the game is unknown or the run id is stale.
    """
    # global duration_thread
    game = manager.get_game(game_name)
    if game is None or game.curr_run_id != run_id:
        return
    print(f"Launching run {game.curr_run_id+1} for game <{game_name}>")
    with app.test_request_context("/"):
        hint = game.launch_run()
        payload = dict(hint=hint, current=game.curr_run_id, total=game.n_run)
        print(f"Hint is '{hint}'")
        socketio.emit(
            "status-update",
            dict(status=game.status, payload=payload),
            json=True,
            room=game_name,
            broadcast=True,
        )
    # End the run automatically when its duration elapses.
    timers[game_name] = wait_and_run(
        game.current.duration, end_game, game_name, game.curr_run_id
    )
@socketio.on("launch")
def launch_game() -> None:
    """Socket.IO handler: a player starts the game for their room.

    Broadcasts the game configuration, then schedules the first run after
    a 3-second countdown. Silently ignores unknown players/games.
    """
    game_name = session["game"]
    player = session.get("player")
    if player is None:
        return
    game = manager.get_game(game_name)
    if game is None:
        return
    game.launch()  # GameRun(players)
    payload = dict(
        game=game.map_name,
        runs=game.n_run,
        diff=game.difficulty,
        by=player,
        small_scale=game.small_scale,
    )
    emit(
        "status-update",
        dict(status=game.status, payload=payload),
        json=True,
        broadcast=True,
        room=game_name,
    )
    # Give clients 3 seconds to show the countdown before the first run.
    wait_and_run(3, launch_run, game_name, game.curr_run_id)
@socketio.on("guess")
def process_guess(data: Coordinates) -> None:
    """Socket.IO handler: score a player's (lon, lat) guess.

    Broadcasts the guess summary to the room, sends the detailed score
    back to the guesser, and ends the run early when everyone has answered.
    """
    # global duration_thread
    game_name = session["game"]
    game = manager.get_game(game_name)
    player = session.get("player")
    if player is None or game is None:
        return
    # player = data["player"]
    print("Receiving guess from", player)
    lon, lat = data["lon"], data["lat"]
    res, done = game.current.process_answer((lon, lat), player)
    res["total_score"] = (
        game.scores[player] + res["score"]
    )  # We need to add res["score"] between game.scores isn't updated yet
    # emit("log", f"Player <{player}> has scored {res['score']} points", broadcast=True,
    # room=game_name)
    emit(
        "new-guess",
        dict(player=player, dist=res["dist"], delta=res["delta"], score=res["score"]),
        broadcast=True,
        room=game_name,
    )
    emit("score", res, json=True)
    if done:
        # All players answered: cancel the run timer and end immediately.
        try:
            print(f"Interrupting run {game.curr_run_id+1}\n")
            safe_cancel(timers[game_name])
        except AttributeError:
            pass
        end_game(game_name, game.curr_run_id)
| 2.328125 | 2 |
src/glod/unittests/in_out/test_statement_csv.py | gordon-elliott/glod | 0 | 16283 | <gh_stars>0
__copyright__ = 'Copyright(c) <NAME> 2017'
"""
"""
from datetime import date
from decimal import Decimal
from io import StringIO
from unittest import TestCase
from glod.model.statement_item import StatementItem
from glod.model.account import Account
from glod.in_out.statement_item import statement_item_csv
class TestStatementCSV(TestCase):
    """Round-trip check of the statement-item CSV export."""
    def test_export(self):
        """Four identical items must serialise to four identical TSV rows."""
        account_no = '400400'
        account = Account(8001, 'current', account_no=account_no)
        date_fixture = date.today()
        details = 'details fixture {}'
        currency = 'EUR'
        debit = Decimal('500.00')
        credit = None
        balance = Decimal('3433.22')
        # Four statement items differing only in their details text.
        statement_items = [
            StatementItem(
                account,
                date_fixture,
                details.format(i),
                currency,
                debit,
                credit,
                balance,
            )
            for i in range(4)
        ]
        actual = statement_item_csv(statement_items, StringIO()).getvalue()
        expected = """account	date	details	currency	debit	credit	balance\r
{0}	{1}	details fixture 0	{2}	{3}		{4}\r
{0}	{1}	details fixture 1	{2}	{3}		{4}\r
{0}	{1}	details fixture 2	{2}	{3}		{4}\r
{0}	{1}	details fixture 3	{2}	{3}		{4}\r
""".format(
            account_no,
            date_fixture.strftime('%d/%m/%Y'),
            currency,
            debit,
            balance
        )
        self.maxDiff = None
        self.assertEqual(expected, actual)
| 2.5 | 2 |
ExifExtractor.py | MalwareJunkie/PythonScripts | 0 | 16284 | <reponame>MalwareJunkie/PythonScripts
# Tested with Python 3.6
# Install Pillow: pip install pillow
""" This script extracts exif data from JPEG images """
from PIL import Image
from PIL.ExifTags import TAGS
import sys
def getExif(img):
    """Return the EXIF data of *img* as a dict keyed by readable tag names.

    Numeric EXIF tag ids are mapped through PIL's TAGS table; unknown ids
    keep the raw numeric id as key. Exits the script when the image
    carries no EXIF data at all.
    """
    exif = img._getexif()
    if exif is None:
        print("No exif data found!!")
        sys.exit(0)
    # Dict comprehension replaces the manual build-and-assign loop.
    return {TAGS.get(tag_id, tag_id): value for tag_id, value in exif.items()}
def main():
    """Prompt for a JPEG filename and print its EXIF tags one per line."""
    try:
        imgName = input("Enter the name of the JPEG image: ")
        img = Image.open(imgName)
        if img.format != "JPEG":
            print("This only works with JPG images!!")
            sys.exit(0)
    except KeyboardInterrupt:
        print("\nExiting!!")
        sys.exit(0)
    except:
        # NOTE(review): bare except hides the real error (bad path,
        # unreadable file, ...) behind a generic message.
        print("Something went wrong!! check your input!!")
        sys.exit(0)
    print("Gathering exif data...")
    for k, v in getExif(img).items():
        try:
            # Some EXIF values are raw bytes; show them as UTF-8 text.
            # NOTE(review): only AttributeError is caught — a
            # UnicodeDecodeError from malformed bytes would propagate.
            v = v.decode("utf-8")
        except AttributeError:
            pass
        print(str(k) + ": ", v)
# Run the interactive prompt immediately.
# NOTE(review): no `if __name__ == "__main__"` guard — importing this module
# triggers the prompt as a side effect.
main()
| 2.921875 | 3 |
nbgrader/nbgraderformat/__init__.py | FrattisUC/nbgrader | 2 | 16285 | SCHEMA_VERSION = 2
from .common import ValidationError, SchemaMismatchError
from .v2 import MetadataValidatorV2 as MetadataValidator
from .v2 import read_v2 as read, write_v2 as write
from .v2 import reads_v2 as reads, writes_v2 as writes
| 1.046875 | 1 |
testFiles/test_script.py | Janga-Lab/Penguin-1 | 0 | 16286 | import h5py
from ont_fast5_api.conversion_tools import multi_to_single_fast5
from ont_fast5_api import fast5_interface
import SequenceGenerator.align as align
import SignalExtractor.Nanopolish as events
from testFiles.test_commands import *
import os, sys
import subprocess
#todo get basecall data
def basecall_test(fastPath):
    """Ensure a basecalled FASTA/FASTQ exists in Data/basecall.

    Returns early when a non-trivial (>1000 bytes) basecall file is
    already present; otherwise tries to basecall ``fastPath`` with
    scrappie, falling back to format conversion on failure.
    """
    files = os.listdir("Data/basecall")
    #check if basecall file already exists
    for f in files:
        if f.endswith(".fasta") or f.endswith(".fa") or f.endswith(".fastq") or f.endswith(".fq"):
            if os.stat("Data/basecall/" + f).st_size > 1000:
                return
    print("missing basecall file****/creating basecall file")
    # NOTE(review): the whole command line (including the '>' redirection)
    # is passed as a single list element WITHOUT shell=True, so
    # subprocess.run will look for an executable literally named
    # "scrappie raw ..." and raise FileNotFoundError — verify; either pass
    # shell=True with a string or split args and redirect via stdout=.
    bcCmd = "scrappie raw " + fastPath + " > " + os.getcwd() + "/Data/basecall/reads.fa"
    #create basecall file
    try:
        subprocess.run([bcCmd], check = True)
        #scrappie_basecall(fastPath)
    #checking if file not in right fast5 format(multi/single)
    except subprocess.CalledProcessError:
        export_scrappie_path()
        print("got error / process error")
        #export scrappie cmd (might not be exported correctly)
        export_scrappie_path()
        #checking if already in single directory
        if 'single' in fastPath:
            print("|||\/|| Already in single folder")
            #todo insert flappie
        #convert multi fast5 to single fast5 and move files into single directory.
        elif 'single' not in os.listdir(fastPath):
            print("converting fast5 to single fast5")
            convert_fast5_type(fastPath)
            scrappie_basecall_single(fastPath)
    #if path doesn't exist or no files
    except FileNotFoundError:
        #export_scrappie_path()
        print("got error / no file found ")
        #scrappie_basecall_single(fastPath)
        sys.exit()
    #any error (default error"export scrappie and try again")
    except:
        export_scrappie_path()
        scrappie_basecall(fastPath)
    #check if basecall created successfully
    if os.stat("Data/basecall/reads.fa").st_size > 0:
        print("created basecall file****")
    else:
        print("Couldn't create basecall file")
#test to check if required files are created
def file_test(bed_file, ref_file, sam_file):
    """Validate/resolve the bed, reference and SAM inputs.

    Downloads the default GRCh38 reference when ``ref_file`` is None, and
    builds a SAM alignment when ``sam_file`` is None. Raises
    FileNotFoundError on any missing prerequisite.
    Returns the (bed_file, ref_file, sam_file) triple.
    """
    if bed_file == None:
        print("bed file test failed****")
        raise FileNotFoundError
    #set ref file
    if ref_file != None:
        #fasta input
        fastfile = os.getcwd() + "/Data/basecall/"
        for ffile in os.listdir(fastfile):
            if ffile.endswith(".fastq") or ffile.endswith(".fasta") or ffile.endswith(".fa"):
                #check if fasta files exist in directory
                fastfile = os.getcwd() + "/Data/basecall/" + ffile
        #check if you found a fasta/fastq file in directory
        if fastfile.endswith(".fastq") != True and fastfile.endswith(".fasta") != True and fastfile.endswith(".fa") != True:
            print("basecall test failed****")
            raise FileNotFoundError
    #download reference file
    else:
        #use default ref files
        refFlag = False
        #defaultReferenceFile = "Homo_sapiens.GRCh38.dna.alt.fa"
        #defaultReferenceFile = "refgenome"
        defaultReferenceFile = "grch38.fna"
        #defaultReferenceFile = "coli-ref.fa"
        downloadedFlag = False
        #check if default reference file exists
        for f in os.listdir(os.getcwd()):
            if f == defaultReferenceFile:
                print("reference downloaded already****")
                downloadedFlag = True
        #download reference file
        if downloadedFlag != True:
            #os.system("wget -O refgenome.tar.gz ftp://igenome:G3nom3s4u@ussd-ftp.illumina.com/Homo_sapiens/Ensembl/GRCh37/Homo_sapiens_Ensembl_GRCh37.tar.gz")
            #os.system("wget -O refgenome.gz ftp://ftp.ncbi.nlm.nih.gov/refseq/H_sapiens/annotation/GRCh37_latest/refseq_identifiers/GRCh37_latest_genomic.fna.gz")
            os.system("wget -O grch38.fna.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/000/001/405/GCA_000001405.15_GRCh38/GCA_000001405.15_GRCh38_genomic.fna.gz")
            #os.system("wget -O ftp://ftp.ensembl.org/pub/release-100/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.alt.fa.gz")
            #os.system("tar -xzf refgenome.tar.gz")
            #os.system("gunzip refgenome.gz")
            os.system("gzip -d grch38.fna.gz")
            print("gunzipping reference genome****")
            #os.system("gunzip -v Homo_sapiens.GRCh38.dna.alt.fa.gz")
            for f in os.listdir(os.getcwd()):
                if f == "Homo_sapiens" or f == defaultReferenceFile or f == "refgenome":
                    refFlag = True
                    break
        ref_file = defaultReferenceFile
        #if file download wasn't successful
        if refFlag == False and downloadedFlag != True:
            print("ref file test failed****")
            raise FileNotFoundError
    #get basecalled file
    fastfile = os.getcwd() + "/Data/basecall/"
    for ffile in os.listdir(fastfile):
        if ffile.endswith(".fastq") or ffile.endswith(".fasta") or ffile.endswith(".fa"):
            #check if fast files exist in directory
            fastfile += ffile
            break
    #if no fasta/fastq file found
    if fastfile == os.getcwd() + "/Data/basecall/":
        print("basecall file test failed****")
        raise FileNotFoundError
    if sam_file == None:
        #ref file exists so align here
        sam_file = get_sam_file(fastfile, ref_file)
    # NOTE(review): this elif repeats the same condition as the if above and
    # is therefore unreachable dead code — verify intended condition.
    elif sam_file == None:
        print("sam file test failed****")
        raise FileNotFoundError
    if bed_file != None:
        print("\nbed file test passed****")
    if sam_file != None:
        print("sam file test passed****")
    return bed_file, ref_file, sam_file
def id_file_test():
    """Print a pass message when Data/Fast5_ids.txt exists; always returns None."""
    if "Fast5_ids.txt" in os.listdir("./Data/"):
        print("id test passed****")
    return
def get_sam_file(fastfile, ref_file):
    """Return a SAM alignment path, reusing Data/Alignment.sam when the user declines a rebuild.

    Prompts only if an existing alignment is found; otherwise runs the aligner directly.
    """
    has_existing = "Alignment.sam" in os.listdir("Data")
    if has_existing and input("Do you want to create a new sam file?(y/n)") != 'y':
        # keep the previously generated alignment
        return "Data/Alignment.sam"
    # no usable alignment (or user asked for a fresh one): align now
    return align.minimapAligner(fastfile, ref_file)
#create event info file for machine learning models
def event_check(fpath=None, filename=None, ref=None, NanopolishOnly=True):
    """Return the path of the nanopolish eventalign file, creating it if needed.

    Reuses Data/reads-ref.eventalign.txt when it already exists and is non-trivial
    (> 1000 bytes). Raises FileNotFoundError when no reference file is supplied.
    NanopolishOnly is accepted for interface compatibility (unused here).
    """
    # fast path: a non-trivial event file is already on disk
    if "reads-ref.eventalign.txt" in os.listdir("Data") and os.stat("Data/reads-ref.eventalign.txt").st_size > 1000:
        return "Data/reads-ref.eventalign.txt"
    # a reference genome is mandatory for event alignment
    if ref is None:
        print("reference file test failed")
        raise FileNotFoundError
    # an earlier run may have produced a valid event file
    if event_align_check() is not None:
        show_penguin()
        return "Data/reads-ref.eventalign.txt"
    print("Creating Event Align file****")
    # nanopolish accepts either a single file or a directory of reads
    read_source = filename if fpath is None else fpath
    event_file = events.nanopolish_events(read_source, "Data/basecall/", referenceFile=ref)
    print("event file ", event_file)
    show_penguin()
    return event_file
def show_penguin():
    """Print the project's ASCII-art penguin banner to stdout (purely cosmetic)."""
    penguin = """
   =============================================================
                            **-..L```|
                          \           |
       *                   \          |```|  |````  |\    |  |```|  |    |  ``|``  |\    |
      | |                   \         |___|  |___    | \  |  |___   |    |    |    | \  |
     /*\ |                   \        |      |       |  \ |  |      |    |    |    |  \ |
    |***\ |                   \       |      |____   |   \|  |___|  \|/  _|_  |    |   \|
    \****\   \                |
     \***/    \              /        |
      \*/      \            /
               /___/_____\
   =============================================================
    """
    print(penguin)
def sequence_check():
    """Placeholder for a future sequence validation step; currently a no-op."""
    pass
def event_align_check():
    """Return the eventalign path when a non-trivial (> 1000 byte) file exists in Data, else None."""
    target = "reads-ref.eventalign.txt"
    if target in os.listdir("Data") and os.stat("Data/reads-ref.eventalign.txt").st_size > 1000:
        print("Event Align Test Passed****")
        return "Data/reads-ref.eventalign.txt"
    print("Event Align Test Failed****")
    return None
def convert_fast5_type(directory):
    """Walk *directory* and convert every multi-read .fast5 file to single-read
    files (output subdirectory 'single'); single-read files are left untouched."""
    for root, _dirs, files in os.walk(directory):
        for fname in files:
            if not fname.endswith(".fast5"):
                continue
            path = os.path.join(root, fname)
            fobj = fast5_interface.get_fast5_file(path)
            if fast5_interface.check_file_type(fobj) == "multi-read":
                print("converting fast5 file****")
                multi_to_single_fast5.convert_multi_to_single(path, directory, "single")
| 2.34375 | 2 |
channels/italiaserie.py | sodicarus/channels | 0 | 16287 | <reponame>sodicarus/channels
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand-pureita.- XBMC Plugin
# Canale italiaserie
# http://www.mimediacenter.info/foro/viewtopic.php?f=36&t=7808
# ------------------------------------------------------------
import re
from core import httptools
from core import logger
from core import config
from core import servertools
from core import scrapertools
from core.item import Item
from core.tmdb import infoSod
__channel__ = "italiaserie"  # channel identifier attached to every generated Item
host = "https://italiaserie.org"  # base URL of the scraped site
headers = [['Referer', host]]  # sent with every page download
def isGeneric():
    """Tell the framework this channel uses the generic servertools resolution."""
    generic = True
    return generic
def mainlist(item):
    """Build the channel landing menu: latest series, updates, categories,
    animation, TV shows and a search entry."""
    logger.info("streamondemand-pureita -[italiaserie mainlist]")
    icon_base = "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/"
    # (action, title, url, icon file) for each menu entry
    entries = [
        ("peliculas", "[COLOR azure]Serie TV - [COLOR orange]Ultime Aggiunte[/COLOR]",
         "%s/category/serie-tv/" % host, "popcorn_serie_P.png"),
        ("peliculas", "[COLOR azure]Serie TV - [COLOR orange]Aggiornamenti[/COLOR]",
         "%s/ultimi-episodi/" % host, "tv_series_P.png"),
        ("categorie", "[COLOR azure]Serie TV - [COLOR orange]Categorie[/COLOR]",
         host, "genres_P.png"),
        ("peliculas", "[COLOR azure]Serie TV - [COLOR orange]Animazione[/COLOR]",
         "%s/category/serie-tv/animazione-e-bambini/" % host, "animation2_P.png"),
        ("peliculas", "[COLOR azure]Serie TV - [COLOR orange]TV Show[/COLOR]",
         "%s/category/serie-tv/tv-show/" % host, "new_tvshows_P.png"),
        ("search", "[COLOR orange]Search ...[/COLOR]",
         host, "search_P.png"),
    ]
    return [Item(channel=__channel__,
                 action=action,
                 title=title,
                 url=url,
                 thumbnail=icon_base + icon)
            for action, title, url, icon in entries]
# ==================================================================================================================================================
def search(item, texto):
    """Run a site search for *texto* and return the matching series entries.

    Any error is logged and an empty list returned so a failing channel does not
    interrupt the global cross-channel search.
    """
    logger.info("streamondemand-pureita - [italiaserie search]")
    item.url = host + "/?s=" + texto
    try:
        return peliculas(item)
    # Se captura la excepción, para no interrumpir al buscador global si un canal falla.
    # Use Exception instead of a bare except so SystemExit/KeyboardInterrupt still propagate.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
# ==================================================================================================================================================
def categorie(item):
    """Scrape the sidebar 'Categorie' list into genre menu entries, skipping the
    top-level sections already shown on the main menu."""
    logger.info("streamondemand-pureita -[italiaserie categorie]")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    blocco = scrapertools.get_match(data, r'<h3 class="title">Categorie</h3>(.*?)</ul>')
    patron = r'<li class=".*?"><a href="([^"]+)" >([^<]+)</a>'
    # these sections have dedicated main-menu entries, so drop them here
    skip_labels = ("Serie TV", "Tv Show", "Animazione e Bambini")
    for cat_url, cat_title in re.compile(patron, re.DOTALL).findall(blocco):
        if any(label in cat_title for label in skip_labels):
            continue
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title=cat_title,
                 url=cat_url,
                 thumbnail='https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/genre_P.png',
                 folder=True))
    return itemlist
# ==================================================================================================================================================
def peliculas(item):
    """Scrape a listing page into series entries, enriched with TMDB info, and
    append a pagination entry when a "next" link is present."""
    # fix: log tag said "serietvonline_co" (copy-paste from another channel);
    # this is the italiaserie channel
    logger.info("streamondemand-pureita -[italiaserie peliculas]")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = r'<a href="([^"]+)"\s*title="([^"]+)">\s*<img src="([^<]+)"\s*alt[^>]+>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        scrapedplot = ""
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodes",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 show=scrapedtitle,
                 folder=True), tipo="tv"))
    # pagination: follow the "next" link when present
    next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">Next »</a>')
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivi >>[/COLOR]",
                 url=next_page,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"))
    return itemlist
# ==================================================================================================================================================
def episodes(item):
    """List the stream links of a series page as individual episode entries."""
    logger.info("streamondemand-pureita -[italiaserie episodes]")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = '<a rel="nofollow"\s*target="_blank" act=".*?"\s*href="([^"]+)"\s*class="green-link">\s*<strong>([^<]+)</strong>'
    for ep_url, ep_title in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 title=ep_title,
                 fulltitle=item.fulltitle + " - " + ep_title,
                 show=item.show + " - " + ep_title,
                 url=ep_url,
                 plot="[COLOR orange]" + item.title + "[/COLOR]" + item.plot,
                 thumbnail=item.thumbnail,
                 folder=True))
    return itemlist
# ==================================================================================================================================================
def findvideos(item):
    """Resolve playable video links on an episode page via servertools detection
    and rebrand each result with the server name and this channel's metadata."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for video in itemlist:
        # strip dashes, brackets and whitespace from the detected server title
        server = re.sub(r'[-\[\]\s]+', '', video.title)
        video.title = '[COLOR azure][[COLOR orange]' + server.capitalize() + '[/COLOR]] - ' + item.title
        video.fulltitle = item.fulltitle
        video.show = item.show
        video.thumbnail = item.thumbnail
        video.plot = item.plot
        video.channel = __channel__
    return itemlist
| 2.046875 | 2 |
setup.py | jeffleary00/greenery | 0 | 16288 | from setuptools import setup
setup(
name='potnanny-api',
version='0.2.6',
packages=['potnanny_api'],
include_package_data=True,
description='Part of the Potnanny greenhouse controller application. Contains Flask REST API and basic web interface.',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/jeffleary00/potnanny-api',
install_requires=[
'requests',
'passlib',
'sqlalchemy',
'marshmallow',
'flask',
'flask-restful',
'flask-jwt-extended',
'flask-wtf',
'potnanny-core==0.2.9',
],
)
| 1.40625 | 1 |
cpp-linux/Release/envcpp.py | thu-media/Comyco | 40 | 16289 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.0
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
    raise RuntimeError('Python 2.7 or later required')
# Import the low-level C/C++ module
if __package__ or '.' in __name__:
    # imported as part of a package: load the extension with a relative import
    from . import _envcpp
else:
    # imported as a top-level module: the extension sits on sys.path
    import _envcpp
try:
    import builtins as __builtin__  # Python 3
except ImportError:
    import __builtin__  # Python 2 fallback
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """Set *name* on a SWIG proxy: 'thisown'/'this' are handled specially,
    known members go through the generated C-level setter, and unknown names
    are rejected when *static* is true."""
    if name == "thisown":
        return self.this.own(value)
    if name == "this":
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if not static:
        object.__setattr__(self, name, value)
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    """Dynamic variant of _swig_setattr_nondynamic: allows adding new attributes."""
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    """Read *name* from a SWIG proxy via the generated C-level getter table."""
    if name == "thisown":
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
    """Shared repr() helper for all SWIG proxy classes in this module."""
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
    """Wrap *set* so instances only accept writes to already-existing
    attributes/properties (no new instance attributes)."""
    def set_instance_attr(self, name, value):
        if name == "thisown":
            self.this.own(value)
        elif name == "this":
            set(self, name, value)
        elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
            set(self, name, value)
        else:
            raise AttributeError("You cannot add instance attributes to %s" % self)
    return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
    """Wrap *set* so classes only accept writes to already-declared,
    non-property names (no new class attributes)."""
    def set_class_attr(cls, name, value):
        if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
            set(cls, name, value)
        else:
            raise AttributeError("You cannot add class attributes to %s" % cls)
    return set_class_attr
def _swig_add_metaclass(metaclass):
    """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
    def wrapper(cls):
        # rebuild the class under the requested metaclass, preserving name/bases/dict
        return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
    return wrapper
class _SwigNonDynamicMeta(type):
    """Meta class to enforce nondynamic attributes (no new attributes) for a class"""
    # route class-level attribute writes through the strict setter factory above
    __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
class SwigPyIterator(object):
    """Abstract SWIG proxy for C++ iterators; instances are only created from C++, never directly."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _envcpp.delete_SwigPyIterator
    def value(self):
        return _envcpp.SwigPyIterator_value(self)
    def incr(self, n=1):
        return _envcpp.SwigPyIterator_incr(self, n)
    def decr(self, n=1):
        return _envcpp.SwigPyIterator_decr(self, n)
    def distance(self, x):
        return _envcpp.SwigPyIterator_distance(self, x)
    def equal(self, x):
        return _envcpp.SwigPyIterator_equal(self, x)
    def copy(self):
        return _envcpp.SwigPyIterator_copy(self)
    def next(self):
        return _envcpp.SwigPyIterator_next(self)
    def __next__(self):
        return _envcpp.SwigPyIterator___next__(self)
    def previous(self):
        return _envcpp.SwigPyIterator_previous(self)
    def advance(self, n):
        return _envcpp.SwigPyIterator_advance(self, n)
    def __eq__(self, x):
        return _envcpp.SwigPyIterator___eq__(self, x)
    def __ne__(self, x):
        return _envcpp.SwigPyIterator___ne__(self, x)
    def __iadd__(self, n):
        return _envcpp.SwigPyIterator___iadd__(self, n)
    def __isub__(self, n):
        return _envcpp.SwigPyIterator___isub__(self, n)
    def __add__(self, n):
        return _envcpp.SwigPyIterator___add__(self, n)
    def __sub__(self, *args):
        return _envcpp.SwigPyIterator___sub__(self, *args)
    def __iter__(self):
        return self
# Register SwigPyIterator in _envcpp:
_envcpp.SwigPyIterator_swigregister(SwigPyIterator)
class vectori(object):
    """SWIG proxy for a C++ std::vector<int>, exposing the Python sequence protocol."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def iterator(self):
        return _envcpp.vectori_iterator(self)
    def __iter__(self):
        return self.iterator()
    def __nonzero__(self):
        return _envcpp.vectori___nonzero__(self)
    def __bool__(self):
        return _envcpp.vectori___bool__(self)
    def __len__(self):
        return _envcpp.vectori___len__(self)
    def __getslice__(self, i, j):
        return _envcpp.vectori___getslice__(self, i, j)
    def __setslice__(self, *args):
        return _envcpp.vectori___setslice__(self, *args)
    def __delslice__(self, i, j):
        return _envcpp.vectori___delslice__(self, i, j)
    def __delitem__(self, *args):
        return _envcpp.vectori___delitem__(self, *args)
    def __getitem__(self, *args):
        return _envcpp.vectori___getitem__(self, *args)
    def __setitem__(self, *args):
        return _envcpp.vectori___setitem__(self, *args)
    def pop(self):
        return _envcpp.vectori_pop(self)
    def append(self, x):
        return _envcpp.vectori_append(self, x)
    def empty(self):
        return _envcpp.vectori_empty(self)
    def size(self):
        return _envcpp.vectori_size(self)
    def swap(self, v):
        return _envcpp.vectori_swap(self, v)
    def begin(self):
        return _envcpp.vectori_begin(self)
    def end(self):
        return _envcpp.vectori_end(self)
    def rbegin(self):
        return _envcpp.vectori_rbegin(self)
    def rend(self):
        return _envcpp.vectori_rend(self)
    def clear(self):
        return _envcpp.vectori_clear(self)
    def get_allocator(self):
        return _envcpp.vectori_get_allocator(self)
    def pop_back(self):
        return _envcpp.vectori_pop_back(self)
    def erase(self, *args):
        return _envcpp.vectori_erase(self, *args)
    def __init__(self, *args):
        _envcpp.vectori_swiginit(self, _envcpp.new_vectori(*args))
    def push_back(self, x):
        return _envcpp.vectori_push_back(self, x)
    def front(self):
        return _envcpp.vectori_front(self)
    def back(self):
        return _envcpp.vectori_back(self)
    def assign(self, n, x):
        return _envcpp.vectori_assign(self, n, x)
    def resize(self, *args):
        return _envcpp.vectori_resize(self, *args)
    def insert(self, *args):
        return _envcpp.vectori_insert(self, *args)
    def reserve(self, n):
        return _envcpp.vectori_reserve(self, n)
    def capacity(self):
        return _envcpp.vectori_capacity(self)
    __swig_destroy__ = _envcpp.delete_vectori
# Register vectori in _envcpp:
_envcpp.vectori_swigregister(vectori)
class vectord(object):
    """SWIG proxy for a C++ std::vector<double>, exposing the Python sequence protocol."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def iterator(self):
        return _envcpp.vectord_iterator(self)
    def __iter__(self):
        return self.iterator()
    def __nonzero__(self):
        return _envcpp.vectord___nonzero__(self)
    def __bool__(self):
        return _envcpp.vectord___bool__(self)
    def __len__(self):
        return _envcpp.vectord___len__(self)
    def __getslice__(self, i, j):
        return _envcpp.vectord___getslice__(self, i, j)
    def __setslice__(self, *args):
        return _envcpp.vectord___setslice__(self, *args)
    def __delslice__(self, i, j):
        return _envcpp.vectord___delslice__(self, i, j)
    def __delitem__(self, *args):
        return _envcpp.vectord___delitem__(self, *args)
    def __getitem__(self, *args):
        return _envcpp.vectord___getitem__(self, *args)
    def __setitem__(self, *args):
        return _envcpp.vectord___setitem__(self, *args)
    def pop(self):
        return _envcpp.vectord_pop(self)
    def append(self, x):
        return _envcpp.vectord_append(self, x)
    def empty(self):
        return _envcpp.vectord_empty(self)
    def size(self):
        return _envcpp.vectord_size(self)
    def swap(self, v):
        return _envcpp.vectord_swap(self, v)
    def begin(self):
        return _envcpp.vectord_begin(self)
    def end(self):
        return _envcpp.vectord_end(self)
    def rbegin(self):
        return _envcpp.vectord_rbegin(self)
    def rend(self):
        return _envcpp.vectord_rend(self)
    def clear(self):
        return _envcpp.vectord_clear(self)
    def get_allocator(self):
        return _envcpp.vectord_get_allocator(self)
    def pop_back(self):
        return _envcpp.vectord_pop_back(self)
    def erase(self, *args):
        return _envcpp.vectord_erase(self, *args)
    def __init__(self, *args):
        _envcpp.vectord_swiginit(self, _envcpp.new_vectord(*args))
    def push_back(self, x):
        return _envcpp.vectord_push_back(self, x)
    def front(self):
        return _envcpp.vectord_front(self)
    def back(self):
        return _envcpp.vectord_back(self)
    def assign(self, n, x):
        return _envcpp.vectord_assign(self, n, x)
    def resize(self, *args):
        return _envcpp.vectord_resize(self, *args)
    def insert(self, *args):
        return _envcpp.vectord_insert(self, *args)
    def reserve(self, n):
        return _envcpp.vectord_reserve(self, n)
    def capacity(self):
        return _envcpp.vectord_capacity(self)
    __swig_destroy__ = _envcpp.delete_vectord
# Register vectord in _envcpp:
_envcpp.vectord_swigregister(vectord)
class vectors(object):
    """SWIG proxy for a C++ std::vector<std::string>, exposing the Python sequence protocol."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def iterator(self):
        return _envcpp.vectors_iterator(self)
    def __iter__(self):
        return self.iterator()
    def __nonzero__(self):
        return _envcpp.vectors___nonzero__(self)
    def __bool__(self):
        return _envcpp.vectors___bool__(self)
    def __len__(self):
        return _envcpp.vectors___len__(self)
    def __getslice__(self, i, j):
        return _envcpp.vectors___getslice__(self, i, j)
    def __setslice__(self, *args):
        return _envcpp.vectors___setslice__(self, *args)
    def __delslice__(self, i, j):
        return _envcpp.vectors___delslice__(self, i, j)
    def __delitem__(self, *args):
        return _envcpp.vectors___delitem__(self, *args)
    def __getitem__(self, *args):
        return _envcpp.vectors___getitem__(self, *args)
    def __setitem__(self, *args):
        return _envcpp.vectors___setitem__(self, *args)
    def pop(self):
        return _envcpp.vectors_pop(self)
    def append(self, x):
        return _envcpp.vectors_append(self, x)
    def empty(self):
        return _envcpp.vectors_empty(self)
    def size(self):
        return _envcpp.vectors_size(self)
    def swap(self, v):
        return _envcpp.vectors_swap(self, v)
    def begin(self):
        return _envcpp.vectors_begin(self)
    def end(self):
        return _envcpp.vectors_end(self)
    def rbegin(self):
        return _envcpp.vectors_rbegin(self)
    def rend(self):
        return _envcpp.vectors_rend(self)
    def clear(self):
        return _envcpp.vectors_clear(self)
    def get_allocator(self):
        return _envcpp.vectors_get_allocator(self)
    def pop_back(self):
        return _envcpp.vectors_pop_back(self)
    def erase(self, *args):
        return _envcpp.vectors_erase(self, *args)
    def __init__(self, *args):
        _envcpp.vectors_swiginit(self, _envcpp.new_vectors(*args))
    def push_back(self, x):
        return _envcpp.vectors_push_back(self, x)
    def front(self):
        return _envcpp.vectors_front(self)
    def back(self):
        return _envcpp.vectors_back(self)
    def assign(self, n, x):
        return _envcpp.vectors_assign(self, n, x)
    def resize(self, *args):
        return _envcpp.vectors_resize(self, *args)
    def insert(self, *args):
        return _envcpp.vectors_insert(self, *args)
    def reserve(self, n):
        return _envcpp.vectors_reserve(self, n)
    def capacity(self):
        return _envcpp.vectors_capacity(self)
    __swig_destroy__ = _envcpp.delete_vectors
# Register vectors in _envcpp:
_envcpp.vectors_swigregister(vectors)
class Environment(object):
    """SWIG proxy for the C++ Environment class; member names indicate state for
    video chunk downloads and bandwidth ("mahimahi") trace playback — the exact
    semantics live in the C++ implementation (not visible here)."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, filedir):
        _envcpp.Environment_swiginit(self, _envcpp.new_Environment(filedir))
    __swig_destroy__ = _envcpp.delete_Environment
    def get_download_time(self, video_chunk_size):
        return _envcpp.Environment_get_download_time(self, video_chunk_size)
    def reset_download_time(self):
        return _envcpp.Environment_reset_download_time(self)
    def get_video_chunk(self, quality):
        return _envcpp.Environment_get_video_chunk(self, quality)
    def get_optimal(self, last_video_vmaf):
        return _envcpp.Environment_get_optimal(self, last_video_vmaf)
    # SWIG-exposed C++ member variables (getter/setter pairs generated by SWIG)
    optimal = property(_envcpp.Environment_optimal_get, _envcpp.Environment_optimal_set)
    delay0 = property(_envcpp.Environment_delay0_get, _envcpp.Environment_delay0_set)
    sleep_time0 = property(_envcpp.Environment_sleep_time0_get, _envcpp.Environment_sleep_time0_set)
    return_buffer_size0 = property(_envcpp.Environment_return_buffer_size0_get, _envcpp.Environment_return_buffer_size0_set)
    rebuf0 = property(_envcpp.Environment_rebuf0_get, _envcpp.Environment_rebuf0_set)
    video_chunk_size0 = property(_envcpp.Environment_video_chunk_size0_get, _envcpp.Environment_video_chunk_size0_set)
    end_of_video0 = property(_envcpp.Environment_end_of_video0_get, _envcpp.Environment_end_of_video0_set)
    video_chunk_remain0 = property(_envcpp.Environment_video_chunk_remain0_get, _envcpp.Environment_video_chunk_remain0_set)
    video_chunk_vmaf0 = property(_envcpp.Environment_video_chunk_vmaf0_get, _envcpp.Environment_video_chunk_vmaf0_set)
    all_cooked_bw = property(_envcpp.Environment_all_cooked_bw_get, _envcpp.Environment_all_cooked_bw_set)
    all_cooked_time = property(_envcpp.Environment_all_cooked_time_get, _envcpp.Environment_all_cooked_time_set)
    CHUNK_COMBO_OPTIONS = property(_envcpp.Environment_CHUNK_COMBO_OPTIONS_get, _envcpp.Environment_CHUNK_COMBO_OPTIONS_set)
    all_file_names = property(_envcpp.Environment_all_file_names_get, _envcpp.Environment_all_file_names_set)
    video_chunk_counter = property(_envcpp.Environment_video_chunk_counter_get, _envcpp.Environment_video_chunk_counter_set)
    buffer_size = property(_envcpp.Environment_buffer_size_get, _envcpp.Environment_buffer_size_set)
    trace_idx = property(_envcpp.Environment_trace_idx_get, _envcpp.Environment_trace_idx_set)
    cooked_time = property(_envcpp.Environment_cooked_time_get, _envcpp.Environment_cooked_time_set)
    cooked_bw = property(_envcpp.Environment_cooked_bw_get, _envcpp.Environment_cooked_bw_set)
    mahimahi_start_ptr = property(_envcpp.Environment_mahimahi_start_ptr_get, _envcpp.Environment_mahimahi_start_ptr_set)
    mahimahi_ptr = property(_envcpp.Environment_mahimahi_ptr_get, _envcpp.Environment_mahimahi_ptr_set)
    last_mahimahi_time = property(_envcpp.Environment_last_mahimahi_time_get, _envcpp.Environment_last_mahimahi_time_set)
    virtual_mahimahi_ptr = property(_envcpp.Environment_virtual_mahimahi_ptr_get, _envcpp.Environment_virtual_mahimahi_ptr_set)
    virtual_last_mahimahi_time = property(_envcpp.Environment_virtual_last_mahimahi_time_get, _envcpp.Environment_virtual_last_mahimahi_time_set)
# Register Environment in _envcpp:
_envcpp.Environment_swigregister(Environment)
| 1.851563 | 2 |
eda_rf.py | lel23/Student-Performance-Prediction | 1 | 16290 | <filename>eda_rf.py
"""
Final Project
EDA
"""
import pandas as pd
import matplotlib.pyplot as plt
from mlxtend.plotting import scatterplotmatrix
import numpy as np
import seaborn as sns
from imblearn.over_sampling import SMOTE
from sklearn.utils import resample
from mlxtend.plotting import heatmap
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.feature_selection import SelectFromModel
import sys
from sklearn.model_selection import train_test_split
from collections import Counter
df = pd.read_csv('student-mat-edited.csv')
# binary-encode the two-level categorical columns
df['school'] = df['school'].replace(['GP', 'MS'], [1, 0])
df['sex'] = df['sex'].replace(['M', 'F'], [1, 0])
df['address'] = df['address'].replace(['U', 'R'], [1, 0])
df['famsize'] = df['famsize'].replace(['GT3', 'LE3'], [1, 0])
df['Pstatus'] = df['Pstatus'].replace(['T', 'A'], [1, 0])
df = df.replace(to_replace={'yes': 1, 'no': 0})
# one-hot encode the multi-level categoricals
df = pd.get_dummies(df, prefix=['Mjob', 'Fjob', 'reason', 'guardian'])
#code from: https://stackoverflow.com/questions/46168450/replace-a-specific-range-of-values-in-a-pandas-dataframe
#convert the scores to integers representing the letter grade range specified in the paper. higher the number, the higher the grade
df['scores'] = df[['G1', 'G2', 'G3']].mean(axis=1)
df['scores'] = np.where(df['scores'].between(0, 10), 0, df['scores'])
df['scores'] = np.where(df['scores'].between(10, 12), 1, df['scores'])
df['scores'] = np.where(df['scores'].between(12, 14), 2, df['scores'])
df['scores'] = np.where(df['scores'].between(14, 16), 3, df['scores'])
df['scores'] = np.where(df['scores'].between(16, 21), 4, df['scores'])
# fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin int
# is its exact replacement, so behavior is unchanged
df['scores'] = df['scores'].astype(int)
# NOTE(review): index=1 drops the single row labelled 1 — looks accidental
# (the comment only mentions dropping the grade columns); confirm intent
df = df.drop(index=1, columns=['G1', 'G2', 'G3'])
#separate into features and target
X = df[[i for i in list(df.columns) if i != 'scores']]
y = df['scores']
# fixing class imbalance
#https://machinelearningmastery.com/multi-class-imbalanced-classification/
# SMOTE synthesizes minority-class samples so every grade band is represented equally
oversample = SMOTE(random_state=0)
X, y = oversample.fit_resample(X, y)
# splitting training and test data (stratified to keep class proportions)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0, stratify=y)
# min-max scaling (fit on the training split only, then applied to test)
mms = MinMaxScaler()
X_train_norm = mms.fit_transform(X_train)
X_test_norm = mms.transform(X_test)
# standardizing the data (fit on the training split only, then applied to test)
# NOTE(review): the scaled arrays (*_norm, *_std) are not used by the random
# forest section below, which trains on the raw X_train — confirm intent
stdsc = StandardScaler()
X_train_std = stdsc.fit_transform(X_train)
X_test_std = stdsc.transform(X_test)
# Random Forest Feature Selection: rank features by impurity-based importance
feat_labels = X.columns
forest = RandomForestClassifier(n_estimators=500, random_state=0)
forest.fit(X_train, y_train)
importances = forest.feature_importances_
# indices of features sorted by importance, highest first
indices = np.argsort(importances)[::-1]
for f in range(X_train.shape[1]):
    print("%2d) %-*s %f" % (f + 1, 30, feat_labels[indices[f]], importances[indices[f]]))
plt.title('Feature Importance')
plt.bar(range(X_train.shape[1]), importances[indices], align='center')
plt.xticks(range(X_train.shape[1]), feat_labels[indices], rotation=90)
plt.xlim([-1, X_train.shape[1]])
plt.tight_layout()
plt.savefig("rf_selection.png")
plt.show()
# keep only features whose importance exceeds the 0.04 threshold
sfm = SelectFromModel(forest, threshold=0.04, prefit=True)
X_selected = sfm.transform(X_train)
print('Number of features that meet this threshold', 'criterion:', X_selected.shape[1])
# # Now, let's print the features that met the threshold criterion for feature selection that we set earlier (note that this code snippet does not appear in the actual book but was added to this notebook later for illustrative purposes):
# the first X_selected.shape[1] entries of `indices` are the surviving features,
# since an importance threshold always selects a prefix of the sorted ranking
cols = []
for f in range(X_selected.shape[1]):
    cols.append(feat_labels[indices[f]])
    print("%2d) %-*s %f" % (f + 1, 30,
                            feat_labels[indices[f]],
                            importances[indices[f]]))
# Correlation heatmap of the selected features plus the target
cols.append("scores")
cm = np.corrcoef(df[cols].values.T)
hm = heatmap(cm, row_names=cols, column_names=cols, figsize=(10, 8))
plt.savefig("corr_matrix.png")
plt.show()
| 2.78125 | 3 |
feastruct/fea/utils.py | geosharma/feastruct | 37 | 16291 | <gh_stars>10-100
import numpy as np
def gauss_points(el_type, n):
    """Returns the Gaussian weights and locations for *n* point Gaussian integration of a
    finite element.

    :param string el_type: String describing the element type (currently only 'Tri6')
    :param int n: Number of Gauss points (1, 3 or 6 for 'Tri6')

    :returns: The integration weights *(n x 1)* and an *(n x i)* matrix consisting of the
        values of the *i* shape functions for *n* Gauss points
    :rtype: tuple(list[float], :class:`numpy.ndarray`)

    :raises ValueError: If *el_type* or *n* is unsupported. (Previously an unsupported
        *n* crashed with UnboundLocalError and an unsupported *el_type* silently
        returned None.)
    """

    if el_type == 'Tri6':
        # one point gaussian integration
        if n == 1:
            weights = [1]
            gps = np.array([[1.0 / 3, 1.0 / 3, 1.0 / 3]])
        # three point gaussian integration
        elif n == 3:
            weights = [1.0 / 3, 1.0 / 3, 1.0 / 3]
            gps = np.array([
                [2.0 / 3, 1.0 / 6, 1.0 / 6],
                [1.0 / 6, 2.0 / 3, 1.0 / 6],
                [1.0 / 6, 1.0 / 6, 2.0 / 3]
            ])
        # six point gaussian integration
        elif n == 6:
            g1 = 1.0 / 18 * (8 - np.sqrt(10) + np.sqrt(38 - 44 * np.sqrt(2.0 / 5)))
            g2 = 1.0 / 18 * (8 - np.sqrt(10) - np.sqrt(38 - 44 * np.sqrt(2.0 / 5)))
            w1 = (620 + np.sqrt(213125 - 53320 * np.sqrt(10))) / 3720
            w2 = (620 - np.sqrt(213125 - 53320 * np.sqrt(10))) / 3720

            weights = [w2, w2, w2, w1, w1, w1]
            gps = np.array([
                [1 - 2 * g2, g2, g2],
                [g2, 1 - 2 * g2, g2],
                [g2, g2, 1 - 2 * g2],
                [g1, g1, 1 - 2 * g1],
                [1 - 2 * g1, g1, g1],
                [g1, 1 - 2 * g1, g1]
            ])
        else:
            raise ValueError(
                "Unsupported number of Gauss points for 'Tri6': {0}".format(n))

        return (weights, gps)

    raise ValueError("Unsupported element type: {0}".format(el_type))
def shape_function(el_type, coords, gp):
    """Evaluate shape functions, their cartesian derivatives and the Jacobian
    determinant for a finite element at a single Gauss point.

    :param string el_type: String describing the element type
    :param coords: Global coordinates of the element nodes, one row per global
        axis and one column per node
    :type coords: :class:`numpy.ndarray`
    :param gp: Isoparametric location of the Gauss point
    :type gp: :class:`numpy.ndarray`

    :returns: Shape function values *N(i)* *(1 x n)*, their derivatives *B(i,j)*
        with respect to the global axes *(2 x n)* and the Jacobian determinant *j*
    :rtype: tuple(:class:`numpy.ndarray`, :class:`numpy.ndarray`, float)
    """

    if el_type == 'Tri6':
        # area (isoparametric) coordinates of the Gauss point
        (l1, l2, l3) = gp

        # quadratic shape functions: three corner nodes then three midside nodes
        N = np.array([
            l1 * (2 * l1 - 1),
            l2 * (2 * l2 - 1),
            l3 * (2 * l3 - 1),
            4 * l1 * l2,
            4 * l2 * l3,
            4 * l1 * l3
        ])

        # derivatives of the shape functions w.r.t. the area coordinates
        dN = np.array([
            [4 * l1 - 1, 0, 0, 4 * l2, 0, 4 * l3],
            [0, 4 * l2 - 1, 0, 4 * l1, 4 * l3, 0],
            [0, 0, 4 * l3 - 1, 0, 4 * l2, 4 * l1]
        ])

        # Jacobian: the first row enforces l1 + l2 + l3 = 1, the remaining rows
        # map area-coordinate derivatives onto the global axes
        J = np.vstack((np.array([[1, 1, 1]]), np.dot(coords, np.transpose(dN))))
        j = 0.5 * np.linalg.det(J)

        # convert the area-coordinate derivatives to cartesian derivatives
        P = np.dot(np.linalg.inv(J), np.array([[0, 0], [1, 0], [0, 1]]))
        B = np.transpose(np.dot(np.transpose(dN), P))

        return (N, B, j)
| 3.109375 | 3 |
webscrap.py | ircykk/webscrap | 0 | 16292 | <filename>webscrap.py
import requests
import time
import argparse
import sys
import os
from bs4 import BeautifulSoup
from urllib.parse import urlparse
def is_url(url):
    """Return True when *url* parses with both a scheme and a network location."""
    try:
        parts = urlparse(url)
    except ValueError:
        return False
    return all([parts.scheme, parts.netloc])
def fetch_urls(page):
    """Download *page* and collect every new absolute href into the global ``urls`` list."""
    soup = BeautifulSoup(requests.get(page).text, 'lxml')
    for anchor in soup.find_all('a', href=True):
        # http://example.com == http://example.com/
        href = anchor.get('href').rstrip('/')
        if is_url(href) and href not in urls:
            urls.append(href)
def print_progress (iteration, total):
    # Render a single-line, carriage-return-overwritten progress indicator:
    # "<iteration>/<total> [<most recently found url, truncated to 64 chars>...]".
    # NOTE(review): reads the module-level `urls` list (last element) instead
    # of taking the current URL as a parameter — confirm callers always
    # populate `urls` before calling this.
    print('\r%s/%s [%s...]' % (iteration, total, urls[-1][:64]), end = '\r')
# Instantiate the command-line parser
parser = argparse.ArgumentParser(description='URL scrapper')
parser.add_argument('--url', help='Root URL page')
parser.add_argument('--limit', type=int, default=1000, help='Limit urls to scrape')
parser.add_argument('--output', default='output.csv', help='Path to output file')
args = parser.parse_args()

# Shared state read/written by fetch_urls() and print_progress()
urls = []
urls_visited = []

if not is_url(args.url):
    print('Invalid root URL [--url]')
    sys.exit(1)

# Seed the crawl from the root page, then breadth-first crawl: `urls`
# grows while we iterate over it, so newly discovered pages are visited
# too, until the limit is reached.
fetch_urls(args.url)
urls_visited.append(args.url)
for url in urls:
    if len(urls) > args.limit:
        break
    print_progress(len(urls), args.limit)
    if url not in urls_visited:
        urls_visited.append(url)
        fetch_urls(url)

# Save output. Open in write mode so an existing file is truncated;
# the previous os.remove() raised FileNotFoundError on the first run
# when the output file did not exist yet.
with open(args.output, 'w') as output:
    for url in urls:
        output.write(url + '\n')
| 3.078125 | 3 |
lib/recipetool/shift_oelint_adv/rule_base/rule_var_src_uri_checksum.py | shift-left-test/meta-shift | 2 | 16293 | <filename>lib/recipetool/shift_oelint_adv/rule_base/rule_var_src_uri_checksum.py<gh_stars>1-10
from shift_oelint_parser.cls_item import Variable
from shift_oelint_adv.cls_rule import Rule
from shift_oelint_parser.helper_files import get_scr_components
from shift_oelint_parser.parser import INLINE_BLOCK
class VarSRCUriOptions(Rule):
    """Lint rule: every remote SRC_URI entry must declare both an md5sum
    and a sha256sum flag.

    For URLs carrying a ``name=<x>`` option, the checksums must be the
    named variants ``SRC_URI[<x>.md5sum]`` / ``SRC_URI[<x>.sha256sum]``;
    for a single unnamed URL the plain ``SRC_URI[md5sum]`` /
    ``SRC_URI[sha256sum]`` flags are expected.
    """
    def __init__(self):
        super(VarSRCUriOptions, self).__init__(id="oelint.vars.srcurichecksum",
                                               severity="error",
                                               message="<FOO>")

    def check(self, _file, stash):
        """Return the list of findings for SRC_URI checksum violations in _file."""
        res = []
        items = stash.GetItemsFor(filename=_file, classifier=Variable.CLASSIFIER,
                                  attribute=Variable.ATTR_VAR, attributeValue="SRC_URI")
        # Names for which an md5sum / sha256sum flag was seen ("" = the
        # unnamed, plain SRC_URI[md5sum]/[sha256sum] flags).
        md5sum = []
        sha256sum = []
        # (name, origin-file, line) tuples for each remote URL found.
        res_candidate = []
        for i in items:
            if i.Flag.endswith("md5sum"):
                if i.Flag == "md5sum":
                    md5sum.append("")
                else:
                    # "<name>.md5sum" -> record "<name>"
                    md5sum.append(i.Flag.rsplit(".", 1)[0])
            elif i.Flag.endswith("sha256sum"):
                if i.Flag == "sha256sum":
                    sha256sum.append("")
                else:
                    sha256sum.append(i.Flag.rsplit(".", 1)[0])
            else:
                # Not a checksum flag: scan the assignment's URLs.
                lines = [y.strip('"') for y in i.get_items() if y]
                for x in lines:
                    if x == INLINE_BLOCK:
                        continue
                    _url = get_scr_components(x)
                    # Only remote fetchers need checksums (not git/file/etc.).
                    if _url["scheme"] in ["http", "https", "ftp", "ftps", "sftp", "s3"]:
                        name = ""
                        if "name" in _url["options"]:
                            name = _url["options"]["name"]
                        res_candidate.append((name, i.Origin, i.InFileLine + lines.index(x)))
        # Sort by name so all unnamed ("") URLs come first; the flag below
        # relies on this ordering.
        res_candidate.sort(key=lambda tup: tup[0])
        no_name_src_uri = False
        for (name, filename, filelines) in res_candidate:
            message = ""
            if name == "":
                if no_name_src_uri:
                    # A second unnamed remote URL: plain checksums are
                    # ambiguous, each URL must carry its own.
                    message = "if SRC_URI have multiple URLs, each URL has checksum"
                else:
                    # First unnamed URL: plain checksum flags must exist.
                    if "" not in md5sum:
                        message = "SRC_URI[md5sum]"
                    if "" not in sha256sum:
                        if len(message) > 0:
                            message += ", "
                        message += "SRC_URI[sha256sum]"
                    if len(message) > 0:
                        message += " is(are) needed"
                    no_name_src_uri = True
            else:
                # Named URL: the name-prefixed checksum flags must exist.
                if name not in md5sum:
                    message = "SRC_URI[%s.md5sum]" % name
                if name not in sha256sum:
                    if len(message) > 0:
                        message += ", "
                    message += "SRC_URI[%s.sha256sum]" % name
                if len(message) > 0:
                    message += " is(are) needed"
            if len(message) > 0:
                res += self.finding(filename, filelines, message)
        return res
| 2.328125 | 2 |
verba/apps/auth/backends.py | nhsuk/verba | 0 | 16294 | <reponame>nhsuk/verba<filename>verba/apps/auth/backends.py
from github import User as GitHubUser
from github.auth import get_token
from github.exceptions import AuthValidationError
from . import get_user_model
class VerbaBackend(object):
    """
    Django authentication backend which authenticates against the GitHub API.
    """

    def authenticate(self, code=None):
        """
        Return a valid `VerbaUser` if exchanging the GitHub OAuth `code`
        for a token succeeds, or None if the code is invalid.
        """
        try:
            token = get_token(code)
        except AuthValidationError:
            # Invalid/expired code: signal authentication failure to Django.
            return None

        github_user = GitHubUser.get_logged_in(token)

        UserModel = get_user_model()  # noqa
        return UserModel(
            pk=github_user.username,
            token=token,
            user_data={
                'name': github_user.name,
                'email': github_user.email,
                'avatar_url': github_user.avatar_url
            }
        )

    def get_user(self, pk, token, user_data=None):
        """
        Rebuild a user instance from session data.

        `user_data` defaults to None instead of a mutable `{}` so a single
        dict is not shared (and potentially mutated) across calls — the
        classic Python mutable-default-argument pitfall.
        """
        if user_data is None:
            user_data = {}
        UserModel = get_user_model()  # noqa
        return UserModel(pk, token, user_data=user_data)
| 2.34375 | 2 |
examples/apds9960_color_simpletest.py | tannewt/Adafruit_CircuitPython_APDS9960 | 0 | 16295 | <reponame>tannewt/Adafruit_CircuitPython_APDS9960
# Example: continuously read and print color data from an APDS9960 sensor
# over I2C on a CircuitPython board.
import time
import board
import busio
import digitalio
from adafruit_apds9960.apds9960 import APDS9960
from adafruit_apds9960 import colorutility

# Set up the I2C bus and the sensor.
i2c = busio.I2C(board.SCL, board.SDA)
# Interrupt pin; declared but not used by this color-only example.
int_pin = digitalio.DigitalInOut(board.A2)
apds = APDS9960(i2c)

# Turn on the color (light-sensing) engine.
apds.enable_color = True

while True:
    # Wait (polling every 5 ms) until a color measurement is available.
    while not apds.color_data_ready:
        time.sleep(0.005)

    # Read the four raw channels and print them.
    r, g, b, c = apds.color_data
    print("red: ", r)
    print("green: ", g)
    print("blue: ", b)
    print("clear: ", c)
    # Derived readings: correlated color temperature (K) and illuminance (lux).
    print("color temp {}".format(colorutility.calculate_color_temperature(r, g, b)))
    print("light lux {}".format(colorutility.calculate_lux(r, g, b)))
    time.sleep(0.5)
| 2.921875 | 3 |
abc/abc205/abc205b.py | c-yan/atcoder | 1 | 16296 | N, *A = map(int, open(0).read().split())
A.sort()
for i in range(N):
if i == A[i] - 1:
continue
print('No')
break
else:
print('Yes')
| 3.046875 | 3 |
CPAC/cwas/tests/features/steps/base_cwas.py | Lawreros/C-PAC | 1 | 16297 | <filename>CPAC/cwas/tests/features/steps/base_cwas.py
from behave import *
from hamcrest import assert_that, is_not, greater_than
import numpy as np
import nibabel as nib
import rpy2.robjects as robjects
from rpy2.robjects.numpy2ri import numpy2ri
from rpy2.robjects.packages import importr
# Make rpy2 convert numpy arrays to R objects automatically.
robjects.conversion.py2ri = numpy2ri

from os import path as op
import sys

# Locate the test tree relative to this file: three levels up is the
# package's tests root, which contains an "R" directory of scripts.
curfile = op.abspath(__file__)
testpath = op.dirname(op.dirname(op.dirname(curfile)))
rpath = op.join(testpath, "R")
pypath = op.dirname(testpath)
# Put the package directory on sys.path so `cwas` and `utils` import below.
sys.path.append(pypath)

from cwas import *
from utils import *
def custom_corrcoef(X, Y=None):
    """Column-wise Pearson correlations between X and Y.

    Each of the columns in X will be correlated with each of the columns in
    Y. Each column represents a variable, with the rows containing the
    observations.

    :param X: observations-by-variables array *(n x p)*
    :param Y: observations-by-variables array *(n x q)*; defaults to X,
        giving the ordinary correlation matrix of X's columns
    :returns: *(p x q)* array of correlation coefficients
    :raises ValueError: if X and Y do not have the same number of rows
        (ValueError subclasses Exception, so existing ``except Exception``
        callers still catch it)
    """
    if Y is None:
        Y = X

    if X.shape[0] != Y.shape[0]:
        raise ValueError("X and Y must have the same number of rows.")

    # astype(float) copies, so the in-place centering below never mutates
    # the caller's arrays.
    X = X.astype(float)
    Y = Y.astype(float)

    # Center each column about its mean (broadcasting over rows handles
    # both; the original's explicit np.newaxis on X was redundant).
    X -= X.mean(axis=0)
    Y -= Y.mean(axis=0)

    # Pearson r = <x, y> / sqrt(<x, x> <y, y>) on centered columns,
    # vectorized over all column pairs at once.
    xx = np.sum(X**2, axis=0)
    yy = np.sum(Y**2, axis=0)
    r = np.dot(X.T, Y) / np.sqrt(np.multiply.outer(xx, yy))
    return r
| 2.265625 | 2 |
Analysis/CardioVascularLab/ExVivo/exvivo.py | sassystacks/TissueMechanicsLab | 0 | 16298 | <reponame>sassystacks/TissueMechanicsLab
import sys
sys.path.append('..')
from Analyzer.TransitionProperties import ProcessTransitionProperties
from tkinter import *
from tkinter import messagebox, ttk, filedialog
# from tkFileDialog import *
import uniaxanalysis.getproperties as getprops
from uniaxanalysis.plotdata import DataPlotter
from uniaxanalysis.saveproperties import write_props_csv
from exvivoframes import *
from matplotlib import pyplot as plt
import time
'''
The GUI for uniax data analysis of soft tissue.
inputs:
- Dimensions file - a file with format: sample name, width, thickness and initial distance
- directory - Folder with raw uniax data files in csv format with format: time, distance, force
To Do:
- polymorphic method for handling input data (variable names to get) <done>
- control when line for manual control shows up <done>
- test rdp for finding linear region - done (check implementation)
    - fix point picking on plot so that it can work in descending order of x value - <done>
- tick boxes for properties <done>
- config file
- scroll bar for large data sets <done>
Bugs:
- work out bug in the 2nd order gaussian - done
- work out bug in the display for automatic linear find
- destroy instance of toolbar on graph create
- destroy instance of plot everytime
'''
class StartPage:
    """Main tkinter window for the uniax soft-tissue analysis GUI.

    Lays out the frame grid, loads a dimensions CSV plus a directory of raw
    uniax data files, creates one button per specimen, plots each specimen's
    stress-strain curve, and writes the computed mechanical properties to a
    CSV as the operator approves each sample.
    """
    def __init__(self, master):
        """Build the full widget layout under `master` and initialize
        analysis configuration (stress/strain definitions, file formats,
        default input paths)."""
        # Stress/strain definitions used when computing properties.
        self.straintype = 'engineering' # can change to engineering, and lamda
        self.stresstype = 'cauchy' # can change between cauchy and piola
        self.master = master
        self.buttonsdict = {}  # sample name -> its Button widget
        self.fig = plt.figure(1)
        self.transitionProps = ProcessTransitionProperties(eps=0.025)
        self.plotter = DataPlotter()
        # Column layouts used for data extraction and the output CSV.
        self.specimenHeaders = ["Sample", "Zone", "Region", "Specimen", "Direction"]
        self.dimensionHeaders = ["Width","Thickness","Length"]
        self.headersOut = ["Sample", "Zone", "Region", "Specimen", "Direction", "PointID","Strength","Stiffness"]
        # Filename-token layout of the raw data files.
        # self.fileform = ["Sample", "_", "Zone", "Region","Specimen", "Direction"] #AAA data
        self.fileform = ["Sample", "_","Z", "Zone", "Region","Specimen", "_","Direction"] #NIH BAV data
        # Default input paths (overridable via the file-chooser buttons).
        self.fname = '/Volumes/Biomechanics_LabShare/Abdominal\ Aortic\ Aneurysms\ Ex-vivo\ testing/Mechanical\ Testing/Uniaxial/2016-Jun10/AAA_Dimensions_2016-Jun10.csv'
        self.dirname = '/Volumes/Biomechanics_LabShare/Abdominal\ Aortic\ Aneurysms\ Ex-vivo\ testing/Mechanical\ Testing/Uniaxial/2016-Jun10/FAIL'
        # Output CSV for the approved properties.
        self.fnameOut = 'TestOutputs.csv'
        '''
        #~~~~~~~~~~~~~~~~~~~~~~~~~ Main Layout ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        '''
        # 3x3 grid of frames: inputs / sample buttons / review controls on
        # the left, plot + toolbar in the middle, Frame6/7/8 panels right.
        border = 3
        self.frame1 = Frame(self.master, borderwidth=border, relief='raised')
        self.frame1.grid(row=0, column=0, sticky='news')
        self.frame2 = Frame(self.master, borderwidth=border, relief='raised')
        self.frame2.grid(row=1, column=0, sticky='news', ipady=20)
        self.frame3 = Frame(self.master, borderwidth=border, relief='raised')
        self.frame3.grid(row=2, column=0, sticky='ew', ipady=20)
        self.frame4 = Frame(self.master, borderwidth=border, relief='raised')
        self.frame4.grid(row=1, column=1, sticky='ew', ipady=20)
        self.frame5 = Frame(self.master, borderwidth=border, relief='raised')
        self.frame5.grid(row=0, column=1, sticky='nsew', ipady=20)
        self.t_frame6 = Frame(self.master, width=200,height=150, relief='raised')
        self.frame6 = Frame6.Frame_6(self.t_frame6)
        self.t_frame6.grid(row=0, column=2,sticky='news')
        self.t_frame7 = Frame(self.master, borderwidth=border, relief='raised')
        self.frame7 = Frame7.Frame_7(self.t_frame7,self.plotter)
        self.t_frame7.grid(row=1, column=2,sticky='ns', ipady=20)
        self.t_frame8 = Frame(self.master, borderwidth=border, relief='raised')
        self.frame8 = Frame8.Frame_8(self.t_frame8, self.transitionProps)
        self.t_frame8.grid(row=2, column=2,sticky='ns', ipady=20)
        '''
        ~~~~~~~~~~~~~~~~~~~~~~ Frame 1 Widgets ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        '''
        # Input selection: dimensions file, data directory, and "go".
        label = Label(self.frame1, text="Start Page")
        label.grid(row=0, column=0)
        button1 = Button(self.frame1, text="Dimensions File",
                         command=self.chooseDims)
        button1.grid(row=1, column=0)
        button2 = Button(self.frame1, text="Top Directory",
                         command=self.chooseDir)
        button2.grid(row=2, column=0)
        button3 = Button(self.frame1, text="Run SetupData",
                         command=self.setupData)
        button3.grid(row=3, column=0)
        '''
        ~~~~~~~~~~~~~~~~~~~~~~ Frame 2 Widgets ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        '''
        # Scrollable canvas that will hold one button per specimen
        # (populated later by addButtons for large data sets).
        self.buttonCanvas = Canvas(self.frame2)
        self.xButtonScroller = Scrollbar(self.frame2,orient="horizontal",
                                         command=self.buttonCanvas.xview)
        self.yButtonScroller = Scrollbar(self.frame2,
                                         command=self.buttonCanvas.yview)
        self.buttonFrame = Frame(self.buttonCanvas)
        self.buttonCanvas.create_window((4,10), window=self.buttonFrame, anchor="nw",
                                        tags="self.frame")
        self.buttonFrame.bind("<Configure>", self.onFrameConfigure)
        self.buttonCanvas.config(yscrollcommand=self.yButtonScroller.set)
        self.buttonCanvas.config(xscrollcommand=self.xButtonScroller.set)
        self.buttonCanvas.grid(row=0,column=0,sticky='nwse')
        self.yButtonScroller.grid(row=0,column=1,sticky='ns')
        self.xButtonScroller.grid(row=1,column=0,sticky='ew')
        '''
        ~~~~~~~~~~~~~~~~~~~~~~ Frame 3 Widgets ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        '''
        # Review controls: approve ("Good") or manually override the
        # ultimate stress / linear stiffness picks.
        button4 = Button(self.frame3, text="Good", bg='green',
                         command=self.write_analysis)
        button4.grid(row=0, column=0, sticky='w')
        changeLabel = Label(self.frame3, text="Properties to Change")
        changeLabel.grid(row=0, column=1)
        button5 = Button(self.frame3, text="Ultimate Stress",
                         command=self.get_uts)
        button5.grid(row=1, column=1)
        button5 = Button(self.frame3, text="Linear Stiffness",
                         command=self.get_linear)
        button5.grid(row=2, column=1)
        '''
        ~~~~~~~~~~~~~~~~~~~~~~ Frame 4 Widgets ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        '''
        # Initial (empty) plot canvas; frame5 receives the plot toolbar.
        canvas = self.plotter.plot_graph(self.frame4, self.frame5, Row=0, Col=0)
        '''
        ~~~~~~~~~~~~~~~ key Bindings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        '''
        self.master.bind('<Escape>', lambda e: self.master.destroy())
        # NOTE(review): this CALLS _UpdateEpsilonCallback() immediately and
        # binds its return value — confirm it is a callback factory; if it
        # is the handler itself, the parentheses should be removed.
        self.master.bind('<Return>', self.frame8._UpdateEpsilonCallback())
    '''
    ~~~~~~~~~~~~~~~~~~~~~~~~~~ Frame 1 functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~
    '''
    def chooseDims(self):
        # Let the user pick the dimensions CSV.
        self.fname = filedialog.askopenfilename()

    def chooseDir(self):
        # Let the user pick the directory of raw uniax data files.
        self.dirname = filedialog.askdirectory()

    def setupData(self):
        """Parse the dimensions file against the data directory and build
        the per-specimen sample list plus its button panel."""
        # Require both inputs before running the parser.
        if self.fname and self.dirname:
            import uniaxanalysis.parsecsv
            # Dictionary to pass to parsecsv for obtaining data on specimen
            args_dict = {
                'dimsfile': self.fname,
                'topdir': self.dirname,
                'timestep': 0.05,
                'headersOut': self.headersOut,
                'specimenHeaders': self.specimenHeaders,
                'dimsHeaders': self.dimensionHeaders,
                'fileform': self.fileform,
            }
            # instantiate parsecsv class to get the data to plot and analyze
            self.csvDataParser = uniaxanalysis.parsecsv(**args_dict)
            # Create the list of specimens to be tested from Dimensions file
            self.sampleList = self.csvDataParser.getMatchingData(
                                            self.csvDataParser.dimsFile,
                                            self.csvDataParser.topDir)
            self.addButtons()
        else:
            print("please get a directory and a dimensions file for the analysis")
    '''
    ~~~~~~~~~~~~~~~~~~~~~~~~~~ Frame 2 functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~
    '''
    def addButtons(self):
        """Place one button per sample in the scrollable panel, laid out in
        three columns; clicking a button plots that sample."""
        import math
        # create button names from each sample in the list
        buttonnames = [name[0] for name in self.sampleList]
        # Make 3 columns of buttons
        row = math.ceil(len(buttonnames)/3.0)
        col = 3
        # Grid coordinates (row, col) for every slot, trimmed to the
        # number of buttons actually needed.
        padlist = [(i, j) for i in range(int(row)) for j in range(col)]
        diff = len(padlist) - len(buttonnames)
        if diff > 0:
            padlist = padlist[:-diff]
        # Pair every sample name with its grid slot.
        fullList = zip(buttonnames, padlist)
        for name, indx in fullList:
            self.buttonsdict[name] = Button(self.buttonFrame, text=name)
            # Bind the loop variable as a default arg so each button keeps
            # its own sample name (avoids late-binding closures).
            self.buttonsdict[name]['command'] = lambda sample = name: self.getGraph(sample)
            self.buttonsdict[name].grid(row=indx[0], column=indx[1])

    def onFrameConfigure(self, event):
        '''Reset the scroll region to encompass the inner frame'''
        self.buttonCanvas.configure(scrollregion=self.buttonCanvas.bbox("all"))
    '''
    ~~~~~~~~~~~~~~~~~~~~~~~~~~ Frame 3 functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~
    '''
    def get_uts(self):
        """Manually pick the ultimate stress point from the plot and mark it."""
        # get the ultimate stress and strain at ultimate stress on the graph
        utstr, uts = self.props.manual_max(self.props.strain, self.props.stress,
                                           self.plotter.xs, self.plotter.ys)
        self.plotter.set_max_point(utstr, uts)

    def get_linear(self):
        """Manually pick the linear region from the plot and mark it."""
        modulusElasticity, regionData = self.props.manual_linear(self.props.strain, self.props.stress,
                                                                 self.plotter.xs, self.plotter.ys)
        self.plotter.set_linear_region(regionData[0], regionData[1])

    def write_analysis(self):
        """Approve the current sample: record its properties into the output
        dict, append them to the CSV, and remove the sample's button."""
        # Add stiffness to the output, or "NaN" when it was not computed.
        if self.props.stiffness:
            self.csvDataParser.outputDict[self.props.sample]['Stiffness'] \
                = self.props.stiffness
        else:
            self.csvDataParser.outputDict[self.props.sample]['Stiffness'] \
                = "NaN"
        # Add strength to the output, or "NaN" when it was not computed.
        if self.props.strength:
            self.csvDataParser.outputDict[self.props.sample]['Strength'] \
                = self.props.strength
        else:
            self.csvDataParser.outputDict[self.props.sample]['Strength'] \
                = "NaN"
        # Add all of the transition props to the output, extending the
        # header list with any property name not seen before.
        transitionProps = self.transitionProps._outputAllValues()
        for prop, val in transitionProps.items():
            self.csvDataParser.outputDict[self.props.sample][prop] = val
            if prop not in self.headersOut:
                self.headersOut.append(prop)
        # Write the properties to the csv file specified
        write_props_csv(self.fnameOut, self.csvDataParser.outputDict,
                        self.props.sample, self.headersOut)
        # Remove this sample's button; the sample is done.
        self.buttonsdict[self.props.sample].destroy()
        del self.props
        # NOTE(review): frame5 is destroyed and recreated to discard the old
        # matplotlib toolbar; acknowledged in the original as a workaround.
        self.frame5.destroy()
        # Remake the frame to add another toolbar to
        self.frame5 = Frame(self.master, borderwidth=5, relief='raised')
        self.frame5.grid(row=0, column=1, sticky='nsew', ipady=20)
    '''
    ~~~~~~~~~~~~~~~~~~~~~~~~~~ Frame 4 functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~
    '''
    def getTransitionProperties(self):
        '''
        Compute the transition properties from the current sample's
        stress-strain data (up to the failure index) and hand the resulting
        property dict to the plotter.
        '''
        import numpy as np
        # (strain, stress) pairs up to the failure index, raw and normalized.
        stress_strain = np.stack((self.props.strain[:self.props.failIndx],
                                  self.props.stress[:self.props.failIndx]),
                                 axis=-1)
        stress_strain_norm = np.stack((self.props.strain_norm[:self.props.failIndx],
                                       self.props.stress_norm[:self.props.failIndx]),
                                      axis=-1)
        self.transitionProps._setStressStrain(stress_strain,stress_strain_norm)
        self.transitionProps._runTransitionProps()
        propDict = self.transitionProps._outputAllValues()
        # Augment with endpoints and the RDP simplification for plotting.
        propDict['MaxStrain_'] = self.props.strain[self.props.failIndx]
        propDict['StartStrain'] = self.props.strain[0]
        propDict['StartStress'] = self.props.stress[0]
        # Last RDP segment approximates the high-stiffness region.
        propDict['HighStiffness'] = self.transitionProps.rdp[-2:, :]
        print(propDict['HighStiffness'])
        propDict['RDP'] = self.transitionProps.rdp
        self.plotter.set_props(propDict)

    def getGraph(self, samplename):
        """Load, analyze, and plot the sample whose button was clicked."""
        self.fig.clear()
        # Iterate through sample list to find matching sample
        for sample in self.sampleList:
            if samplename == sample[0]:
                # Get all of the properties for this sample
                self.props = getprops(fileDimslist=sample, smooth_width=29,
                                      std=7, chkderivate=0.04,
                                      stresstype=self.stresstype,
                                      straintype=self.straintype)
                self.getTransitionProperties()
                # Hand the freshly computed properties to the plotter.
                self.plotter.setClass(self.props)
                self.plotter.setSample(sample[0])
                self.frame7._SetCheckState()
                break
            else:
                # NOTE(review): this prints once per non-matching entry, not
                # only when no entry matches — consider a for/else instead.
                print("Couldn't find the file")
        canvas = self.plotter.plot_graph(self.frame4, self.frame5, Row=0, Col=0)
def main():
    """Launch the Ex Vivo analysis GUI in fullscreen mode."""
    tk_root = Tk()
    app = StartPage(tk_root)  # keep a reference so widget callbacks stay alive
    tk_root.attributes('-fullscreen', True)
    tk_root.mainloop()
if __name__ == '__main__':
    # Launch the GUI only when run as a script, not on import.
    main()
| 2.171875 | 2 |
cats/cats.py | BrandtH22/CAT-admin-tool | 1 | 16299 | import click
import aiohttp
import asyncio
import re
import json
from typing import Optional, Tuple, Iterable, Union, List
from blspy import G2Element, AugSchemeMPL
from chia.cmds.wallet_funcs import get_wallet
from chia.rpc.wallet_rpc_client import WalletRpcClient
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.config import load_config
from chia.util.ints import uint16
from chia.util.byte_types import hexstr_to_bytes
from chia.types.blockchain_format.program import Program
from clvm_tools.clvmc import compile_clvm_text
from clvm_tools.binutils import assemble
from chia.types.spend_bundle import SpendBundle
from chia.wallet.cc_wallet.cc_utils import (
construct_cc_puzzle,
CC_MOD,
SpendableCC,
unsigned_spend_bundle_for_spendable_ccs,
)
from chia.util.bech32m import decode_puzzle_hash
# Loading the client requires the standard chia root directory configuration that all of the chia commands rely on
# Loading the client requires the standard chia root directory configuration that all of the chia commands rely on
async def get_client() -> Optional[WalletRpcClient]:
    """Create a WalletRpcClient from the standard chia configuration.

    Returns the connected client, or None when the configuration cannot be
    loaded or the wallet RPC service is unreachable (an error is printed).
    """
    # Defined up front so the connection-error message below cannot hit a
    # NameError when load_config itself raises before the port is read.
    full_node_rpc_port = None
    try:
        config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
        self_hostname = config["self_hostname"]
        full_node_rpc_port = config["wallet"]["rpc_port"]
        full_node_client = await WalletRpcClient.create(
            self_hostname, uint16(full_node_rpc_port), DEFAULT_ROOT_PATH, config
        )
        return full_node_client
    except Exception as e:
        if isinstance(e, aiohttp.ClientConnectorError):
            print(
                f"Connection error. Check if full node is running at {full_node_rpc_port}"
            )
        else:
            print(f"Exception from 'harvester' {e}")
        return None
async def get_signed_tx(fingerprint, ph, amt, fee):
    """Ask the user's wallet to create a signed transaction paying `amt`
    mojos to puzzle hash `ph` with the given XCH `fee`.

    :raises RuntimeError: if the wallet RPC service is unreachable (the
        original code crashed with an AttributeError when its ``finally``
        block called ``.close()`` on the None returned by get_client()).
    """
    wallet_client: Optional[WalletRpcClient] = await get_client()
    if wallet_client is None:
        # get_client() has already printed the underlying error.
        raise RuntimeError("Unable to connect to the wallet RPC service")
    try:
        wallet_client_f, _ = await get_wallet(wallet_client, fingerprint)
        return await wallet_client.create_signed_transaction(
            [{"puzzle_hash": ph, "amount": amt}], fee=fee
        )
    finally:
        # Always release the RPC connection, even if signing fails.
        wallet_client.close()
        await wallet_client.await_closed()
# The clvm loaders in this library automatically search for includable files in the directory './include'
# The clvm loaders in this library automatically search for includable files in the directory './include'
def append_include(search_paths: Iterable[str]) -> List[str]:
    """Return `search_paths` as a new list with './include' appended last."""
    paths: List[str] = list(search_paths) if search_paths else []
    paths.append("./include")
    return paths
def parse_program(program: Union[str, Program], include: Iterable = ()) -> Program:
    """Coerce `program` into a Program.

    Accepts, in detection order:
      - an already-built Program (returned unchanged)
      - raw CLVM source text (contains "(")
      - a hex byte string (contains neither "(" nor ".")
      - a path to a file holding Chialisp source, CLVM source, or
        serialized CLVM hex

    :param program: the program in any of the forms above
    :param include: extra include search paths for the Chialisp compiler.
        The default is an immutable tuple rather than the original mutable
        ``[]`` (classic shared-mutable-default pitfall); both are falsy, so
        behavior is unchanged.
    :returns: the parsed/compiled Program
    """
    if isinstance(program, Program):
        return program
    if "(" in program:  # raw CLVM source
        return Program.to(assemble(program))
    if "." not in program:  # hex byte string
        return Program.from_bytes(hexstr_to_bytes(program))
    # Otherwise treat it as a file path.
    with open(program, "r") as file:
        filestring: str = file.read()
    if "(" in filestring:  # not yet compiled
        # TODO: This should probably be more robust
        if re.compile(r"\(mod\s").search(filestring):  # Chialisp source
            return Program.to(compile_clvm_text(filestring, append_include(include)))
        return Program.to(assemble(filestring))  # CLVM source
    return Program.from_bytes(hexstr_to_bytes(filestring))  # serialized CLVM
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])


# context_settings wires up CONTEXT_SETTINGS so that "-h" works as a help
# alias; previously the dict was defined but never passed to click.
@click.command(context_settings=CONTEXT_SETTINGS)
@click.pass_context
@click.option(
    "-l",
    "--tail",
    required=True,
    help="The TAIL program to launch this CAT with",
)
@click.option(
    "-c",
    "--curry",
    multiple=True,
    help="An argument to curry into the TAIL",
)
@click.option(
    "-s",
    "--solution",
    required=True,
    default="()",
    show_default=True,
    help="The solution to the TAIL program",
)
@click.option(
    "-t",
    "--send-to",
    required=True,
    help="The address these CATs will appear at once they are issued",
)
@click.option(
    "-a",
    "--amount",
    required=True,
    type=int,
    help="The amount to issue in mojos (regular XCH will be used to fund this)",
)
@click.option(
    "-m",
    "--fee",
    required=True,
    default=0,
    show_default=True,
    help="The XCH fee to use for this issuance",
)
@click.option(
    "-f",
    "--fingerprint",
    type=int,
    help="The wallet fingerprint to use as funds",
)
@click.option(
    "-sig",
    "--signature",
    multiple=True,
    help="A signature to aggregate with the transaction",
)
@click.option(
    "-as",
    "--spend",
    multiple=True,
    help="An additional spend to aggregate with the transaction",
)
@click.option(
    "-b",
    "--as-bytes",
    is_flag=True,
    help="Output the spend bundle as a sequence of bytes instead of JSON",
)
@click.option(
    "-sc",
    "--select-coin",
    is_flag=True,
    help="Stop the process once a coin from the wallet has been selected and return the coin",
)
def cli(
    ctx: click.Context,
    tail: str,
    curry: Tuple[str],
    solution: str,
    send_to: str,
    amount: int,
    fee: int,
    fingerprint: int,
    signature: Tuple[str],
    spend: Tuple[str],
    as_bytes: bool,
    select_coin: bool,
):
    """Issue a CAT: build the TAIL + eve spend, fund it from the wallet,
    aggregate any extra signatures/spends, and print the asset ID and
    final spend bundle (or just the selected funding coin)."""
    ctx.ensure_object(dict)

    # Parse program inputs and the destination address.
    tail = parse_program(tail)
    curried_args = [assemble(arg) for arg in curry]
    solution = parse_program(solution)
    address = decode_puzzle_hash(send_to)

    # Fold any user-supplied signatures into one aggregate.
    aggregated_signature = G2Element()
    for sig in signature:
        aggregated_signature = AugSchemeMPL.aggregate(
            [aggregated_signature, G2Element.from_bytes(hexstr_to_bytes(sig))]
        )

    # Fold any user-supplied extra spends into one bundle.
    aggregated_spend = SpendBundle([], G2Element())
    for bundle in spend:
        aggregated_spend = SpendBundle.aggregate(
            [aggregated_spend, SpendBundle.from_bytes(hexstr_to_bytes(bundle))]
        )

    # Construct the TAIL
    if len(curried_args) > 0:
        curried_tail = tail.curry(*curried_args)
    else:
        curried_tail = tail

    # Construct the intermediate puzzle: reveal the TAIL (magic -113
    # condition) and pay the full amount to the destination address.
    p2_puzzle = Program.to(
        (1, [[51, 0, -113, curried_tail, solution], [51, address, amount, [address]]])
    )

    # Wrap the intermediate puzzle in a CAT wrapper
    cat_puzzle = construct_cc_puzzle(CC_MOD, curried_tail.get_tree_hash(), p2_puzzle)
    cat_ph = cat_puzzle.get_tree_hash()

    # Get a signed transaction from the wallet
    signed_tx = asyncio.get_event_loop().run_until_complete(
        get_signed_tx(fingerprint, cat_ph, amount, fee)
    )
    eve_coin = list(
        filter(lambda c: c.puzzle_hash == cat_ph, signed_tx.spend_bundle.additions())
    )[0]

    # This is where we exit if we're only looking for the selected coin
    if select_coin:
        primary_coin = list(
            filter(lambda c: c.name() == eve_coin.parent_coin_info, signed_tx.spend_bundle.removals())
        )[0]
        print(json.dumps(primary_coin.to_json_dict(), sort_keys=True, indent=4))
        print(f"Name: {primary_coin.name()}")
        return

    # Create the CAT spend
    spendable_eve = SpendableCC(
        eve_coin,
        curried_tail.get_tree_hash(),
        p2_puzzle,
        Program.to([]),
        limitations_solution=solution,
        limitations_program_reveal=curried_tail,
    )
    eve_spend = unsigned_spend_bundle_for_spendable_ccs(CC_MOD, [spendable_eve])

    # Aggregate everything together
    final_bundle = SpendBundle.aggregate(
        [
            signed_tx.spend_bundle,
            eve_spend,
            aggregated_spend,
            SpendBundle([], aggregated_signature),
        ]
    )
    if as_bytes:
        final_bundle = bytes(final_bundle).hex()
    else:
        final_bundle = json.dumps(final_bundle.to_json_dict(), sort_keys=True, indent=4)
    print(f"Asset ID: {curried_tail.get_tree_hash()}")
    print(f"Spend Bundle: {final_bundle}")
print(f"Spend Bundle: {final_bundle}")
def main():
    """Console-script entry point: dispatch to the click CLI."""
    cli()


if __name__ == "__main__":
    # Run the CLI only when executed as a script, not on import.
    main()
| 1.796875 | 2 |