# text stringlengths 38 1.54M  (dataset-export residue; commented out so the file parses)
# ---
import numpy as np
from numpy import linalg as LA
import pandas as pd
# mean
def mean(x):
    """Column-wise mean of x, rounded to 3 decimal places."""
    return np.round(np.mean(x, axis=0), 3)
# std
def std(x):
    """Column-wise (population, ddof=0) standard deviation of x, rounded to 3 decimals."""
    return np.round(np.std(x, axis=0), 3)
# covariance matrix
def cov_matrix(x):
    """Return the unbiased sample covariance matrix of x, rounded to 3 decimals.

    x : (N, d) array with one observation per row.
    Returns a (d, d) array: centered(x).T @ centered(x) / (N - 1).
    """
    # Unbiased estimator: divide by N - 1.
    fact = x.shape[0] - 1
    # Center the data once.  The original subtracted std(x) in the
    # right-hand factor, which is not the covariance definition.
    centered = x - x.mean(axis=0)
    return np.round(np.dot(centered.T, centered) / fact, 3)
# multivariate normal distribution
def multi_distribution(X, cov, mean):
    """Evaluate the multivariate normal pdf at each row of X.

    X    : (n, d) array of sample points.
    cov  : (d, d) non-singular covariance matrix.
    mean : (d,) mean vector.
    Returns an (n,) array of density values.
    """
    d = cov.shape[1]
    const = (2 * np.pi) ** (d / 2)
    # The pdf normalization uses the square root of the determinant |cov|;
    # the original used LA.norm (Frobenius norm), which is wrong.
    det_sqrt = LA.det(cov) ** 0.5
    cov_inv = LA.inv(cov)  # hoisted out of the per-sample loop
    exp = np.array([np.exp(-0.5 * np.dot(np.dot(x - mean, cov_inv), (x - mean).T)) for x in X])
    return exp / (const * det_sqrt)
# cross-validation fold boundaries
def cross_validations_split(shape, folds):
    """Return [start, end) index pairs partitioning range(shape) into `folds` folds.

    The last fold absorbs any remainder so that the pairs cover all
    `shape` rows exactly once.
    """
    # Each fold holds shape // folds rows.  The original computed
    # int(shape * folds / 100), which is only correct when folds == 10.
    fold_size = shape // folds
    index = []
    start = 0
    for i in range(1, folds + 1):
        end = i * fold_size if i < folds else shape
        index.append([start, end])
        start = end
    return index
# prior probability of class Wi
def prob_of_p(n, N):
    """Return the prior P(Wi): class count n divided by total population N."""
    return n / N
# two-class Bayes decision rule
def bayes_rules(f1, f2, p1, p2):
    """Assign each sample to class 1 or 2 via the likelihood-ratio test.

    A sample gets label 1.0 when f1/f2 > p2/p1, otherwise 2.0.
    Returns a flat float64 array of labels.
    """
    ratio = f1 / f2
    cutoff = p2 / p1
    labels = np.where(ratio > cutoff, np.float64(1), np.float64(2))
    return labels.reshape(-1)
# confusion matrix
def confusion_matrix(y_pred, y_true, err=False):
    """Build a 2x2 confusion matrix for labels {1, 2}.

    Class 1 is treated as the "negative" class and class 2 as "positive".
    Layout: [[TN, FN], [FP, TP]].  With err=True, also return the
    percentage of misclassified samples.  Returns None when the two
    label arrays have different shapes.
    """
    if y_true.shape != y_pred.shape:
        return None
    matrix = np.zeros((2, 2), dtype=int)
    for pred, true in zip(y_pred, y_true):
        if pred == true and true == 1:
            matrix[0, 0] += 1  # TN: class 1 correctly predicted
        elif pred != true and true == 2:
            matrix[1, 0] += 1  # FP: class 2 predicted as class 1
        elif pred != true and true == 1:
            matrix[0, 1] += 1  # FN: class 1 predicted as class 2
        else:
            matrix[1, 1] += 1  # TP: everything else (incl. class 2 correct)
    if err:
        error_pct = 100 - (matrix[0, 0] + matrix[1, 1]) * 100 / y_true.shape[0]
        return matrix, error_pct
    return matrix
def preprocess_data(data, i, j):
    """Split data into a test slice [i:j] and a training population, then
    compute the per-class priors, covariance matrices and means.

    data : array whose last column holds the class label (1 or 2).
    Returns a dict with the population, sample features/labels and the
    per-class statistics consumed by the Bayes classifier.
    """
    population = np.concatenate((data[:i], data[j:]))
    samples = data[i:j]
    # boolean masks over the label column, reused below
    mask1 = population[:, -1] == 1
    mask2 = population[:, -1] == 2
    x_class1 = population[mask1][:, :-1]  # class-1 feature rows
    x_class2 = population[mask2][:, :-1]  # class-2 feature rows
    return {
        'population': population,
        'x_sample': samples[:, :-1],
        'x_class1': x_class1,
        'x_class2': x_class2,
        'p1': prob_of_p(x_class1.shape[0], population.shape[0]),
        'p2': prob_of_p(x_class2.shape[0], population.shape[0]),
        'y_sample': samples[:, -1],
        'cov1': cov_matrix(x_class1),
        'cov2': cov_matrix(x_class2),
        'mean1': mean(x_class1),
        'mean2': mean(x_class2),
    }
# main: 10-fold cross-validation of the two Bayes classifiers
if __name__ == "__main__":
    # Load the dataset, dropping the CSV header row.
    # Assumes the last column holds the class label — TODO confirm against TWOCLASS.csv.
    data = np.genfromtxt('TWOCLASS.csv',delimiter=',')[1:,:]
    np.random.shuffle(data) # shuffle data in place before splitting into folds
    k = 1  # fold counter, used only in the printed header
    for i,j in cross_validations_split(data.shape[0],10):
        # * --------------- preprocess data ---------------
        x1 = preprocess_data(data,i,j) # test 1: all feature columns
        x2 = preprocess_data(data[:,[0,1,-1]],i,j) # test 2: only the first two features
        # calculate multivariate normal densities for test 1
        fx1_1 = multi_distribution(x1['x_sample'],x1['cov1'],x1['mean1'])
        fx1_2 = multi_distribution(x1['x_sample'],x1['cov2'],x1['mean2'])
        # calculate multivariate normal densities for test 2
        fx2_1 = multi_distribution(x2['x_sample'],x2['cov1'],x2['mean1'])
        fx2_2 = multi_distribution(x2['x_sample'],x2['cov2'],x2['mean2'])
        # evaluate test 1
        y_pred1 = bayes_rules(fx1_1,fx1_2,x1['p1'],x1['p2'])
        y_true1 = x1['y_sample']
        # evaluate test 2 (original comment mislabelled this as "test 1")
        y_pred2 = bayes_rules(fx2_1,fx2_2,x2['p1'],x2['p2'])
        y_true2 = x2['y_sample']
        # NOTE(review): y_pred*/y_true* are computed but never passed to
        # confusion_matrix; only the fold header is printed.
        print("############### K=", k ," #################")
        k+=1
# --- end of Bayes-classifier snippet (dataset-export separator) ---
def main():
    """Return the square of 1 (always 1)."""
    base = 1
    return base * base
def hello():
    """Return the fixed greeting string."""
    greeting = 'hello123'
    return greeting
def add(a, b):
    """Return the sum (or concatenation) of a and b via the + operator."""
    total = a + b
    return total
def process(kw):
    """Concatenate the string forms of kw['name'] and kw['value'].

    kw : mapping with 'name' and 'value' keys.
    """
    name_part = str(kw['name'])
    value_part = str(kw['value'])
    return name_part + value_part
# if __name__ == '__main__':
#     kw = {'name': 'liuyang', 'value': 123}
#     print(process(kw))  # process takes the dict itself, not **kwargs
# --- end of helper-function snippet (dataset-export separator) ---
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: Counters for each IGMP protocol version.
  """
  # NOTE(review): auto-generated pyangbind container — do not hand-edit.
  # Identical `state` classes later in this file shadow this definition
  # at import time.
  __slots__ = ('_path_helper', '_extmethods', '__v1','__v2','__v3',)

  _yang_name = 'state'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    self._path_helper = False
    self._extmethods = False
    # One YANGDynClass leaf per IGMP report counter (uint32 range 0..2^32-1).
    self.__v1 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
    self.__v2 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
    self.__v3 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      # Copy changed elements from the supplied compatible object.
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['network-instances', 'network-instance', 'protocols', 'protocol', 'igmp', 'interfaces', 'interface', 'counters', 'reports', 'state']

  def _get_v1(self):
    """
    Getter method for v1, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v1 (uint32)

    YANG Description: IGMP v1.
    """
    return self.__v1

  def _set_v1(self, v, load=False):
    """
    Setter method for v1, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v1 (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_v1 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_v1() directly.

    YANG Description: IGMP v1.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """v1 must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
        })
    self.__v1 = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_v1(self):
    self.__v1 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

  def _get_v2(self):
    """
    Getter method for v2, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v2 (uint32)

    YANG Description: IGMP v2.
    """
    return self.__v2

  def _set_v2(self, v, load=False):
    """
    Setter method for v2, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v2 (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_v2 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_v2() directly.

    YANG Description: IGMP v2.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """v2 must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
        })
    self.__v2 = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_v2(self):
    self.__v2 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

  def _get_v3(self):
    """
    Getter method for v3, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v3 (uint32)

    YANG Description: IGMP v3.
    """
    return self.__v3

  def _set_v3(self, v, load=False):
    """
    Setter method for v3, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v3 (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_v3 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_v3() directly.

    YANG Description: IGMP v3.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """v3 must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
        })
    self.__v3 = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_v3(self):
    self.__v3 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

  # Read-only property wrappers (leaves are config: false).
  v1 = __builtin__.property(_get_v1)
  v2 = __builtin__.property(_get_v2)
  v3 = __builtin__.property(_get_v3)

  _pyangbind_elements = OrderedDict([('v1', v1), ('v2', v2), ('v3', v3), ])
class state(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: Counters for each IGMP protocol version.
  """
  # NOTE(review): auto-generated pyangbind container for the -l2 module
  # variant — do not hand-edit.  This definition shadows the previous
  # `state` class in this module, and is itself shadowed by later ones.
  __slots__ = ('_path_helper', '_extmethods', '__v1','__v2','__v3',)

  _yang_name = 'state'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    self._path_helper = False
    self._extmethods = False
    # One YANGDynClass leaf per IGMP report counter (uint32 range 0..2^32-1).
    self.__v1 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
    self.__v2 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
    self.__v3 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      # Copy changed elements from the supplied compatible object.
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['network-instances', 'network-instance', 'protocols', 'protocol', 'igmp', 'interfaces', 'interface', 'counters', 'reports', 'state']

  def _get_v1(self):
    """
    Getter method for v1, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v1 (uint32)

    YANG Description: IGMP v1.
    """
    return self.__v1

  def _set_v1(self, v, load=False):
    """
    Setter method for v1, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v1 (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_v1 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_v1() directly.

    YANG Description: IGMP v1.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """v1 must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
        })
    self.__v1 = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_v1(self):
    self.__v1 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

  def _get_v2(self):
    """
    Getter method for v2, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v2 (uint32)

    YANG Description: IGMP v2.
    """
    return self.__v2

  def _set_v2(self, v, load=False):
    """
    Setter method for v2, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v2 (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_v2 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_v2() directly.

    YANG Description: IGMP v2.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """v2 must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
        })
    self.__v2 = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_v2(self):
    self.__v2 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

  def _get_v3(self):
    """
    Getter method for v3, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v3 (uint32)

    YANG Description: IGMP v3.
    """
    return self.__v3

  def _set_v3(self, v, load=False):
    """
    Setter method for v3, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v3 (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_v3 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_v3() directly.

    YANG Description: IGMP v3.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """v3 must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
        })
    self.__v3 = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_v3(self):
    self.__v3 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

  # Read-only property wrappers (leaves are config: false).
  v1 = __builtin__.property(_get_v1)
  v2 = __builtin__.property(_get_v2)
  v3 = __builtin__.property(_get_v3)

  _pyangbind_elements = OrderedDict([('v1', v1), ('v2', v2), ('v3', v3), ])
class state(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: Counters for each IGMP protocol version.
  """
  # NOTE(review): auto-generated pyangbind container — do not hand-edit.
  # Byte-identical duplicate of the first `state` class in this module;
  # it shadows the earlier definitions at import time.
  __slots__ = ('_path_helper', '_extmethods', '__v1','__v2','__v3',)

  _yang_name = 'state'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    self._path_helper = False
    self._extmethods = False
    # One YANGDynClass leaf per IGMP report counter (uint32 range 0..2^32-1).
    self.__v1 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
    self.__v2 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
    self.__v3 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      # Copy changed elements from the supplied compatible object.
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['network-instances', 'network-instance', 'protocols', 'protocol', 'igmp', 'interfaces', 'interface', 'counters', 'reports', 'state']

  def _get_v1(self):
    """
    Getter method for v1, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v1 (uint32)

    YANG Description: IGMP v1.
    """
    return self.__v1

  def _set_v1(self, v, load=False):
    """
    Setter method for v1, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v1 (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_v1 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_v1() directly.

    YANG Description: IGMP v1.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """v1 must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
        })
    self.__v1 = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_v1(self):
    self.__v1 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

  def _get_v2(self):
    """
    Getter method for v2, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v2 (uint32)

    YANG Description: IGMP v2.
    """
    return self.__v2

  def _set_v2(self, v, load=False):
    """
    Setter method for v2, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v2 (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_v2 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_v2() directly.

    YANG Description: IGMP v2.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """v2 must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
        })
    self.__v2 = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_v2(self):
    self.__v2 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

  def _get_v3(self):
    """
    Getter method for v3, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v3 (uint32)

    YANG Description: IGMP v3.
    """
    return self.__v3

  def _set_v3(self, v, load=False):
    """
    Setter method for v3, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v3 (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_v3 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_v3() directly.

    YANG Description: IGMP v3.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """v3 must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
        })
    self.__v3 = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_v3(self):
    self.__v3 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

  # Read-only property wrappers (leaves are config: false).
  v1 = __builtin__.property(_get_v1)
  v2 = __builtin__.property(_get_v2)
  v3 = __builtin__.property(_get_v3)

  _pyangbind_elements = OrderedDict([('v1', v1), ('v2', v2), ('v3', v3), ])
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Counters for each IGMP protocol version.
    """
    __slots__ = ('_path_helper', '_extmethods', '__v1','__v2','__v3',)

    _yang_name = 'state'
    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        # Path helper / extension-method registry may be replaced by the
        # enclosing pyangbind tree; default to disabled.
        self._path_helper = False
        self._extmethods = False
        # Each leaf starts as an unset uint32 YANGDynClass.
        self.__v1 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
        self.__v2 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
        self.__v3 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-construct: the single argument must carry every element.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Walk up the parent chain when attached to a tree; otherwise return
        # the static schema path of this container.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return ['network-instances', 'network-instance', 'protocols', 'protocol', 'igmp', 'interfaces', 'interface', 'counters', 'reports', 'state']

    def _get_v1(self):
        """
        Getter method for v1, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v1 (uint32)

        YANG Description: IGMP v1.
        """
        return self.__v1

    def _set_v1(self, v, load=False):
        """
        Setter method for v1, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v1 (uint32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_v1 is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_v1() directly.

        YANG Description: IGMP v1.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """v1 must be of a type compatible with uint32""",
                'defined-type': "uint32",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
            })
        self.__v1 = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_v1(self):
        self.__v1 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

    def _get_v2(self):
        """
        Getter method for v2, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v2 (uint32)

        YANG Description: IGMP v2.
        """
        return self.__v2

    def _set_v2(self, v, load=False):
        """
        Setter method for v2, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v2 (uint32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_v2 is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_v2() directly.

        YANG Description: IGMP v2.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """v2 must be of a type compatible with uint32""",
                'defined-type': "uint32",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
            })
        self.__v2 = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_v2(self):
        self.__v2 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v2", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

    def _get_v3(self):
        """
        Getter method for v3, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v3 (uint32)

        YANG Description: IGMP v3.
        """
        return self.__v3

    def _set_v3(self, v, load=False):
        """
        Setter method for v3, mapped from YANG variable /network_instances/network_instance/protocols/protocol/igmp/interfaces/interface/counters/reports/state/v3 (uint32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_v3 is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_v3() directly.

        YANG Description: IGMP v3.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """v3 must be of a type compatible with uint32""",
                'defined-type': "uint32",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
            })
        self.__v3 = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_v3(self):
        self.__v3 = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="v3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)

    # Read-only property views over the three counters.
    v1 = __builtin__.property(_get_v1)
    v2 = __builtin__.property(_get_v2)
    v3 = __builtin__.property(_get_v3)

    _pyangbind_elements = OrderedDict([('v1', v1), ('v2', v2), ('v3', v3), ])
|
# -*- coding: utf-8 -*-
"""
Created on 2020/5/4 7:59
@author: dct
"""
import requests
from lxml import etree
import re
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36',
'Referer': 'https://movie.douban.com/top250?start=0&filter='}
# ่ทๅๆฏไธช้กต้ข็ตๅฝฑ็็ฝๅ
def getMovieUrls(baseUrl):
    """Fetch one Douban Top-250 listing page and return the detail-page URLs.

    Returns an empty list when the request or the XPath extraction fails,
    so callers can iterate the result unconditionally.
    """
    try:
        response = requests.get(baseUrl, headers=header, timeout=10)
        html = etree.HTML(response.text)
    except requests.exceptions.RequestException as e:
        # Bug fix: the original only printed the error and fell through with
        # `html` (and movies_urls) unbound, raising NameError afterwards.
        print(e)
        return []
    try:
        movies_urls = html.xpath('//ol[@class="grid_view"]//a/@href')
    except Exception:
        print('่ทๅ็ตๅฝฑurlๅคฑ่ดฅ')
        return []
    return movies_urls
def getDetail(url):
    """Fetch one movie detail page and extract name, rank and info fields.

    Returns a dict on success, or False when the page cannot be fetched or
    parsed (matching the original failure convention used by spiderUrls).
    """
    try:
        response = requests.get(url, headers=header, timeout=10)
        html = etree.HTML(response.text)
    except requests.exceptions.RequestException as e:
        # Bug fix: originally `html` stayed unbound after a failed request and
        # the xpath calls below raised NameError; fail fast instead.
        print(e)
        return False
    try:
        movie_name = html.xpath('//div[@id="content"]//h1/span/text()')
        movie_sorted = html.xpath('//div[@id="content"]/div[@class ="top250"]/span/text()')
        movies_infos = html.xpath('//div[@id="info"]//text()')
    except Exception:
        # NOTE: this string was mojibake split across two physical lines in
        # the original (a syntax error); rejoined onto one line here.
        print('่ทๅ็ตๅฝฑ่ฏฆๆๅคฑ่ดฅ')
        return False
    movie = {}
    p = re.compile(r'[/:]')  # strip '/' and ':' from the info strings
    movies_infos = [re.sub(p, '', movies_info).strip() for movies_info in movies_infos]
    movies_infos = [m for m in movies_infos if m != '']
    movie['movie_name'] = movie_name
    movie['movie_sorted'] = movie_sorted
    # (removed a duplicated movie['movie_name'] assignment from the original)
    for index, movies_info in enumerate(movies_infos):
        # Field labels are followed by their value in the flattened info list.
        if movies_info == '็้ฟ':
            movie['็้ฟ'] = movies_infos[index + 1]
        elif movies_info == '่ฏญ่จ':
            movie['่ฏญ่จ'] = movies_infos[index + 1]
    return movie
def spiderUrls():
    """Walk every Top-250 listing page and print each movie's details."""
    baseUrl = 'https://movie.douban.com/top250?start={}&filter='
    num = 1
    # 10 pages of 25 entries each: start = 0, 25, ..., 225.
    # (the original range(0, 251, 25) also requested an empty 11th page)
    for start in range(0, 250, 25):
        # Bug fix: the original called baseUrl.format(i) but discarded the
        # result and fetched the unformatted baseUrl, so every iteration
        # scraped page 1.
        pageUrl = baseUrl.format(start)
        moveUrls = getMovieUrls(pageUrl)
        for moveUrl in moveUrls:
            movie = getDetail(moveUrl)
            if movie:
                print('็ฌฌ{}็ตๅฝฑไฟกๆฏ'.format(num))
            else:
                print("่ทๅ็ตๅฝฑๅคฑ่ดฅ")
            num += 1
            print(movie)
if __name__ == '__main__':
spiderUrls() |
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas
import plotly.express as px

app = dash.Dash(__name__)

# Two weeks of heart-rate (HR) and heart-rate-variability (HRV) readings,
# one grouped bar per day per measure.
data_frame = pandas.DataFrame({
    "Day": ["Mon", "Tue", "Wed", "Thur", "Fri", "Sat", "Sun", "Mon", "Tue", "Wed", "Thur", "Fri", "Sat", "Sun"],
    "Value": [9.5, 8.7, 8.8, 8.5, 10.0, 10.3, 9.6, 40.0, 40.0, 39.0, 39.0, 42.0, 41.0, 37.0],
    "Measure": ["HR", "HR", "HR", "HR", "HR", "HR", "HR", "HRV", "HRV", "HRV", "HRV", "HRV", "HRV", "HRV"]
})

fig = px.bar(data_frame, x="Day", y="Value", color="Measure", barmode="group")

app.layout = html.Div(children=[
    html.H1(children='Hello My First Dash'),
    html.Div(children='''
Dash: Tracking My HR/HRV Data.
'''),
    # Consistency fix: use the `dcc` module imported above instead of
    # `dash.dcc`, which only exists as an alias on newer dash releases.
    dcc.Graph(
        id='example-graph',
        figure=fig
    )
])

if __name__ == '__main__':
    app.run_server(debug=True)
|
import numpy as np
from easydict import EasyDict
# Global hyper-parameter/config container for the detector.
para = EasyDict()

# Box encoding length:
# 6: cos, sin, x, y, w, l
# 5: r, x, y, w, l
para.box_code_len = 6
if para.box_code_len == 6:
    # Per-dimension normalization statistics for the regression targets.
    para.target_mean = np.array([0.799, -0.053, 0.194, 0.192, 0.487, 1.37], dtype=np.float32)
    para.target_std_dev = np.array([0.325, 0.504, 0.537, 0.389, 0.064, 0.109], dtype=np.float32)
elif para.box_code_len == 5:
    para.target_mean = np.array([0.0, 0.194, 0.192, 0.487, 1.37 ], dtype=np.float32)
    para.target_std_dev = np.array([1.0, 0.537, 0.389, 0.064, 0.109], dtype=np.float32)
else:
    raise NotImplementedError

para.sin_angle_loss = False
if para.sin_angle_loss:
    # sin-angle loss only makes sense with the single-angle (r) encoding
    assert para.box_code_len == 5
para.corner_loss = False
para.corner_loss_start = 100  # step at which corner loss starts -- TODO confirm units (epoch vs iteration)

para.estimate_zh = True
if para.estimate_zh:  # append [z,h]
    para.box_code_len += 2  # 8, 7
    # Extend the normalization stats for the two extra dimensions.
    para.target_mean = np.resize(para.target_mean, para.box_code_len)
    para.target_std_dev = np.resize(para.target_std_dev, para.box_code_len)
    para.target_mean[-2:] = np.array([-0.74, 0.456], dtype=np.float32)
    para.target_std_dev[-2:] = np.array([0.35, 0.085], dtype=np.float32)

# Point-cloud crop extents (presumably metres in the sensor frame -- confirm).
para.L1 = -40.0
para.L2 = 40.0
para.W1 = 0.0
para.W2 = 70.4
para.H1 = -3
para.H2 = 1.0

para.dense_net = False
def use_dense_net(sel):
    """Switch the global `para` config between the dense and sparse backbones.

    sel truthy selects the dense PIXOR/RGB setup; otherwise the sparse
    PIXOR_SPARSE setup whose shapes depend on the grid resolution.
    """
    para.dense_net = sel
    # Both configurations use the same grid resolution.
    para.grid_sizeLW = 0.1
    para.grid_sizeH = 0.1
    if sel:
        # Dense network over RGB input.
        para.ratio = 4
        para.input_shape = (800, 704)
        # PIXOR or PIXOR_RFB
        para.net = 'PIXOR'
        # 'rgb', 'pixor', 'pixor-rgb', 'voxel'
        para.channel_type = 'rgb'
        para.batch_size = 4
        return
    # Sparse variant: input/label geometry follows the grid resolution.
    if para.grid_sizeLW == 0.05:
        para.ratio = 8
        para.input_shape = (1600, 1408)
        para.full_shape = np.array([1408, 1600, 40])
        para.batch_size = 6
    elif para.grid_sizeLW == 0.1:
        para.ratio = 4
        para.input_shape = (800, 704)
        para.full_shape = np.array([704, 800, 40])
        para.batch_size = 6
    para.net = 'PIXOR_SPARSE'
    para.channel_type = 'sparse'
    # 2(z,i), 4(x,y,z,i), 32(sift)
    para.voxel_feature_len = 4
    para.centroid_voxel_feature = False
# Apply the currently selected backbone configuration.
use_dense_net(para.dense_net)

para.rpn_ver = 'v2'
para.sparse_res_middle_net = False
para.resnet_version = 'v1'
para.sparse_inception_middle_net = False
para.label_shape = (200, 176)
para.estimate_dir = True
# One objectness channel plus the box regression channels.
para.label_channels = 1 + para.box_code_len
if para.estimate_dir:
    para.label_channels += 1

# Input channel count depends on the chosen input representation.
if para.channel_type == 'rgb':
    para.input_channels = 3
if para.channel_type == 'pixor':
    para.input_channels = int((para.H2 - para.H1) / para.grid_sizeH + 1)
if para.channel_type == 'pixor-rgb':
    para.input_channels = int(3 + (para.H2 - para.H1) / para.grid_sizeH + 1)
if para.channel_type == 'voxel':
    para.input_channels = int((para.H2 - para.H1) / para.grid_sizeH)
if para.channel_type == 'sparse':
    # Sentinel: channel count is determined by the sparse backbone itself.
    para.input_channels = -1

para.object_list = ['Car']
para.collision_object_list = ['Car', 'Van', 'Truck', 'Pedestrian', 'Cyclist']
para.box_in_labelmap_ratio = 0.6
para.use_labelmap_mask = False
if para.use_labelmap_mask:
    para.box_in_labelmap_ratio = 0.5
    para.box_in_labelmap_mask_ratio = 1.1
para.use_se_mod = False

# Data pipeline / augmentation switches.
para.align_pc_with_img = False
para.img_shape = (375, 1242)
para.crop_pc_by_fov = True
para.augment_data_use_db = True
para.augment_data_by_flip = False
para.filter_sampled_by_ground = True
para.move_sampled_to_road_plane = True
para.augment_max_samples = 15
para.remove_points_after_sample = False
para.filter_bad_targets = True
para.minimum_target_points = 8
from PyQt4 import QtGui
from PyQt4 import QtCore
from AlignmentTableAbstractModel import TableAbstractModel as alignment_model
from InputDialogWidget import InputDialog as input_dialog
import random
import sys
class AlignmentWindow(QtGui.QMainWindow):
    """Main window for viewing and hand-editing a sequence alignment.

    The alignment is held in self.table_data as a list of rows, each row a
    list of single-character strings, displayed through a QTableView.
    """

    def __init__(self, sequences):
        super(AlignmentWindow, self).__init__()
        self.setGeometry(50, 50, 700, 400)
        self.setWindowTitle('Alignment Analysis')
        # Center the window on the screen.
        self.move(QtGui.QApplication.desktop().screen().rect().center()- self.rect().center())
        self.alignment = sequences
        self.add_main_menu()
        self.add_toolbar()
        self.alignment_window_view()

    def add_main_menu(self):
        """Build the menu bar (currently only File > Quit)."""
        mainMenu = self.menuBar()
        # file submenu
        fileMenu = mainMenu.addMenu('&File')
        quitAction = QtGui.QAction("&Quit", self)
        quitAction.setShortcut("Ctrl+Q")
        quitAction.setStatusTip("Leave the application")
        quitAction.triggered.connect(self.close_application)
        fileMenu.addAction(quitAction)

    def add_toolbar(self):
        """Build the alignment-correction toolbar and wire up its actions."""
        changeMultipleAction = QtGui.QAction('Change nucleotides', self)
        changeMultipleAction.triggered.connect(self.change_multiple)
        self.toolBar = self.addToolBar('Alingment correction')
        self.toolBar.addAction(changeMultipleAction)
        addSequence = QtGui.QAction('Add sequence', self)
        addSequence.triggered.connect(self.add_sequence)
        self.toolBar.addAction(addSequence)
        deleteSequence = QtGui.QAction('Delete sequence', self)
        deleteSequence.triggered.connect(self.delete_sequence)
        self.toolBar.addAction(deleteSequence)
        realignSection = QtGui.QAction('Realign selection', self)
        realignSection.triggered.connect(self.realign_selection)
        self.toolBar.addAction(realignSection)
        reverseComplement = QtGui.QAction('Reverse complement', self)
        reverseComplement.triggered.connect(self.reverse_complement)
        self.toolBar.addAction(reverseComplement)
        moveUp = QtGui.QAction('Move up', self)
        moveUp.triggered.connect(self.move_up)
        self.toolBar.addAction(moveUp)
        moveDown = QtGui.QAction('Move down', self)
        moveDown.triggered.connect(self.move_down)
        self.toolBar.addAction(moveDown)
        # testPopup = QtGui.QAction('Popup', self)
        # testPopup.triggered.connect(self.pop_up_editor)
        #
        # self.toolBar.addAction(testPopup)

    def change_multiple(self):
        """Overwrite every selected cell with a nucleotide typed by the user."""
        nucleotide, ok = QtGui.QInputDialog.getText(self, 'Nucleotide substitution', 'Enter nucleotide')
        if ok:
            row = self.alignment_table.selectedIndexes()
            for _row in row:
                self.alignment_table.model().set_item(_row, str(nucleotide))

    def add_sequence(self):
        """Append a sequence entered through the input dialog."""
        seq, ok = input_dialog.getDateTime()
        #test case - AGGGGGGGGGGGG
        if ok:
            # NOTE(review): appends a plain string while all other rows are
            # lists of characters -- confirm this should be list(str(seq)).
            self.table_data.append(str(seq))
            self.alignment_table.model().layoutChanged.emit()

    def delete_sequence(self):
        """Delete the rows of all selected cells.

        NOTE(review): deleting while iterating multiple selected indexes
        shifts later row numbers; multi-row deletes may remove wrong rows.
        """
        row = self.alignment_table.selectedIndexes()
        for _row in row:
            del self.table_data[_row.row()]
        self.alignment_table.model().layoutChanged.emit()

    def realign_selection(self):
        # Placeholder: realignment not implemented yet.
        print(1)

    def rev_comp(self, sequence):
        """Return the reverse complement of a sequence (iterable of symbols)."""
        reverse_alphabet = {
            'A': 'T',
            'T': 'A',
            'C': 'G',
            'G': 'C',
            'N': 'N',
            '-': '-'
        }
        rev_comp_sequence = ''
        for sym in sequence[::-1]:
            rev_comp_sequence += reverse_alphabet[sym]
        return rev_comp_sequence

    def reverse_complement(self):
        """Reverse-complement selected rows, or every row when none selected."""
        rows = self.alignment_table.selectedIndexes()
        selected_rows = set([_row.row() for _row in rows])
        if len(selected_rows) != 0:
            for _row in selected_rows:
                self.table_data[_row] = list(self.rev_comp(self.table_data[_row]))
        else:
            for i in range(len(self.table_data)):
                self.table_data[i] = list(self.rev_comp(self.table_data[i]))
        self.alignment_table.model().layoutChanged.emit()

    def move_up(self):
        # Placeholder: the swap logic below is disabled.
        # rows = self.alignment_table.selectedIndexes()
        # selected_rows = set([_row.row() for _row in rows])
        #
        # for _row in selected_rows:
        #     if _row != 0 and _row != len(self.table_data) - 1:
        #         self.table_data[_row], self.table_data[_row - 1] = self.table_data[_row - 1], self.table_data[_row]
        print(1)
        # self.alignment_table.model().layoutChanged.emit()

    def move_down(self):
        # Placeholder: not implemented yet.
        print(2)

    def close_application(self):
        """Confirm with the user, then terminate the whole process."""
        choice = QtGui.QMessageBox.question(self, 'Quit', 'Do you sure want to exit?', QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
        if choice == QtGui.QMessageBox.Yes:
            sys.exit()
        else:
            pass

    def alignment_window_view(self):
        """Load the alignment into the table view and show the window."""
        self.get_table_data(self.alignment)
        self.alignment_table = self.create_table()
        self.setCentralWidget(self.alignment_table)
        self.show()

    def get_table_data(self, data):
        # Split each sequence into a list of one-character cells.
        self.table_data = [list(seq) for seq in data]

    def create_table(self):
        """Build the QTableView; the last row is labelled as the consensus."""
        tv = QtGui.QTableView()
        hor_header = ['{}'.format(i) for i in range(len(self.table_data[0]))]
        vert_header = ['Sequence number {}'.format(i) for i in range(len(self.table_data) - 1)]
        vert_header.append('CONSENSUS SEQUENCE')
        table_model = alignment_model(self.table_data, hor_header, vert_header, self)
        tv.setModel(table_model)
        tv.setShowGrid(False)
        vh = tv.verticalHeader()
        vh.setVisible(True)
        hh = tv.horizontalHeader()
        hh.setStretchLastSection(False)
        tv.resizeColumnsToContents()
        tv.resizeRowsToContents()
        tv.setSortingEnabled(False)
        return tv
|
# -*-coding:utf-8-*-
import codecs
import csv
import os
import pandas as pd
import util.common_util as my_util
from collections import Counter
# ่ฏๆฎๅ็งฐๅ่กจ
evidence_list = list()
# ็ฌๅฝๆญฃๆๅญๅ
ธ ๆไปถๅ:ๅ
ๅฎน
content_dict = dict()
# ็ฌๅฝไธญไธพ่ฏ่ดจ่ฏๆๆฌ ๆไปถๅ๏ผๅ
ๅฎน
train_evidence_paragraph_dict = dict()
dev_evidence_paragraph_dict = dict()
test_evidence_paragraph_dict = dict()
# ็ฌๅฝไธญๅญๅจ็่ฏๆฎๅฏนๅบๅ
ณ็ณป ๆไปถๅ:[ไธพ่ฏๆน Evidence(E) ,่ฏๆฎๅ็งฐ Trigger(T) ,่ฏๅฎๅ
ๅฎน Content(C), ่ดจ่ฏๆ่ง View(V),่ดจ่ฏๆน Anti-Evidence(A)]
tag_dic = dict()
other_count, evidence_count, view_count = 0, 0, 0
def analyse_cl_data():
    """Build the sentence-classification TSV datasets (train/dev/test)."""
    global other_count, evidence_count, view_count
    analyse_data_excel_content()
    # 80/10/10 split over the sorted document titles.
    length = len(content_dict.values())
    train_content_keys = sorted(content_dict)[:int(length * 0.8)]
    dev_content_keys = sorted(content_dict)[int(length * 0.8):int(length * 0.9)]
    test_content_keys = sorted(content_dict)[int(length * 0.9):]
    train_content, dev_content, test_content = {}, {}, {}
    for key in train_content_keys:
        train_content[key] = content_dict[key]
    for key in dev_content_keys:
        dev_content[key] = content_dict[key]
    for key in test_content_keys:
        test_content[key] = content_dict[key]
    analyse_data_excel_tags()
    extract_evidence_paragraph(train_content, "train")
    extract_evidence_paragraph(dev_content, "dev")
    extract_evidence_paragraph(test_content, "test")
    # analyse_dir_document()
    create_cl_data(train_evidence_paragraph_dict, "train")
    # NOTE(review): the dev split is written with type "train" (appended to
    # train.tsv). This may be an intentional merge of dev into train, but
    # confirm it is not a copy-paste slip for "dev".
    create_cl_data(dev_evidence_paragraph_dict, "train")
    print("\ntrain other_count:%s,evidence_count:%s,view_count:%s," % (other_count, evidence_count, view_count))
    # Reset the counters before writing the test split.
    other_count, evidence_count, view_count = 0, 0, 0
    create_cl_data(test_evidence_paragraph_dict, "test")
    print("\ntest other_count:%s,evidence_count:%s,view_count:%s," % (other_count, evidence_count, view_count))
def analyse_ner_data():
    """Build the NER BIO datasets (ner_train/ner_dev/ner_test).

    Uses the same 80/10/10 title split as analyse_cl_data.
    """
    analyse_data_excel_content()
    length = len(content_dict.values())
    train_content_keys = sorted(content_dict)[:int(length * 0.8)]
    dev_content_keys = sorted(content_dict)[int(length * 0.8):int(length * 0.9)]
    test_content_keys = sorted(content_dict)[int(length * 0.9):]
    train_content, dev_content, test_content = {}, {}, {}
    for key in train_content_keys:
        train_content[key] = content_dict[key]
    for key in dev_content_keys:
        dev_content[key] = content_dict[key]
    for key in test_content_keys:
        test_content[key] = content_dict[key]
    analyse_data_excel_tags()
    extract_evidence_paragraph(train_content, "train")
    extract_evidence_paragraph(dev_content, "dev")
    extract_evidence_paragraph(test_content, "test")
    # analyse_dir_document()
    create_ner_data(train_evidence_paragraph_dict, "ner_train")
    create_ner_data(dev_evidence_paragraph_dict, "ner_dev")
    create_ner_data(test_evidence_paragraph_dict, "ner_test")
# Load document contents from the Excel sheet (original comment in Chinese).
def analyse_data_excel_content(title=None, content=None):
    """Parse the document-content spreadsheet into content_dict.

    Called with no arguments it reads every (title, content) row of the
    sheet and recurses once per row; called with arguments it splits one
    document into paragraphs and sentences.
    """
    if title is None and content is None:
        rows = pd.read_excel("./raw_data/ๆไนฆๅ
ๅฎน.xls", sheet_name=0, header=0)
        for title, content in rows.values:
            title = my_util.format_brackets(title.strip())
            # print(title)
            analyse_data_excel_content(title, content)
    else:
        old_paragraphs = [paragraph for paragraph in my_util.split_paragraph(content)
                          if paragraph is not None and len(paragraph.strip()) > 0]
        new_paragraphs = list()
        new_paragraph = ""
        # Merge speaker paragraphs into one block (original comment in Chinese).
        for index, paragraph in enumerate(old_paragraphs):
            # print("%s:%s" % (title, index))
            if my_util.check_paragraph(paragraph):
                if new_paragraph is not None and len(new_paragraph) > 0:
                    # Terminate trailing CJK text with a full stop.
                    if '\u4e00' <= paragraph[-1] <= '\u9fff':
                        paragraph += "ใ"
                    new_paragraphs.append(new_paragraph)
                new_paragraph = paragraph
            else:
                if '\u4e00' <= paragraph[-1] <= '\u9fff':
                    paragraph += "ใ"
                new_paragraph = new_paragraph + paragraph
        # NOTE(review): the final accumulated new_paragraph is never appended
        # to new_paragraphs -- confirm the last paragraph is meant to be dropped.
        content_dict[title] = [
            [my_util.clean_text(sentence) for sentence in paragraph.split("ใ")
             if sentence is not None and len(sentence.strip()) > 0]
            for paragraph in new_paragraphs]
        return content_dict[title]
def extract_single_sentence_from_paragraph(paragraph):
    """Split a paragraph on the sentence separators and return cleaned pieces.

    Splits first on "ใ", then on "๏ผ", drops empty fragments, and runs
    my_util.clean_text over each remaining fragment. Returns [] for NaN input.
    """
    if my_util.is_nan(paragraph):
        return []
    return [
        my_util.clean_text(piece)
        for chunk in paragraph.split("ใ")
        if chunk is not None and chunk.strip()
        for piece in chunk.split("๏ผ")
        if piece is not None and piece.strip()
    ]
# Roles per row: presenter E, trigger T, proven content C, opinion V,
# challenger A (original comment was in Chinese, partially garbled).
def analyse_data_excel_tags():
    """Parse the evidence-relation spreadsheet into tag_dic.

    tag_dic maps a cleaned title to a list of [E, t, C, V, A] entries,
    one per trigger t found in the row's T column.
    """
    rows = pd.read_excel("./raw_data/่ฏๆฎๅ
ณ็ณปๅฏนๅบ.xls", sheet_name=0, header=0)
    for title, E, T, C, V, A in rows.values:
        title = my_util.clean_text(title)
        E = my_util.clean_text(E)
        A = my_util.clean_text(A)
        title = my_util.format_brackets(title)
        # print("tag_title:%s" % title)
        T = extract_single_sentence_from_paragraph(T)
        C = extract_single_sentence_from_paragraph(C)
        V = extract_single_sentence_from_paragraph(V)
        if title not in tag_dic:
            tag_list = list()
            for t in T:
                tag_list.append([E, t, C, V, A])
            tag_dic[title] = tag_list
        else:
            for t in T:
                tag_dic[title].append([E, t, C, V, A])
                # NOTE(review): evidence_list is only extended in this else
                # branch, never for a title's first row -- confirm intended.
                if t not in evidence_list:
                    evidence_list.append(t)
# Extract the main evidence/challenge paragraph span of each tagged document
# (original comment in Chinese).
def extract_evidence_paragraph(content, type=None):
    """Slice each tagged document to its evidence paragraph range.

    Results are stored into the module-level train/dev/test dicts depending
    on `type`; any value other than "train"/"dev" falls through to test.
    """
    for d in content:
        if d not in tag_dic:
            continue
        start, end = my_util.check_evidence_paragraph(content[d])
        # (debug print of the extracted range left out; see original comment)
        if type == "train":
            train_evidence_paragraph_dict[d] = content[d][start:end]
        elif type == "dev":
            dev_evidence_paragraph_dict[d] = content[d][start:end]
        else:
            test_evidence_paragraph_dict[d] = content[d][start:end]
def create_cl_data(evidence_paragraph_dict, type=None):
    """Append sentence-classification rows to ./data/<type>.tsv.

    Each row is (label, paragraph, previous paragraph) with label E
    (evidence-dominated), V (view-dominated) or O (other), decided by
    character-level tag counts against tag_dic.
    """
    global other_count, evidence_count, view_count
    text = []
    for d in evidence_paragraph_dict:
        if d not in tag_dic:
            # Document has no annotations; skip it.
            continue
        evidence_content = evidence_paragraph_dict[d]
        last_paragraph = None
        for paragrah in evidence_content:
            paragrah = "ใ".join(paragrah)
            # Character-level tags, initially all "O".
            tag = ["O"] * len(paragrah)
            if len(paragrah) <= 0:
                continue
            for [_, t, C, V, _] in tag_dic[d]:
                # Mark every occurrence of the trigger as evidence chars.
                find_t = str(paragrah).find(t)
                while find_t != -1 and tag[find_t] == "O":
                    tag = tag[:find_t] + ["E"] * len(t) + tag[find_t + len(t):]
                    find_t = str(paragrah).find(t, find_t)
                for c in C:
                    if len(c) <= 1:
                        continue
                    find_c = str(paragrah).find(c)
                    while find_c != -1 and tag[find_c] == "O":
                        tag = tag[:find_c] + ["E"] * len(c) + tag[find_c + len(c):]
                        find_c = str(paragrah).find(c, find_c)
                for v in V:
                    if len(v) <= 1:
                        continue
                    find_v = str(paragrah).find(v)
                    while find_v != -1 and tag[find_v] == "O":
                        tag = tag[:find_v] + ["V"] * len(v) + tag[find_v + len(v):]
                        find_v = str(paragrah).find(v, find_v)
            # Context = current paragraph + previous paragraph (tab-joined).
            context = paragrah + "\t" + ("" if last_paragraph is None else last_paragraph)
            last_paragraph = paragrah
            counter = Counter(tag)
            if counter["E"] > counter["V"] and counter["O"] - counter["E"] < 5:
                text.append(["E", context])
                evidence_count += 1
            elif counter["E"] < counter["V"] and counter["O"] - counter["V"] < 5:
                text.append(["V", context])
                view_count += 1
            else:
                # NOTE(review): both branches below append the same row, so
                # the apparent 1-in-10 down-sampling of "O" rows for train
                # has no effect -- confirm whether the else was meant to skip.
                if type == "train" and other_count % 10 == 0:
                    text.append(["O", context])
                else:
                    text.append(["O", context])
                other_count += 1
    with codecs.open('./data/%s.tsv' % type, "a", "utf-8") as out_file:
        tsv_writer = csv.writer(out_file, delimiter='\t')
        for line in text:
            if len(line[1]) <= 0:
                continue
            tsv_writer.writerow([line[0], line[1].split("\t")[0], line[1].split("\t")[1]])
def create_ner_data(evidence_paragraph_dict, type=None):
    """Write BIO-tagged NER files ./data/<type>_opinion.txt and _evidence.txt.

    Per paragraph, characters are tagged with B-/I- spans for triggers (T),
    proven content (C), opinions (O), presenter (E) and challenger (A);
    paragraphs are emitted only when the relevant anchor entity was found.
    """
    evidence_count = 0
    opinion_count = 0
    for d in evidence_paragraph_dict:
        if d not in tag_dic:
            continue
        evidence_content = evidence_paragraph_dict[d]
        for paragrah in evidence_content:
            paragrah = "ใ".join(paragrah)
            tag = ["O"] * len(paragrah)
            evidence_paragraph, opinion_paragraph = False, False
            for [E, t, C, O, A] in tag_dic[d]:
                has_t, has_c, has_o = False, False, False
                # NOTE(review): unlike create_cl_data, the re-finds below do
                # not pass a start offset, so they re-locate the same (now
                # tagged) position and the loop stops -- only the first
                # occurrence of each span gets tagged. Confirm intended.
                find_t = str(paragrah).find(t)
                while find_t != -1 and tag[find_t] == "O":
                    has_t = True
                    tag = tag[:find_t] + ["B-T"] + ["I-T"] * (len(t) - 1) + tag[find_t + len(t):]
                    find_t = str(paragrah).find(t)
                for c in C:
                    if len(c) <= 1:
                        continue
                    find_c = str(paragrah).find(c)
                    while find_c != -1 and tag[find_c] == "O":
                        has_c = True
                        tag = tag[:find_c] + ["B-C"] + ["I-C"] * (len(c) - 1) + tag[find_c + len(c):]
                        find_c = str(paragrah).find(c)
                for o in O:
                    if len(o) <= 1:
                        continue
                    find_o = str(paragrah).find(o)
                    while find_o != -1 and tag[find_o] == "O":
                        has_o = True
                        tag = tag[:find_o] + ["B-O"] + ["I-O"] * (len(o) - 1) + tag[find_o + len(o):]
                        find_o = str(paragrah).find(o)
                # Challenger name only counts when followed by '๏ผ' and an
                # opinion span was found in this paragraph.
                if len(A.strip()) > 1:
                    find_a = str(paragrah).find(A + "๏ผ")
                    if find_a != -1 and has_o and tag[find_a] == "O":
                        tag = tag[:find_a] + ["B-A"] + ["I-A"] * (len(A) - 1) + tag[find_a + len(A):]
                        opinion_paragraph = True
                # Presenter name analogously anchors an evidence paragraph.
                if len(E.strip()) > 1:
                    find_e = str(paragrah).find(E + "๏ผ")
                    if find_e != -1 and (has_t or has_c) and tag[find_e] == "O":
                        tag = tag[:find_e] + ["B-E"] + ["I-E"] * (len(E) - 1) + tag[find_e + len(E):]
                        evidence_paragraph = True
            assert len(paragrah) == len(tag)
            if opinion_paragraph:
                # Keep only opinion-related labels for the opinion file.
                for i, label in enumerate(tag):
                    if label not in ["O", "B-A", "I-A", "B-O", "I-O"]:
                        tag[i] = tag[i - 1]
                opinion_count += 1
                with codecs.open('./data/%s_opinion.txt' % type, "a", "utf-8") as f:
                    for i, word in enumerate(paragrah):
                        f.write("%s %s\n" % (word, tag[i]))
                    f.write("\n")
            if evidence_paragraph:
                # Keep only evidence-related labels for the evidence file.
                for i, label in enumerate(tag):
                    if label not in ["O", "B-E", "I-E", "B-T", "I-T", "B-C", "I-C"]:
                        tag[i] = tag[i - 1]
                evidence_count += 1
                with codecs.open('./data/%s_evidence.txt' % type, "a", "utf-8") as f:
                    for i, word in enumerate(paragrah):
                        f.write("%s %s\n" % (word, tag[i]))
                    f.write("\n")
    print("\ncount:evidence[%d],opinion[%d]" % (evidence_count, opinion_count))
def generate_data():
    """Dump raw and evidence-only paragraphs of the test split to TSV files.

    Writes, per test-split document, <title>.tsv (full document) and
    <title>_evidence.tsv (evidence span, each row paired with the previous
    row) under .\\test_all_data.
    """
    test_all_data_path = '.\\test_all_data'
    if not os.path.exists(test_all_data_path):
        os.makedirs(test_all_data_path)
    analyse_data_excel_content()
    # The test split is the last 10% of sorted titles (same as analyse_cl_data).
    length = len(content_dict.values())
    test_content_keys = sorted(content_dict)[int(length * 0.9):]
    for key in test_content_keys:
        file_path = os.path.join(test_all_data_path, key + ".tsv")
        # Full document, one paragraph per row.
        with codecs.open(file_path, "w", "utf-8") as out_file:
            tsv_writer = csv.writer(out_file, delimiter='\t')
            for line in content_dict[key]:
                if len(line) <= 0:
                    continue
                tsv_writer.writerow(["ใ".join(line)])
        start, end = my_util.check_evidence_paragraph(content_dict[key])
        evidence_file_path = os.path.join(test_all_data_path, key + "_evidence.tsv")
        # Evidence paragraphs only; column 2 is the previous paragraph.
        with codecs.open(evidence_file_path, "w", "utf-8") as out_file:
            tsv_writer = csv.writer(out_file, delimiter='\t')
            last_line = None
            for line in content_dict[key][start:end]:
                if len(line) <= 0:
                    continue
                last_line = "" if last_line is None else last_line
                tsv_writer.writerow(["ใ".join(line), last_line])
                last_line = "ใ".join(line)
if __name__ == '__main__':
analyse_cl_data()
# generate_data()
|
from get_ohlcv_data import load_asset_dfs
from strategies.n_over_a_strat import NOverAStrategy
TEST_START_DATE = '2019-01-01'
TEST_END_DATE = '2021-01-01'
INITIAL_CASH=10000
COMMISSION_AND_SLIPPAGE = 0.01
def test_backtest_strategy():
    # Smoke test: run the full backtest over the 2019-2020 window and only
    # check that it completes without raising (no assertions on results).
    asset_dfs = load_asset_dfs()
    strategy = NOverAStrategy(asset_dfs)
    strategy.backtest(TEST_START_DATE, TEST_END_DATE, INITIAL_CASH, COMMISSION_AND_SLIPPAGE)
import json
from talentmap_api.common.serializers import PrefetchedSerializer, StaticRepresentationField
from talentmap_api.messaging.models import Notification
class NotificationSerializer(PrefetchedSerializer):
    """Serializer for Notification objects; decodes the JSON `meta` field."""

    owner = StaticRepresentationField(read_only=True)

    class Meta:
        model = Notification
        fields = "__all__"
        # Bug fix: ("is_read") is just a parenthesized string, so membership
        # checks matched single characters; a one-element tuple needs the
        # trailing comma.
        writable_fields = ("is_read",)

    def to_representation(self, data):
        data = super(NotificationSerializer, self).to_representation(data)
        try:
            data['meta'] = json.loads(data.get('meta'))
        except (TypeError, ValueError):
            # meta is absent or not valid JSON. The original bare except
            # returned {} here, silently discarding the whole record; keep
            # the record and leave meta as-is instead.
            pass
        return data
|
import json
import os
def config() -> dict:
    """Load and return the `config.json` file located beside this module."""
    path = os.path.join(os.path.dirname(__file__), 'config.json')
    with open(path) as handle:
        data = json.load(handle)
    return data
|
import io
import json
from waitress import serve
from flask import Flask, request, render_template, make_response
from utils import extension_validation, process_data_for_output
# Single module-level Flask application instance used by the routes below.
app = Flask(__name__)
@app.route('/', methods=["POST", "GET"])
def form():
    """Render the upload form; on POST, validate and convert the upload.

    Template context: error/error_message on failure, output/enable_output
    on success, empty on a plain GET.
    """
    if request.method == 'POST':
        # .get() yields None for a missing field instead of raising a
        # 400 BadRequestKeyError, so the "No File" branch is reachable.
        f = request.files.get('data_file')
        if not f:
            context = {"error": True, "error_message": "No File"}
        else:
            file_check = extension_validation(f.filename)
            if file_check:
                # utf-8-sig transparently drops a leading BOM, if present.
                stream = io.StringIO(f.stream.read().decode("utf-8-sig"), newline=None)
                final_data, error = process_data_for_output(stream)
                if error:
                    context = {"error": True, "error_message": error}
                else:
                    context = {"output": json.dumps(final_data, indent=2), "enable_output": True}
            else:
                context = {"error": True, "error_message": "Incorrect format"}
    else:
        context = dict()
    return render_template("base.html", **context)
@app.route('/sample', methods=["GET"])
def get_sample_json():
    """Serve sample.json as a download, with an inline fallback payload."""
    si = io.StringIO()
    try:
        # Context manager closes the handle (the original leaked it open).
        with open("sample.json", "r") as sample_file:
            file = sample_file.read()
    except OSError:
        # just in case when sample.json is not available
        file = '''{"reference":{"ref-temp":50,"ref-hum":50},"data":{"temp-1":{"humidity":[{"timestamp":"2007-04-05T22:12","data":45}],"thermometer":[{"timestamp":"2007-04-05T22:00","data":72.4}]},"temp-2":{"thermometer":[{"timestamp":"2007-04-05T22:12","data":69.4}]}}}'''
    # The original json.loads(json.dumps(file)) round-trip was a no-op on a
    # plain string; write the text straight through.
    si.write(file)
    output = make_response(si.getvalue())
    output.headers["Content-Disposition"] = "attachment; filename=sample.json"
    output.headers["Content-type"] = "text/json"
    return output
if __name__ == "__main__":
    # Serve with waitress, a production-grade WSGI server.
    serve(app, host='0.0.0.0', port=5000)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
import os
from django.conf import settings
class LineServerConfig(AppConfig):
    """App config for line_server; seeds the LineText table on startup."""
    name = 'line_server'
    def ready(self):
        """Run one-time DB initialization once the app registry is loaded."""
        pre_process()
# one-time initialization when server is up
def pre_process():
    """Seed the LineText table from the bundled text file, exactly once.

    Does nothing if the table is already populated.
    """
    from .models import LineText
    initial_count = LineText.objects.count()
    if initial_count == 0:
        FILE_PATH = "{}/assets/adam_smith.txt".format(settings.BASE_DIR)
        line_text_list = []
        with open(FILE_PATH) as f:
            for idx, line in enumerate(f):
                # todo: consider compressing the line_text
                # on average, after compression text size will be 1/3 of original
                lt = LineText(line_num=idx, line_text=line)
                line_text_list.append(lt)
                # Flush every 10000 lines so a very large file never holds
                # all rows in memory; the modulo test only fires at exactly
                # 10000 because the buffer is reset after each flush.
                if len(line_text_list) % 10000 == 0:
                    LineText.objects.bulk_create(line_text_list)
                    line_text_list = []
            # store the remaining (fewer than 10000) objects
            LineText.objects.bulk_create(line_text_list)
        count = LineText.objects.count()
        print("{} lines of text added to db.".format(count))
    else:
        print("Already initialized.")
class Solution:
    def decodeString(self, s):
        """Expand an encoded string such as "3[a2[c]]" into "accaccacc".

        Keeps an explicit stack of (pending repeat count, accumulated
        prefix) frames: '[' pushes the current frame, ']' pops it and
        repeats the segment decoded since the matching '['.
        """
        frames = []
        decoded, repeat = "", 0
        for ch in s:
            if ch.isdigit():
                repeat = repeat * 10 + int(ch)
            elif ch == '[':
                frames.append((repeat, decoded))
                decoded, repeat = "", 0
            elif ch == ']':
                count, prefix = frames.pop()
                decoded = prefix + decoded * count
                repeat = 0
            else:
                decoded += ch
        return decoded
# Demo: expected output is "abcabccdcdcdef".
print(Solution().decodeString("2[abc]3[cd]ef"))
# -*- coding: utf-8 -*-
import os
# NOTE(review): hard-coded Windows user path (folder name appears
# mojibake-encoded); adjust before running on another machine.
path = r'C:\Users\sssh\OneDrive\Desktop\ะะพะฒะฐั ะฟะฐะฟะบะฐ'
def rename_all_files(path):
    """Recursively rename every .txt file under *path*.

    The last four characters of the file's stem are dropped and '_R19'
    is appended, e.g. 'abcd1234.txt' -> 'abcd_R19.txt'. Each new name
    is printed as it is applied.
    """
    for folder, _subdirs, filenames in os.walk(path):
        for fname in filenames:
            if not fname.endswith('.txt'):
                continue
            stem = fname.split('.')[0]
            # Deliberately len(stem) - 4 (not [:-4]): for stems shorter
            # than 4 characters this keeps the original's behavior.
            trimmed = stem[:len(stem) - 4]
            new_fname = '{}_R19.txt'.format(trimmed)
            os.rename(os.path.join(folder, fname),
                      os.path.join(folder, new_fname))
            print(new_fname)
# Apply the renaming to the configured directory.
rename_all_files(path)
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramรจtre 'crรฉer' de la commande 'navire'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmCreer(Parametre):

    """The 'navire créer' (ship create) subcommand.
    """

    def __init__(self):
        """Parameter constructor."""
        Parametre.__init__(self, "créer", "create")
        self.schema = "<modele_navire>"
        self.aide_courte = "crée un navire sur un modèle"
        self.aide_longue = \
            "Crée un navire sur un modèle existant. Cette commande " \
            "crée un navire mais ne le place dans aucune étendue d'eau."

    def interpreter(self, personnage, dic_masques):
        """Interpret the parameter."""
        # Fetch the ship model from the parsed command masks
        modele = dic_masques["modele_navire"].modele
        navire = importeur.navigation.creer_navire(modele)
        # Graph generation: regenerate when the stored graph's size does
        # not match the n^2 - n ordered room pairs of this ship
        if (len(navire.salles) ** 2 - len(navire.salles)) != \
                len(modele.graph):
            personnage << "Génération du graph du modèle {}.".format(
                    modele.cle)
            importeur.navigation.nav_logger.info(
                    "Calcul du graph du modèle de navire {}.".format(
                    modele.cle))
            modele.generer_graph()
            importeur.navigation.nav_logger.info(
                    "Génération du graph terminée.")
        personnage << "Le navire {} a bien été créé.".format(navire.cle)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-14 10:05
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the AccessGroup model."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='AccessGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64, unique=True)),
                ('supergroup', models.BooleanField(default=False)),
                ('can_be_shared_with', models.BooleanField(default=True)),
                ('auto_share_groups', models.ManyToManyField(blank=True, related_name='_accessgroup_auto_share_groups_+', to='django_group_access.AccessGroup')),
                ('can_share_with', models.ManyToManyField(blank=True, to='django_group_access.AccessGroup')),
                ('members', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('name',),
            },
        ),
    ]
|
# Generated by Django 2.0.3 on 2019-11-13 09:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration replacing article.img with img_source."""

    dependencies = [
        ('blog', '0007_auto_20191113_1155'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='article',
            name='img',
        ),
        migrations.AddField(
            model_name='article',
            name='img_source',
            field=models.ImageField(default='1.png', upload_to='media/article'),
        ),
    ]
|
def fac(x):
    """Return x! for a non-negative integer x.

    The base case covers x <= 1, so fac(0) returns 1 (the original
    only stopped at x == 1 and recursed forever for anything below).
    """
    if x <= 1:
        return 1
    return x * fac(x - 1)
def fib(n):
    """Return the n-th Fibonacci number with fib(0) == fib(1) == 1.

    Iterative O(n) form; the original naive double recursion was
    O(2^n). Values are identical for every n, including n < 0
    (returns 1, as the original's `n < 2` base case did).
    """
    a, b = 1, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return b
# Precompute fib(10) once at import time.
fib_monster = fib(10)
def bar(x):
    """Ignore *x* and return the constant 5."""
    return 5
# -*- coding: utf-8 -*-
#
# Copyright 2016-2023 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Options for BigMLer topic model
"""
def get_topic_model_options(defaults=None):
    """Adding arguments for the topic model subcommand

    Returns a dict mapping each command-line flag to its argparse
    keyword arguments; `defaults` supplies per-flag default values.
    """
    if defaults is None:
        defaults = {}
    options = {
        # Input fields to include in the topic model.
        '--topic-fields': {
            "action": 'store',
            "dest": 'topic_fields',
            "default": defaults.get('topic_fields', None),
            "help": ("Comma-separated list of input fields"
                     " (predictors) to create the topic model.")},

        # If a BigML topic model is provided, the script will
        # use it to generate predictions.
        '--topic-model': {
            'action': 'store',
            'dest': 'topic_model',
            'default': defaults.get('topic_model', None),
            'help': "BigML topic model Id."},

        # The path to a file containing topic model ids.
        '--topic-models': {
            'action': 'store',
            'dest': 'topic_models',
            'default': defaults.get('topic_models', None),
            'help': ("Path to a file containing topicmodel/ids."
                     " One topicmodel"
                     " per line (e.g., "
                     "topicmodel/50a206a8035d0706dc000376"
                     ").")},

        # If a BigML json file containing a topic model
        # structure is provided,
        # the script will use it.
        '--topic-model-file': {
            'action': 'store',
            'dest': 'topic_model_file',
            'default': defaults.get('topic_model_file', None),
            'help': "BigML topic model JSON structure file."},

        # Whether to include a contiguous sequence of two items from a given
        # sequence of text.
        '--bigrams': {
            "action": 'store_true',
            "dest": 'bigrams',
            "default": defaults.get('bigrams', False),
            "help": ("Whether to include a contiguous sequence of two items"
                     " from a given sequence of text.")},

        # Whether the analysis is case-sensitive or not.
        '--case-sensitive': {
            "action": 'store_true',
            "dest": 'case_sensitive',
            "default": defaults.get('case_sensitive', False),
            "help": "Whether the analysis is case-sensitive or not."},

        # Comma-separated list of terms to be excluded from term analysis.
        '--excluded-terms': {
            'action': 'store',
            'dest': 'excluded_terms',
            'default': defaults.get('excluded_terms', None),
            'help': ("Comma-separated list of terms to be excluded from "
                     "text analysis.")},

        # Number of topics to be generated.
        '--number-of-topics': {
            'action': 'store',
            'dest': 'number_of_topics',
            'type': int,
            'default': defaults.get('number_of_topics', None),
            'help': ("Number of topics to be generated for the model.")},

        # Minimum number of terms to name the topic.
        '--minimum-name-terms': {
            'action': 'store',
            'dest': 'minimum_name_terms',
            'type': int,
            'default': defaults.get('minimum_name_terms', None),
            'help': ("Number of terms to be used to name the topic.")},

        # The maximum number of terms used for the topic model vocabulary.
        '--term-limit': {
            'action': 'store',
            'dest': 'term_limit',
            'type': int,
            'default': defaults.get('term_limit', 4096),
            'help': ("The maximum number of terms used for the topic"
                     " model vocabulary.")},

        # The size of the most influential terms recorded.
        '--top-n-terms': {
            'action': 'store',
            'dest': 'top_n_terms',
            'type': int,
            'default': defaults.get('top_n_terms', 10),
            'help': "The size of the most influential terms recorded."},

        # Whether to use stop words.
        '--use-stopwords': {
            "action": 'store_true',
            "dest": 'use_stopwords',
            "default": defaults.get('use_stopwords', False),
            "help": "Whether to use stop words."},

        # Does not create a topic model, just a dataset.
        '--no-topic-model': {
            'action': 'store_true',
            'dest': 'no_topic_model',
            'default': defaults.get('no_topic_model', False),
            'help': "Do not create a topic model."},

        # Create a topic model, not just a dataset.
        '--no-no-topic-model': {
            'action': 'store_false',
            'dest': 'no_topic_model',
            'default': defaults.get('no_topic_model', False),
            'help': "Create a topic model."},

        # The path to a file containing topic model attributes.
        '--topic-model-attributes': {
            'action': 'store',
            'dest': 'topic_model_attributes',
            'default': defaults.get('topic_model_attributes', None),
            'help': ("Path to a json file describing topic model"
                     " attributes.")}}

    return options
|
from Eulerstep import *
import numpy as np
#simulates the sin/cos function
#simulates the sin/cos function
def func(t, vector):
    """Right-hand side of the harmonic oscillator: d/dt (x, y) = (-y, x).

    *t* is unused (the system is autonomous); *vector* holds the current
    (x, y) state. Returns a flat length-2 float array.
    """
    state = np.zeros(2)
    state[0] = -vector[1]
    state[1] = vector[0]
    return state
# vector_euler(lorenz, 0, 1, [1, 1, 1], 0.05, "plot")
# vector_euler(lorenz, 0, 1, [1, 1, 1], 0.05/2, "plot")
# vector_euler(lorenz, 0, 1, [1, 1, 1], 0.05/4, "plot")
# vector_euler(lorenz, 0, 1, [1, 1, 1], 0.05/8, "plot")
# vector_rk4(f, t_0, t_fin, y_0, h, output)
# Convergence check: run RK4 on the oscillator with halving step sizes.
vector_rk4(func, 0, 1, [1, 2], 0.05, "hi")
vector_rk4(func, 0, 1, [1, 2], 0.05/2, "hi")
vector_rk4(func, 0, 1, [1, 2], 0.05/4, "hi")
# vector_rk4(lorenz, 0, 10, [1, 1, 1], 0.05, "plot")
# vector_rk4(lorenz, 0, 1, [1, 1, 1], 0.05/2, "plot")
# vector_rk4(lorenz, 0, 1, [1, 1, 1], 0.05/4, "plot")
# vector_rk4(lorenz, 0, 1, [1, 1, 1], 0.05/8, "plot")
# rk_iterate(f, start_time, start_position, h, output)
|
species(
label = 'CC(C[CH]CC[O])OO(3840)',
structure = SMILES('CC(C[CH]CC[O])OO'),
E0 = (-32.494,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,2750,2800,2850,1350,1500,750,1050,1375,1000,3615,1310,387.5,850,1000,1380,1390,370,380,2900,435,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.438342,0.109395,-0.000153683,1.40416e-07,-5.23088e-11,-3759.73,39.2697], Tmin=(100,'K'), Tmax=(809.303,'K')), NASAPolynomial(coeffs=[4.03663,0.0652995,-3.12181e-05,5.97981e-09,-4.14571e-13,-3764.29,23.074], Tmin=(809.303,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-32.494,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(478.082,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCOJ) + radical(RCCJCC)"""),
)
species(
label = 'CH2O(19)',
structure = SMILES('C=O'),
E0 = (-119.055,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (30.026,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(4140.62,'J/mol'), sigma=(3.59,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=2.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.79372,-0.00990833,3.7322e-05,-3.79285e-08,1.31773e-11,-14379.2,0.602798], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16953,0.00619321,-2.25056e-06,3.65976e-10,-2.20149e-14,-14548.7,6.04208], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-119.055,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(83.1447,'J/(mol*K)'), label="""CH2O""", comment="""Thermo library: FFCM1(-)"""),
)
species(
label = 'C=CCC(C)OO(1413)',
structure = SMILES('C=CCC(C)OO'),
E0 = (-141.267,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,1380,1390,370,380,2900,435,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,3615,1310,387.5,850,1000,3010,987.5,1337.5,450,1655,245.623,245.623],'cm^-1')),
HinderedRotor(inertia=(0.609581,'amu*angstrom^2'), symmetry=1, barrier=(26.0973,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.609581,'amu*angstrom^2'), symmetry=1, barrier=(26.0973,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.201241,'amu*angstrom^2'), symmetry=1, barrier=(8.61547,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.420573,'amu*angstrom^2'), symmetry=1, barrier=(18.0055,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00279423,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (102.132,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3800.23,'J/mol'), sigma=(6.47665,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=593.59 K, Pc=31.74 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.481619,0.070817,-5.22184e-05,1.96663e-08,-3.01737e-12,-16858.7,28.0792], Tmin=(100,'K'), Tmax=(1516.1,'K')), NASAPolynomial(coeffs=[15.5922,0.0309501,-1.27749e-05,2.32202e-09,-1.57362e-13,-21440.5,-51.1086], Tmin=(1516.1,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-141.267,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(386.623,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH)"""),
)
species(
label = 'H(3)',
structure = SMILES('[H]'),
E0 = (211.792,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,-2.38914e-13,3.12709e-16,-1.33367e-19,1.7499e-23,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(4383.16,'K')), NASAPolynomial(coeffs=[2.50003,-3.04997e-08,1.01101e-11,-1.48797e-15,8.20356e-20,25472.7,-0.459785], Tmin=(4383.16,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'CC(C=CCC[O])OO(4125)',
structure = SMILES('CC(C=CCC[O])OO'),
E0 = (-110.617,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2750,2800,2850,1350,1500,750,1050,1375,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,3615,1310,387.5,850,1000,1380,1390,370,380,2900,435,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.311934,0.0878456,-7.35458e-05,3.48576e-08,-7.27098e-12,-13177,35.6417], Tmin=(100,'K'), Tmax=(1084.35,'K')), NASAPolynomial(coeffs=[9.89397,0.0524985,-2.46489e-05,4.79508e-09,-3.39889e-13,-15255,-11.3619], Tmin=(1084.35,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-110.617,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(CCOJ)"""),
)
species(
label = 'CC(CC=CC[O])OO(4126)',
structure = SMILES('CC(CC=CC[O])OO'),
E0 = (-104.724,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2750,2800,2850,1350,1500,750,1050,1375,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,3615,1310,387.5,850,1000,1380,1390,370,380,2900,435,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.180137,0.0916733,-9.20246e-05,5.95706e-08,-1.74027e-11,-12464.6,35.6033], Tmin=(100,'K'), Tmax=(796.995,'K')), NASAPolynomial(coeffs=[7.05056,0.0571917,-2.71276e-05,5.28585e-09,-3.74692e-13,-13559.7,4.01639], Tmin=(796.995,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-104.724,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(457.296,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)OsHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(CCOJ)"""),
)
species(
label = 'CC(C[CH]CC=O)OO(4127)',
structure = SMILES('CC(C[CH]CC=O)OO'),
E0 = (-175.727,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2782.5,750,1395,475,1775,1000,1380,1390,370,380,2900,435,3025,407.5,1350,352.5,2750,2800,2850,1350,1500,750,1050,1375,1000,3615,1310,387.5,850,1000,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (131.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.0661917,0.0925617,-8.55042e-05,4.54264e-08,-1.04374e-11,-20998.5,35.8798], Tmin=(100,'K'), Tmax=(1009.88,'K')), NASAPolynomial(coeffs=[10.5866,0.0508922,-2.36119e-05,4.56895e-09,-3.23095e-13,-23123.4,-14.9786], Tmin=(1009.88,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-175.727,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(453.139,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(CCJCC=O)"""),
)
species(
label = 'C[CH]OO(225)',
structure = SMILES('C[CH]OO'),
E0 = (11.0077,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3615,1310,387.5,850,1000,2750,2800,2850,1350,1500,750,1050,1375,1000],'cm^-1')),
HinderedRotor(inertia=(0.316495,'amu*angstrom^2'), symmetry=1, barrier=(13.6004,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.155184,'amu*angstrom^2'), symmetry=1, barrier=(6.66494,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0102028,'amu*angstrom^2'), symmetry=1, barrier=(34.7943,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (61.0599,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.64579,0.0291918,-1.96294e-05,6.94664e-09,-1.0331e-12,1372.92,15.2463], Tmin=(100,'K'), Tmax=(1505.37,'K')), NASAPolynomial(coeffs=[7.52069,0.0162383,-6.72201e-06,1.23043e-09,-8.37881e-14,-94.7752,-10.2663], Tmin=(1505.37,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(11.0077,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(220.334,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(CCsJOOH)"""),
)
species(
label = 'C=CCC[O](1344)',
structure = SMILES('C=CCC[O]'),
E0 = (49.3559,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,263.534,264.574,1655.14],'cm^-1')),
HinderedRotor(inertia=(0.108742,'amu*angstrom^2'), symmetry=1, barrier=(5.37585,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.108265,'amu*angstrom^2'), symmetry=1, barrier=(5.37506,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (71.0978,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.54294,0.034052,-1.45097e-05,2.03587e-09,1.55494e-14,5986.72,18.8853], Tmin=(100,'K'), Tmax=(2019.45,'K')), NASAPolynomial(coeffs=[12.9949,0.0193779,-8.0879e-06,1.39413e-09,-8.8004e-14,536.014,-41.9289], Tmin=(2019.45,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(49.3559,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(CCOJ)"""),
)
species(
label = '[CH2][O](221)',
structure = SMILES('[CH2][O]'),
E0 = (192.903,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (30.026,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.88411,-0.00363913,3.28554e-05,-4.13626e-08,1.59638e-11,23210.8,7.47974], Tmin=(100,'K'), Tmax=(933.052,'K')), NASAPolynomial(coeffs=[6.69328,0.000290113,8.61343e-07,-1.56333e-10,7.33633e-15,21991.3,-9.6039], Tmin=(933.052,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(192.903,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), comment="""Thermo library: FFCM1(-) + radical(CsJOH) + radical(H3COJ)"""),
)
species(
label = '[CH2][CH]CC(C)OO(1416)',
structure = SMILES('[CH2][CH]CC(C)OO'),
E0 = (130.643,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,1380,1390,370,380,2900,435,3025,407.5,1350,352.5,2750,2800,2850,1350,1500,750,1050,1375,1000,3615,1310,387.5,850,1000,200,800],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (102.132,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.87884,0.073897,-7.16436e-05,4.40717e-08,-1.20929e-11,15820.5,30.7127], Tmin=(100,'K'), Tmax=(849.348,'K')), NASAPolynomial(coeffs=[7.02983,0.0449301,-2.04883e-05,3.92063e-09,-2.75151e-13,14775.6,2.04187], Tmin=(849.348,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(130.643,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(382.466,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJC) + radical(RCCJ)"""),
)
species(
label = 'CC([CH]CCC[O])OO(4128)',
structure = SMILES('CC([CH]CCC[O])OO'),
E0 = (-26.5373,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2770,2790,2810,2830,2850,1425,1437.5,1450,1225,1250,1275,1270,1305,1340,700,750,800,300,350,400,2750,2800,2850,1350,1500,750,1050,1375,1000,3615,1310,387.5,850,1000,1380,1390,370,380,2900,435,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.556824,0.0913001,-4.10629e-05,-1.002e-07,1.21853e-10,-3083.81,35.7157], Tmin=(100,'K'), Tmax=(468.167,'K')), NASAPolynomial(coeffs=[7.32095,0.0604996,-2.88598e-05,5.57135e-09,-3.90116e-13,-4012.97,5.05705], Tmin=(468.167,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-26.5373,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(478.082,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCOJ) + radical(CCJCOOH)"""),
)
species(
label = 'CC(CC[CH]C[O])OO(4122)',
structure = SMILES('CC(CC[CH]C[O])OO'),
E0 = (-27.0502,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.15002,0.0991424,-0.000102774,6.71469e-08,-1.94122e-11,-3110.9,38.1974], Tmin=(100,'K'), Tmax=(811.769,'K')), NASAPolynomial(coeffs=[8.11511,0.0584154,-2.75166e-05,5.34102e-09,-3.77651e-13,-4452.76,0.0466454], Tmin=(811.769,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-27.0502,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(478.082,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCOJ) + radical(CCJCO)"""),
)
species(
label = 'CC(C[CH]C[CH]O)OO(4129)',
structure = SMILES('CC(C[CH]C[CH]O)OO'),
E0 = (-77.9012,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3615,1310,387.5,850,1000,3000,3050,390,425,1340,1360,335,370,2750,2800,2850,1350,1500,750,1050,1375,1000,1380,1390,370,380,2900,435,3615,1277.5,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-1.02346,0.120279,-0.000170152,1.43796e-07,-4.93695e-11,-9197.88,40.0615], Tmin=(100,'K'), Tmax=(814.22,'K')), NASAPolynomial(coeffs=[9.30383,0.05576,-2.58985e-05,4.89263e-09,-3.36434e-13,-10422.7,-4.83342], Tmin=(814.22,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-77.9012,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(RCCJCC) + radical(CCsJOH)"""),
)
species(
label = 'C[C](CCCC[O])OO(4130)',
structure = SMILES('C[C](CCCC[O])OO'),
E0 = (-40.0532,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.557768,0.111472,-0.00015493,1.39329e-07,-5.15575e-11,-4664.04,37.4285], Tmin=(100,'K'), Tmax=(797.823,'K')), NASAPolynomial(coeffs=[5.00019,0.0646011,-3.10767e-05,5.97932e-09,-4.16052e-13,-4946.04,15.6606], Tmin=(797.823,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-40.0532,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(478.082,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCOJ) + radical(C2CsJOOH)"""),
)
species(
label = 'CC(CCC[CH][O])OO(4131)',
structure = SMILES('CC(CCC[CH][O])OO'),
E0 = (-46.6544,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.165466,0.0999446,-6.91853e-05,-5.33829e-08,8.78163e-11,-5489.03,34.3246], Tmin=(100,'K'), Tmax=(491.76,'K')), NASAPolynomial(coeffs=[8.65178,0.0586612,-2.78888e-05,5.36174e-09,-3.74031e-13,-6659.15,-4.00459], Tmin=(491.76,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-46.6544,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(478.082,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCsJOH) + radical(CCOJ)"""),
)
species(
label = 'CC(C[CH][CH]CO)OO(4132)',
structure = SMILES('CC(C[CH][CH]CO)OO'),
E0 = (-58.2971,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.420351,0.104466,-0.000122737,9.05021e-08,-2.86105e-11,-6859.02,40.8582], Tmin=(100,'K'), Tmax=(757.42,'K')), NASAPolynomial(coeffs=[8.88256,0.0553345,-2.5431e-05,4.85096e-09,-3.38424e-13,-8268.19,-1.43781], Tmin=(757.42,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-58.2971,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(RCCJCC) + radical(CCJCO)"""),
)
species(
label = '[CH2]C(CCCC[O])OO(4133)',
structure = SMILES('[CH2]C(CCCC[O])OO'),
E0 = (-12.9897,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2764.29,2778.57,2792.86,2807.14,2821.43,2835.71,2850,1425,1433.33,1441.67,1450,1225,1241.67,1258.33,1275,1270,1293.33,1316.67,1340,700,733.333,766.667,800,300,333.333,366.667,400,3615,1310,387.5,850,1000,1380,1390,370,380,2900,435,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0944714,0.10186,-9.66213e-05,3.13258e-08,1.30358e-11,-1426.14,37.5769], Tmin=(100,'K'), Tmax=(559.813,'K')), NASAPolynomial(coeffs=[8.40775,0.0586512,-2.7845e-05,5.39449e-09,-3.79918e-13,-2652.93,-0.963762], Tmin=(559.813,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-12.9897,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(478.082,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CJCOOH) + radical(CCOJ)"""),
)
species(
label = 'CC(CCCC[O])O[O](4134)',
structure = SMILES('CC(CCCC[O])O[O]'),
E0 = (-74.9476,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.379545,0.10469,-0.0001319,1.09815e-07,-3.89406e-11,-8864.41,37.2582], Tmin=(100,'K'), Tmax=(774.506,'K')), NASAPolynomial(coeffs=[6.52458,0.0600282,-2.79629e-05,5.33768e-09,-3.71139e-13,-9663.79,7.45756], Tmin=(774.506,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-74.9476,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(482.239,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCOJ) + radical(ROOJ)"""),
)
species(
label = 'CC([CH][CH]CCO)OO(4135)',
structure = SMILES('CC([CH][CH]CCO)OO'),
E0 = (-57.7842,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.745226,0.113764,-0.000155616,1.31837e-07,-4.60113e-11,-6787.99,41.8185], Tmin=(100,'K'), Tmax=(801.396,'K')), NASAPolynomial(coeffs=[7.98575,0.0575803,-2.68608e-05,5.10048e-09,-3.52393e-13,-7782.61,4.1551], Tmin=(801.396,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-57.7842,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCJCOOH) + radical(RCCJCC)"""),
)
species(
label = 'C[C](C[CH]CCO)OO(4136)',
structure = SMILES('C[C](C[CH]CCO)OO'),
E0 = (-71.3,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.668923,0.114708,-0.000166474,1.49839e-07,-5.42344e-11,-8418.91,39.5312], Tmin=(100,'K'), Tmax=(824.648,'K')), NASAPolynomial(coeffs=[5.53356,0.0619437,-2.9246e-05,5.55138e-09,-3.82095e-13,-8670.75,15.4791], Tmin=(824.648,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-71.3,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(RCCJCC) + radical(C2CsJOOH)"""),
)
species(
label = '[CH2]C(C[CH]CCO)OO(4137)',
structure = SMILES('[CH2]C(C[CH]CCO)OO'),
E0 = (-44.2366,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.815972,0.114182,-0.000151682,1.23344e-07,-4.16838e-11,-5154.88,41.7653], Tmin=(100,'K'), Tmax=(784.821,'K')), NASAPolynomial(coeffs=[9.41416,0.0550878,-2.54469e-05,4.82433e-09,-3.33639e-13,-6546.48,-3.74602], Tmin=(784.821,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-44.2366,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(RCCJCC) + radical(CJCOOH)"""),
)
species(
label = 'CC(C[CH]CCO)O[O](4138)',
structure = SMILES('CC(C[CH]CCO)O[O]'),
E0 = (-106.194,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.516302,0.10826,-0.000144769,1.22278e-07,-4.25585e-11,-12618.2,39.4507], Tmin=(100,'K'), Tmax=(815.397,'K')), NASAPolynomial(coeffs=[7.12873,0.0572423,-2.60546e-05,4.89076e-09,-3.35569e-13,-13415.7,6.88296], Tmin=(815.397,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-106.194,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(478.082,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(ROOJ) + radical(RCCJCC)"""),
)
species(
label = 'CC(CC=CCO)OO(4139)',
structure = SMILES('CC(CC=CCO)OO'),
E0 = (-330.429,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.184472,0.0947758,-8.23814e-05,3.95842e-08,-8.0488e-12,-39593.1,36.0286], Tmin=(100,'K'), Tmax=(1143.23,'K')), NASAPolynomial(coeffs=[13.0768,0.0483771,-2.15039e-05,4.08441e-09,-2.85868e-13,-42625.3,-29.7247], Tmin=(1143.23,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-330.429,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(478.082,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)OsHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH)"""),
)
species(
label = 'CC(CCCC=O)OO(4140)',
structure = SMILES('CC(CCCC=O)OO'),
E0 = (-375.629,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0989926,0.0973328,-9.21584e-05,5.1833e-08,-1.2771e-11,-45036.2,33.9829], Tmin=(100,'K'), Tmax=(942.199,'K')), NASAPolynomial(coeffs=[9.68481,0.0557975,-2.60345e-05,5.04691e-09,-3.57163e-13,-46879.9,-12.6359], Tmin=(942.199,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-375.629,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(478.082,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cds-OdCsH)"""),
)
species(
label = 'CC(C=CCCO)OO(4141)',
structure = SMILES('CC(C=CCCO)OO'),
E0 = (-336.322,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.47961,0.0958381,-8.03269e-05,3.53532e-08,-6.39556e-12,-40286.9,37.6074], Tmin=(100,'K'), Tmax=(1296.47,'K')), NASAPolynomial(coeffs=[16.9212,0.0421529,-1.82154e-05,3.41531e-09,-2.37098e-13,-44799,-50.8599], Tmin=(1296.47,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-336.322,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(478.082,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH)"""),
)
species(
label = 'CH2(S)(24)',
structure = SMILES('[CH2]'),
E0 = (418.921,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1358.21,2621.43,3089.55],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19331,-0.00233105,8.15676e-06,-6.62986e-09,1.93233e-12,50366.2,-0.746734], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.13502,0.00289594,-8.16668e-07,1.13573e-10,-6.36263e-15,50504.1,4.06031], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(418.921,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: FFCM1(-)"""),
)
species(
label = '[O]CC[CH]CCOO(4142)',
structure = SMILES('[O]CC[CH]CCOO'),
E0 = (3.9292,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2764.29,2778.57,2792.86,2807.14,2821.43,2835.71,2850,1425,1433.33,1441.67,1450,1225,1241.67,1258.33,1275,1270,1293.33,1316.67,1340,700,733.333,766.667,800,300,333.333,366.667,400,3615,1310,387.5,850,1000,3025,407.5,1350,352.5,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (118.131,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.224523,0.0966185,-0.00015006,1.45757e-07,-5.54069e-11,595.355,34.9899], Tmin=(100,'K'), Tmax=(833.73,'K')), NASAPolynomial(coeffs=[1.74486,0.0592574,-2.87473e-05,5.49814e-09,-3.7902e-13,1386.83,34.1985], Tmin=(833.73,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(3.9292,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(407.409,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + radical(RCCJCC) + radical(CCOJ)"""),
)
species(
label = 'OH(5)',
structure = SMILES('[OH]'),
E0 = (28.372,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3287.46],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (17.0073,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(665.16,'J/mol'), sigma=(2.75,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.48579,0.001334,-4.70054e-06,5.64393e-09,-2.06324e-12,3411.96,1.99789], Tmin=(100,'K'), Tmax=(1005.24,'K')), NASAPolynomial(coeffs=[2.88226,0.00103867,-2.35641e-07,1.40204e-11,6.3479e-16,3669.56,5.59047], Tmin=(1005.24,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(28.372,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""OH""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'CC1CC(CC[O])O1(4143)',
structure = SMILES('CC1CC(CC[O])O1'),
E0 = (-127.068,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (115.15,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.747719,0.0632236,-1.79766e-05,-1.70849e-08,1.08384e-11,-15158.2,28.0594], Tmin=(100,'K'), Tmax=(946.392,'K')), NASAPolynomial(coeffs=[10.7064,0.0409766,-1.41679e-05,2.38783e-09,-1.58225e-13,-17931.8,-24.1316], Tmin=(946.392,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-127.068,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(444.824,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-CsH) + group(Cs-CsCsOsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + ring(Oxetane) + radical(CCOJ)"""),
)
species(
label = '[CH2]C(CC[O])C(C)OO(4144)',
structure = SMILES('[CH2]C(CC[O])C(C)OO'),
E0 = (-24.2171,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2750,2800,2850,1350,1500,750,1050,1375,1000,3615,1310,387.5,850,1000,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.208492,0.101645,-0.000103241,5.44021e-08,-6.66578e-12,-2769.46,37.4458], Tmin=(100,'K'), Tmax=(619.973,'K')), NASAPolynomial(coeffs=[8.78625,0.0566132,-2.57439e-05,4.89288e-09,-3.41122e-13,-4134.62,-3.66359], Tmin=(619.973,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-24.2171,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(478.082,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsCsH) + group(Cs-CsCsOsH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(Isobutyl) + radical(CCOJ)"""),
)
species(
label = '[CH2]C(C[O])CC(C)OO(3841)',
structure = SMILES('[CH2]C(C[O])CC(C)OO'),
E0 = (-27.5643,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2750,2800,2850,1350,1500,750,1050,1375,1000,3615,1310,387.5,850,1000,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(4518.77,'J/mol'), sigma=(7.72648,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=705.82 K, Pc=22.23 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.20811,0.101639,-0.000103213,5.43493e-08,-6.63207e-12,-3172.05,37.4444], Tmin=(100,'K'), Tmax=(619.898,'K')), NASAPolynomial(coeffs=[8.78603,0.0566137,-2.57442e-05,4.89295e-09,-3.41128e-13,-4537.12,-3.66236], Tmin=(619.898,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-27.5643,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(478.082,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCOJ) + radical(Isobutyl)"""),
)
species(
label = 'CC(CC1CCO1)OO(3844)',
structure = SMILES('CC(CC1CCO1)OO'),
E0 = (-281.269,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.30685,0.0830983,-3.7566e-05,-1.27584e-08,1.24455e-11,-33663.1,32.4389], Tmin=(100,'K'), Tmax=(938.992,'K')), NASAPolynomial(coeffs=[16.6967,0.0391969,-1.30135e-05,2.16938e-09,-1.44472e-13,-38114.2,-55.2207], Tmin=(938.992,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-281.269,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(486.397,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + ring(Oxetane)"""),
)
species(
label = 'CC([O])CC(O)CC[O](4145)',
structure = SMILES('CC([O])CC(O)CC[O]'),
E0 = (-240.567,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (132.158,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.305747,0.101946,-0.000112582,7.75944e-08,-2.31378e-11,-28785,37.658], Tmin=(100,'K'), Tmax=(797.032,'K')), NASAPolynomial(coeffs=[8.89263,0.055783,-2.57064e-05,4.92921e-09,-3.45666e-13,-30251.3,-4.63226], Tmin=(797.032,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-240.567,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(482.239,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(O2s-CsH) + group(O2s-CsH) + group(Cs-CsCsOsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCOJ) + radical(CC(C)OJ)"""),
)
species(
label = '[CH2]C[O](258)',
structure = SMILES('[CH2]C[O]'),
E0 = (188.892,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,1398.33],'cm^-1')),
HinderedRotor(inertia=(0.00547724,'amu*angstrom^2'), symmetry=1, barrier=(7.58298,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (44.0526,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.57172,0.0102136,5.90921e-06,-7.99877e-09,2.07081e-12,22733,11.7516], Tmin=(100,'K'), Tmax=(1490.83,'K')), NASAPolynomial(coeffs=[4.74093,0.0150201,-6.91919e-06,1.3118e-09,-8.9825e-14,21501.6,2.68333], Tmin=(1490.83,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(188.892,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(199.547,'J/(mol*K)'), comment="""Thermo library: FFCM1(-) + radical(CCOJ) + radical(CJCO)"""),
)
species(
label = '[CH]CC(C)OO(863)',
structure = SMILES('[CH]CC(C)OO'),
E0 = (202.946,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2750,2800,2850,1350,1500,750,1050,1375,1000,3615,1310,387.5,850,1000,1380,1390,370,380,2900,435,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (88.1051,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.10082,0.0630657,-5.47473e-05,2.52727e-08,-4.81692e-12,24513.6,24.135], Tmin=(100,'K'), Tmax=(1232.86,'K')), NASAPolynomial(coeffs=[11.8571,0.028167,-1.22867e-05,2.31216e-09,-1.60955e-13,21861.4,-30.0094], Tmin=(1232.86,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(202.946,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(311.793,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCJ2_triplet)"""),
)
species(
label = 'O(4)',
structure = SMILES('[O]'),
E0 = (243.005,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (15.9994,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(665.16,'J/mol'), sigma=(2.75,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,-2.38914e-13,3.12709e-16,-1.33367e-19,1.7499e-23,29226.7,5.11107], Tmin=(100,'K'), Tmax=(4383.16,'K')), NASAPolynomial(coeffs=[2.50003,-3.04997e-08,1.01101e-11,-1.48797e-15,8.20356e-20,29226.7,5.11085], Tmin=(4383.16,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(243.005,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""O""", comment="""Thermo library: BurkeH2O2"""),
)
# Species: 1-(hydroperoxy)hexane-derived biradical [CH2]C[CH]CC(C)OO (RMG index 4146).
# Triplet (spinMultiplicity=3) with 7 internal hindered rotors; thermo from
# group additivity (see comment string below). Auto-generated RMG-Py input data.
species(
    label = '[CH2]C[CH]CC(C)OO(4146)',
    structure = SMILES('[CH2]C[CH]CC(C)OO'),
    E0 = (106.875,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3000,3100,440,815,1455,1000,1380,1390,370,380,2900,435,3025,407.5,1350,352.5,2750,2800,2850,1350,1500,750,1050,1375,1000,3615,1310,387.5,850,1000,200,800,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (116.158,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.232501,0.0895718,-9.20838e-05,6.24051e-08,-1.89798e-11,12983.7,35.1079], Tmin=(100,'K'), Tmax=(771.288,'K')), NASAPolynomial(coeffs=[6.8284,0.0553663,-2.5564e-05,4.91109e-09,-3.44998e-13,11966.2,4.99908], Tmin=(771.288,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(106.875,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(453.139,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJ) + radical(RCCJCC)"""),
)
# Bath-gas species N2 (inert collider in the network below); thermo from the
# BurkeH2O2 library, transport from PrimaryTransportLibrary. Generated data.
species(
    label = 'N2',
    structure = SMILES('N#N'),
    E0 = (-8.69489,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (28.0135,'amu'),
    collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43375e-09,2.58635e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.97591,0.0016414,-7.19719e-07,1.25377e-10,-7.91522e-15,-1025.85,5.53754], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
# Bath-gas species Ne (monatomic inert collider). NOTE(review): transport sigma is
# given in metres (3.758e-10,'m') unlike N2's angstroms — RMG accepts both units,
# but the fallback-estimate comment below flags these parameters as low quality.
species(
    label = 'Ne',
    structure = SMILES('[Ne]'),
    E0 = (-6.19738,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (20.1797,'amu'),
    collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
# Transition states TS1..TS31, one per reaction() declared below (TSn pairs with
# reaction n). Only the ground-state energy E0 is given — no frequencies — so rates
# come from the high-pressure-limit kinetics on each reaction. Generated data.
transitionState(
    label = 'TS1',
    E0 = (-32.494,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS2',
    E0 = (106.907,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS3',
    E0 = (112.801,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS4',
    E0 = (53.5122,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS5',
    E0 = (89.6928,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS6',
    E0 = (66.2712,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS7',
    E0 = (70.4706,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS8',
    E0 = (132.455,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS9',
    E0 = (126.498,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS10',
    E0 = (51.3848,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS11',
    E0 = (116.247,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS12',
    E0 = (111.547,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS13',
    E0 = (42.7867,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS14',
    E0 = (61.9039,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS15',
    E0 = (-5.61177,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS16',
    E0 = (18.3835,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS17',
    E0 = (37.0127,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS18',
    E0 = (74.407,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS19',
    E0 = (84.5032,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS20',
    E0 = (323.546,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS21',
    E0 = (30.9062,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS22',
    E0 = (30.9062,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS23',
    E0 = (-7.52072,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS24',
    E0 = (422.85,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS25',
    E0 = (43.3619,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS26',
    E0 = (135.718,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS27',
    E0 = (132.371,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS28',
    E0 = (-24.2097,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS29',
    E0 = (78.8,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS30',
    E0 = (391.837,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
    label = 'TS31',
    E0 = (349.88,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['CC(C[CH]CC[O])OO(3840)'],
products = ['CH2O(19)', 'C=CCC(C)OO(1413)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission"""),
)
reaction(
label = 'reaction2',
reactants = ['H(3)', 'CC(C=CCC[O])OO(4125)'],
products = ['CC(C[CH]CC[O])OO(3840)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(1.46e+08,'cm^3/(mol*s)'), n=1.64, Ea=(5.73208,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2555 used for Cds-CsH_Cds-CsH;HJ
Exact match found for rate rule [Cds-CsH_Cds-CsH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction3',
reactants = ['H(3)', 'CC(CC=CC[O])OO(4126)'],
products = ['CC(C[CH]CC[O])OO(3840)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(1.46e+08,'cm^3/(mol*s)'), n=1.64, Ea=(5.73208,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2555 used for Cds-CsH_Cds-CsH;HJ
Exact match found for rate rule [Cds-CsH_Cds-CsH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction4',
reactants = ['H(3)', 'CC(C[CH]CC=O)OO(4127)'],
products = ['CC(C[CH]CC[O])OO(3840)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(9.6e+09,'cm^3/(mol*s)'), n=0.935, Ea=(17.4473,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2000,'K'), comment="""From training reaction 2782 used for CO-CsH_O;HJ
Exact match found for rate rule [CO-CsH_O;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction5',
reactants = ['C[CH]OO(225)', 'C=CCC[O](1344)'],
products = ['CC(C[CH]CC[O])OO(3840)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(0.0140176,'m^3/(mol*s)'), n=2.23645, Ea=(29.3292,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Cds-HH_Cds-CsH;CsJ] for rate rule [Cds-HH_Cds-CsH;CsJ-OsCsH]
Euclidian distance = 1.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction6',
reactants = ['[CH2][O](221)', 'C=CCC(C)OO(1413)'],
products = ['CC(C[CH]CC[O])OO(3840)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(0.0172287,'m^3/(mol*s)'), n=2.32603, Ea=(14.6351,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Cds-HH_Cds-CsH;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction7',
reactants = ['[CH2][CH]CC(C)OO(1416)', 'CH2O(19)'],
products = ['CC(C[CH]CC[O])OO(3840)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(225.36,'m^3/(mol*s)'), n=0.996465, Ea=(58.8821,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [CO-HH_O;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction8',
reactants = ['CC([CH]CCC[O])OO(4128)'],
products = ['CC(C[CH]CC[O])OO(3840)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(6.76e+09,'s^-1'), n=0.88, Ea=(158.992,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/NonDeC] for rate rule [R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/(NonDeC/Cs)]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction9',
reactants = ['CC(C[CH]CC[O])OO(3840)'],
products = ['CC(CC[CH]C[O])OO(4122)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(6.76e+09,'s^-1'), n=0.88, Ea=(158.992,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 357 used for R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/NonDeC
Exact match found for rate rule [R2H_S;C_rad_out_H/NonDeC;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction10',
reactants = ['CC(C[CH]C[CH]O)OO(4129)'],
products = ['CC(C[CH]CC[O])OO(3840)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(4500,'s^-1'), n=2.62, Ea=(129.286,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""From training reaction 322 used for R2H_S;C_rad_out_H/NonDeC;O_H_out
Exact match found for rate rule [R2H_S;C_rad_out_H/NonDeC;O_H_out]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction11',
reactants = ['CC(C[CH]CC[O])OO(3840)'],
products = ['C[C](CCCC[O])OO(4130)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(1.05815e+09,'s^-1'), n=0.95, Ea=(148.741,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3H_SS_Cs;C_rad_out_H/NonDeC;Cs_H_out] for rate rule [R3H_SS_Cs;C_rad_out_H/NonDeC;Cs_H_out_OOH/Cs]
Euclidian distance = 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction12',
reactants = ['CC(C[CH]CC[O])OO(3840)'],
products = ['CC(CCC[CH][O])OO(4131)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(1.17661e+06,'s^-1'), n=1.79367, Ea=(144.041,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_Cs;C_rad_out_H/NonDeC;XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction13',
reactants = ['CC(C[CH]CC[O])OO(3840)'],
products = ['CC(C[CH][CH]CO)OO(4132)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(223829,'s^-1'), n=2.27675, Ea=(75.2806,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_Cs;O_rad_out;XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction14',
reactants = ['[CH2]C(CCCC[O])OO(4133)'],
products = ['CC(C[CH]CC[O])OO(3840)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(754000,'s^-1'), n=1.63, Ea=(74.8936,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 110 used for R4H_SSS;C_rad_out_2H;Cs_H_out_H/(NonDeC/Cs)
Exact match found for rate rule [R4H_SSS;C_rad_out_2H;Cs_H_out_H/(NonDeC/Cs)]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction15',
reactants = ['CC(C[CH]CC[O])OO(3840)'],
products = ['CC(CCCC[O])O[O](4134)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(0.0378492,'s^-1'), n=3.26, Ea=(26.8822,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H_SSSS;C_rad_out_H/NonDeC;XH_out] for rate rule [R5H_SSSS;C_rad_out_H/NonDeC;O_H_out]
Euclidian distance = 1.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction16',
reactants = ['CC(C[CH]CC[O])OO(3840)'],
products = ['CC([CH][CH]CCO)OO(4135)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(1.19599e+09,'s^-1'), n=0.63, Ea=(50.8774,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5Hall;O_rad_out;Cs_H_out_H/NonDeC] for rate rule [R5HJ_3;O_rad_out;Cs_H_out_H/(NonDeC/Cs)]
Euclidian distance = 1.41421356237
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction17',
reactants = ['CC(C[CH]CC[O])OO(3840)'],
products = ['C[C](C[CH]CCO)OO(4136)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(2330.97,'s^-1'), n=1.70781, Ea=(69.5067,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R6Hall;O_rad_out;Cs_H_out_OOH] for rate rule [R6HJ_3;O_rad_out;Cs_H_out_OOH/Cs]
Euclidian distance = 1.41421356237
family: intra_H_migration"""),
)
reaction(
label = 'reaction18',
reactants = ['CC(C[CH]CC[O])OO(3840)'],
products = ['[CH2]C(C[CH]CCO)OO(4137)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(5.85e+08,'s^-1'), n=0, Ea=(106.901,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R7Hall;O_rad_out;Cs_H_out_2H] for rate rule [R7HJ_3;O_rad_out;Cs_H_out_2H]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction19',
reactants = ['CC(C[CH]CC[O])OO(3840)'],
products = ['CC(C[CH]CCO)O[O](4138)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(40268.4,'s^-1'), n=2.03024, Ea=(116.997,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [RnH;Y_rad_out;O_H_out] + [RnH;O_rad_out;XH_out] + [R8Hall;Y_rad_out;XH_out] for rate rule [R8Hall;O_rad_out;O_H_out]
Euclidian distance = 1.41421356237
family: intra_H_migration"""),
)
reaction(
label = 'reaction20',
reactants = ['[CH2][CH]CC(C)OO(1416)', '[CH2][O](221)'],
products = ['CC(C[CH]CC[O])OO(3840)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(7.35017e+06,'m^3/(mol*s)'), n=0.0284742, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -14.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction21',
reactants = ['CC(C[CH]CC[O])OO(3840)'],
products = ['CC(CC=CCO)OO(4139)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(1.4874e+09,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3radExo;Y_rad;XH_Rrad_NDe]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction22',
reactants = ['CC(C[CH]CC[O])OO(3840)'],
products = ['CC(CCCC=O)OO(4140)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(1.4874e+09,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3radExo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction23',
reactants = ['CC(C[CH]CC[O])OO(3840)'],
products = ['CC(C=CCCO)OO(4141)'],
transitionState = 'TS23',
kinetics = Arrhenius(A=(4.25221e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction24',
reactants = ['CH2(S)(24)', '[O]CC[CH]CCOO(4142)'],
products = ['CC(C[CH]CC[O])OO(3840)'],
transitionState = 'TS24',
kinetics = Arrhenius(A=(873476,'m^3/(mol*s)'), n=0.189, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [carbene;Cs_H] for rate rule [carbene;C/H2/CsO]
Euclidian distance = 3.0
Multiplied by reaction path degeneracy 2.0
family: 1,2_Insertion_carbene
Ea raised from -1.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction25',
reactants = ['CC(C[CH]CC[O])OO(3840)'],
products = ['OH(5)', 'CC1CC(CC[O])O1(4143)'],
transitionState = 'TS25',
kinetics = Arrhenius(A=(2.04e+11,'s^-1','*|/',1.74), n=0, Ea=(75.8559,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 9 used for R3OO_SS;C_rad/H/NonDeC_intra;OOH
Exact match found for rate rule [R3OO_SS;C_rad/H/NonDeC_intra;OOH]
Euclidian distance = 0
family: Cyclic_Ether_Formation"""),
)
reaction(
label = 'reaction26',
reactants = ['[CH2]C(CC[O])C(C)OO(4144)'],
products = ['CC(C[CH]CC[O])OO(3840)'],
transitionState = 'TS26',
kinetics = Arrhenius(A=(6.55606e+10,'s^-1'), n=0.64, Ea=(159.935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;C] for rate rule [cCs(-HC)CJ;CsJ-HH;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
# NOTE(review): this block was labeled 'reaction26', duplicating the label of the
# previous (distinct) reaction even though this one has different reactants and its
# own transition state TS27, and the numbering resumes at 'reaction28' right after.
# Relabeled to 'reaction27' so labels stay unique and aligned with the TS indices.
reaction(
    label = 'reaction27',
    reactants = ['[CH2]C(C[O])CC(C)OO(3841)'],
    products = ['CC(C[CH]CC[O])OO(3840)'],
    transitionState = 'TS27',
    kinetics = Arrhenius(A=(6.55606e+10,'s^-1'), n=0.64, Ea=(159.935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;C] for rate rule [cCs(-HC)CJ;CsJ-HH;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
# Remaining channels of the network: ring closure (28), OH migration (29), and two
# barrierless biradical recombinations (30, 31) whose negative fitted Ea was clipped
# to zero by RMG (see the 'Ea raised' notes). Generated data.
reaction(
    label = 'reaction28',
    reactants = ['CC(C[CH]CC[O])OO(3840)'],
    products = ['CC(CC1CCO1)OO(3844)'],
    transitionState = 'TS28',
    kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_single;Ypri_rad_out] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Opri_rad]
Euclidian distance = 2.2360679775
family: Birad_recombination"""),
)
reaction(
    label = 'reaction29',
    reactants = ['CC(C[CH]CC[O])OO(3840)'],
    products = ['CC([O])CC(O)CC[O](4145)'],
    transitionState = 'TS29',
    kinetics = Arrhenius(A=(2.88e+10,'s^-1'), n=0, Ea=(111.294,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 4 used for R3OOH_SS;C_rad_out_H/NonDeC
Exact match found for rate rule [R3OOH_SS;C_rad_out_H/NonDeC]
Euclidian distance = 0
family: intra_OH_migration"""),
)
reaction(
    label = 'reaction30',
    reactants = ['[CH2]C[O](258)', '[CH]CC(C)OO(863)'],
    products = ['CC(C[CH]CC[O])OO(3840)'],
    transitionState = 'TS30',
    kinetics = Arrhenius(A=(2.23625e+06,'m^3/(mol*s)'), n=0.36814, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H2/Cs;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -1.7 to 0 kJ/mol."""),
)
reaction(
    label = 'reaction31',
    reactants = ['O(4)', '[CH2]C[CH]CC(C)OO(4146)'],
    products = ['CC(C[CH]CC[O])OO(3840)'],
    transitionState = 'TS31',
    kinetics = Arrhenius(A=(2085.55,'m^3/(mol*s)'), n=1.09077, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using template [Y_rad;O_birad] for rate rule [C_rad/H2/Cs;O_birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -8.3 to 0 kJ/mol."""),
)
# Pressure-dependent network 889: single unimolecular isomer CC(C[CH]CC[O])OO,
# entered through the CH2O + C=CCC(C)OO bimolecular channel, in a 50/50 N2/Ne bath.
network(
    label = '889',
    isomers = [
        'CC(C[CH]CC[O])OO(3840)',
    ],
    reactants = [
        ('CH2O(19)', 'C=CCC(C)OO(1413)'),
    ],
    bathGas = {
        'N2': 0.5,
        'Ne': 0.5,
    },
)
# Master-equation job for network 889: modified-strong-collision solution over an
# 8x5 T,P grid (300-2000 K, 0.01-100 bar), fitted to a 6x4 Chebyshev polynomial.
pressureDependence(
    label = '889',
    Tmin = (300,'K'),
    Tmax = (2000,'K'),
    Tcount = 8,
    Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
    Pmin = (0.01,'bar'),
    Pmax = (100,'bar'),
    Pcount = 5,
    Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
    maximumGrainSize = (0.5,'kcal/mol'),
    minimumGrainCount = 250,
    method = 'modified strong collision',
    interpolationModel = ('Chebyshev', 6, 4),
    activeKRotor = True,
    activeJRotor = True,
    rmgmode = True,
)
|
"""Python Cookbook
Chapter 14, recipe 6, Controlling complex sequences of steps.
"""
import argparse
import subprocess
from unittest.mock import Mock, call
from pytest import * # type: ignore
import Chapter_14.ch14_r06
@fixture  # type: ignore
def mock_subprocess_run():
    """Fixture: stand-in for subprocess.run whose result carries canned stdout."""
    completed_process = Mock(stdout="sample output\n")
    return Mock(return_value=completed_process)
def test_command(mock_subprocess_run, monkeypatch):
    """Command.execute shells out via subprocess.run and returns captured stdout."""
    monkeypatch.setattr(
        Chapter_14.ch14_r06.subprocess, 'run', mock_subprocess_run)
    options = argparse.Namespace(name="mock_options")
    command = Chapter_14.ch14_r06.Command()
    result = command.execute(options)
    assert result == "sample output\n"
    expected_argv = ["echo", "Command", repr(options)]
    mock_subprocess_run.assert_called_once_with(
        expected_argv, check=True, stdout=subprocess.PIPE, text=True)
def test_simulate(mock_subprocess_run, monkeypatch):
    """Simulate.execute builds the ch13_r05 command line from the options."""
    monkeypatch.setattr(
        Chapter_14.ch14_r06.subprocess, 'run', mock_subprocess_run)
    options = argparse.Namespace(
        name="mock_options", samples=42, game_file="game_file.yaml")
    result = Chapter_14.ch14_r06.Simulate().execute(options)
    assert result == "sample output\n"
    expected_argv = [
        "python",
        "Chapter_13/ch13_r05.py",
        "--samples", "42",
        "-o", "game_file.yaml",
    ]
    mock_subprocess_run.assert_called_once_with(
        expected_argv, check=True, stdout=subprocess.PIPE, text=True)
def test_summarize(mock_subprocess_run, monkeypatch):
    """Summarize.execute appends the game files after the -o summary target."""
    monkeypatch.setattr(
        Chapter_14.ch14_r06.subprocess, 'run', mock_subprocess_run)
    options = argparse.Namespace(
        name="mock_options",
        samples=42,
        game_files=["game_file.yaml"],
        summary_file="summary_file.yaml")
    result = Chapter_14.ch14_r06.Summarize().execute(options)
    assert result == "sample output\n"
    expected_argv = [
        "python",
        "Chapter_13/ch13_r06.py",
        "-o", "summary_file.yaml",
        "game_file.yaml",
    ]
    mock_subprocess_run.assert_called_once_with(
        expected_argv, check=True, stdout=subprocess.PIPE, text=True)
@fixture  # type: ignore
def mock_simulate():
    """Fixture: mock Simulate class; its instances return canned execute() output."""
    simulate_instance = Mock(
        name="Simulate instance",
        execute=Mock(return_value='simulate output'))
    return Mock(return_value=simulate_instance)
@fixture  # type: ignore
def mock_summarize():
    """Fixture: mock Summarize class; its instances return canned execute() output."""
    summarize_instance = Mock(
        name="Summarize instance",
        execute=Mock(return_value='summarize output'))
    return Mock(return_value=summarize_instance)
def test_iterative_sim(mock_simulate, mock_summarize, monkeypatch):
    """IterativeSimulate runs Simulate per simulation, then a single Summarize."""
    monkeypatch.setattr(Chapter_14.ch14_r06, 'Simulate', mock_simulate)
    monkeypatch.setattr(Chapter_14.ch14_r06, 'Summarize', mock_summarize)
    options = argparse.Namespace(
        simulations=2, samples=100, summary_file="data/y12.yaml")
    Chapter_14.ch14_r06.IterativeSimulate().execute(options)
    mock_simulate.assert_called_once_with()
    expected_simulate_calls = [call(options), call(options)]
    mock_simulate.return_value.execute.assert_has_calls(expected_simulate_calls)
    mock_summarize.assert_called_once_with()
    mock_summarize.return_value.execute.assert_has_calls([call(options)])
def test_condition_sum(mock_simulate, mock_summarize, monkeypatch):
    """ConditionalSummarize runs Simulate but never reaches Summarize here."""
    monkeypatch.setattr(Chapter_14.ch14_r06, 'Simulate', mock_simulate)
    monkeypatch.setattr(Chapter_14.ch14_r06, 'Summarize', mock_summarize)
    options = argparse.Namespace(
        simulations=2, samples=100, game_file="data/x.yaml")
    Chapter_14.ch14_r06.ConditionalSummarize().execute(options)
    mock_simulate.assert_called_once_with()
    mock_simulate.return_value.execute.assert_has_calls([call(options)])
    mock_summarize.assert_not_called()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class SearchAbilityOrderInfoOpenApi(object):
    """Value object for an Alipay search-ability order info record.

    Replaces the per-field boilerplate with a single field table:
    every name in ``_FIELD_NAMES`` is exposed as a read/write property
    backed by a ``_<name>`` attribute initialized to ``None``.
    """

    # All serializable fields, in the order the original accessors declared them.
    _FIELD_NAMES = (
        "access_type", "app_name", "app_status", "audit_status", "biz_id",
        "box_status", "brand_template_id", "data_key", "gmt_modified", "id",
        "is_old_data", "major_status", "online_time", "open_status",
        "operator", "reject_reason", "scene_code", "scene_name",
        "search_app_id", "sepc_code", "service_code", "sub_service_desc",
        "sub_service_name",
    )

    def __init__(self):
        # Every field starts unset (None); values live on "_<name>" slots.
        for field_name in self._FIELD_NAMES:
            setattr(self, "_" + field_name, None)

    def to_alipay_dict(self):
        """Serialize the set fields to a plain dict.

        NOTE: as in the original generated code, *truthiness* gates inclusion,
        so falsy values (0, False, '') are omitted, not just None.
        Nested values exposing ``to_alipay_dict`` are serialized recursively.
        """
        params = dict()
        for field_name in self._FIELD_NAMES:
            value = getattr(self, field_name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field_name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; returns None for empty input."""
        if not d:
            return None
        o = SearchAbilityOrderInfoOpenApi()
        for field_name in SearchAbilityOrderInfoOpenApi._FIELD_NAMES:
            if field_name in d:
                setattr(o, field_name, d[field_name])
        return o


def _search_ability_field(field_name):
    """Build a property that delegates to the matching underscored attribute."""
    storage = "_" + field_name
    return property(
        lambda self: getattr(self, storage),
        lambda self, value: setattr(self, storage, value),
    )


# Install one pass-through property per field on the class (same accessors the
# original spelled out by hand).
for _field in SearchAbilityOrderInfoOpenApi._FIELD_NAMES:
    setattr(SearchAbilityOrderInfoOpenApi, _field, _search_ability_field(_field))
del _field
|
# Generated by Django 2.0.7 on 2018-11-26 23:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter 'obligaciones.tasa_obligacion' to an optional DecimalField
    (max_digits=10, decimal_places=2, null and blank allowed)."""

    # Must run after the app's initial schema migration.
    dependencies = [
        ('flujo', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='obligaciones',
            name='tasa_obligacion',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True),
        ),
    ]
|
import sys
from collections import defaultdict
import numpy
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import Normalizer
from sklearn.svm import LinearSVC
from sklearn.dummy import DummyClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.dummy import DummyClassifier
# Target keyword (2nd CLI argument): examples whose first column contains it
# are labelled 1 by readExamples() below.
w=str(sys.argv[2])
def readExamples(inputFilename):
    """Read tab-separated examples from inputFilename.

    Column 2 of each line is the query text; the label is 1 when the global
    keyword `w` occurs in column 0, else 0.

    Returns (queries, labels) as a pair of numpy arrays.
    """
    queries = []
    labels = []
    # NOTE(review): the original initialised a `first` flag to False, so its
    # "skip the first line" branch was dead code and every line (including any
    # header row) was processed.  That behaviour is preserved here; if the
    # input file actually has a header, a skip flag initialised to True is
    # what was probably intended — confirm against the data format.
    with open(inputFilename) as inputFile:
        for line in inputFile:
            words = line.strip().split('\t')
            query = words[2]
            label = 1 if w in words[0] else 0
            labels.append(label)
            queries.append(query)
    return numpy.array(queries), numpy.array(labels)
def computeDensity(vectorizer, examples):
    """Return the fraction of examples whose vectorized row is entirely zero."""
    dense = vectorizer.transform(examples).todense()
    rowSums = numpy.apply_along_axis(numpy.sum, 1, dense)
    return 1 - numpy.count_nonzero(rowSums) / float(len(rowSums))
def predict(model,normalizer,vectorizer,examples, actual, fold=0, dump=False):
    """Vectorize and normalize `examples`, then return the model's accuracy
    against `actual`.  (`fold` and `dump` are accepted but unused.)"""
    features = normalizer.transform(vectorizer.transform(examples))
    predictions = model.predict(features)
    return accuracy_score(actual, predictions)
def predictF1(model,normalizer,vectorizer,examples, actual):
    """Vectorize and normalize `examples`, then return the model's F1 score
    against `actual`."""
    features = normalizer.transform(vectorizer.transform(examples))
    predictions = model.predict(features)
    return f1_score(actual, predictions)
def prettyPrint(givenVector):
    """Format a vector as 'mean(std)' with four decimal places each."""
    return "%.4f(%.4f)" % (givenVector.mean(), givenVector.std())
# ---- Train one LinearSVC on the full data set and rank examples by margin ----
examples, labels = readExamples(sys.argv[1])
folds = 10
# Per-fold metric accumulators (declared for 10-fold CV; not all are filled
# in this script).
trainingAccuracy = numpy.zeros(folds)
trainingBaseline = numpy.zeros(folds)
testingAccuracy = numpy.zeros(folds)
testingBaseline = numpy.zeros(folds)
testingDensity = numpy.zeros(folds)
testingF1 = numpy.zeros(folds)
vectorizer = CountVectorizer(min_df=1,dtype='double')
normalizer = Normalizer()
# NOTE(review): loss='l1' is a legacy scikit-learn spelling; this file targets
# an old release (it imports sklearn.cross_validation) and Python 2 (see the
# print statement at the bottom).
classifier = LinearSVC(loss='l1')
X = normalizer.fit_transform(vectorizer.fit_transform(examples))
y = labels
classifier.fit(X, y)
# Rank all training examples by absolute distance to the decision boundary;
# the third tuple element records which side of the boundary (+1 / -1).
distance=[]
distance1 = classifier.decision_function(X)
p=0
for d in distance1:
    if d>=0:
        distance.append((p,abs(d),1))
    else:
        distance.append((p,abs(d),-1))
    p+=1
distance.sort(key=lambda x:x[1])
for i in distance:
    print str(i)
|
# Exceptions are used to handle errors in a Python program.
# A program's exit code of 1 means it "crashed"; exit code 0 means "success".
# An exception is a kind of error that crashes our program.
# We use a try/except block to handle exceptions raised in the program.
try:
    age = int(input("Age: "))  # int() raises ValueError on non-numeric input
    income = 20000
    risk = income / age  # raises ZeroDivisionError when age == 0
    # NOTE(review): `risk` is computed but never used — print(risk) may have
    # been intended here.
    print(age)
except ZeroDivisionError:
    print("Age cannot be 0.")
except ValueError:
    print("Invalid value")
# Sample runs:
# age = 26     -> prints 26
# age = 0      -> "Age cannot be 0."
# age = swetha -> "Invalid value"
|
import logging
import os
import shutil
import pytest
from rasa import data, model
from rasa.cli.utils import create_output_path
from rasa.nlu import data_router, config
from rasa.nlu.components import ComponentBuilder
from rasa.nlu.model import Trainer
from rasa.nlu import training_data
from rasa.nlu.config import RasaNLUModelConfig
logging.basicConfig(level="DEBUG")

# Shared paths and names used by the NLU test fixtures below.
CONFIG_DEFAULTS_PATH = "sample_configs/config_defaults.yml"
NLU_DEFAULT_CONFIG_PATH = "sample_configs/config_pretrained_embeddings_mitie.yml"
DEFAULT_DATA_PATH = "data/examples/rasa/demo-rasa.json"
NLU_MODEL_NAME = "nlu_model.tar.gz"
TEST_MODEL_DIR = "test_models"
NLU_MODEL_PATH = os.path.join(TEST_MODEL_DIR, "nlu")

# see `rasa.nlu.data_router` for details. avoids deadlock in
# `deferred_from_future` function during tests
data_router.DEFERRED_RUN_IN_REACTOR_THREAD = False
@pytest.fixture(scope="session")
def component_builder():
    """Provide a single ComponentBuilder shared by the whole test session."""
    builder = ComponentBuilder()
    return builder
@pytest.fixture(scope="session")
def spacy_nlp(component_builder, default_config):
    """Expose the spaCy language object of a freshly built SpacyNLP component."""
    component = component_builder.create_component({"name": "SpacyNLP"}, default_config)
    return component.nlp
@pytest.fixture(scope="session")
def ner_crf_pos_feature_config():
    """CRF feature layout: [previous-token features, current-token features,
    next-token features]."""
    neighbour_features = ["low", "title", "upper", "pos", "pos2"]
    token_features = [
        "bias",
        "low",
        "suffix3",
        "suffix2",
        "upper",
        "title",
        "digit",
        "pos",
        "pos2",
        "pattern",
    ]
    return {"features": [neighbour_features, token_features, list(neighbour_features)]}
@pytest.fixture(scope="session")
def mitie_feature_extractor(component_builder, default_config):
    """Expose the feature extractor of a freshly built MitieNLP component."""
    component = component_builder.create_component({"name": "MitieNLP"}, default_config)
    return component.extractor
@pytest.fixture(scope="session")
def default_config():
    """Load the default model configuration from the sample configs."""
    loaded = config.load(CONFIG_DEFAULTS_PATH)
    return loaded
@pytest.fixture
def trained_nlu_model(request):
    """Train a keyword-pipeline NLU model, package it, and return the
    package path.  A finalizer removes everything afterwards.
    """
    cfg = RasaNLUModelConfig({"pipeline": "keyword"})
    trainer = Trainer(cfg)
    td = training_data.load_data(DEFAULT_DATA_PATH)
    trainer.train(td)
    model_path = trainer.persist(NLU_MODEL_PATH)

    nlu_data = data.get_nlu_directory(DEFAULT_DATA_PATH)
    output_path = os.path.join(NLU_MODEL_PATH, NLU_MODEL_NAME)
    new_fingerprint = model.model_fingerprint(
        NLU_DEFAULT_CONFIG_PATH, nlu_data=nlu_data
    )
    model.create_package_rasa(model_path, output_path, new_fingerprint)

    def fin():
        if os.path.exists(NLU_MODEL_PATH):
            shutil.rmtree(NLU_MODEL_PATH)
        if os.path.exists(output_path):
            # output_path is a single .tar.gz file; shutil.rmtree() raises
            # NotADirectoryError on files, so remove it with os.remove().
            os.remove(output_path)

    request.addfinalizer(fin)
    return output_path
|
from .base import *
# NOTE(review): hard-coded SECRET_KEY committed to source control — rotate it
# and load it from the environment for anything beyond local testing.
SECRET_KEY = '9875wetwrewyyu69854769kjhsdfiuy*^b32kw(993!sx1'

# Enable browser-based Selenium tests using the Firefox driver.
SELENIUM_TESTS_ENABLED = True
SELENIUM_DRIVER = 'Firefox'
|
import datetime
import sys
from dataHandler import getSeasonFilePath
from colorama import Fore
from leagues import selectLeague, getSeasonOptions
from teamsetCmd import executeTeamSet
from reindexCmd import executeReIndex
from battlerecordCmd import registerBattleRecord
# Application banner printed at startup by launch().
TITLE = "Pokemon Go PvP Data Collector"
VERSION = "v0.4-alpha"
def launch():
    """Interactive entry point: pick a league, then read battle results from
    stdin until the daily limit (25 battles) is reached or the user types
    'exit'."""
    dataPath = getSeasonFilePath()
    print(TITLE)
    print(VERSION)
    #loading the commands from the other python files; each command either
    #handles the input (tuple or string result) or passes with "OTHER_COMMAND"
    commands = [executeTeamSet, executeReIndex ,registerBattleRecord]
    league = selectLeague(infoMessage, errorMessage)
    amountOfbattles = determineRemainingBattles(dataPath, league)
    if amountOfbattles >= 25:
        # NOTE(review): the message says "(5) sets" while 25 battles are
        # checked — presumably 5 sets of 5 battles; confirm the wording.
        errorMessage("Unable to input more battles, max amount (5) of sets is reached for today")
        return
    print("Syntax")
    print("W/L/D:Pokemon1,Pokemon2,Pokemon3")
    #battle registration
    while amountOfbattles < 25:
        userInput = input("> ")
        if userInput == "exit":
            print(TITLE + " has been terminated by the user")
            return
        for command in commands:
            result = command(userInput)
            # A tuple means "record result[0]" with an optional info message
            # carried in result[1]/result[2].
            if (type(result) is tuple):
                writeToFile(result[0], dataPath)
                if result[1] != "" and result[2] != "":
                    infoMessage(result[1], result[2])
                break
            if result.startswith("ERR:"):
                errorMessage(result)
                break
            # "OTHER_COMMAND": this handler did not recognise the input; try
            # the next one.
            if result == "OTHER_COMMAND":
                continue
            amountOfbattles += 1
            break
    errorMessage("Unable to input more battles, max amount (5) of sets is reached for today")
def determineRemainingBattles(dataPath, league):
    """Count battles recorded since the latest day header in the data file.

    A day header line looks like "- <league> <YYYY-MM-DD>".  If the file is
    (nearly) empty or the latest header is from a past day, a fresh header is
    written and 0 is returned.
    """
    # `with` guarantees the handle is closed; the original leaked the open
    # file object returned by open().
    with open(dataPath, "r") as dataFile:
        content = dataFile.read().splitlines()
    if len(content) <= 3:
        print("Good luck in the new season, trainer!")
        writeToFile("- "+ league + " " + datetime.date.today().strftime("%Y-%m-%d"), dataPath)
        return 0
    # Walk backwards from the end, counting battle lines until a day header.
    lineIndex = len(content) - 1
    amountOfBattles = 0
    while lineIndex >= 0:
        if content[lineIndex] == "":
            lineIndex -= 1
            continue
        if content[lineIndex].startswith('- '):
            if isInPast(content[lineIndex].split(' ')[2]):
                # Latest header is from a previous day: start a new day block.
                writeToFile("- " + league + " " + datetime.date.today().strftime("%Y-%m-%d"), dataPath)
                executeReIndex("reindex")
                return 0
            print("You have done " + str(amountOfBattles) + " battles so far.")
            return amountOfBattles
        amountOfBattles += 1
        lineIndex -= 1
    return 0
def isInPast(strDate):
    """Return True when strDate ('YYYY-MM-DD') is strictly before today."""
    today = datetime.date.today()
    year, month, day = (int(part) for part in strDate.split('-'))
    return datetime.datetime(today.year, today.month, today.day) > datetime.datetime(year, month, day)
def writeToFile(input, dataPath):
    """Append one line (`input` plus a newline) to the file at dataPath.

    Note: the parameter name `input` shadows the builtin; kept for
    backward compatibility with existing callers.
    """
    # `with` flushes and closes the handle; the original left it open.
    with open(dataPath, "a") as file:
        file.write(input + "\n")
def errorMessage(errorMessage):
    """Print errorMessage to the console in red."""
    text = Fore.RED + errorMessage + Fore.RESET
    print(text)
def infoMessage(pretext, value):
    """Print pretext followed by value highlighted in cyan."""
    highlighted = Fore.CYAN + value + Fore.RESET
    print(pretext + highlighted)
# Script entry point — runs on import; NOTE(review): no __main__ guard.
launch()
# -*- coding: utf-8 -*-
# scraper.py for 17-QC-HealthAndSocialServices
import urllib.request
import re
import os
import csv
import sys
import ipgetter
import ipcalc
from urllib.error import URLError, HTTPError, ContentTooShortError
from urllib import robotparser
from lxml.html import fromstring, tostring
from lxml import etree
def parse_region(file_name=None):
    """Download the installation listing page for each of the 18 regions.

    (`file_name` is accepted but unused, mirroring the other downloaders.)
    """
    base = 'http://wpp01.msss.gouv.qc.ca/appl/M02/M02ListeInstall.asp?cdRss='
    for region_number in range(1, 19):
        region_url = base + str(region_number).zfill(2) + '&CodeTri=Mct&Install=Mct'
        download(region_url, 'regions/region' + str(region_number) + '.asp')
    print('Download Complete')
def parse_directory(output_name='output', output_type='csv', region_name=None):
    """Parse every downloaded establishment page (or just region_name) and
    write one CSV row per installation to ../output/<output_name>.<type>."""
    # if no region name specified, parse all region
    dir_list = [item for item in os.listdir('../input/establishments') if re.search('.asp', item) is not None] if (region_name is None) else [region_name]
    output_fname = output_name + '.' + output_type
    csv_f = open("../output/" + output_fname, "w+") # Initialize csv file & writer (output_type is reserved for future use)
    csv_writer = csv.writer(csv_f, lineterminator='\n')
    for dir in dir_list:
        tree = get_lxml_tree('../input/establishments/' + dir)
        rows = tree.xpath('//td/a')
        for row in rows:
            installation_url = row.get('href')
            # Only follow links that point at an installation page (and not
            # back at an establishment page).
            if (installation_url is None or re.search('Installation', installation_url) is None or re.search('Etablissement', installation_url) is not None):
                continue
            installation_url = 'http://wpp01.msss.gouv.qc.ca/appl/M02/' + installation_url
            installation_name = 'installation' + installation_url.split('=')[1]
            csvrow = parse_establishment(dir, installation_name + '_' + dir)
            csv_writer.writerow(csvrow)
    csv_f.close()
    print('Output file saved: ' + output_fname)
def parse_establishment(establishment_name, facility_name):
    """Build one CSV row: the facility's installation data followed by the
    establishment's fax number and the columns at offsets 65..83.

    The numeric offsets assume the page's fixed table layout — TODO confirm
    they still match the live pages.
    """
    tree = get_lxml_tree("../input/establishments/" + establishment_name)
    columns = tree.xpath('//td')
    data = []
    for column in columns:
        data.append(column.xpath('string()').strip())
    csv_row = parse_installation(facility_name)
    csv_row.append(data[53]) # Télécopieur / Fax
    for i in range(65, 84, 2):
        csv_row.append(data[i])
    return csv_row
def parse_installation(facility_name):
    """Parse one installation page into a list of CSV fields: basic info,
    health region, special info, and a '; '-joined list of services."""
    tree = get_lxml_tree('../input/installations/' + facility_name)
    columns = tree.xpath('//td')
    data = []
    for column in columns:
        # Normalise stray cp1252 bytes found in the source pages.
        data.append(column.xpath('string()').strip().replace('\x96', '-').replace('\x92', '\u00ed').replace('\x8c', '\u00e5'))
    installation_row = []
    # parsing basic info (fixed table offsets)
    for i in range(37, 52, 2):
        installation_row.append(data[i])
    installation_row.append(data[57]) # Région sociosanitaire / Health Region
    # parsing special info
    for i in range(61, 66, 2):
        installation_row.append(data[i])
    services = ''
    # parsing services available
    for i in range(70, len(data) - 4):
        if re.search('Mission', data[i]):
            continue
        # Bug fix: the original used `data[i].strip() is ''` — an identity
        # comparison that is not a reliable empty-string test (and a
        # SyntaxWarning on modern Python).  Use equality instead.
        if (data[i].strip() == ''):
            continue
        services += data[i] + '; '
    installation_row.append(services[:-2])
    return installation_row
def download_establishments(file_name=None):
    """Follow every establishment link in the downloaded region pages (or in
    just file_name) and save each establishment page locally."""
    # Refuse to run from the blocked local network range.
    if (not ip_check()):
        print('Running script from local IP, script terminating...')
        sys.exit()
    if file_name is None: # if no file name specified, parse all directories
        dir_list = [item for item in os.listdir('../input/regions') if re.search('.asp', item) is not None]
    else:
        dir_list = [file_name]
    for dir in dir_list:
        tree = get_lxml_tree('../input/regions/' + dir)
        rows = tree.xpath('//td/a')
        for row in rows:
            establishment_url = row.get('href')
            if (establishment_url is None):
                continue
            # Only follow links to establishment pages.
            if (re.search('Etablissement', establishment_url) is None):
                continue
            establishment_url = 'http://wpp01.msss.gouv.qc.ca/appl/M02/' + establishment_url
            establishment_name = 'establishment'
            establishment_name += establishment_url.split('=')[1]
            download(establishment_url, "establishments/" + establishment_name + '_' + dir)
    print('Download complete')
def download_installations(file_name=None):
    """Follow every installation link in the downloaded establishment pages
    (or in just file_name) and save each installation page locally."""
    # if no file name specified, parse all directories
    dir_list = [item for item in os.listdir('../input/establishments') if re.search('.asp', item) is not None] if (file_name is None) else [file_name]
    for dir in dir_list:
        tree = get_lxml_tree('../input/establishments/' + dir)
        rows = tree.xpath('//td/a')
        for row in rows:
            installation_url = row.get('href')
            # Only follow links that point at an installation page.
            if (installation_url is None or re.search('Installation', installation_url) is None or re.search('Etablissement', installation_url) is not None):
                continue
            installation_url = 'http://wpp01.msss.gouv.qc.ca/appl/M02/' + installation_url
            installation_name = 'installation'
            installation_name += installation_url.split('=')[1]
            download(installation_url, "installations/" + installation_name + '_' + dir)
    print('Download complete')
def download(url, file_name='index', num_retries=3, user_agent='wswp'):
    """Download `url` and save the body under ../input/<file_name>.

    Retries 5xx HTTP errors up to num_retries times.  On final failure
    nothing is written (the original wrote None, raising TypeError).
    """
    # Parsing robots.txt (fetching disabled: the site serves a non-unicode
    # robots.txt; see the commented-out can_fetch call below).
    rp = robotparser.RobotFileParser()
    rp.set_url('http://www.msss.gouv.qc.ca/robots.txt')
    #rp.read()
    # begin of downloading
    print('Downloading:', url)
    #if rp.can_fetch(user_agent, url): # disable for non-unicode robots.txt
    request = urllib.request.Request(url)
    request.add_header('User-agent', user_agent)
    try:
        # Bug fix: the original passed `url` (not `request`) to urlopen, so
        # the User-agent header was never sent.
        binary = urllib.request.urlopen(request).read()
    except (URLError, HTTPError, ContentTooShortError) as e:
        print('Download error:', e.reason)
        binary = None
        if num_retries > 0 and hasattr(e, 'code') and 500 <= e.code < 600:
            # Recursively retry 5xx HTTP errors.  Bug fix: the original
            # called download(url, num_retries - 1), passing the retry count
            # as the *file_name* argument — saving under the wrong name and
            # never decrementing the count.
            download(url, file_name, num_retries - 1, user_agent)
        return
    # Output to binary file
    with open("../input/" + file_name, "wb+") as f:
        f.write(binary)
    print('File saved: ' + file_name)
def ip_check():
    """Return False when the current public IP is inside the blocked
    Teksavvy /24 range, True otherwise."""
    current_ip = ipgetter.myip()
    print('Running script from:', current_ip)
    # using netmask /24 for Teksavvy
    return current_ip not in ipcalc.Network('192.0.163.0/24')
def get_lxml_tree(dir):
    """Read the file at `dir` and parse its bytes into an lxml HTML tree."""
    with open(dir, 'rb') as source:
        raw = source.read()
    return fromstring(raw)
|
# Generated by Django 2.2.5 on 2021-06-15 18:53
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the `reviewOfFilm` model (and its table) from the review app."""

    dependencies = [
        ('review', '0001_initial'),
    ]
    operations = [
        migrations.DeleteModel(
            name='reviewOfFilm',
        ),
    ]
|
from c_s_app.models import *
import csv
import os
def data_to_db():
    """Import mark/model/generation rows from 10full_columns.csv into the DB,
    creating missing Model and Generation records and updating folder paths.

    NOTE(review): the indentation of the if/else chains below was
    reconstructed; as written, when exactly one record matches, the `else`
    after the `== 0` check also runs and prints a spurious warning — an
    `elif` chain was probably intended.  Confirm against the original file.
    """
    file_name = '10full_columns.csv'
    pwd = os.path.dirname(__file__)
    file_path = pwd + '/' + file_name
    list_f = []
    with open(file_path, 'r', newline='') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            # Empty CSV cells become None.
            folder_name = row['Folder_name']
            if folder_name == '':
                folder_name = None
            mark = row['Mark']
            if mark == '':
                mark = None
            model = row['Model']
            if model == '':
                model = None
            gen = row['Generation']
            if gen == '':
                gen = None
            mark_obj = Mark.objects.get(name=mark)  # fetch the Mark object
            mark_pk = int(mark_obj.pk)
            # Fetch this mark's models from the DB, filtered by the model
            # name found in the spreadsheet.
            list_models_objs = Model.objects.all().filter(mark=mark_pk).filter(name=model)
            if len(list_models_objs) == 1:  # the Model already exists in the DB (list length = 1)
                # print('------------------ WARNING!!! START ---------------------')
                # print(len(list_models_objs), 'TAK NIE DOLZHNO BYC!!! DOLZHNO BYC = 0')
                # print(model, mark, gen, folder_name)
                # print('------------------ WARNING!!! FINISH ---------------------')
                for model_element in list_models_objs:
                    model_pk = model_element.pk
                    model_obj = model_element
            if len(list_models_objs) == 0:  # the Model is not in the DB yet (list length = 0)
                model_obj = Model.objects.create(name=model, mark=mark_obj)  # create the Model object
                model_pk = model_obj.pk
            else:
                print('------------------ WARNING!!! START ---------------------')
                print(len(list_models_objs), 'TAK NIE DOLZHNO BYC!!! MOZHET BYC = 0 libo 1')
                print(model, mark, gen, folder_name)
                print('------------------ WARNING!!! FINISH ---------------------')
            # Look up the pk of `gen` in GenerationList.
            genlist_obj = GenerationList.objects.get(name=gen)
            genlist_pk = genlist_obj.pk
            # Fetch this mark/model's generations from the DB, filtered by
            # the generation name found in the spreadsheet.
            list_gens_objs = Generation.objects.all().filter(model=model_pk).filter(name=genlist_pk)
            if len(list_gens_objs) == 1:  # the Generation already exists in the DB (list length = 1)
                print('------------------ WARNING!!! START ---------------------')
                print(len(list_gens_objs), 'TAK NIE DOLZHNO BYC!!! DOLZHNO BYC = 0')
                print(model, mark, gen, folder_name)
                print('------------------ WARNING!!! FINISH ---------------------')
                for gen_element in list_gens_objs:
                    gen_pk = gen_element.pk
                    print('---------------------------- WARNING START ---------------')
                    print('bylo: ', gen_element.path)
                    gen_element.path = folder_name
                    print('stalo: ', gen_element.path)
                    print('---------------------------- WARNING FINISH ---------------')
                    gen_element.save()
            if len(list_gens_objs) == 0:  # the Generation is not in the DB yet (list length = 0)
                new_generation_obj = Generation.objects.create(model=model_obj, name=genlist_obj, path=folder_name)  # create the Generation object
                print('Added gen: ', mark, model, gen, folder_name)
            else:
                print('------------------ WARNING!!! START ---------------------')
                print(len(list_gens_objs), 'TAK NIE DOLZHNO BYC!!! MOZHET BYC = 0 libo 1')
                print(model, mark, gen, folder_name)
                print('------------------ WARNING!!! FINISH ---------------------')
# CROSS-CHECK VIA TWO LISTS (translated from Russian)
# list_f.append(folder_name)
# gen_obj = Generation.objects.all()
# list_db = [elem.path for elem in gen_obj]
# print(len(list_f), len(list_db))
# print(set(list_f) - set(list_db))
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from unittest import TestCase
from mock.mock import patch, MagicMock
from only_for_platform import get_platform, not_for_platform, os_distro_value, PLATFORM_WINDOWS
from ambari_commons.os_check import OSCheck
from resource_management.core import Environment, Fail
from resource_management.core.resources import Group
from resource_management.core.system import System
from resource_management.core.shell import preexec_fn
import os
import select
from ambari_commons import subprocess32
# grp and pty are POSIX-only modules; skip them on Windows.
if get_platform() != PLATFORM_WINDOWS:
    import grp
    import pty

# Shared stdout mock handed to every fake subprocess in the tests below.
subproc_stdout = MagicMock()
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(os, "read", new=MagicMock(return_value=None))
@patch.object(select, "select", new=MagicMock(return_value=([subproc_stdout], None, None)))
@patch.object(os, "environ", new = {'PATH':'/bin'})
@patch("pty.openpty", new = MagicMock(return_value=(1,5)))
@patch.object(os, "close", new=MagicMock())
class TestGroupResource(TestCase):
    """Unit tests for the Group resource.

    Each test fakes subprocess32.Popen and grp.getgrnam, then asserts the
    exact groupadd/groupmod/groupdel command line the resource builds.
    """

    @patch("grp.getgrnam")
    @patch.object(subprocess32, "Popen")
    def test_action_create_nonexistent(self, popen_mock, getgrnam_mock):
        """'create' on a missing group (getgrnam raises KeyError) runs groupadd."""
        subproc_mock = MagicMock()
        subproc_mock.returncode = 0
        subproc_mock.stdout = subproc_stdout
        popen_mock.return_value = subproc_mock
        getgrnam_mock.side_effect = KeyError()

        with Environment('/') as env:
            Group('hadoop',
                  action='create',
                  password='secure'
            )

        self.assertEqual(popen_mock.call_count, 1)
        popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh PATH=/bin -H -E groupadd -p secure hadoop"], shell=False, preexec_fn=preexec_fn, stderr=-2, stdout=-1, env={'PATH': '/bin'}, cwd=None, close_fds=True)
        getgrnam_mock.assert_called_with('hadoop')

    @patch("grp.getgrnam")
    @patch.object(subprocess32, "Popen")
    def test_action_create_existent(self, popen_mock, getgrnam_mock):
        """'create' on an existing group updates it with groupmod."""
        subproc_mock = MagicMock()
        subproc_mock.returncode = 0
        subproc_mock.stdout = subproc_stdout
        popen_mock.return_value = subproc_mock
        getgrnam_mock.return_value = _get_group()

        with Environment('/') as env:
            Group('mapred',
                  action='create',
                  gid=2,
                  password='secure'
            )

        self.assertEqual(popen_mock.call_count, 1)
        popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh PATH=/bin -H -E groupmod -p secure -g 2 mapred"], shell=False, preexec_fn=preexec_fn, stderr=-2, stdout=-1, env={'PATH': '/bin'}, cwd=None, close_fds=True)
        getgrnam_mock.assert_called_with('mapred')

    @patch("grp.getgrnam")
    @patch.object(subprocess32, "Popen")
    def test_action_create_fail(self, popen_mock, getgrnam_mock):
        """'create' raises Fail when the groupmod subprocess exits non-zero."""
        subproc_mock = MagicMock()
        subproc_mock.returncode = 1
        subproc_mock.stdout = subproc_stdout
        popen_mock.return_value = subproc_mock
        getgrnam_mock.return_value = _get_group()

        try:
            with Environment('/') as env:
                Group('mapred',
                      action='create',
                      gid=2,
                      password='secure'
                )
            self.fail("Action 'create' should fail when checked_call fails")
        except Fail:
            pass

        self.assertEqual(popen_mock.call_count, 1)
        popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', "ambari-sudo.sh PATH=/bin -H -E groupmod -p secure -g 2 mapred"], shell=False, preexec_fn=preexec_fn, stderr=-2, stdout=-1, env={'PATH': '/bin'}, cwd=None, close_fds=True)
        getgrnam_mock.assert_called_with('mapred')

    @patch("grp.getgrnam")
    @patch.object(subprocess32, "Popen")
    def test_action_remove(self, popen_mock, getgrnam_mock):
        """'remove' on an existing group runs groupdel."""
        subproc_mock = MagicMock()
        subproc_mock.returncode = 0
        subproc_mock.stdout = subproc_stdout
        popen_mock.return_value = subproc_mock
        getgrnam_mock.return_value = _get_group()

        with Environment('/') as env:
            Group('mapred',
                  action='remove'
            )

        self.assertEqual(popen_mock.call_count, 1)
        popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', 'ambari-sudo.sh PATH=/bin -H -E groupdel mapred'], shell=False, preexec_fn=preexec_fn, stderr=-2, stdout=-1, env={'PATH': '/bin'}, cwd=None, close_fds=True)
        getgrnam_mock.assert_called_with('mapred')

    @patch("grp.getgrnam")
    @patch.object(subprocess32, "Popen")
    def test_action_remove_fail(self, popen_mock, getgrnam_mock):
        """'remove' raises Fail when the groupdel subprocess exits non-zero."""
        subproc_mock = MagicMock()
        subproc_mock.returncode = 1
        subproc_mock.stdout = subproc_stdout
        popen_mock.return_value = subproc_mock
        getgrnam_mock.return_value = _get_group()

        try:
            with Environment('/') as env:
                Group('mapred',
                      action='remove'
                )
            self.fail("Action 'delete' should fail when checked_call fails")
        except Fail:
            pass

        self.assertEqual(popen_mock.call_count, 1)
        popen_mock.assert_called_with(['/bin/bash', '--login', '--noprofile', '-c', 'ambari-sudo.sh PATH=/bin -H -E groupdel mapred'], shell=False, preexec_fn=preexec_fn, stderr=-2, stdout=-1, env={'PATH': '/bin'}, cwd=None, close_fds=True)
        getgrnam_mock.assert_called_with('mapred')
def _get_group():
    """Return a MagicMock shaped like a grp.struct_group for 'mapred'."""
    attrs = {'gr_name': 'mapred', 'gr_passwd': 'x', 'gr_gid': 0, 'gr_mem': []}
    group = MagicMock()
    for key, value in attrs.items():
        setattr(group, key, value)
    return group
|
# n^e modulo p
def tinh_luy_thua(n,e,p):
    """Compute n**e mod p by right-to-left binary exponentiation."""
    acc = 1
    base = n
    exp = e
    while exp != 0:
        if exp % 2 == 1:
            # Odd exponent: fold one factor of `base` into the accumulator.
            acc = acc * base % p
            exp -= 1
        base = base * base % p
        exp //= 2
    return acc
# Polynomial multiplication modulo q(x) over GF(p)
# (the function below is named `division`, but it computes a product):
# c(x) = a(x)*b(x) mod q(x)
# a(x) = a[1]x + a[0] and b(x) = b[1]x + b[0]
# q(x) = x^2 + q[1]x + q[0]
def division(a,b,q,p):
    """Multiply degree-1 polynomials a and b modulo q(x) = x^2 + q[1]x + q[0]
    over GF(p).

    a and b are (constant, linear) coefficient pairs; returns (c0, c1).
    Despite its name, this routine performs modular polynomial
    *multiplication*, not division.
    """
    const_term = (a[0] * b[0]) % p
    lin_term = (a[0] * b[1] + a[1] * b[0]) % p
    quad_term = (a[1] * b[1]) % p
    # Reduce the x^2 coefficient using x^2 = -q[1]x - q[0] (mod q(x)).
    c1 = (lin_term - (q[1] * quad_term)) % p
    c0 = (const_term - (q[0] * quad_term)) % p
    return (c0, c1)
# F(x)^e modulo q(x) over GF(p)
# q(x) = x^2 - bx + c
# c(x) := x^e mod (x^2 - bx + c) = c1*x + c0
# x^e = (x + 0)*(0x + 1) => a=(1,0), b=(0,1)
# q = (c, -b)
def tinh_luy_thua_F(e,a,q,p):
    """Compute a(x)**e modulo q(x) over GF(p) by binary exponentiation.

    `a` is a (constant, linear) coefficient pair; squaring and multiplication
    are delegated to division() (which, despite its name, multiplies mod q).
    Returns the coefficient pair of the result ([1, 0] when e == 0).
    """
    result = [1,0]
    # (The original kept an unused local `sq = a`; removed as dead code.)
    while e != 0:
        if(e%2 == 1):
            result = division(a,result,q,p)
            e = (e-1)
        a = division(a, a, q, p)
        e = e//2
    return result
#Cipolla-Lehmer square root algorithm
#h := (b^2 โ 4c)^(pโ1)/2(mod p)
def CL(c, b, p):
    """One Cipolla-Lehmer trial for a square root of c modulo the odd prime p.

    `b` parameterises the quadratic x^2 - bx + c.  Returns the candidate
    root, or 0 when the trial is rejected (s == 0 because b^2 - 4c is a
    quadratic residue or zero, so the extension is not a field).
    """
    # Euler-criterion test on the discriminant h = b^2 - 4c.
    h = (b*b - 4*c) % p
    h1 = tinh_luy_thua(h,(p-1)//2,p)
    s = 1
    if(h1 == 1 or h1 == 0):
        s = 0
    # Work in GF(p)[x]/(x^2 - bx + c); q = (q0, q1) = (c, -b mod p).
    b = (-b) % p
    c = c % p
    q = (c,b)
    e = (p+1)//2
    a = (0,1) ##x1 = x
    # y = x^((p+1)/2) mod q(x); its constant coefficient is the candidate root.
    y = tinh_luy_thua_F(e,a,q,p)
    return s * y[0]
#a prime p where p > 2, a quadratic residue c in GF(p) and an integer
# b where 0 < b < p
def can_bac_hai(c,p):
    """Find a square root of the quadratic residue c modulo the prime p.

    Tries up to 100 values of the Cipolla-Lehmer parameter b (1..100 mod p)
    and returns the first candidate y with y*y == c (mod p), or 0 if none of
    the trials succeeds.
    """
    b = 100  # number of Cipolla-Lehmer trials to attempt
    t = 0
    c = c % p
    for i in range(b):
        y = CL(c, ((i + 1) % p), p)
        # Verify the candidate: only accept y when y^2 == c (mod p).
        t1 = (y * y) % p
        if (t1 == c):
            t = y
            break
    return (t)
# p = 115792089210356248762697446949407573530086143415290314195533631308867097853951
# c = 54482837085503842457913951049488513539071978307250707216928124062793075562515
# t = can_bac_hai(c, p)
# print(t)
# Algorithm above: Cipolla-Lehmer modular square root.
|
"""Tests for Action Classification with OTX CLI."""
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import json
import os
from pathlib import Path
from timeit import default_timer as timer
import pytest
from otx.cli.registry import Registry
from tests.regression.regression_test_helpers import (
REGRESSION_TEST_EPOCHS,
TIME_LOG,
get_result_dict,
get_template_performance,
load_regression_configuration,
)
from tests.test_suite.e2e_test_system import e2e_pytest_component
from tests.test_suite.run_test_command import (
otx_eval_compare,
otx_eval_e2e_eval_time,
otx_eval_e2e_train_time,
otx_eval_openvino_testing,
otx_export_testing,
otx_train_testing,
pot_eval_testing,
pot_optimize_testing,
)
# Configurations for regression test.
TASK_TYPE = "action_classification"
TRAIN_TYPE = "supervised"
LABEL_TYPE = "multi_class"

otx_dir = os.getcwd()
# All action-algorithm templates registered for this task type.
templates = Registry("otx/algorithms/action").filter(task_type=TASK_TYPE.upper()).templates
templates_ids = [template.model_template_id for template in templates]

# Shared result accumulator; flushed to JSON after every test (see teardown).
result_dict = get_result_dict(TASK_TYPE)
result_dir = f"/tmp/regression_test_results/{TASK_TYPE}"
Path(result_dir).mkdir(parents=True, exist_ok=True)

action_cls_regression_config = load_regression_configuration(otx_dir, TASK_TYPE, TRAIN_TYPE, LABEL_TYPE)
action_cls_data_args = action_cls_regression_config["data_path"]
action_cls_data_args["train_params"] = ["params", "--learning_parameters.num_iters", REGRESSION_TEST_EPOCHS]
class TestRegressionActionClassification:
    """OTX CLI regression tests for action classification.

    Covers train/eval comparison, e2e time KPI checks, OpenVINO export
    evaluation, and POT optimisation evaluation.  Per-template metrics are
    accumulated in the module-level result_dict and flushed to JSON after
    every test by teardown().
    """

    def setup_method(self):
        self.label_type = LABEL_TYPE
        self.performance = {}

    def teardown(self):
        # Persist accumulated results after each test so partial runs still
        # leave a usable result file behind.
        with open(f"{result_dir}/result.json", "w") as result_file:
            json.dump(result_dict, result_file, indent=4)

    @e2e_pytest_component
    @pytest.mark.parametrize("template", templates, ids=templates_ids)
    def test_otx_train(self, template, tmp_dir_path):
        """Train a template, compare eval metrics to criteria, log timings."""
        self.performance[template.name] = {}
        tmp_dir_path = tmp_dir_path / TASK_TYPE

        train_start_time = timer()
        otx_train_testing(template, tmp_dir_path, otx_dir, action_cls_data_args)
        train_elapsed_time = timer() - train_start_time

        infer_start_time = timer()
        otx_eval_compare(
            template,
            tmp_dir_path,
            otx_dir,
            action_cls_data_args,
            action_cls_regression_config["regression_criteria"]["train"],
            self.performance[template.name],
        )
        infer_elapsed_time = timer() - infer_start_time

        self.performance[template.name][TIME_LOG["train_time"]] = round(train_elapsed_time, 3)
        self.performance[template.name][TIME_LOG["infer_time"]] = round(infer_elapsed_time, 3)
        result_dict[TASK_TYPE][LABEL_TYPE][TRAIN_TYPE]["train"].append(self.performance)

    @e2e_pytest_component
    @pytest.mark.parametrize("template", templates, ids=templates_ids)
    def test_otx_train_kpi_test(self, template):
        """Check train/eval wall-clock times recorded by test_otx_train
        against the e2e time KPI criteria."""
        results = result_dict[TASK_TYPE][self.label_type][TRAIN_TYPE]["train"]
        performance = get_template_performance(results, template)

        otx_eval_e2e_train_time(
            train_time_criteria=action_cls_regression_config["kpi_e2e_train_time_criteria"]["train"],
            e2e_train_time=performance[template.name][TIME_LOG["train_time"]],
            template=template,
        )
        otx_eval_e2e_eval_time(
            eval_time_criteria=action_cls_regression_config["kpi_e2e_eval_time_criteria"]["train"],
            e2e_eval_time=performance[template.name][TIME_LOG["infer_time"]],
            template=template,
        )

    @e2e_pytest_component
    @pytest.mark.parametrize("template", templates, ids=templates_ids)
    def test_otx_export_eval_openvino(self, template, tmp_dir_path):
        """Export the trained model to OpenVINO and evaluate the export."""
        self.performance[template.name] = {}
        tmp_dir_path = tmp_dir_path / TASK_TYPE

        export_start_time = timer()
        otx_export_testing(template, tmp_dir_path)
        export_elapsed_time = timer() - export_start_time

        export_eval_start_time = timer()
        otx_eval_openvino_testing(
            template,
            tmp_dir_path,
            otx_dir,
            action_cls_data_args,
            threshold=1.0,
            criteria=action_cls_regression_config["regression_criteria"]["export"],
            reg_threshold=0.10,
            result_dict=self.performance[template.name],
        )
        export_eval_elapsed_time = timer() - export_eval_start_time

        self.performance[template.name][TIME_LOG["export_time"]] = round(export_elapsed_time, 3)
        self.performance[template.name][TIME_LOG["export_eval_time"]] = round(export_eval_elapsed_time, 3)
        result_dict[TASK_TYPE][self.label_type][TRAIN_TYPE]["export"].append(self.performance)

    @e2e_pytest_component
    @pytest.mark.parametrize("template", templates, ids=templates_ids)
    def test_pot_optimize_eval(self, template, tmp_dir_path):
        """Run POT optimisation on the trained model and evaluate the result."""
        self.performance[template.name] = {}
        tmp_dir_path = tmp_dir_path / TASK_TYPE

        pot_start_time = timer()
        pot_optimize_testing(template, tmp_dir_path, otx_dir, action_cls_data_args)
        pot_elapsed_time = timer() - pot_start_time

        pot_eval_start_time = timer()
        pot_eval_testing(
            template,
            tmp_dir_path,
            otx_dir,
            action_cls_data_args,
            criteria=action_cls_regression_config["regression_criteria"]["pot"],
            reg_threshold=0.10,
            result_dict=self.performance[template.name],
        )
        pot_eval_elapsed_time = timer() - pot_eval_start_time

        self.performance[template.name][TIME_LOG["pot_time"]] = round(pot_elapsed_time, 3)
        self.performance[template.name][TIME_LOG["pot_eval_time"]] = round(pot_eval_elapsed_time, 3)
        result_dict[TASK_TYPE][self.label_type][TRAIN_TYPE]["pot"].append(self.performance)
|
# Generated by Django 3.0.5 on 2020-09-07 09:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-add the `age` field to `symptom` as a non-null IntegerField.

    default=50 backfills existing rows during the migration;
    preserve_default=False drops the default from the schema afterwards.
    """

    dependencies = [
        ('doctors', '0012_remove_symptom_age'),
    ]
    operations = [
        migrations.AddField(
            model_name='symptom',
            name='age',
            field=models.IntegerField(default=50),
            preserve_default=False,
        ),
    ]
|
# Here, we are calculating the excess coalescent load that is brought about by the difference in models.
from spectra import *
from pathlib import Path
from tqdm import tqdm
if __name__ == "__main__":
    tmp_store = Path("data")
    # Sample sizes, population size, mutation rate and scaled selection values.
    n_range = [50, 100, 200]
    N = 1000
    mu = 1e-8
    ns_range = [0, 1, 5, 10, 20, 50]
    frequency_spectra = {n: {ns: None for ns in ns_range} for n in n_range}
    for i, n in enumerate(n_range):
        plot_range = np.arange(1, n)
        for j,Ns in enumerate(ns_range):
            # mtx_store = tmp_store / Path(f"mtx_n_{n}_Ns_{Ns}_N_{N}_J_1.txt")
            # Precomputed transition matrix for this (N, Ns, n) combination.
            mtx_store = tmp_store / Path(f"q_mat_{N}_{Ns}_{n}_3_5.txt")
            z = np.zeros(n - 1)
            z[0] = n * mu # Forward mutation
            I = np.eye(n - 1)
            M = np.loadtxt(mtx_store)
            # solve for equilibrium: (M_interior - I)^T pi = -z
            # (la and binomial_projection_full come from `from spectra import *`)
            tmp = (M[1:-1, 1:-1] - I).T
            pi = la.solve(tmp, -z)
            frequency_spectra[n][Ns] = pi
            numeric = (pi)
            s = Ns / N
            # Load implied by the numeric equilibrium frequency spectrum.
            num_load = numeric @ plot_range * s / n
            diffusion = (binomial_projection_full(n, N, s))
            coal_load = diffusion @ plot_range * s / n
            # Percentage excess of the diffusion/coalescent load over numeric,
            # emitted as a LaTeX table row.
            excess_load = (coal_load - num_load) / num_load * 100
            print(f"${n}$ & ${N}$ & ${Ns}$ & ${num_load:#.3e}$ & ${coal_load:#.3e}$ & ${excess_load:#.3f}\%$ \\\\")
|
from easydict import EasyDict as edict
# init — dataset-specific configuration for the JHU crowd-counting data.
__C_JHU = edict()
cfg_data = __C_JHU

# Input crop size used during training (height, width).
__C_JHU.TRAIN_SIZE = (512, 1024)
__C_JHU.DATA_PATH = '../ProcessedData/JHU'
__C_JHU.TRAIN_LST = 'train.txt'
__C_JHU.VAL_LST = 'val.txt'
# Ground-truth localisation file used for evaluation.
__C_JHU.VAL4EVAL = 'val_gt_loc.txt'
# Per-channel (RGB) mean and std — presumably computed on the JHU training
# images; TODO confirm.
__C_JHU.MEAN_STD = ([0.42968395352363586, 0.4371049106121063, 0.4219788610935211], [0.23554939031600952, 0.2325684279203415, 0.23559504747390747])
__C_JHU.LABEL_FACTOR = 1
__C_JHU.LOG_PARA =1.
__C_JHU.RESUME_MODEL = ''#model path
__C_JHU.TRAIN_BATCH_SIZE = 6 #imgs
__C_JHU.VAL_BATCH_SIZE = 1 # must be 1
|
from PyQt5 import uic
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QSplashScreen, QMainWindow
from PyQt5.QtCore import QTime, QTimer, Qt, QThread, pyqtSignal
from PyQt5 import uic
import sys
import time
import logging
# Module-wide logger; a console handler is attached later in main().
logger = logging.getLogger("root")
logger.setLevel(logging.DEBUG)
class Main(QMainWindow):
    """Demo window comparing three UI-update mechanisms: a repeating QTimer
    (loop), a self-rescheduling singleShot timer (loop3), and a worker-thread
    loop (print_thread_msg) that the code itself notes is flawed."""

    INTERVAL = 10  # timer period in milliseconds
    # Class-level counters shared by the periodic callbacks below.
    count = 0
    thread_counter = 0
    count1 = 0
    finished = pyqtSignal(bool)
    busy_thread = QThread()

    def __init__(self):
        super().__init__()
        uic.loadUi(r"main.ui", self)
        self.message = "Alert!"
        self.initUi()

    def initUi(self):
        # this will never work because you shouldn't move the main gui to a thread
        self.moveToThread(self.busy_thread)
        self.finished.connect(self.busy_thread.quit)
        self.busy_thread.started.connect(self.print_thread_msg)
        # Restart the thread as soon as it finishes, creating an endless cycle.
        self.busy_thread.finished.connect(self.busy_thread.start)
        self.busy_thread.start()
        self.loop()
        self.loop3()
        self.show()

    # Timer without single shot
    def loop(self):
        self.timer = QTimer()
        self.timer.setSingleShot(False)
        self.timer.timeout.connect(self.print_msg)
        self.timer.start(self.INTERVAL)

    def loop2(self):
        # Experimental variant (not wired up in initUi).
        logger.debug("loop2")
        QTimer.singleShot(self.INTERVAL, self.finished.emit)
        # self.finished.emit()
        # time.sleep(1)
        # self.finished.emit()
        # self.busy_thread.start()
        # while True:
        #     time.sleep(1)
        #     self.finished.emit()

    # Timer with single shot — reschedules itself each tick.
    def loop3(self):
        self.count1 %= 15
        self.count1 += 1
        self.print3LineEdit.setText(f"print3 counter: {self.count1}")
        QTimer.singleShot(self.INTERVAL, self.loop3)

    def stop(self):
        self.timer.stop()

    def print_msg(self):
        # logger.debug('printing msg')
        # Wrap the counter back to 0 after 100 ticks.
        if self.count > 100:
            self.count = 0
        else:
            self.count += 1
        self.printLineEdit.setText(f"hello: {self.count}")
        return "reg done"

    def print_thread_msg(self):
        # logger.debug('printing from thread')
        loop = 1000
        for x in range(loop):
            if self.thread_counter > loop:
                self.thread_counter = 0
            else:
                time.sleep(1)
                self.thread_counter += 1
            # NOTE(review): setText from a worker thread is not safe in Qt —
            # presumably intentional for this demo; confirm.
            self.threadLineEdit.setText(f"thread counter: {self.thread_counter}")
            self.finished.emit(True)
        # return "thread msg done"
def main():
    """Configure console logging, install a crash-visible excepthook, and
    start the Qt application."""
    # Enable logging on the console
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    logger.addHandler(ch)
    # PyQt swallows exceptions raised in slots; this hook prints them and
    # aborts instead.
    sys._excepthook = sys.excepthook

    def exception_hook(exctype, value, traceback):
        print(exctype, value, traceback)
        sys._excepthook(exctype, value, traceback)
        sys.exit(1)

    sys.excepthook = exception_hook
    # Opens the app
    app = QApplication(sys.argv)
    App = Main()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
|
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
import app.routing
# ASGI entry point: route websocket connections through Django Channels'
# auth middleware to the URL patterns declared in app.routing.
application = ProtocolTypeRouter({
    "websocket": AuthMiddlewareStack(
        URLRouter(
            app.routing.websocket_urlpatterns
        )
    ),
})
import numpy as np
from matplotlib import pyplot as plb
from utility import imageutil as im
from utility import constants as ct
from utility import util as ut
def invert_gamma_of_image(image, gamma_params, callback=None):
    """Undo the camera's gamma correction on *image*.

    Each colour channel is raised to its corresponding exponent from
    gamma_params ([g_b, g_g, g_r]) and the channels are recombined into a
    new float64 image.  The optional *callback* post-processes the result;
    its intended use is ONLY conversion from float64 to other formats
    (like float32).
    """
    if gamma_params is None:
        raise RuntimeError("You must pass in a list of gamma parameters as [g_b, g_g, g_r]")
    linearized = [np.power(channel, exponent)
                  for channel, exponent in zip(im.split_channels(image), gamma_params)]
    result = im.combine_channels(linearized)
    return result if callback is None else callback(result)
def learn_gamma_parameters_and_plot(exp_times=ct.GAMMA_EXPOSURE_TIMES, display=ct.DONT_DISPLAY_PLOT):
    """Learn per-channel gamma parameters from calibration images and plot
    diagnostics.

    If display is set to true, then popups of the plots are shown; plots are
    always written to the gamma output folder.
    Returns the g parameters [g_b, g_g, g_r].
    """
    # The linear fit is done in log(exposure-time) space.
    exp_times = np.log(exp_times)
    g_b, g_g, g_r, fits, orig_brightness, images = __learn_gamma_correction_parameters__(exp_times)
    g = [g_b, g_g, g_r]
    # Reversing log that was applied
    orig_brightness = np.exp(orig_brightness)
    # Apply the learned exponents to every calibration image and re-measure
    # brightness, to visualize how well the correction linearizes response.
    corrected_images = []
    for image in images:
        channels = im.split_channels(image)
        corrected_channels = [np.power(channels[0], g_b), np.power(channels[1], g_g), np.power(channels[2], g_r)]
        corrected_images.append(im.combine_channels(corrected_channels))
    [b_brightness, g_brightness, r_brightness] = im.get_average_brightness_of_images(corrected_images)
    b_brightness = ut.sort_and_reshape_to_1D(b_brightness)
    g_brightness = ut.sort_and_reshape_to_1D(g_brightness)
    r_brightness = ut.sort_and_reshape_to_1D(r_brightness)
    new_brightness = [b_brightness, g_brightness, r_brightness]
    # new_brightness = compute_real_brightness_of_image(orig_brightness, g)
    __plot_brightness_of_channels__(1, "ChannelBrightness.png", "Channel Brightness v/s Exposure Time",
                                    np.exp(exp_times), orig_brightness, display=display)
    __plot_linear_fit__(2, "LinearRegression.png", "Linear Fit",
                        exp_times, np.log(orig_brightness), fits, display=display)
    __plot_brightness_of_channels__(3, "CorrectedBrightness.png", "Real Channel Brightness v/s Exposure Time",
                                    np.exp(exp_times), new_brightness, display=display)
    return g
def __learn_gamma_correction_parameters__(exp_times):
    """Fit log(brightness) ~ m * log(T) + c for each channel; gamma = 1/m.

    Returns (g_b, g_g, g_r, [fits], [per-channel brightness], images).
    """
    # Learns the gamma correction applied by the camera.
    # Reads images from the gamma correction images directory
    images = im.load_images_from_folder(ct.GAMMA_READ_PATH, im.get_center_region)
    # We consider the log of the brightness, so we pass the np.log to the function
    [b_brightness, g_brightness, r_brightness] = im.get_average_brightness_of_images(images, np.log)
    b_brightness = ut.sort_and_reshape_to_1D(b_brightness)
    g_brightness = ut.sort_and_reshape_to_1D(g_brightness)
    r_brightness = ut.sort_and_reshape_to_1D(r_brightness)
    b_fit = __curve_fit__(exp_times, b_brightness)
    g_fit = __curve_fit__(exp_times, g_brightness)
    r_fit = __curve_fit__(exp_times, r_brightness)
    # Unpacking a Polynomial yields its coefficients (intercept, slope).
    c_b, m_b = b_fit
    c_g, m_g = g_fit
    c_r, m_r = r_fit
    # Gamma exponent is the reciprocal of the fitted slope.
    g_b = 1 / m_b
    g_g = 1 / m_g
    g_r = 1 / m_r
    return g_b, g_g, g_r, [b_fit, g_fit, r_fit], [b_brightness, g_brightness, r_brightness], images
def __curve_fit__(X, Y):
    """Fit a degree-1 polynomial Y ~ c + m*X and return the Polynomial.

    window and domain are set to the same interval so the returned
    coefficients are expressed in unscaled data space.
    """
    span = (min(Y), max(Y))
    return np.polynomial.Polynomial.fit(X, Y, 1, full=False,
                                        window=span, domain=span)
def __plot_brightness_of_channels__(figure_number, file_name, title, X, Y, callback=None, display=ct.DONT_DISPLAY_PLOT):
    """Plot per-channel brightness (Y[0..2]) against exposure time X, one
    subplot per channel, and save the figure to the gamma output folder.

    callback, if given, is applied once to X and once to each channel of Y
    before plotting.  If display is True the figure is also shown.
    """
    plb.figure(figure_number)
    if callback is not None:
        # BUG FIX: the original re-assigned X = callback(X) inside the loop,
        # so the transform was applied 1, 2, then 3 times across the three
        # subplots.  Apply it exactly once, up front.  (No caller in this
        # file currently passes a callback, so behavior of existing calls
        # is unchanged.)
        X = callback(X)
    for i in range(0, 3):
        channel = Y[i] if callback is None else callback(Y[i])
        plb.subplot(int("31" + str(i + 1)))
        plb.plot(X, channel, color=ct.CHANNEL_COLOUR[i])
        plb.ylabel(ct.CHANNEL[i] + ' Channel \n Brightness')
        plb.xlabel('Exposure Time - T')
    plb.tight_layout()
    plb.suptitle(title)
    plb.subplots_adjust(top=0.93)
    plb.savefig(ct.GAMMA_WRITE_PATH + "/" + file_name)
    if display is True:
        plb.show()
def __plot_linear_fit__(figure_number, file_name, title, X, Y, fits, display=ct.DONT_DISPLAY_PLOT):
    """Plot each channel's log-brightness samples with its fitted regression
    line, one subplot per channel, and save the figure.

    X is log(exposure time), Y holds the three channels' log-brightness
    arrays, fits the three fitted Polynomials.  If display is True the
    figure is also shown.
    """
    plb.figure(figure_number)
    for i in range(0, 3):
        channel_brightness = Y[i]
        fit = fits[i]
        plb.subplot(int("31" + str(i + 1)))
        plb.plot(X, channel_brightness, 'o', color='k')          # data points
        plb.plot(X, fit(X), color=ct.CHANNEL_COLOUR[i])          # fitted line
        plb.xlabel('Exposure Time - log(T)')
        # FIX: '\m' was an invalid escape sequence (DeprecationWarning);
        # escaping the backslash leaves the rendered label text unchanged.
        plb.ylabel(ct.CHANNEL[i] + ' Channel \n Brightness $\\mathrm{(B^{g})}$')
    plb.suptitle(title)
    plb.tight_layout()
    plb.subplots_adjust(top=0.93)
    plb.savefig(ct.GAMMA_WRITE_PATH + "/" + file_name)
    if display is True:
        plb.show()
|
# Print, alphabetically and one per line, every student whose score equals
# the second-lowest distinct score in the class.
students = {'Harry': 37.21, 'Berry': 37.21,
            'Tina': 37.2, 'Akriti': 41, 'Harsh': 39}
runner_up_score = sorted(set(students.values()))[1]
runner_ups = sorted(name for name, score in students.items()
                    if score == runner_up_score)
print(*runner_ups, sep="\n")
|
from pybedtools import BedTool;
import numpy as np;
def generate_curve(bedfile, chromosome, region_start, region_stop):
    """Build a per-base coverage curve over [region_start, region_stop].

    Intersects the intervals in *bedfile* with the requested region and
    returns (domain, values), where domain holds every base position in the
    region and values counts how many intervals cover each position.
    """
    bedtool = BedTool(bedfile)
    region_of_interest = BedTool(
        chromosome + ' ' + str(region_start) + ' ' + str(region_stop),
        from_string=True)
    plot_region = region_of_interest.intersect(bedtool)
    domain = np.arange(region_start, region_stop + 1)
    values = np.zeros(domain.shape)
    for interval in plot_region:
        # Clip the interval to the region of interest.
        start = max(interval.start, region_start)
        finish = min(interval.end, region_stop)
        # Convert genomic coordinates to 0-based indices into `values`,
        # offset from the region START.
        start_i = start - domain[0]
        # BUG FIX: the original computed `finish - domain[-1]` (offset from
        # the region END), which yields a non-positive slice end and so
        # silently dropped coverage increments.
        finish_i = finish - domain[0]
        values[start_i:finish_i] += 1
    return (domain, values)
|
__author__ = 'luiz'
import argparse
from collectors.default_collector import DefaultCollector
import os
from multiprocessing import Pool, Semaphore, Manager
import time
from threading import Thread, Event
import traceback
def import_code(code, name, add_to_sys_modules=0):
    """
    Import dynamically generated code as a module. code is the
    object containing the code (a string, a file handle or an
    actual compiled code object, same types as accepted by an
    exec statement). The name is the name to give to the module,
    and the final argument says whether to add it to sys.modules
    or not. If it is added, a subsequent import statement using
    name will return this module. If it is not added to sys.modules
    import will try to load it in the normal fashion.
    import foo
    is equivalent to
    foofile = open("/path/to/foo.py")
    foo = importCode(foofile,"foo",1)
    Returns a newly generated module.
    """
    try:
        import sys,imp
        module = imp.new_module(name)
        # Execute the supplied source inside the fresh module's namespace
        # (Python 2 exec-statement syntax).
        exec str(code) in module.__dict__
        if add_to_sys_modules:
            sys.modules[name] = module
        return module
    except:
        # NOTE(review): the bare except swallows the failure and the
        # function implicitly returns None -- callers must check for that.
        print "*********** Exception in trying to load module '%s'" % name
        print '-' * 60
        print traceback.format_exc()
class URLGather(object):
    """Recursively download a URL and its children up to `depth` levels,
    delegating extraction to a collector class (custom or DefaultCollector)
    and fanning work out over a multiprocessing Pool.  (Python 2 code.)"""

    def __init__(self, *args, **kwargs):
        # `url` is the only mandatory option; the rest have defaults.
        self.url = kwargs.get("url")
        if not self.url:
            raise Exception("No URL to gather")
        self.max_depth = kwargs.get("depth", 1)
        self.workers = kwargs.get("workers", 1)
        self.max_errors = kwargs.get("acceptable_errors", None)
        # Output root: normalized to end in "/", then suffixed with
        # "url_gather/"; created if missing.
        self.out = kwargs.get("out", "/tmp/")
        if not self.out.endswith("/"):
            self.out += "/"
        self.out += "url_gather/"
        if not os.path.exists(self.out):
            os.makedirs(self.out)
        self.collector_file = kwargs.get("collector_file")
        self.collector_class = kwargs.get("collector_class")
        self._load_collector()
        self._gathered_urls = set()
        # initiate multiprocessing resources
        self._pool = Pool(self.workers)
        # Caps the number of in-flight jobs at the worker count.
        self._semaphore = Semaphore(self.workers)
        self._manager = Manager()
        # Manager dicts are shared between this process and the pool workers.
        self._url_children = self._manager.dict()
        self._url_errors = self._manager.dict()
        self._url_events = {}

    def _load_collector(self):
        # Pick the collector implementation: dynamically import the custom
        # class when a collector file is supplied, else use DefaultCollector.
        if self.collector_file:
            if os.path.isfile(self.collector_file):
                if self.collector_class:
                    # TODO load custom collector
                    with open(self.collector_file, "r") as custom_code:
                        import_code(custom_code.read(), "custom_collector", 1)
                    m = __import__("custom_collector")
                    self.collector = m.__getattribute__(self.collector_class)
                else:
                    raise Exception("Undefined custom collector class name.")
            else:
                raise Exception("Custom collector file %s not found." % self.collector_file)
        else:
            self.collector = DefaultCollector

    def run(self):
        # Entry point: gather from the root URL, drain the pool, then dump
        # the error log if the error budget was exceeded.
        self._gather_url(self.url)
        self._pool.close()
        self._pool.join()
        if self._exceed_max_errors():
            error_log = "%serror.log" % self.out
            with open(error_log, "w+") as f:
                f.write(self._url_errors)
            print "REACHED MAX ERRORS. SEE %s FILE TO MORE DETAILS" % error_log

    def task_done(self, result):
        # Pool callback: free one worker slot.
        self._semaphore.release()

    def _exceed_max_errors(self):
        # max_errors < 0 (or None) disables the error budget entirely.
        return self.max_errors is not None and self.max_errors >= 0 and len(self._url_errors) > self.max_errors

    def _wait_children_in_thread(self, url):
        # Poll until a worker publishes the URL's children, then release
        # the waiter via its event.
        while url not in self._url_children:
            time.sleep(0.1)
        event = self._url_events.pop(url)
        event.set()

    def _wait_for_children(self, url):
        # Block the caller until the children of `url` are available.
        event = Event()
        self._url_events[url] = event
        t = Thread(target=self._wait_children_in_thread, args=(url,))
        t.start()
        event.wait()

    def _gather_url(self, url, current_depth=0):
        # Schedule `url` for collection and, while within depth, recurse
        # into its children.  Already-seen URLs are skipped.
        if self._exceed_max_errors() or url in self._gathered_urls:
            return
        self._gathered_urls.add(url)
        gather_children = current_depth < self.max_depth
        self._semaphore.acquire()
        if self._exceed_max_errors():
            return
        self._pool.apply_async(
            run_in_child,
            (url, self.collector, self._url_children, gather_children, self.out, self._url_errors),
            callback=self.task_done
        )
        # gather children links
        if gather_children:
            self._wait_for_children(url)
            if self._url_children[url]:
                for child in self._url_children[url]:
                    self._gather_url(child, current_depth+1)
def run_in_child(url, collector, url_children_map, gather_children, out, url_errors_map):
    """Pool-worker body: collect one URL and write its extracted content to
    <out><name>.out, where name is derived from the URL path.  Retries up
    to max_tries times; the final traceback is stored in url_errors_map.
    If gather_children is true, the URL's child links are published to
    url_children_map for the parent process.  (Python 2 code.)"""
    max_tries = 5
    tries = 1  # try each url 5 times in case of errors
    while tries <= max_tries:
        try:
            print "Extracting URL %s (Try %i/%i)" % (url, tries, max_tries)
            collector_instance = collector(url)
            # load children only if needed
            if gather_children:
                url_children_map[url] = collector_instance.get_children_urls()
            # Output file name: everything after the scheme+host, with
            # slashes flattened to underscores.
            out_name = url.split("/", 2)[2].replace("/", "_")
            with open("%s%s.out" % (out, out_name), "w+") as f:
                f.write(collector_instance.extract_content())
            return
        except:
            tries += 1
            if tries > max_tries:
                # Out of retries: record the failure for the parent process.
                print traceback.format_exc()
                url_errors_map[url] = traceback.format_exc()
if __name__ == "__main__":
    # Command-line interface: every option maps 1:1 onto a URLGather
    # constructor keyword (see URLGather.__init__).
    parser = argparse.ArgumentParser()
    parser.add_argument("-u", "--url", help="Initial URL to gather")
    parser.add_argument("-d", "--depth", type=int, default=1, help="Gathering depth")
    parser.add_argument("-w", "--workers", type=int, default=1, help="Number of parallel workers")
    parser.add_argument("-ae", "--acceptable_errors", type=int, default=-1, help="Max acceptable errors to continue execution (-1=disabled)")
    parser.add_argument("-o", "--out", default="/tmp/", help="Folder to save output files")
    parser.add_argument("-cf", "--collector_file", help="Path to custom .py file to act as collector")
    parser.add_argument("-cc", "--collector_class", help="Class name of custom collector")
    args = parser.parse_args()
    gather = URLGather(**args.__dict__)
    gather.run()
|
# coding: utf-8
# ### (Classroom Section: Project 4 Advanced Lane Finding)
# ## L17: Undistort and Transform Quiz
# ### My Solution:
#
# In an effort to reuse the Quiz code in my local Environment, I will copy some files from "Camera Calibration Quiz" into the local root where this notebook runs.
#
# **Note**: *'wide_dist_pickle.p'* was created in *"camera Calibration Quiz"*
# Online Quiz has this file in root directory,
# so I will copy the file from *'./calibration_wide'* folder to root.
#
# The test image *"test_image2.png"* used in the quiz is the same as *'./calibration_wide/GOPR0032.jpg'" from the *"camera Calibration Quiz"*.
# I shall copy this file as well, changing the name to *'test_image2,png'* to this root directory, for the same reason.
#
# **Note2**:, if *"CameraCalibration.ibynb"* has not been run, then *'wide_dist_pickle.p'* may not exist anywhere locally.
#
#
# In[1]:
import pickle
import cv2
get_ipython().magic('matplotlib inline')

# This is a hack to copy pickle file from './calibtration_wide' folder to root
dist_pickle = pickle.load( open( "calibration_wide/wide_dist_pickle.p", "rb" ) )
pickle.dump( dist_pickle, open( "wide_dist_pickle.p", "wb" ) )

# This is a hack to copy jpg image file from './calibration_wide' folder to root
img = cv2.imread('calibration_wide/GOPR0032.jpg')
# save jpg as png file to root
cv2.imwrite('test_image2.png', img)


# In[2]:

import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

# Read in the saved camera matrix and distortion coefficients
# These are the arrays you calculated using cv2.calibrateCamera()
dist_pickle = pickle.load( open( "wide_dist_pickle.p", "rb" ) )
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]

# curiosity: see what these coefficients look like
# print(' mtx[0]: ', mtx[0])
# print('dist[0]: ', dist[0])
# print()

# Read in an image
img = cv2.imread('test_image2.png')
# store orig image's pixel dimensions (for use in warpPerspective)
height = img.shape[0]
width = img.shape[1]
image_size = (width, height)

# Chessboard grid "dimensions"
nx = 8 # the number of inside corners in x
ny = 6 # the number of inside corners in y
def corners_unwarp(img, nx, ny, mtx, dist):
    """Undistort `img`, find the (nx, ny) chessboard corners, and warp the
    board to a top-down view.

    Returns (warped, M) when the module-level flag `plot_as_online_quiz`
    is True, otherwise (warped, M, undistorted_with_corners).
    NOTE(review): if corners are NOT found and plot_as_online_quiz is
    False, the 3-tuple return references `undistorted_with_corners`,
    which was never assigned -- that path would raise NameError.
    """
    # 1) Undistort using mtx and dist
    undistorted = cv2.undistort(img, mtx, dist, None, mtx)
    # 2) Convert to grayscale
    gray_undistorted = cv2.cvtColor(undistorted, cv2.COLOR_BGR2GRAY)
    # 3) Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray_undistorted, (nx,ny), None)
    #print(corners.shape, '\n', corners)
    # 4) If corners found:
    if ret == True:
        # a) draw corners
        # create undistorted image, with corners drawn on it
        undistorted_with_corners = cv2.drawChessboardCorners(undistorted, (nx,ny), corners, ret)
        # Doesn't Work Online (either method that I tried: 1st is error, 2nd is invisible
        # # plot undistorted_with_corners image:
        # # cv2.imshow('undistorted_with_corners', undistorted_with_corners)
        # f1, (ax1) = plt.subplots(1, 1, figsize=(24, 9))
        # f1.tight_layout()
        # ax1.imshow(undistorted_with_corners)
        # ax1.set_title('Undistorted With Corners Drawn', fontsize=50)
        # #plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
        # cv2.waitKey(1000)

        # b) define 4 source points src = np.float32([[,],[,],[,],[,]])
        #Note: you could pick any four of the detected corners
        # as long as those four corners define a rectangle
        #One especially smart way to do this would be to use four well-chosen
        # corners that were automatically detected during the undistortion steps
        #We recommend using the automatic detection of corners in your code
        # I'll use grid coordinates [0,0], [0, nx-1], [ny-1, nx], [nx,ny]
        # src = np.float32([corners[0][0], corners[7][0],
        #                   corners[40][0], corners[47][0]])
        # ie ordered as: TL,TR,BL,BR
        # print(corners[0][0], corners[nx-1][0])
        # print(corners[nx*(ny-1)][0], corners[nx*ny-1][0])
        print(corners.shape)
        print('4 coordinates chosen from undistorted image:')
        # Outer four detected corners: TL, TR, BL, BR of the board.
        src = np.float32([corners[nx*(0)][0], corners[(nx*1) -1][0],
                          corners[nx*(ny-1)][0], corners[(nx*ny)-1][0]])
        print(src, '\n')

        # c) define 4 destination points dst = np.float32([[,],[,],[,],[,]])
        # width and height of final image (pixels) == undistorted size = img size
        width, height = (undistorted.shape[1], undistorted.shape[0])
        print(width, height)
        # width and height of squares in unwarped image (pixels)
        grid_width_x = width/(nx+1) # nx lines ==> nx+1 equally spaced squares
        grid_height_y = height/(ny+1)
        print(grid_width_x, grid_height_y, 'grid width, height in undistorted, transformed image')
        # margin = 0 #15 #px
        # Destination rectangle: one grid-square inset from each image edge.
        dst = np.float32([[ 0+grid_width_x, 0+grid_height_y],
                          [width-grid_width_x, 0+grid_height_y],
                          [ 0+grid_width_x, height-grid_height_y],
                          [width-grid_width_x, height-grid_height_y]])
        # dst = np.float32([[ 0+margin+grid_width_x, 0+margin+grid_height_y],
        #                   [width-margin-grid_width_x, 0+margin+grid_height_y],
        #                   [ 0+margin+grid_width_x, height-margin-grid_height_y],
        #                   [width-margin-grid_width_x, height-margin-grid_height_y]])
        print('4 corresponding coordinates on unWarped image:')
        print(dst, '\n')

        # d) use cv2.getPerspectiveTransform() to get M, the transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # e) use cv2.warpPerspective() to warp your image to a top-down view
        image_size = (undistorted.shape[1], undistorted.shape[0])
        print(image_size)
        # warped = cv2.warpPerspective(undistorted, M, image_size, flags=cv2.INTER_LINEAR)
        warped = cv2.warpPerspective(undistorted_with_corners, M, image_size) #default: flags=cv2.INTER_LINEAR)
    else:
        print ("corners not found, could not perform warpPerspective on undistorted img")
        print(ret, corners)
        print(nx, ny, undistorted.shape)
        # send back the grayscale undistorted image. This will show in odd colors, standing out !
        warped = gray_undistorted #undistorted
        M = 0
    if (plot_as_online_quiz == False):
        # return 2 altered images to plot
        return warped, M, undistorted_with_corners #gray_undistorted, #undistorted
    else:
        # return only warped image
        return warped, M
# Online Quiz requires two plots
plot_as_online_quiz = True
# # Personally, I like seeing 3 plots
# plot_as_online_quiz = False

# Get Unwarped Image -- corners_unwarp's return arity depends on the flag set
# just above (2-tuple when True, 3-tuple when False).
if plot_as_online_quiz:
    top_down, perspective_M = corners_unwarp(img, nx, ny, mtx, dist)
    # Plot Images:
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()
    # Before
    ax1.imshow(img)
    ax1.set_title('Original', fontsize=50)
    # After
    ax2.imshow(top_down)
    ax2.set_title('Undistorted and Warped Image', fontsize=50)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
else:
    top_down, perspective_M, undist = corners_unwarp(img, nx, ny, mtx, dist)
    # Plot Images:
    f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 9))
    f.tight_layout()
    # Original
    ax1.imshow(img)
    ax1.set_title('Original', fontsize=50)
    # Undistorted
    ax2.imshow(undist)
    ax2.set_title('Undistort', fontsize=50)
    # Perspective Warp Correction on Undistorted
    ax3.imshow(top_down)
    ax3.set_title('Perspective Warp', fontsize=50)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# ### Undistort and Transform
#
# <img src='./L_17_Example_undist-and-warp.png' \>
# #### ReadMe - Quiz Instructions
#
# Here's a tricky quiz for you! You have now seen how to find corners, calibrate your camera, undistort an image, and apply a perspective transform. Now it's your chance to perform all these steps on an image. In the last quiz you calibrated the camera, so here I'm giving you the camera matrix, `mtx`, and the distortion coefficients `dist` to start with.
#
# Your goal is to generate output like the image shown above. To do that, you need to write a function that takes your distorted image as input and completes the following steps:
#
# - Undistort the image using cv2.undistort() with mtx and dist
# - Convert to grayscale
# - Find the chessboard corners
# - Draw corners
# - Define 4 source points (the outer 4 corners detected in the chessboard pattern)
# - Define 4 destination points (must be listed in the same order as src points!)
# - Use `cv2.getPerspectiveTransform()` to get `M`, the transform matrix
# - Use `cv2.warpPerspective()` to apply `M` and warp your image to a top-down view
# **HINT**: Source points are the x and y pixel values of any four corners on your chessboard, you can extract these from the `corners` array output from `cv2.findChessboardCorners()`. Your destination points are the x and y pixel values of where you want those four corners to be mapped to in the output image.
# If you run into any *errors* as you run your code, please refer to the **Examples of Useful Code** section in the previous video and make sure that your code syntax matches up! For this example, please also refer back to the examples in the *Calibrating Your Camera* video (L 10 of Project: Advanced Lane Finding Section)
# In[3]:
# The original quiz starter code, kept for reference as an unused string
# literal (a notebook trick for "commenting out" a cell); the trailing ''
# keeps the cell's displayed output empty.
"""
#This is the Quiz Start File:
import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

# Read in the saved camera matrix and distortion coefficients
# These are the arrays you calculated using cv2.calibrateCamera()
dist_pickle = pickle.load( open( "wide_dist_pickle.p", "rb" ) )
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]

# Read in an image
img = cv2.imread('test_image2.png')
nx = 8 # the number of inside corners in x
ny = 6 # the number of inside corners in y

# MODIFY THIS FUNCTION TO GENERATE OUTPUT
# THAT LOOKS LIKE THE IMAGE ABOVE
def corners_unwarp(img, nx, ny, mtx, dist):
    # Pass in your image into this function
    # Write code to do the following steps
    # 1) Undistort using mtx and dist
    # 2) Convert to grayscale
    # 3) Find the chessboard corners
    # 4) If corners found:
            # a) draw corners
            # b) define 4 source points src = np.float32([[,],[,],[,],[,]])
                 #Note: you could pick any four of the detected corners
                 # as long as those four corners define a rectangle
                 #One especially smart way to do this would be to use four well-chosen
                 # corners that were automatically detected during the undistortion steps
                 #We recommend using the automatic detection of corners in your code
            # c) define 4 destination points dst = np.float32([[,],[,],[,],[,]])
            # d) use cv2.getPerspectiveTransform() to get M, the transform matrix
            # e) use cv2.warpPerspective() to warp your image to a top-down view
    #delete the next two lines
    M = None
    warped = np.copy(img)
    return warped, M

top_down, perspective_M = corners_unwarp(img, nx, ny, mtx, dist)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(top_down)
ax2.set_title('Undistorted and Warped Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
"""
''
# In[ ]:
|
import numpy as np
import os
import pickle
import argparse
import re
''' kopp14_fit_oceandynamics.py
This runs the fitting stage for the ocean dynamics component of the Kopp14 workflow.
Parameters:
pipeline_id = Unique identifier for the pipeline running this code
'''
def _load_stage_pickle(filename, description):
	"""Load a pickle written by an earlier pipeline stage.

	Keeps the original diagnostic message but re-raises the failure: the
	original code printed the message and then fell through to a NameError
	on the never-opened file handle.
	"""
	try:
		f = open(filename, 'rb')
	except OSError:
		print("Cannot open {} file\n".format(description))
		raise
	with f:
		return pickle.load(f)


def kopp14_fit_oceandynamics(pipeline_id):
	"""Run the fitting stage for the ocean dynamics component of the Kopp14
	workflow.

	Reads "<pipeline_id>_config.pkl", "_ZOSTOGA.pkl", and "_ZOS.pkl"
	produced by earlier stages, computes thermal-expansion and
	ocean-dynamics statistics (mean, std, N, and the ZOS/thermal-expansion
	correlation), and writes "<pipeline_id>_thermalexp_fit.pkl" and
	"<pipeline_id>_oceandynamics_fit.pkl" next to this module.

	Parameters:
	pipeline_id = Unique identifier for the pipeline running this code
	"""
	# Load the configuration file
	my_config = _load_stage_pickle("{}_config.pkl".format(pipeline_id), "configuration")
	datayears = my_config["datayears"]
	mergeZOSZOSTOGA = my_config["mergeZOSZOSTOGA"]
	smoothwin = my_config["smoothwin"]
	driftcorr = my_config["driftcorr"]
	baseyear = my_config["baseyear"]

	# Load the ZOSTOGA file (global thermosteric sea-level change)
	my_zostoga = _load_stage_pickle("{}_ZOSTOGA.pkl".format(pipeline_id), "ZOSTOGA")
	sZOSTOGA = my_zostoga["sZOSTOGA"]
	histGICrate = my_zostoga["histGICrate"]
	zostoga_modellist = my_zostoga['zostoga_modellist']
	selectyears = my_zostoga["selectyears"]

	# Load the ZOS file (local dynamic sea-level change)
	my_zos = _load_stage_pickle("{}_ZOS.pkl".format(pipeline_id), "ZOS")
	sZOS = my_zos["sZOS"]
	sZOSTOGAadj = my_zos["sZOSTOGAadj"]
	focus_site_lats = my_zos["focus_site_lats"]
	focus_site_ids = my_zos["focus_site_ids"]
	comb_modellist = my_zos["comb_modellist"]

	#------------- Begin Thermal Expansion -----------------------------------------------
	# Get the mean, std, and counts of models for the thermal expansion component
	# Note: ZOSTOGA mean and std are converted into mm
	ThermExpYears = datayears
	ThermExpMean = np.nanmean(sZOSTOGA, axis=1)*1000
	ThermExpStd = np.nanstd(sZOSTOGA, axis=1)*1000

	def CountNonNAN(x):
		# Number of non-NaN entries (models contributing to a year).
		return(len(np.flatnonzero(~np.isnan(x))))
	ThermExpN = np.apply_along_axis(CountNonNAN, axis=1, arr=sZOSTOGA)

	if(driftcorr):
		# NOTE: THIS MAY BE A BUG IN THE ORIGINAL CODE!!!
		# ThermExpStd has units of mm, but histGICrate has units of meters
		ThermExpStd = np.sqrt(ThermExpStd**2 + (np.nanstd(histGICrate)*(ThermExpYears-selectyears[0]))**2) # Consistent with original code
		#ThermExpStd = np.sqrt(ThermExpStd**2 + (np.nanstd(histGICrate*1000)*(ThermExpYears-selectyears[0]))**2) # Fix the unit mis-match

	# Stretch out the thermal expansion metrics for one additional year
	# (linear extrapolation of the last step).
	ThermExpMean = np.append(ThermExpMean, (ThermExpMean[-1]+(ThermExpMean[-1]-ThermExpMean[-2])))
	ThermExpStd = np.append(ThermExpStd, (ThermExpStd[-1]+(ThermExpStd[-1]-ThermExpStd[-2])))
	ThermExpYears = np.append(ThermExpYears, (ThermExpYears[-1]+(ThermExpYears[-1]-ThermExpYears[-2])))
	ThermExpN = np.append(ThermExpN, (ThermExpN[-1]))

	# Store the thermal expansion variables in a pickle
	output = {'ThermExpMean': ThermExpMean, 'ThermExpStd': ThermExpStd,\
		'ThermExpYears': ThermExpYears, 'ThermExpN': ThermExpN}
	with open(os.path.join(os.path.dirname(__file__), "{}_thermalexp_fit.pkl".format(pipeline_id)), 'wb') as outfile:
		pickle.dump(output, outfile)

	#-------------------- Begin Ocean Dynamics -------------------------------------------
	# Pick out indices for year between 2000 and 2100 (consistent with K14) and trim data
	year_idx = np.flatnonzero(np.logical_and(datayears > 2000, datayears < 2100))
	sZOS = sZOS[year_idx,:,:]

	# Calculate and remove "extremeness" as models whose year 2099 value is over
	# 10x the median across models in year 2099
	ext_num = np.abs(sZOS[-1,:,:])
	ext_denom = np.nanmedian(np.abs(sZOS[-1,:,:]), axis=0)
	extremeness = ext_num/ext_denom
	with np.errstate(invalid='ignore'):
		model_idx = extremeness < 10 # Wrapped in np.errstate call to surpress warning of 'nan' in less-than test
	nan_mask = np.where(model_idx, 1, np.nan)
	sZOS = sZOS * nan_mask[np.newaxis,:,:]

	# Calculate and remove "extremeness" as models that in year 2099 have values
	# (less the mean across models) are greater than 3 standard deviations across models
	ext_num = sZOS[-1,:,:] - np.nanmean(sZOS[-1,:,:], axis=0)
	ext_denom = np.nanstd(sZOS[-1,:,:], axis=0)
	extremeness = np.abs(ext_num/ext_denom)
	# Only apply the 3-sigma cut where the across-model std exceeds 0.2.
	std_limit = np.where(ext_denom > 0.2, 1, 0)
	with np.errstate(invalid='ignore'):
		model_idx = extremeness > 3 # Wrapped in np.errstate call to surpress warning of 'nan' in greater-than test
	nan_mask = np.where(model_idx * std_limit == 1, np.nan, 1)
	sZOS = sZOS * nan_mask[np.newaxis,:,:]

	# If the latitude for the sites of interest is greater than 50, remove the "miroc"
	# and "giss" models
	lat_limit = np.where(focus_site_lats > 50.0, 1, 0)
	model_idx = np.array([bool(re.search(r"miroc|giss", x)) for x in comb_modellist])
	nan_mask = np.where(model_idx[:,np.newaxis] * lat_limit == 1, np.nan, 1)
	sZOS = sZOS * nan_mask[np.newaxis,:,:]

	# Calculate the OD mean, std, and N (converted into mm)
	OceanDynMean = np.nanmean(sZOS, axis=1) * 1000
	OceanDynStd = np.nanstd(sZOS, axis=1) * 1000
	OceanDynN = np.nansum(~np.isnan(sZOS), axis=1)

	# Trim sZOSTOGAadj to same year range as sZOS
	sZOSTOGAadj = sZOSTOGAadj[year_idx,:]

	# Calculate the correlation of ZOS with thermal expansion
	# (Pearson correlation across models, per year and site).
	zos_demean = sZOS - np.nanmean(sZOS, axis=1)[:,np.newaxis,:]
	zostoga_demean = sZOSTOGAadj - np.nanmean(sZOSTOGAadj, axis=1)[:,np.newaxis]
	corr_num = np.nansum(zos_demean * zostoga_demean[:,:,np.newaxis], axis=1)
	corr_denom = np.sqrt(np.nansum(zos_demean**2, axis=1) * np.nansum(zostoga_demean**2, axis=1)[:,np.newaxis])
	OceanDynTECorr = corr_num / corr_denom

	# Extend the OceanDyn* variables to 2100 (linear extrapolation)
	OceanDynMean = np.append(OceanDynMean, OceanDynMean[-1,:] + (OceanDynMean[-1,:] - OceanDynMean[-2,:])[np.newaxis,:], axis=0)
	OceanDynStd = np.append(OceanDynStd, OceanDynStd[-1,:] + (OceanDynStd[-1,:] - OceanDynStd[-2,:])[np.newaxis,:], axis=0)
	OceanDynN = np.append(OceanDynN, (OceanDynN[-1,:])[np.newaxis,:], axis=0)
	OceanDynTECorr = np.append(OceanDynTECorr, OceanDynTECorr[-1,:] + (OceanDynTECorr[-1,:] - OceanDynTECorr[-2,:])[np.newaxis,:], axis=0)

	# Ensure correlation remains within [-1,1]
	OceanDynTECorr = np.maximum(-1.0, np.minimum(1.0, OceanDynTECorr))

	# Define the years over which Ocean Dynamics mean, std, N, and TECorr are defined
	OceanDynYears = np.append(datayears[year_idx], datayears[year_idx[-1]] + 1)

	# Store the ocean dynamics variables in a pickle
	output = {'OceanDynMean': OceanDynMean, 'OceanDynStd': OceanDynStd,\
		'OceanDynYears': OceanDynYears, 'OceanDynN': OceanDynN, 'OceanDynTECorr': OceanDynTECorr}
	with open(os.path.join(os.path.dirname(__file__), "{}_oceandynamics_fit.pkl".format(pipeline_id)), 'wb') as outfile:
		pickle.dump(output, outfile)
if __name__ == '__main__':
	# Initialize the command-line argument parser
	parser = argparse.ArgumentParser(description="Run the fitting stage for the Kopp14 ocean dynamics workflow",\
		epilog="Note: This is meant to be run as part of the Kopp14 module within the Framework for the Assessment of Changes To Sea-level (FACTS)")

	# Define the command line arguments to be expected
	parser.add_argument('--pipeline_id', help="Unique identifier for this instance of the module")

	# Parse the arguments
	args = parser.parse_args()

	# Run the preprocessing stage with the user defined RCP scenario
	kopp14_fit_oceandynamics(args.pipeline_id)

	# Done
	# NOTE(review): exit() relies on the `site` module; sys.exit() is the
	# conventional choice for scripts -- confirm before changing.
	exit()
from django.shortcuts import render
from order.models import Order
# Create your views here.
def cart(request):
    """Render the cart page.

    For an authenticated user, fetch (or lazily create) their open order
    and its items; for anonymous visitors, render an empty placeholder
    cart so the template's totals still resolve.
    """
    if not request.user.is_authenticated:
        placeholder_order = {
            'get_cart_total': 0,
            'get_cart_items': 0,
        }
        return render(request, 'cart/cart.html',
                      {'items': [], 'order': placeholder_order})

    customer = request.user.customer
    order, _created = Order.objects.get_or_create(customer=customer, complete=False)
    return render(request, 'cart/cart.html', {
        'items': order.orderitem_set.all(),
        'order': order,
    })
|
import io
import base64
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def build_graph(data):
    """Render one date-formatted line per subject and return the plot as a
    'data:image/png;base64,...' URI string.

    data maps subject name -> sequence of (y, x) pairs, where x is a
    matplotlib-compatible date value.  Returns None when data is empty.
    """
    # Bail out BEFORE allocating a figure: the original created the figure
    # first and returned without closing it, leaking one figure per call
    # with empty data.
    if not data:
        return None
    subjects = list(data.keys())

    def get_all_values(index, labels, data):
        # Collect element `index` of every (y, x) pair, per label.
        return [[value[index] for value in data.get(label)] for label in labels]

    x_values = get_all_values(1, subjects, data)
    y_values = get_all_values(0, subjects, data)

    img = io.BytesIO()
    plt.figure(figsize=(11, 6.5))
    for i in range(len(data)):
        plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d-%m-%Y'))
        plt.plot(x_values[i], y_values[i], marker='o', markersize=8, linewidth=3, label=subjects[i])
    plt.gcf().autofmt_xdate()
    plt.legend()
    plt.savefig(img, format='png')
    img.seek(0)
    graph_url = base64.b64encode(img.getvalue()).decode()
    plt.close()
    return 'data:image/png;base64,{}'.format(graph_url)
|
# coding=utf-8
if __name__ == '__main__':
tagesmenus = []
while True:
answer = raw_input("\nWas mรถchtest du machen?\n"
"(1) Alle Gerichte anzeigen\n"
"(2) Set (Vorspeise, Hauptspeise, Nachspeise) hinzufรผgen\n"
"(3) Schreibe Menu in ein File\n"
"(4) Set lรถschen\n"
"(5) Menu File lesen\n"
"(q) Programm verlassen\n")
if answer.lower() == "q":
break
elif answer == "1":
print "\nGerichte werden angezeigt"
print "*" * 30
for menu_dict in tagesmenus:
for speise, info in menu_dict.iteritems():
print speise, info
print "*" * 301
elif answer == "2":
print "\nSet hinzufรผgen"
print "*"*30
menuset = dict()
speisentypen = ("Vorspeise", "Hauptspeise", "Nachspeise")
for speise in speisentypen:
name = raw_input("Was ist der Name der {}?\n".format(speise))
preis = raw_input("Was ist der Preis der {}?\n".format(speise))
menuset[speise] = (name, preis)
tagesmenus.append(menuset)
elif answer == "3":
# todo: implement
print "In File schreiben"
content = "Speisentyp,Name,Preis\n"
for menu_dict in tagesmenus:
for speise, info in menu_dict.iteritems():
content += "{},{},{}\n".format(speise,info[0],info[1])
with open("menukarte.txt","w") as f:
f.write(content)
elif answer == "4":
# todo: implement
print "Set lรถschen"
elif answer == "5":
print "File lesen"
with open("menukarte.txt","r") as f:
lines = f.readlines()
new_dict = dict()
for line in lines[1:]:
line = line.strip("\n")
speisentyp2, name2 ,preis2 = line.split(",")
new_dict[speisentyp2] = (name2,preis2)
tagesmenus.append(new_dict)
else:
print "Unknown Input..."
print "Exit Restaurant Program..." |
SOURCE = """/**
*
* Do something
*
* @dialect postgresql
* @name get_contacts
* @param contact_name: string - the name
* @param contact_origin: string - the origin
* @retmode tuples
*/
{
select * from contacts
where name=%(contact_name)s and origin=%(contact_origin)s
;
"""
FRAMES = []
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class Contact(models.Model):
    """Simple contact record; sends a templated notification mail on create."""
    # NOTE(review): the non-ASCII field labels below are mojibake from a bad
    # encoding round-trip; they are preserved as-is except where the damage
    # broke the syntax (see `content`).
    _name = 'tekau_contacts'
    name = fields.Char(string='ๅงๅ')
    # Bug fix: the original label literal contained a raw newline that split a
    # single-quoted string across two lines (a SyntaxError, apparently from an
    # encoding mangle). Reconstructed as the conventional Chinese "content"
    # label -- TODO confirm against the pre-mangling source.
    content = fields.Text(string='內容')
    phone = fields.Char(string='้ป่ฉฑ่็ขผ')
    email = fields.Char(string="้ปๅญ้ตไปถ")
    company_id = fields.Many2one('res.company', string='Company', index=True,
                                 default=lambda self: self.env.user.company_id.id)
    partner_id = fields.Many2one('res.partner', string='Customer', track_visibility='onchange', track_sequence=1,
                                 index=True,
                                 help="Linked partner (optional). Usually created when converting the lead. You can find a partner by its Name, TIN, Email or Internal Reference.")

    @api.model
    def create(self, values):
        """Create the contact, then send the new-customer mail template.

        force_send + raise_exception: fail loudly if the mail cannot go out.
        """
        contact_id = super().create(values)
        template = self.env.ref('tekau_web.new_customer_msg')
        self.env['mail.template'].browse(template.id).send_mail(contact_id.id, force_send=True, raise_exception=True)
        # Bug fix: removed leftover debug print(contact_id).
        return contact_id
|
import torch.nn as nn
import torch.nn.functional as F
import torch
dropout_value=0.02
class Unet(nn.Module):
    '''
    Two-stage network:

    1. A densely-connected branch (convblock1..convblock5) that maps the
       3-channel input ``x1`` to a 3-channel intermediate image (``conv5``).
    2. A small U-Net (convunet*/centerlayer/upsmaple*/finalconv) that fuses
       that intermediate with the second 3-channel input ``y1`` and predicts
       the final 3-channel output.

    forward(x1, y1) expects two (N, 3, H, W) tensors with H and W divisible
    by 8 (three 2x poolings) and returns the tuple (conv5, final), both of
    shape (N, 3, H, W).

    NOTE: the misspelled attribute names ``upsmaple1``/``upsmaple2`` are kept
    on purpose so existing checkpoints (state_dict keys) keep loading.
    '''

    def __init__(self):
        super(Unet, self).__init__()
        # Fuses the dense-branch output with y1 (3 + 3 = 6 channels) back to 3.
        self.convblockin = nn.Sequential(
            nn.Conv2d(in_channels=6, out_channels=3, kernel_size=(3, 3), bias=False, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(3)
        )
        # Dense branch: each block consumes the concatenation of all previous
        # activations plus the input, hence the odd in_channels counts
        # (35 = 32+3, 99 = 32+64+3, 227 = 32+64+128+3).
        self.convblock1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(3, 3), bias=False, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(32)
        )
        self.convblock2 = nn.Sequential(
            nn.Conv2d(in_channels=35, out_channels=64, kernel_size=(3, 3), bias=False, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(64)
        )
        self.convblock3 = nn.Sequential(
            nn.Conv2d(in_channels=99, out_channels=128, kernel_size=(3, 3), bias=False, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(128)
        )
        self.convblock4 = nn.Sequential(
            nn.Conv2d(in_channels=227, out_channels=256, kernel_size=(3, 3), bias=False, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(256)
        )
        # Bottleneck back down to a 3-channel intermediate image.
        self.convblock5 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=64, kernel_size=(3, 3), bias=False, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.Conv2d(in_channels=64, out_channels=32, kernel_size=(3, 3), bias=False, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.Conv2d(in_channels=32, out_channels=3, kernel_size=(1, 1), bias=False),
        )
        # ---------------------------- U-Net encoder ----------------------------
        self.convunet1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(3, 3), bias=False, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(32)
        )
        self.pool1 = nn.MaxPool2d(2, 2)
        self.convunet2 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), bias=False, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(64)
        )
        self.pool2 = nn.MaxPool2d(2, 2)
        self.convunet3 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), bias=False, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(128)
        )
        self.pool3 = nn.MaxPool2d(2, 2)
        # Bottom of the U: conv then transpose-conv back up one level.
        # NOTE(review): nn.Dropout() uses the default p=0.5; the module-level
        # ``dropout_value`` constant is never applied here -- confirm intent.
        self.centerlayer = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3), bias=False, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(256),
            nn.Dropout(),
            nn.ConvTranspose2d(256, 128, 2, stride=2),
            nn.ReLU(),
            nn.BatchNorm2d(128)
        )
        # Decoder stages; each takes [upsampled, skip] concatenated (2x channels).
        self.upsmaple1 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=128, kernel_size=(3, 3), bias=False, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(128),
            nn.ConvTranspose2d(128, 64, 2, stride=2),
            nn.ReLU(),
            nn.BatchNorm2d(64)
        )
        self.upsmaple2 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=64, kernel_size=(3, 3), bias=False, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.ConvTranspose2d(64, 32, 2, stride=2),
            nn.ReLU(),
            nn.BatchNorm2d(32))
        self.finalconv = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=32, kernel_size=(3, 3), bias=False, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.Conv2d(in_channels=32, out_channels=3, kernel_size=(3, 3), bias=False, padding=1)
        )

    def forward(self, x1, y1):
        """Return (conv5, final); see the class docstring for shapes."""
        # Dense branch over x1.
        conv1 = self.convblock1(x1)
        cat0 = torch.cat((conv1, x1), 1)             # 32 + 3  = 35 ch
        conv2 = self.convblock2(cat0)
        cat1 = torch.cat((conv1, conv2, x1), 1)      # 32+64+3 = 99 ch
        conv3 = self.convblock3(cat1)
        cat2 = torch.cat((conv1, conv2, conv3, x1), 1)  # 227 ch
        conv4 = self.convblock4(cat2)
        conv5 = self.convblock5(conv4)               # 3-channel intermediate
        # U-Net over the fusion of the intermediate with y1.
        x_dep = torch.cat((conv5, y1), 1)            # 6 ch
        x = self.convblockin(x_dep)                  # back to 3 ch
        convu1 = self.convunet1(x)
        convu2 = self.pool1(convu1)
        convu2 = self.convunet2(convu2)
        # Consistency fix: the original reused self.pool1 here although
        # self.pool2 was defined for this stage; both are MaxPool2d(2, 2),
        # so the output is unchanged.
        convu3 = self.pool2(convu2)
        convu3 = self.convunet3(convu3)
        convu4 = self.pool3(convu3)
        cent = self.centerlayer(convu4)
        # Decoder with skip connections (F.upsample_bilinear is deprecated in
        # newer torch in favour of F.interpolate, but kept for compatibility).
        cat1 = torch.cat([cent, F.upsample_bilinear(convu3, cent.size()[2:])], 1)
        upsam1 = self.upsmaple1(cat1)
        cat2 = torch.cat([upsam1, F.upsample_bilinear(convu2, upsam1.size()[2:])], 1)
        upsam2 = self.upsmaple2(cat2)
        cat3 = torch.cat([upsam2, F.upsample_bilinear(convu1, upsam2.size()[2:])], 1)
        final = self.finalconv(cat3)
        return conv5, final
import psycopg2
import json
import sys
# Module-level connection shared by every query helper below.
# NOTE(review): credentials are hard-coded; consider environment variables.
conn = psycopg2.connect(dbname="bookstore", user="bookstore", password="pass123", host="localhost")
def fetch_by_isbn(isbn):
    """Return the book with the given ISBN as a dict, or None if not found.

    Result keys: 'ISBN', 'title', 'author'.
    """
    cur = conn.cursor()
    try:
        cur.execute("SELECT * FROM books WHERE ISBN = (%s);", (isbn,))
        res = cur.fetchone()
        if res is None:
            # Bug fix: this early return previously skipped cur.close(),
            # leaking the cursor; try/finally closes it on every path.
            return None
        return {
            'ISBN': res[0],
            'title': res[1],
            'author': res[2]
        }
    finally:
        cur.close()
def fetch_all_books():
    """Return every book as a list of dicts (keys: 'ISBN', 'title', 'author')."""
    cur = conn.cursor()
    try:
        cur.execute("SELECT * FROM books")
        # Bug fix: removed the leftover debug print of the raw result set.
        return [
            {'ISBN': row[0], 'title': row[1], 'author': row[2]}
            for row in cur.fetchall()
        ]
    finally:
        # Close the cursor even when execute/fetch raises.
        cur.close()
def get_some_books(offset, limit):
    """Return a page of books ordered by ISBN.

    :param offset: number of rows to skip
    :param limit: maximum number of rows to return
    """
    cur = conn.cursor()
    try:
        cur.execute("SELECT * FROM books order by ISBN offset %s limit %s", (offset, limit))
        return [
            {'ISBN': row[0], 'title': row[1], 'author': row[2]}
            for row in cur.fetchall()
        ]
    finally:
        # Close the cursor even when execute/fetch raises (was leaked before).
        cur.close()
def insert_book(new_book):
    """Insert a book; commits on success, rolls back and re-raises on failure.

    :param new_book: dict with 'ISBN', 'title' and 'author' keys
    """
    cur = conn.cursor()
    try:
        cur.execute("INSERT INTO books VALUES (%s, %s, %s, %s);",
                    (new_book['ISBN'], new_book['title'], new_book['author'], None))
        conn.commit()
    except Exception:
        # Bug fix: was a bare ``except:`` (also caught SystemExit /
        # KeyboardInterrupt); narrowed while keeping the rollback+re-raise.
        conn.rollback()
        raise
    finally:
        cur.close()
def delete_book(isbn):
    """Delete the book with the given ISBN; commits on success.

    Rolls back and re-raises on failure (now consistent with insert_book);
    the cursor is closed on every path instead of leaking on error.
    """
    cur = conn.cursor()
    try:
        cur.execute("DELETE FROM books WHERE ISBN = (%s)", (isbn,))
        conn.commit()
    except Exception:
        conn.rollback()
        raise
    finally:
        cur.close()
# https://leetcode.com/problems/push-dominoes
# Bug fix: the URL above used a C-style ``//`` comment, which is a
# SyntaxError in Python.
class Solution(object):
    def pushDominoes(self, dominoes):
        """
        :type dominoes: str
        :rtype: str

        Repeatedly rewrite the string until it stabilises:
        - 'R.L' is a stand-off; mark it 'S' so neither push spreads into it,
        - '.L' -> 'LL' and 'R.' -> 'RR' each propagate a push by one cell,
        then restore the stand-offs at the end.
        """
        while True:
            new = dominoes.replace('R.L', 'S')
            new = new.replace('.L', 'LL').replace('R.', 'RR')
            if new == dominoes:
                break
            dominoes = new
        return dominoes.replace('S', 'R.L')
from simbatch.core import core as batch
import pytest
import os
@pytest.fixture(scope="module")
def sib():
    """Module-scoped SimBatch instance configured from config_tests.ini.

    NOTE: module scope means state mutated by one test is visible to the
    next -- the tests below rely on running in definition order.
    """
    # TODO pytest-datadir pytest-datafiles vs ( path.dirname( path.realpath(sys.argv[0]) )
    settings_file = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + os.sep + "config_tests.ini"
    sib = batch.SimBatch(5, ini_file=settings_file)
    return sib
def test_prepare_data_directory_by_delete_all_files(sib):
    """Precondition for the suite: remove any previously stored tasks file."""
    assert sib.sts.store_data_mode is not None
    if sib.sts.store_data_mode == 1:
        assert sib.comfun.path_exists(sib.sts.store_data_json_directory_abs) is True
    else:
        # PRO version with sql
        pass
    # sib.tsk.clear_all_tasks_data(clear_stored_data=True)
    sib.tsk.delete_json_tasks_file()
def test_no_task_data(sib):
    """After the cleanup above, the JSON tasks file must not exist."""
    assert len(sib.sts.store_data_json_directory_abs) > 0
    assert len(sib.sts.JSON_TASKS_FILE_NAME) > 0
    assert sib.comfun.file_exists(sib.sts.store_data_json_directory_abs + sib.sts.JSON_TASKS_FILE_NAME) is False
def test_create_example_tasks_data(sib):
    """Generate the sample task data (saved to disk) and verify its checksum."""
    assert sib.tsk.create_example_tasks_data(do_save=True) == sib.tsk.sample_data_checksum
    assert sib.tsk.sample_data_checksum is not None
    assert sib.tsk.sample_data_total is not None
    assert sib.tsk.total_tasks == sib.tsk.sample_data_total
def test_exist_proj_data(sib):
    """The save above must have created the JSON tasks file."""
    assert sib.comfun.file_exists(sib.sts.store_data_json_directory_abs + sib.sts.JSON_TASKS_FILE_NAME) is True
def test_clear_all_tasks_data(sib):
    """Clearing in-memory data empties the task list (file stays on disk)."""
    assert sib.tsk.clear_all_tasks_data() is True
    assert sib.tsk.total_tasks == 0
    assert len(sib.tsk.tasks_data) == 0
def test_json_schemas_data(sib):
    """The stored JSON file must contain a top-level "tasks" key."""
    assert sib.sts.store_data_mode is not None
    if sib.sts.store_data_mode == 1:
        json_file = sib.sts.store_data_json_directory_abs + sib.sts.JSON_TASKS_FILE_NAME
        json_tasks = sib.comfun.load_json_file(json_file)
        json_keys = json_tasks.keys()
        assert ("tasks" in json_keys) is True
def test_get_none_index_from_id(sib):
    """With no tasks loaded, id lookup yields None."""
    assert sib.tsk.get_index_by_id(2) is None
def test_load_tasks_from_json(sib):
    """Reloading from the JSON file restores the sample tasks."""
    json_file = sib.sts.store_data_json_directory_abs + sib.sts.JSON_TASKS_FILE_NAME
    assert sib.comfun.file_exists(json_file) is True
    assert sib.tsk.load_tasks_from_json(json_file=json_file) is True
    assert sib.tsk.total_tasks == sib.tsk.sample_data_total
def test_get2_index_from_id(sib):
    """After the reload, task id 2 sits at index 1."""
    assert sib.tsk.get_index_by_id(2) == 1
def test_load_schemas(sib):
    """Clearing then load_tasks() (default source) works as well."""
    assert sib.tsk.clear_all_tasks_data() is True
    assert sib.tsk.total_tasks == 0
    assert sib.tsk.load_tasks() is True
def test_get3_index_from_id(sib):
    """Index lookups remain stable after the default load."""
    assert sib.tsk.get_index_by_id(2) == 1
    assert sib.tsk.get_index_by_id(3) == 2
def test_total_tasks(sib):
    """Totals agree between the counter and the backing list."""
    assert sib.tsk.total_tasks == sib.tsk.sample_data_total
    assert len(sib.tsk.tasks_data) == sib.tsk.sample_data_total
def test_update_current_from_id(sib):
    """Selecting by id sets both current id and index."""
    assert sib.tsk.current_task_id is None
    assert sib.tsk.current_task_index is None
    assert sib.tsk.update_current_from_id(2) == 1
    assert sib.tsk.current_task_id == 2
    assert sib.tsk.current_task_index == 1
    assert sib.tsk.current_task.task_name == "tsk 2"
def test_update_current_from_index(sib):
    """Selecting by index sets both current index and id."""
    sib.tsk.current_task_id = None
    sib.tsk.current_task_index = None
    assert sib.tsk.update_current_from_index(2) == 3
    assert sib.tsk.current_task_id == 3
    assert sib.tsk.current_task_index == 2
    assert sib.tsk.current_task.task_name == "tsk 3"
def test_current_task_details(sib):
    """Every field of sample task 3 matches the generated example data."""
    assert sib.tsk.current_task.id == 3
    assert sib.tsk.current_task.task_name == "tsk 3"
    assert sib.tsk.current_task.state_id == 1
    assert sib.tsk.current_task.state == "INIT"
    assert sib.tsk.current_task.project_id == 2
    assert sib.tsk.current_task.schema_id == 3
    assert sib.tsk.current_task.sequence == "02"
    assert sib.tsk.current_task.shot == "004"
    assert sib.tsk.current_task.take == "b"
    assert sib.tsk.current_task.sim_frame_start == 7
    assert sib.tsk.current_task.sim_frame_end == 28
    assert sib.tsk.current_task.prev_frame_start == 8
    assert sib.tsk.current_task.prev_frame_end == 22
    assert sib.tsk.current_task.schema_ver == 4
    assert sib.tsk.current_task.task_ver == 5
    assert sib.tsk.current_task.queue_ver == 6
    assert sib.tsk.current_task.options == "o"
    assert sib.tsk.current_task.user_id == 1
    assert sib.tsk.current_task.priority == 8
    assert sib.tsk.current_task.description == "d"
def test_remove_single_schema_by_id(sib):
    """Removing by id shrinks the task list from 5 to 4."""
    assert sib.tsk.remove_single_task(task_id=1) is True
    assert sib.tsk.total_tasks == 4
    assert len(sib.tsk.tasks_data) == 4
def test_remove_single_schema_by_index(sib):
    """Removing by index shrinks the task list from 4 to 3."""
    assert sib.tsk.remove_single_task(index=1) is True
    assert sib.tsk.total_tasks == 3
    assert len(sib.tsk.tasks_data) == 3
def test_proxy_task(sib):
    """The proxy task accumulates partial updates field by field."""
    sib.tsk.clear_proxy_task()
    assert sib.tsk.proxy_task is None
    sib.tsk.update_proxy_task(from_task=sib.tsk.get_blank_task())
    assert sib.tsk.proxy_task is not None
    assert len(sib.tsk.proxy_task.task_name) == 0
    sib.tsk.update_proxy_task(task_ver=4)
    assert sib.tsk.proxy_task.task_ver == 4
    sib.tsk.update_proxy_task(priority=5)
    assert sib.tsk.proxy_task.priority == 5
    sib.tsk.update_proxy_task(sim_frame_start=101)
    assert sib.tsk.proxy_task.sim_frame_start == 101
    sib.tsk.update_proxy_task(sim_frame_end=102)
    assert sib.tsk.proxy_task.sim_frame_end == 102
    sib.tsk.update_proxy_task(prev_frame_start=103)
    assert sib.tsk.proxy_task.prev_frame_start == 103
    sib.tsk.update_proxy_task(prev_frame_end=104)
    assert sib.tsk.proxy_task.prev_frame_end == 104
    sib.tsk.update_proxy_task(description="test")
    assert sib.tsk.proxy_task.description == "test"
    sib.tsk.print_task(sib.tsk.proxy_task)
def test_print_current(sib):
    """Smoke test: printing the current task must not raise."""
    sib.tsk.print_current()
def test_print_all(sib):
    """Smoke test: printing all tasks must not raise."""
    sib.tsk.print_all()
|
import pytest
from Threat_Vault import Client, antivirus_signature_get, file_command, dns_get_by_id, antispyware_get_by_id, \
ip_geo_get, ip_command, antispyware_signature_search, signature_search_results
def test_antivirus_get_by_id(mocker):
    """antivirus_signature_get must surface the mocked API payload verbatim
    under the ThreatVault.Antivirus context key.

    (Server response mocked; see the PAN ThreatVault get-antivirus-signature
    API reference.)
    """
    client = Client(api_key='XXXXXXXX-XXX-XXXX-XXXX-XXXXXXXXXXXX', verify=True, proxy=False,
                    reliability='D - Not usually reliable')
    signature = {
        "active": True,
        "createTime": "2010-10-01 10:28:57 (UTC)",
        "release": {
            "antivirus": {
                "firstReleaseTime": "2010-10-03 15:04:58 UTC",
                "firstReleaseVersion": 334,
                "latestReleaseVersion": 0
            },
            "wildfire": {
                "firstReleaseVersion": 0,
                "latestReleaseVersion": 0
            }
        },
        "sha256": [
            "7a520be9db919a09d8ccd9b78c11885a6e97bc9cc87414558254cef3081dccf8",
            "9e12c5cdb069f74487c11758e732d72047b72bedf4373aa9e3a58e8e158380f8"
        ],
        "signatureId": 93534285,
        "signatureName": "Worm/Win32.autorun.crck"
    }
    mocker.patch.object(client, 'antivirus_signature_get_request', return_value=signature)
    output = antivirus_signature_get(client, args={'signature_id': '93534285'}).to_context()
    # The command stores the payload unchanged, so the dict we fed the mock
    # doubles as the expectation.
    context_key = 'ThreatVault.Antivirus(val.signatureId && val.signatureId == obj.signatureId)'
    assert output.get('EntryContext') == {context_key: signature}
def test_antivirus_get_by_id_no_ids():
    """Calling antivirus_signature_get with no sha256/signature_id must raise
    the documented usage error."""
    client_kwargs = dict(api_key='XXXXXXXX-XXX-XXXX-XXXX-XXXXXXXXXXXX', verify=True, proxy=False,
                         reliability='D - Not usually reliable')
    client = Client(**client_kwargs)
    with pytest.raises(Exception, match="Please submit a sha256 or a signature_id."):
        antivirus_signature_get(client, args={})
def test_file_command(mocker):
    """
    Given:
        - sha256 representing an antivirus
    When:
        - running file_command command
    Then
        - Validate the reputation of the sha256 is malicious.
    """
    client = Client(api_key='XXXXXXXX-XXX-XXXX-XXXX-XXXXXXXXXXXX', verify=True, proxy=False,
                    reliability='D - Not usually reliable')
    return_data = {
        "active": True,
        "createTime": "2010-10-01 10:28:57 (UTC)",
        "release": {
            "antivirus": {
                "firstReleaseTime": "2010-10-03 15:04:58 UTC",
                "firstReleaseVersion": 334,
                "latestReleaseVersion": 0
            },
            "wildfire": {
                "firstReleaseVersion": 0,
                "latestReleaseVersion": 0
            }
        },
        "sha256": [
            "7a520be9db919a09d8ccd9b78c11885a6e97bc9cc87414558254cef3081dccf8",
            "9e12c5cdb069f74487c11758e732d72047b72bedf4373aa9e3a58e8e158380f8"
        ],
        "signatureId": 93534285,
        "signatureName": "Worm/Win32.autorun.crck"
    }
    # Bug fix: the stray ``reliability=...`` kwarg previously passed to
    # mocker.patch.object was a copy-paste leftover from the Client call (it
    # only configured an unused attribute on the mock) and has been dropped.
    mocker.patch.object(client, 'antivirus_signature_get_request', return_value=return_data)
    command_results_list = file_command(
        client, args={'file': '7a520be9db919a09d8ccd9b78c11885a6e97bc9cc87414558254cef3081dccf8'})
    assert command_results_list[0].indicator.dbot_score.score == 3
def test_dns_get_by_id(mocker):
    """dns_get_by_id must place the mocked DNS-signature payload verbatim
    under the ThreatVault.DNS context key."""
    client = Client(api_key='XXXXXXXX-XXX-XXXX-XXXX-XXXXXXXXXXXX', verify=True, proxy=False,
                    reliability='D - Not usually reliable')
    signature = {
        'signatureId': 325235352, 'signatureName': 'generic:accounts.google.com.sign-google.com',
        'domainName': 'accounts.google.com.sign-google.com', 'createTime': '2020-01-15 23:57:54 (UTC)',
        'category': 'malware', 'active': True,
        'release': {
            'wildfire': {'latestReleaseVersion': 0, 'firstReleaseVersion': 0},
            'antivirus': {'latestReleaseVersion': 0, 'firstReleaseVersion': 0}
        }
    }
    mocker.patch.object(client, 'dns_signature_get_request', return_value=signature)
    output = dns_get_by_id(client, args={'dns_signature_id': '325235352'}).to_context()
    # Payload is stored unchanged, so the mocked dict is also the expectation.
    context_key = 'ThreatVault.DNS(val.signatureId && val.signatureId == obj.signatureId)'
    assert output.get('EntryContext') == {context_key: signature}
def test_antispyware_get_by_id(mocker):
    """antispyware_get_by_id must surface the mocked signature payload
    verbatim under the ThreatVault.AntiSpyware context key."""
    client = Client(api_key='XXXXXXXX-XXX-XXXX-XXXX-XXXXXXXXXXXX', verify=True, proxy=False,
                    reliability='D - Not usually reliable')
    signature = {
        'metadata': {
            'severity': 'medium',
            'reference': 'http://www.microsoft.com/security/portal/Threat/Encyclopedia/Entry.aspx?Name=Win32/Autorun,'
                         'http://blogs.technet.com/b/mmpc/archive/2011/02/08/breaking-up-the-romance-between-malware-'
                         'and-autorun.aspx,http://nakedsecurity.sophos.com/2011/06/15/usb-autorun-malware-on-the-wane/',
            'panOsMaximumVersion': '',
            'description': 'This signature detects a variety of user-agents in HTTP request headers that have been'
                           ' known to be used by the Autorun family of malicious software, and not known to be used by'
                           ' legitimate clients. The request header should be inspected to investigate the suspect'
                           ' user-agent. If the user-agent is atypical or unexpected, the endpoint should be inspected'
                           ' to determine the user-agent used to generate the request on the machine'
                           ' (typically malware).',
            'panOsMinimumVersion': '6.1.0', 'action': 'alert', 'category': 'spyware', 'changeData': ''
        },
        'cve': '', 'signatureName': 'Autorun User-Agent Traffic', 'vendor': '', 'signatureType': 'spyware',
        'firstReleaseTime': '2011-05-23 UTC', 'signatureId': 10001, 'latestReleaseTime': '2020-10-30 UTC',
        'latestReleaseVersion': 8338, 'status': 'released', 'firstReleaseVersion': 248
    }
    mocker.patch.object(client, 'antispyware_get_by_id_request', return_value=signature)
    output = antispyware_get_by_id(client, args={'signature_id': '10001'}).to_context()
    # Payload is stored unchanged, so the mocked dict is also the expectation.
    context_key = 'ThreatVault.AntiSpyware(val.signatureId && val.signatureId == obj.signatureId)'
    assert output.get('EntryContext') == {context_key: signature}
def test_ip_geo_get(mocker):
    """ip_geo_get must store the mocked geolocation payload verbatim under
    the ThreatVault.IP context key."""
    client = Client(api_key='XXXXXXXX-XXX-XXXX-XXXX-XXXXXXXXXXXX', verify=True, proxy=False,
                    reliability='D - Not usually reliable')
    geo = {'ipAddress': '1.1.1.1', 'countryCode': 'AU', 'countryName': 'Australia'}
    mocker.patch.object(client, 'ip_geo_get_request', return_value=geo)
    output = ip_geo_get(client, args={'ip': '1.1.1.1'}).to_context()
    context_key = 'ThreatVault.IP(val.ipAddress && val.ipAddress == obj.ipAddress)'
    assert output.get('EntryContext') == {context_key: geo}
def test_ip_command(mocker):
    """ip_command must yield an Unknown (0) dbot score and carry the mocked
    geo country onto the generated IP indicator."""
    client = Client(api_key='XXXXXXXX-XXX-XXXX-XXXX-XXXXXXXXXXXX', verify=True, proxy=False,
                    reliability='D - Not usually reliable')
    geo = {'ipAddress': '8.8.8.8', 'countryCode': 'US', 'countryName': 'United States'}
    mocker.patch.object(client, 'ip_geo_get_request', return_value=geo)
    indicator = ip_command(client, args={'ip': '8.8.8.8'})[0].indicator
    assert indicator.dbot_score.score == 0
    assert indicator.geo_country == 'United States'
def test_antispyware_signature_search_wrongful_arguments():
    """Supplying more than one selector (signature_name / cve / vendor) must
    raise the usage error for every pairing."""
    client = Client(api_key='XXXXXXXX-XXX-XXXX-XXXX-XXXXXXXXXXXX', verify=True, proxy=False,
                    reliability='D - Not usually reliable')
    wrong_args_err = 'Please provide either a signature_name or a cve or a vendor.'
    bad_arg_sets = (
        {'signature_name': '1234', 'cve': 'CVE-2020'},
        {'signature_name': '1234', 'vendor': 'panw'},
        {'vendor': 'panw', 'cve': 'CVE-2020'},
    )
    for bad_args in bad_arg_sets:
        with pytest.raises(Exception, match=wrong_args_err):
            antispyware_signature_search(client, args=bad_args)
def test_signature_search_results_dns(mocker):
    """Results of a domain-name signature search must land in context (with
    the request id and completion status stamped on) and render the expected
    markdown table."""
    client = Client(api_key='XXXXXXXX-XXX-XXXX-XXXX-XXXXXXXXXXXX', verify=True, proxy=False,
                    reliability='D - Not usually reliable')
    return_data = {
        "page_count": 1,
        "signatures": [
            {
                "active": True,
                "category": "malware",
                "createTime": "2015-03-03 14:45:03 (UTC)",
                "domainName": "mail-google.com.co",
                "release": {
                    "antivirus": {
                        "firstReleaseTime": "2015-03-03 15:11:53 UTC",
                        "firstReleaseVersion": 1890,
                        "latestReleaseVersion": 0
                    },
                    "wildfire": {
                        "firstReleaseVersion": 0,
                        "latestReleaseVersion": 0
                    }
                },
                "signatureId": 44101494,
                "signatureName": "generic:mail-google.com.co"
            }
        ],
        "total_count": 5306
    }
    # Expected context = the pristine payload plus the stamped-on request id
    # and status (computed before the call so later mutation can't leak in).
    expected_search = dict(return_data, search_request_id='mock_domain', status='completed')
    mocker.patch.object(client, 'signature_search_results_request', return_value=return_data)
    command_results = signature_search_results(client, args={'search_request_id': 'mock_domain', 'size': '1'})
    output = command_results.to_context()
    context_key = 'ThreatVault.Search(val.search_request_id && val.search_request_id == obj.search_request_id)'
    assert output.get('EntryContext') == {context_key: expected_search}
    expected_hr = '### Signature search are showing 1 of 5306 results:\n|signatureId|signatureName|domainName|' \
                  'category|\n|---|---|---|---|\n| 44101494 | generic:mail-google.com.co | mail-google.com.co |' \
                  ' malware |\n'
    assert output.get('HumanReadable') == expected_hr
def test_signature_search_results_anti_spyware_cve(mocker):
    """Results of a CVE signature search must land in context (with the
    request id stamped on) and render the expected markdown table."""
    client = Client(api_key='XXXXXXXX-XXX-XXXX-XXXX-XXXXXXXXXXXX', verify=True, proxy=False,
                    reliability='D - Not usually reliable')
    return_data = {
        "page_count": 1,
        "signatures": [
            {
                "cve": "CVE-2015-8650",
                "firstReleaseTime": "2015-12-28 UTC",
                "firstReleaseVersion": 548,
                "latestReleaseTime": "2020-10-30 UTC",
                "latestReleaseVersion": 8338,
                "metadata": {
                    "action": "reset-both",
                    "category": "code-execution",
                    "changeData": "",
                    "description": "Adobe Flash Player is prone to an use after free vulnerability while parsing"
                                   " certain crafted SWF files. The vulnerability is due to the lack of proper checks"
                                   " on SWF file, leading to an use after free vulnerability. An attacker could"
                                   " exploit the vulnerability by sending a crafted SWF file. A successful attack"
                                   " could lead to remote code execution with the privileges of the current"
                                   " logged-in user.",
                    "panOsMaximumVersion": "",
                    "panOsMinimumVersion": "7.1.0",
                    "reference": "https://helpx.adobe.com/security/products/flash-player/apsb16-01.html",
                    "severity": "high"
                },
                "signatureId": 38692,
                "signatureName": "Adobe Flash Player Use After Free Vulnerability",
                "signatureType": "vulnerability",
                "status": "released",
                "vendor": "APSB16-01"
            }
        ],
        "status": "completed",
        "total_count": 1
    }
    # Expected context = the pristine payload plus the stamped-on request id
    # (status is already "completed" in the payload itself).
    expected_search = dict(return_data, search_request_id='mock_cve')
    mocker.patch.object(client, 'signature_search_results_request', return_value=return_data)
    command_results = signature_search_results(client, args={'search_request_id': 'mock_cve', 'size': '1'})
    output = command_results.to_context()
    context_key = 'ThreatVault.Search(val.search_request_id && val.search_request_id == obj.search_request_id)'
    assert output.get('EntryContext') == {context_key: expected_search}
    expected_hr = '### Signature search are showing 1 of 1 results:\n|signatureId|signatureName|cve|' \
                  'signatureType|status|firstReleaseTime|latestReleaseTime|\n|---|---|---|---|---|---|---|\n|' \
                  ' 38692 | Adobe Flash Player Use After Free Vulnerability | CVE-2015-8650 |' \
                  ' vulnerability | released | 2015-12-28 UTC | 2020-10-30 UTC |\n'
    assert output.get('HumanReadable') == expected_hr
|
import os, time
import msgpack
from hashlib import sha1
import functools
import errno
def mkdirs_safe(path):
    """Create *path* (and any missing parents), tolerating an existing path.

    Swallows only EEXIST so a concurrent or repeated call is harmless;
    any other OSError (e.g. permission denied) propagates.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Only "already exists" is expected; re-raise everything else.
        if exc.errno != errno.EEXIST:
            raise
# Defaults to 15 days (1,296,000 sec)
def cache_disk(cache_root="/tmp/cachepy", stale_seconds=1296000):
    """Decorator factory: cache a function's msgpack-serializable result on disk.

    The cache key is a SHA1 of (module, name, args, kwargs), so all arguments
    must be msgpack-packable. A cached file older than *stale_seconds* is
    ignored and recomputed.

    :param cache_root: directory under which per-function cache dirs are made
    :param stale_seconds: maximum age of a cache entry before recomputation
    """
    def doCache(f):
        # Preserve the wrapped function's name/docstring (was commented out).
        @functools.wraps(f)
        def inner_function(*args, **kwargs):
            # calculate a cache key based on the decorated method signature
            key = sha1(msgpack.packb((f.__module__, f.__name__, args, kwargs))).hexdigest()
            # Make sure the cache path exists
            cache_path = os.path.join(cache_root, f.__name__)
            mkdirs_safe(cache_path)
            fn = os.path.join(cache_path, key)
            # Check if the cached object exists and is fresh
            if os.path.exists(fn):
                modified = os.path.getmtime(fn)
                age_seconds = time.time() - modified
                if age_seconds < stale_seconds:
                    # Context manager closes the handle (the original
                    # open(fn).read() leaked the file descriptor).
                    with open(fn, "rb") as cachefile:
                        return msgpack.unpackb(cachefile.read())
            # Otherwise call the decorated function
            result = f(*args, **kwargs)
            # Save the cached object for next time
            with open(fn, 'wb+') as cachefile:
                cachefile.write(msgpack.packb(result))
            return result
        return inner_function
    return doCache
|
from submission import Submission
class SilvestreSubmission(Submission):
    def run(self, s):
        """Sum every digit that equals the digit after it, circularly.

        :param s: input in string format
        :return: solution in integer format
        """
        # Append the first character so the last digit is compared
        # with the first (circular sequence).
        wrapped = s + s[0]
        return sum(int(cur) for cur, nxt in zip(wrapped, wrapped[1:]) if cur == nxt)
|
import tensorflow as tf
import numpy as np
from tensorflow.contrib.rnn import LSTMCell, GRUCell
import sys
class character_rnn(object):
    '''
    sample character-level RNN by Shang Gao
    parameters:
      - seq_len: integer (default: 200)
        number of characters in input sequence
      - first_read: integer (default: 50)
        number of characters to first read before attempting to predict next character
      - rnn_size: integer (default: 200)
        number of rnn cells
    methods:
      - train(text,iterations=100000)
        train network on given text
    '''
    # NOTE(review): Python 2 (print statements) and TensorFlow 1.x graph code
    # (placeholders, tf.contrib.rnn, tf.Session) -- do not run under Py3/TF2.
    def __init__(self, seq_len=200, first_read=50, rnn_size=200):
        self.seq_len = seq_len
        self.first_read = first_read
        # dictionary of possible characters; anything outside this set is
        # silently dropped during training
        self.chars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
                      't', 'u', 'v', 'w', 'x', 'y', 'z',
                      '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '-', '.', ',', '!', '?', '(', ')', '\'', '"',
                      ' ']
        self.num_chars = len(self.chars)
        # dictionary mapping characters to indices (and the inverse)
        self.char2idx = {char: i for (i, char) in enumerate(self.chars)}
        self.idx2char = {i: char for (i, char) in enumerate(self.chars)}
        '''
        #training portion of language model
        '''
        # input sequence of character indices; batch size is fixed at 1
        self.input = tf.placeholder(tf.int32, [1, seq_len])
        # convert to one hot
        one_hot = tf.one_hot(self.input, self.num_chars)
        # rnn layer
        self.gru = GRUCell(rnn_size)
        outputs, states = tf.nn.dynamic_rnn(self.gru, one_hot, sequence_length=[seq_len], dtype=tf.float32)
        outputs = tf.squeeze(outputs, [0])
        # ignore all outputs during first read steps
        outputs = outputs[first_read:-1]
        # softmax logit to predict next character (actual softmax is applied in cross entropy function)
        logits = tf.layers.dense(outputs, self.num_chars, None, True, tf.orthogonal_initializer(), name='dense')
        # target character at each step (after first read chars) is following character
        targets = one_hot[0, first_read + 1:]
        # loss and train functions
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=targets))
        self.optimizer = tf.train.AdamOptimizer(0.0002, 0.9, 0.999).minimize(self.loss)
        '''
        #generation portion of language model
        '''
        # use output and state from last word in training sequence
        state = tf.expand_dims(states[-1], 0)
        output = one_hot[:, -1]
        # save predicted characters to list
        self.predictions = []
        # generate 100 new characters that come after input sequence
        for i in range(100):
            # run GRU cell and softmax; the dense layer reuses the training
            # weights via reuse=True
            output, state = self.gru(output, state)
            logits = tf.layers.dense(output, self.num_chars, None, True, tf.orthogonal_initializer(), name='dense',
                                     reuse=True)
            # get index of most probable character (greedy decoding)
            output = tf.argmax(tf.nn.softmax(logits), 1)
            # save predicted character to list
            self.predictions.append(output)
            # one hot and cast to float for GRU API
            output = tf.cast(tf.one_hot(output, self.num_chars), tf.float32)
        # init op
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
    def train(self, text, iterations=100000):
        '''
        train network on given text
        parameters:
          - text: string
            string to train network on
          - iterations: int (default: 100000)
            number of iterations to train for
        outputs:
            None
        '''
        # convert characters to indices
        print "converting text in indices"
        text_indices = [self.char2idx[char] for char in text if char in self.char2idx]
        # get length of text
        text_len = len(text_indices)
        # train
        for i in range(iterations):
            # select random starting point in text
            start = np.random.randint(text_len - self.seq_len)
            sequence = text_indices[start:start + self.seq_len]
            # train
            feed_dict = {self.input: [sequence]}
            loss, _ = self.sess.run([self.loss, self.optimizer], feed_dict=feed_dict)
            sys.stdout.write("iterations %i loss: %f \r" % (i + 1, loss))
            sys.stdout.flush()
            # show generated sample every 100 iterations
            if (i + 1) % 100 == 0:
                feed_dict = {self.input: [sequence]}
                pred = self.sess.run(self.predictions, feed_dict=feed_dict)
                sample = ''.join([self.idx2char[idx[0]] for idx in pred])
                print "iteration %i generated sample: %s" % (i + 1, sample)
if __name__ == "__main__":
    import re
    # load sample text (whole corpus is read into memory)
    with open('corpus-large.txt', 'r') as f:
        text = f.read()
    # clean up text
    text = text.replace("\n", " ")  # remove linebreaks
    text = re.sub(' +', ' ', text)  # remove duplicate spaces
    text = text.lower()  # lowercase
    # train rnn (default 100000 iterations; prints samples every 100)
    rnn = character_rnn()
    rnn.train(text)
|
import asyncio
import aiohttp
import time
import logging
from selenium import webdriver
from selenium.webdriver.firefox import options
from selenium.common.exceptions import NoSuchElementException
# Optional: use uvloop's faster event loop implementation when available.
try:
    import uvloop
    uvloop.install()
except ImportError:
    pass
logger = logging.getLogger("render-engine")
logging.basicConfig(level=logging.INFO)
# Endpoint that serves the page to screenshot, keyed by render id.
target_url = "http://auto-render:8000/rendered/{}"
# NOTE(review): this rebinding shadows the imported `options` module.
options = options.Options()
options.headless = True
# Module-level singleton browser: launching Firefox is an import side effect.
driver = webdriver.Firefox(options=options)
def get_html(render_id):
    """Load the render page in the shared Firefox driver and return a
    base64-encoded screenshot of the ``render`` element.

    Blocking; meant to be called via ``run_in_executor``.  Raises
    ``NoSuchElementException`` when the element is absent.
    """
    t0 = time.perf_counter()
    driver.get(target_url.format(render_id))
    shot = driver.find_element_by_id("render").screenshot_as_base64
    stop = time.perf_counter() - t0
    logger.info(f"render took {stop * 1000}ms to render")
    return shot
async def main():
    """Worker loop: pull render jobs from the websocket, answer with screenshots."""
    loop = asyncio.get_running_loop()
    async with aiohttp.ClientSession() as sess:
        ws = await sess.ws_connect("ws://auto-render:8000/worker")
        while not ws.closed:
            msg = await ws.receive_json()
            render_id = msg['id']
            try:
                # Selenium is blocking, so run it in the default executor.
                bs64 = await loop.run_in_executor(None, get_html, render_id)
            except NoSuchElementException:
                # Page loaded but the target element was missing; report null.
                bs64 = None
            data = {
                "id": render_id,
                "render": bs64
            }
            await ws.send_json(data)
if __name__ == "__main__":
    # Run the worker loop until the websocket closes.
    asyncio.run(main())
|
from locust import HttpUser, TaskSet, task, between
class IOSUserBehavior(TaskSet):
    """Simulated iOS user: authenticate once, then repeatedly hit /profile."""

    def on_start(self):
        # Authenticate and reuse the resulting Authorization header for the
        # whole session.  Credentials must be strings: requests deprecates
        # (and only warns before coercing) non-str basic-auth values.
        r = self.client.get("/", auth=('yar', '333'))
        self.client.headers.update({'Authorization': r.request.headers['Authorization']})

    def on_stop(self):
        # End the simulated session cleanly.
        self.client.get("/logout")

    @task
    def profile(self):
        self.client.get("/profile")
class AndroidUserBehavior(TaskSet):
    """Simulated Android user: authenticate once, then hit /shareware and /photo."""

    def on_start(self):
        # Credentials must be strings: requests deprecates non-str auth values
        # (the original passed the int 123).
        r = self.client.get("/", auth=('iliya', '123'))
        self.client.headers.update({'Authorization': r.request.headers['Authorization']})

    def on_stop(self):
        # End the simulated session cleanly.
        self.client.get("/logout")

    @task
    def shareware(self):
        self.client.get("/shareware")

    @task
    def photo(self):
        self.client.get("/photo")
class WebsiteUser(HttpUser):
    # Each simulated user runs one of the two behavior TaskSets,
    # pausing 1-2 seconds between tasks.
    tasks = [IOSUserBehavior, AndroidUserBehavior]
    wait_time = between(1, 2)
|
"""
RMOption class
represents a single option.
It's better if you use the RMOptionHandler class, which automatically handles this options.
"""
class RMOption(object):
    """A single command-line style option (``--long`` / ``-short``).

    Tracks the option's metadata (names, description, default, whether a
    value is required) and, after parsing, its state (``in_use`` and
    ``value``).  Normally managed through an option handler.
    """

    def __init__(self, long_name: str, description: str, required: bool = False,
                 default_value=None, short_name: str = None,
                 needs_value=False, multiple_values: bool = False,
                 mapper=None, main_option=False, multiple_word_string: bool = False):
        self.short_name = short_name
        self.long_name = long_name
        self.description = description
        self.required = required
        self.default_value = default_value
        self.needs_value = needs_value
        self.multiple_values = multiple_values
        # A multi-valued option collects into a list, otherwise a scalar.
        # (Plain conditional expression replaces the original
        # immediately-invoked lambda, which added nothing.)
        self.value = [] if multiple_values else None
        self.in_use = False
        self.mapper = mapper
        self.multiple_word_string = multiple_word_string
        '''
        If main_option is True, the handler will stop the checking process,
        and put all follow strings as a value to it.
        It also set the multiple_values to true
        '''
        # NOTE: self.value intentionally stays as initialized above even when
        # main_option forces multiple_values to True (preserved behavior).
        self.main_option = main_option
        if self.main_option:
            self.multiple_values = True

    # usage of this option
    def usage(self):
        """Return a one-line human-readable usage string for this option."""
        short = " -" + self.short_name if self.short_name else ""
        value_needed = " {value needed}" if self.needs_value else ""
        default = (" {{default: {}}}".format(self.default_value)
                   if self.default_value is not None else "")
        multiple = " {multiple values possible}" if self.multiple_values else ""
        mapper_fmt = ""
        if self.mapper and self.mapper().get_expected_input_format():
            mapper_fmt = " {{{}}}".format(self.mapper().get_expected_input_format())
        return "--{}{}: {}{}{}{}{}".format(self.long_name, short, self.description,
                                           value_needed, default, multiple, mapper_fmt)

    # check if the option has a value
    def has_value(self):
        """True when a value is present (non-None scalar or non-empty list)."""
        return self.value is not None and self.value != []

    # check if the input of the option was complete
    def complete(self):
        """Validate the option after parsing.

        Returns True when the option's state is acceptable: either it was not
        used and is not required (or its default covers it), or it was used
        and carries a value when one is needed.
        """
        # in_use means the check function detected the option in the inputted
        # arguments; if it's absent but required, only a default can save it.
        if not self.in_use:
            if self.required:
                # set the default_value if it's required, but not given by user.
                # because we have a default_value we don't need an input. #github issue #2
                if self.default_value is not None:
                    if not self.has_value():
                        self.value = self.default_value
                    return self.has_value()
                return False
            return True
        # if we don't have a value, but we have a default value, apply it
        if self.default_value is not None and not self.has_value():
            self.value = self.default_value
        # if we don't need a value, presence alone is enough
        if not self.needs_value:
            return True
        # otherwise a non-empty value is mandatory
        if self.has_value():
            return True
        return False
|
import time
import numpy as np
import torch
from flatland.envs.observations import TreeObsForRailEnv
from flatland.envs.predictions import ShortestPathPredictorForRailEnv
from flatland.evaluators.client import FlatlandRemoteClient
from flatland.utils.rendertools import RenderTool
from torch import load
#####################################################################
# Settings
#####################################################################
from serpentrain.controllers.wait_if_occupied_anywhere_controller import WaitIfOccupiedAnywhereController
from serpentrain.models.linear_model import LinearModel
from serpentrain.reinforcement_learning.distributed.utils import create_model, create_controller
# --- Evaluation settings -------------------------------------------------
RENDER = True
USE_GPU = False
DQN_MODEL = False
CHECKPOINT_PATH = './checkpoints/submission/snapshot-20201104-2201-epoch-1.pt'
#####################################################################
# Define which device the controller should run on, if supported by
# the controller
#####################################################################
if USE_GPU and torch.cuda.is_available():
    device = torch.device("cuda:0")
    print("๐ Using GPU")
else:
    device = torch.device("cpu")
    print("๐ข Using CPU")
#####################################################################
# Instantiate a Remote Client
#####################################################################
remote_client = FlatlandRemoteClient()
#####################################################################
# Instantiate your custom Observation Builder
#
# You can build your own Observation Builder by following
# the example here :
# https://gitlab.aicrowd.com/flatland/flatland/blob/master/flatland/envs/observations.py#L14
#####################################################################
obs_tree_depth = 2
obs_max_path_depth = 20
predictor = ShortestPathPredictorForRailEnv(obs_max_path_depth)
obs_builder = TreeObsForRailEnv(max_depth=obs_tree_depth, predictor=predictor)
# Or if you want to use your own approach to build the observation from the env_step,
# please feel free to pass a DummyObservationBuilder() object as mentioned below,
# and that will just return a placeholder True for all observation, and you
# can build your own Observation for all the agents as you please.
# my_observation_builder = DummyObservationBuilder()
#####################################################################
# Define your custom controller
#
# which can take an observation, and the number of agents and
# compute the necessary action for this step for all (or even some)
# of the agents
#####################################################################
# Calculate the state size given the depth of the tree observation and the number of features
if DQN_MODEL:
    n_features_per_node = obs_builder.observation_dim
    n_nodes = sum([np.power(4, i) for i in range(obs_tree_depth + 1)])
    state_size = n_features_per_node * n_nodes
    # NOTE(review): this LinearModel instance is overwritten by create_model()
    # below before ever being used -- confirm the first assignment is needed.
    model = LinearModel(device, state_size, [], 5)
    checkpoint = load(CHECKPOINT_PATH, map_location=device)
    state_dict_model = checkpoint.get("model")
    model = create_model(state_dict=state_dict_model, device=device)
    controller = create_controller(model=model)
    print("Created model")
else:
    controller = WaitIfOccupiedAnywhereController()
#####################################################################
# Main evaluation loop
#
# This iterates over an arbitrary number of env evaluations
#####################################################################
evaluation_number = 0
print("Starting evaluation")
while True:
    evaluation_number += 1
    # Switch to a new evaluation environment
    #
    # a remote_client.env_create is similar to instantiating a
    # RailEnv and then doing a env.reset()
    # hence it returns the first observation from the
    # env.reset()
    #
    # You can also pass your custom observation_builder object
    # to allow you to have as much control as you wish
    # over the observation of your choice.
    time_start = time.time()
    observation, _ = remote_client.env_create(
        obs_builder_object=obs_builder
    )
    env_creation_time = time.time() - time_start
    if not observation:
        #
        # If the remote_client returns False on a `env_create` call,
        # then it basically means that your agent has already been
        # evaluated on all the required evaluation environments,
        # and hence its safe to break out of the main evaluation loop
        break
    if RENDER:
        env_renderer = RenderTool(remote_client.env)
        env_renderer.reset()
    print("Evaluation Number : {}".format(evaluation_number))
    #####################################################################
    # Access to a local copy of the environment
    #
    #####################################################################
    # Note: You can access a local copy of the environment
    # by using :
    #       remote_client.env
    #
    # But please ensure to not make any changes (or perform any action) on
    # the local copy of the env, as then it will diverge from
    # the state of the remote copy of the env, and the observations and
    # rewards, etc will behave unexpectedly
    #
    # You can however probe the local_env instance to get any information
    # you need from the environment. It is a valid RailEnv instance.
    local_env = remote_client.env
    number_of_agents = len(local_env.agents)
    # Now we enter into another infinite loop where we
    # compute the actions for all the individual steps in this episode
    # until the episode is `done`
    #
    # An episode is considered done when either all the agents have
    # reached their target destination
    # or when the number of time steps has exceed max_time_steps, which
    # is defined by :
    #
    # max_time_steps = int(4 * 2 * (env.width + env.height + 20))
    #
    time_taken_by_controller = []
    time_taken_per_step = []
    steps = 0
    print("resetting round for controller")
    time_start = time.time()
    controller.start_of_round(obs=observation, env=local_env)
    time_taken = time.time() - time_start
    time_taken_by_controller.append(time_taken)
    print("starting episode")
    while True:
        #####################################################################
        # Evaluation of a single episode
        #
        #####################################################################
        # Compute the action for this step by using the previously
        # defined controller
        time_start = time.time()
        action, _ = controller.act(observation)
        time_taken = time.time() - time_start
        time_taken_by_controller.append(time_taken)
        # Perform the chosen action on the environment.
        # The action gets applied to both the local and the remote copy
        # of the environment instance, and the observation is what is
        # returned by the local copy of the env, and the rewards, and done and info
        # are returned by the remote copy of the env
        time_start = time.time()
        observation, all_rewards, done, _ = remote_client.env_step(action)
        steps += 1
        time_taken = time.time() - time_start
        time_taken_per_step.append(time_taken)
        if RENDER:
            env_renderer.render_env(show=True, show_observations=True, show_predictions=True)
        if done['__all__']:
            print("Reward : ", sum(list(all_rewards.values())))
            #
            # When done['__all__'] == True, then the evaluation of this
            # particular Env instantiation is complete, and we can break out
            # of this loop, and move onto the next Env evaluation
            break
    # Per-episode timing statistics.
    np_time_taken_by_controller = np.array(time_taken_by_controller)
    np_time_taken_per_step = np.array(time_taken_per_step)
    print("=" * 100)
    print("=" * 100)
    print("Evaluation Number : ", evaluation_number)
    print("Current Env Path : ", remote_client.current_env_path)
    print("Env Creation Time : ", env_creation_time)
    print("Number of Steps : ", steps)
    print("Mean/Std of Time taken by Controller : ", np_time_taken_by_controller.mean(),
          np_time_taken_by_controller.std())
    print("Mean/Std of Time per Step : ", np_time_taken_per_step.mean(), np_time_taken_per_step.std())
    print("=" * 100)
print("Evaluation of all environments complete...")
########################################################################
# Submit your Results
#
# Please do not forget to include this call, as this triggers the
# final computation of the score statistics, video generation, etc
# and is necessary to have your submission marked as successfully evaluated
########################################################################
print(remote_client.submit())
|
from collections import defaultdict
from heapq import heapify, heappop
from typing import List
class Solution:
    def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
        """Return the length of the longest strictly increasing path in *matrix*.

        Movement is in the four cardinal directions.  Cells are processed in
        decreasing value order, so every strictly larger neighbour is already
        finalized when a cell is visited -- a single O(mn log mn) pass, with
        no heap of duplicated entries (the original re-processed duplicate
        values popped from the heap).

        :param matrix: rectangular grid of integers (may be empty)
        :return: longest increasing path length, 0 for an empty grid
        """
        # Guard both a missing grid and a grid of empty rows.
        if not matrix or not matrix[0]:
            return 0
        num_rows = len(matrix)
        num_cols = len(matrix[0])
        # dp[i][j] = longest increasing path starting at (i, j); at least 1.
        dp = [[1] * num_cols for _ in range(num_rows)]
        # Visit cells from the largest value down.
        cells = sorted(((matrix[i][j], i, j)
                        for i in range(num_rows) for j in range(num_cols)),
                       reverse=True)
        for value, i, j in cells:
            for ni, nj in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
                if 0 <= ni < num_rows and 0 <= nj < num_cols and matrix[ni][nj] > value:
                    # Neighbour is strictly larger, hence already finalized.
                    dp[i][j] = max(dp[i][j], dp[ni][nj] + 1)
        return max(max(row) for row in dp)
|
from lxml import etree
# Demo 1: parse an HTML file from disk and read its <title>.
parser = etree.HTMLParser()
tree = etree.parse('test.html', parser)
titles = tree.xpath('/html/head/title')
# This is a demo website
if len(titles) > 0:
    print( titles[0].text )
# Demo 2: parse an in-memory HTML fragment and pick one list item by class.
# NOTE(review): the link texts below appear mojibake-encoded; they are kept
# byte-for-byte because they are runtime string data.
html = '''
<div>
<ul>
<li class="item1"><a href="https://www.google.com"> ๅคๅฅใ</a></li>
<li class="item2"><a href="https://www.pchome.com.tw"> PCHOME่ณผ็ฉๅใ</a></li>
<li class="item3"><a href="https://www.jd.com"> ไบฌๆฑๅๅ </a></li>
</ul>
</div>
'''
tree = etree.HTML( html )
target = tree.xpath("//li[@class='item2']")
# https://www.pchome.com.tw PCHOME่ณผ็ฉๅ
if len(target) > 0:
    # target[0][0] is the <a> inside the matched <li>.
    print( target[0][0].get('href'), target[0][0].text )
import os
import parser
import unittest
class TestParser(unittest.TestCase):
    """Integration test: parser.parse must extract exactly one active-member
    record from the sample ODS spreadsheets under ./output_test."""
    def test_membros_ativos(self):
        # Show full diffs when the dict comparison fails.
        self.maxDiff = None
        # Expected record for the single employee present in the fixtures.
        # NOTE(review): key names appear mojibake-encoded; kept byte-for-byte
        # because they are runtime data compared against parser output.
        expected = {
            "reg": "128971",
            "name": "ACHILES DE JESUS SIQUARA FILHO",
            "role": "PROCURADOR DE JUSTICA",
            "type": "membro",
            "workplace": "PROCURADORIA DE JUSTICA CIVEL",
            "active": True,
            "income": {
                "total": 79435.09,
                "wage": 35462.22,
                "other": {
                    "total": 42672.87,
                    "trust_position": 0.0,
                    "others_total": 42672.87,
                    "others": {
                        "Gratificaรงรฃo Natalina": 0.0,
                        "Fรฉrias (1/3 constitucional)": 35462.22,
                        "Abono de Permanรชncia": 4964.71,
                        "Substituiรงรฃo de Membros": 2245.94,
                        "Serviรงo Extraordinรกrio": 0.0,
                        "Substituiรงรฃo de Funรงรฃo": 0.0,
                        "Gratificaรงรฃo de Serviรงos Especiais": 0.0,
                        "Diferenรงa de Entrรขncia": 0.0,
                    },
                },
                "perks": {
                    "total": 1300.0,
                    "food": 1300.0,
                    "transportation": 0.0,
                    "housing_aid": 0.0,
                    "birth_aid": 0.0,
                    "subsistence": 0.0,
                },
            },
            "discounts": {
                "total": 22625.93,
                "prev_contribution": 4964.71,
                "ceil_retention": 0.0,
                "income_tax": 17661.22,
            },
        }
        # Fixture pair: payroll sheet + indemnity sheet for the same month.
        files = (
            "./output_test/Membros ativos-01-2020.ods",
            "./output_test/Membros ativos-Verbas Indenizatorias-01-2020.ods",
        )
        employees = parser.parse(files)
        # Checks
        self.assertEqual(1, len(employees))
        self.assertDictEqual(employees[0], expected)
if __name__ == "__main__":
    unittest.main()
|
from scrapy import log
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.spiders import SitemapSpider
from walmartproducts.items import WalmartproductsItem
class MySpider(SitemapSpider):
    """Sitemap spider that scrapes Walmart product pages into WalmartproductsItem.

    NOTE(review): uses long-removed scrapy APIs (scrapy.log,
    HtmlXPathSelector, scrapy.contrib) -- runs only on very old scrapy.
    """
    name = 'spiderSM'
    # Side effect at class-definition time: opens the log file and starts
    # a file log observer.
    log.ScrapyFileLogObserver(open('SiteMaplog.log','a'), level=log.INFO).start()
    # Error statuses are still routed to parse() so a stub item can be emitted.
    handle_httpstatus_list = [404, 500, 503, 504, 400, 408, 403]
    def __init__(self, *args, **kwargs):
        super(MySpider, self).__init__(*args, **kwargs)
        # Sitemap URL is supplied on the command line via -a smf=<url>.
        self.sitemap_urls = [kwargs.get('smf')]
    def parse(self, response):
        item = WalmartproductsItem()
        # For error responses, emit a minimal item derived from the URL only.
        if response.status in self.handle_httpstatus_list:
            item['id'] = response.url.split('/')[-1]
            item['name'] = response.url.split('/')[-2].replace("-"," ")
            return item
        hxs = HtmlXPathSelector(response)
        try:
            item['id'] = response.url.split('/')[-1]
            item['url'] = response.url
            item['src'] = 'walmart'
            item['name'] = hxs.select('*//h1[@class="productTitle"]/text()').extract()[0]
            item['rating'] = 0
            # Breadcrumb categories concatenated as "A: B: C: ".
            cat = ''
            try:
                for li in hxs.select('//*[@id="crumbs"]/li') :
                    cat = cat+li.select('.//text()').extract()[0]+': '
                item['category'] = cat
            except Exception as e:
                log.msg(str(e)+' '+response.url, level=log.ERROR)
                log.err()
            try:
                item['image'] = hxs.select('*//div[@class="columnOne"]/div[@class="BoxContent"]//a[@id="Zoomer"]/@href')[0].extract()
            except Exception as e:
                log.msg(str(e)+' '+response.url, level=log.ERROR)
                log.err()
            # Price: big+small text spans first; fall back to SubmapPrice.
            try:
                item['price'] = float(((hxs.select('*//div[@class="columnTwo"]//span[@class="bigPriceText1"]/text()')[0].extract()+hxs.select('*//div[@class="columnTwo"]//span[@class="smallPriceText1"]/text()')[0].extract())[1:]).replace(",",""))
            except Exception as e:
                item['price'] = float((hxs.select('*//div[@class="columnTwo"]//span[@class="SubmapPrice"]/text()')[0].extract()[1:]).replace(",",""))
                log.msg(str(e)+' '+response.url, level=log.ERROR)
                log.err()
            #rating = hxs.select('*//div[@class="columnTwo"]//div[@class="CustomerRatings"]//img[contains(@src, "rating.png")]/@title')
            #if len(rating) is not 0:
            #    item['rating'] = float(rating[0].extract().split(" ")[0])
            #item['description'] = node.xpath('description').extract()
            return item
        except Exception as e:
            # Any other scraping failure: log and return whatever was filled.
            log.msg(str(e)+' '+response.url, level=log.ERROR)
            log.err()
            return item
|
"""delete duplicate node
exp: 1 1 2 3 3
1 2 3
"""
class ListNode(object):
    # Minimal singly linked list node.
    def __init__(self, x):
        self.val = x  # node payload
        self.next = None  # successor node, or None at the tail
def delete_duplicate(head):
    """Collapse runs of equal values in a sorted singly linked list.

    Works in place, keeps one node per distinct value, and returns *head*
    (which may be None for an empty list).
    """
    node = head
    while node is not None:
        # Splice out every immediate successor repeating this node's value.
        while node.next is not None and node.val == node.next.val:
            node.next = node.next.next
        # Successor now differs (or list ended); advance.
        node = node.next
    return head
if __name__ == "__main__":
node1 = ListNode(1)
node2 = ListNode(1)
node3 = ListNode(2)
node4 = ListNode(3)
node5 = ListNode(3)
node1.next = node2
node2.next = node3
node3.next = node4
node4.next = node5
new_head = delete_duplicate(node1)
cur = new_head
while cur:
print(cur.val, "->", end=" ")
cur = cur.next |
# young 124ms, 166
# "3-6-9 clap game": for 1..n print each number, unless its decimal digits
# contain 3, 6 or 9 -- then print one dash per such digit instead.
n = int(input())
a=[]  # NOTE(review): unused; kept unchanged to preserve the snippet as-is
for i in range(1,n+1) :
    j = str(i)
    # count the "clap" digits 3, 6 and 9
    t = j.count('3') + j.count('6') + j.count('9')
    if t >0 :
        j = '-'*t
    print(j, end=' ')
import numpy as np
import random
from ..gui.Objects import Material, MAX_MEMBER_LENGTHS
from ..lib.genotype import toGenotype, fromGenotype, stdbinToGray
class Bridge():
    """Genotype wrapper around a bridge-builder state for the genetic algorithm.

    Splits nodes into street / fixed-support / free (genotype) nodes, encodes
    the free nodes plus member adjacency and material flags into a genotype,
    and precomputes supports and load vectors for the solver.
    """
    def __init__(self, state, node_weight=10, street_weight=20):
        # Work on a copy so the GUI state is never mutated.
        state = state.clone()
        # Nodes touched by any STREET member -- their geometry is immutable.
        street_nodes = [node for node in state.nodes if (node in set(
            [node for member in state.members if member.material == Material.STREET for node in [member.a, member.b]]))]
        # Supported nodes (horizontal/vertical supports) that are not street nodes.
        fixed_nodes = [node
                       for node in state.nodes if node not in street_nodes and (node.v_support or node.h_support)]
        # Remaining nodes are free to move -- these enter the genotype.
        genotype_nodes = [
            node for node in state.nodes if node not in street_nodes + fixed_nodes]
        # Canonical node ordering: street, fixed, genotype.
        nodes = [*street_nodes, *fixed_nodes, *genotype_nodes]
        assert len(nodes) > 0, "At least 1 Node is required"
        # Positions with the y axis flipped (255 - y); coordinate space is
        # presumably the 0..255 GUI canvas -- TODO confirm.
        nodes_array = np.array([[node.x, 255-node.y]
                                for node in genotype_nodes])
        # Member endpoints as sorted index pairs -> upper-triangular adjacency.
        members_idx = np.array(
            [[*sorted([nodes.index(member.a), nodes.index(member.b)])] for member in state.members])
        members_array = np.full((len(nodes), len(nodes)), False)
        if len(members_idx) > 0:
            members_array[members_idx[:, 0], members_idx[:, 1]] = True
        # Same encoding, but True only where the member material is STEEL.
        materials_idx = np.array([[*sorted([nodes.index(member.a), nodes.index(member.b)])]
                                  for member in state.members if member.material == Material.STEEL])
        materials_array = np.full((len(nodes), len(nodes)), False)
        if len(materials_idx) > 0:
            materials_array[materials_idx[:, 0], materials_idx[:, 1]] = True
        # Rows of [node index, has h_support, has v_support].
        self.supports = np.array([[i, int(node.h_support), int(node.v_support)] for i, node in enumerate(
            nodes) if node.v_support or node.h_support], dtype=int).reshape((-1, 3))
        # Positions of the non-genotype (street + fixed) nodes.
        self.fixed_nodes_pos = np.array(
            [[node.x, 255-node.y] for node in street_nodes + fixed_nodes]).reshape((-1, 2))
        # Street members: [i, j, material value]; these are never evolved.
        self.street_members_idx = np.array(
            [[*sorted([nodes.index(member.a), nodes.index(member.b)]), member.material.value] for member in state.members if member.a in street_nodes and member.b in street_nodes])
        # Genotype
        self.genotype = toGenotype(nodes_array, members_array, materials_array)
        # Dead weight per node plus half of each street member's weight on
        # its two endpoints; rows are [node index, Fx, Fy].
        node_loads = np.hstack([np.arange(len(nodes)).reshape((-1, 1)), np.full((len(nodes), 2), [
            0, -node_weight])]).reshape((-1, 3))
        street_loads = np.array([[nodes.index(node), 0, -street_weight * member.length() / 2]
                                 for member in state.members for node in [member.a, member.b] if member.material == Material.STREET]).reshape((-1, 3))
        self.loads = np.vstack([node_loads, street_loads])
        self.nodes, self.members, self.materials = self._unpackGenotype(
            self.genotype)
    def _unpackGenotype(self, genotype):
        """Decode a genotype into (node positions, member index pairs, materials).

        Drops members longer than the material's maximum allowed length and
        re-prepends the immutable street members.
        """
        _nodes, _members, _materials = fromGenotype(*genotype)
        # Full node array: fixed positions first, then decoded genotype nodes.
        nodes = np.vstack([self.fixed_nodes_pos, _nodes])
        # Street members are managed separately -- mask them out of the
        # evolved adjacency before extraction.
        _members[self.street_members_idx[:, 0],
                 self.street_members_idx[:, 1]] = False
        members = np.argwhere(_members)
        # Euclidean length of each member from its endpoint coordinates.
        tmat = np.transpose(nodes[members], axes=(0, 2, 1))
        submat = np.subtract.reduce(tmat, axis=2)
        member_length = np.hypot.reduce(
            submat, axis=1, dtype=float).reshape(-1, 1)
        # Material id: boolean flag + 1 (presumably 1/2 = wood/steel -- TODO confirm).
        materials = (_materials[_members]).astype(int).reshape(-1, 1)+1
        max_member_lengths = np.array([MAX_MEMBER_LENGTHS[
            int(m)] for m in materials]).reshape(-1, 1)
        # Keep only members within their material's maximum length.
        allowed_members = np.argwhere(
            member_length <= max_member_lengths)[:, 0]
        materials = np.vstack([self.street_members_idx[:, 2].reshape(-1, 1).astype(
            int), materials[allowed_members]])
        members = np.vstack(
            [self.street_members_idx[:, 0:2], members[allowed_members]]).astype(int)
        return nodes, members, materials
    def setGenotype(self, genotype):
        # Replace the genotype and refresh the decoded phenotype arrays.
        self.genotype = genotype
        self.nodes, self.members, self.materials = self._unpackGenotype(
            self.genotype)
    def getGenotype(self):
        # Current genotype tuple (nodes, members, materials encoding).
        return self.genotype
    def randomGenotype(self):
        """Return a genotype with random member/material flags, keeping node genes."""
        _nodes, _members, _materials = self.genotype
        # nodes = np.array(
        #     list(map(lambda x: round(random.uniform(0, 255)*4)/4, range(len(_nodes)))))
        members = np.random.random(len(_members)) >= 0.5
        materials = np.random.random(len(_materials)) >= 0.5
        return (_nodes, members, materials)
|
import cv2
import numpy as np
import time
import datetime
from config import conf
import os
# Load the sample image and convert BGR -> HSV for colour thresholding.
img = cv2.imread(conf.sample_folder + 'slika2.png')
img_hsv=cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# HSV threshold bounds for the mask.
# NOTE(review): the original comment said "lower mask (0-10)", but the actual
# bounds are H 30-255, S 150-255, V 50-180 -- confirm the intended colour.
lower_red = np.array([30,150,50])
upper_red = np.array([255,255,180])
# Keep only pixels whose HSV values fall inside the range.
mask = cv2.inRange(img_hsv, lower_red, upper_red)
output_img = cv2.bitwise_and(img_hsv,img_hsv, mask= mask)
# Show the masked image until a key is pressed.
cv2.imshow("obradjena", output_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
def solution(n):
    """Return the number of 1-bits in the binary representation of n (popcount).

    Recursive: each step strips the lowest bit.  Assumes n >= 1.

    Uses floor division (``//``) -- the original used ``/``, which produces
    floats and silently loses precision for large n.
    """
    if n == 1 or n == 2:
        # 1 -> 0b1, 2 -> 0b10: both have exactly one set bit.
        return 1
    if n % 2 == 0:
        # Even: lowest bit is 0, halving drops it without changing the count.
        return solution(n // 2)
    # Odd: lowest bit is 1 -- count it and recurse on the remaining bits.
    return solution((n - 1) // 2) + 1
|
# Generated by Django 3.1.7 on 2021-03-19 05:34
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames UserChannel's FK fields from
    # "*_id"-suffixed names to plain names (channel_id -> channel,
    # user_id -> user).
    dependencies = [
        ('news', '0003_users_last_login'),
    ]
    operations = [
        migrations.RenameField(
            model_name='userchannel',
            old_name='channel_id',
            new_name='channel',
        ),
        migrations.RenameField(
            model_name='userchannel',
            old_name='user_id',
            new_name='user',
        ),
    ]
|
from django.db import models
# Create your models here.
class Filier(models.Model):
    # Academic track/program ("filière").  Name spelling kept: renaming a
    # model requires a migration.
    filier = models.CharField(max_length=80)
    def __str__(self):
        return self.filier
class Students(models.Model):
    # Student first/last name.
    f_name = models.CharField(max_length=250)
    l_name = models.CharField(max_length=250)
    # NOTE(review): name='filiers' makes the attribute/column name `filiers`
    # rather than `filier`; confirm this was intended (vs. related_name).
    filier = models.ForeignKey(Filier,name='filiers',on_delete=models.CASCADE)
    def __str__(self):
        return f"{self.f_name} {self.l_name}"
from conans import ConanFile, AutoToolsBuildEnvironment, RunEnvironment, tools
import os
class PySideConan(ConanFile):
    """Conan recipe building PySide2 (Qt for Python) from Autodesk's
    PySide2-Maya2018Update5 source distribution."""
    name = "PySide"
    # NOTE(review): the previous description was copy-pasted from an OpenEXR
    # recipe ("high dynamic-range (HDR) image file format ...") and described
    # the wrong package entirely.
    description = "PySide2 (Qt for Python) bindings, built from the " \
                  "PySide2-Maya2018Update5 source distribution."
    version = "2.2.0"
    license = "BSD"
    # NOTE(review): this URL points at a conan-openexr repository -- likely
    # also a copy-paste leftover; confirm the recipe's real home.
    url = "https://github.com/jgsogo/conan-openexr.git"
    settings = "os", "compiler", "build_type", "arch", "cppstd", "python"
    exports = "*.tgz"

    def requirements(self):
        # Qt from the ASWF VFX 2018 platform channel.
        self.requires('Qt/5.6.1@aswf/vfx2018')

    def source(self):
        """Use a local source tarball when present, otherwise download it."""
        base = "PySide2-Maya2018Update5.tgz"
        if os.path.exists(base):
            self.output.info("Found local source tarball {}".format(base))
            tools.unzip(base)
        else:
            url = "https://www.autodesk.com/content/dam/autodesk/www/Company/files/" + base
            self.output.warn("Downloading source tarball {}".format(url))
            tools.get(url)

    def build(self):
        # with tools.environment_append(self.env):
        #     pypath = os.path.join(self.package_folder, 'lib64/python2.7/site-packages')
        #     os.makedirs(pypath)
        #     with tools.environment_append({'PYTHONPATH': pypath}):
        # Build and install straight into the package folder via setup.py.
        self.run('python setup.py build install --prefix {}'.format(self.package_folder),
                 cwd='pyside-setup', run_environment=True)

    def package(self):
        # Collect the built eggs into the package.
        self.copy("*.egg")

    def package_info(self):
        # Consumers need the installed site-packages on PYTHONPATH.
        self.env_info.PYTHONPATH.append(os.path.join(self.package_folder, 'lib64/python2.7/site-packages'))
|
# Generated by Django 3.1.3 on 2020-11-16 00:51
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: updates the movie choice lists on Seat and
    # Ticket.  NOTE(review): the stored value 'Moonligh' (display 'Moonlight')
    # looks like a typo, but changing a stored choice value would require a
    # data migration -- left as generated.
    dependencies = [
        ('tickets', '0016_auto_20201116_0044'),
    ]
    operations = [
        migrations.AlterField(
            model_name='seat',
            name='movie',
            field=models.CharField(choices=[('Black Panther', 'Black Panther'), ('Moonligh', 'Moonlight'), ('Aladdin', 'Aladdin')], max_length=150),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='movie',
            field=models.CharField(choices=[('Black Panther', 'Black Panther'), ('Moonligh', 'Moonlight'), ('Aladdin', 'Aladdin')], max_length=150),
        ),
    ]
|
import os
from setuptools import setup, find_packages
# exec (open('version.py').read())
# Minimal package metadata for the stackview slice viewer.
setup(name='stackview',
      version='0.0.1',
      description='Stack(img) for viewing slices of ndarrays w ndim > 2.',
      # url='https://github.com/maweigert/spimagine',
      author='Coleman Broaddus',
      author_email='broaddus@mpi-cbg.de',
      license='BSD 3-Clause License',
      packages=['stackview'],
      # packages=find_packages(),
      # zip_safe=False)
      )
|
# Value of each single Roman numeral character.
to_int = {
    'I': 1,
    'V': 5,
    'X': 10,
    'L': 50,
    'C': 100,
    'D': 500,
    'M': 1000,
}


def roman_to_int(roman):
    """Convert a Roman numeral string to an integer.

    A character smaller than its successor forms a subtractive pair
    (IV -> 4); otherwise it is simply added (VI -> 6).
    Returns -1 for an empty string (kept from the original contract).
    """
    if len(roman) == 1:
        return to_int[roman]
    if len(roman) < 1:
        return -1
    total = 0
    i = 0
    while i < len(roman):
        value = to_int[roman[i]]
        if i + 1 < len(roman) and value < to_int[roman[i + 1]]:
            # Subtractive pair (IV, IX, XL, ...): consume both characters.
            total += to_int[roman[i + 1]] - value
            i += 2
        else:
            # Plain additive character.
            total += value
            i += 1
    return total


roman = 'CCCXLII'
print(roman_to_int(roman))
# <<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Symbol/value pairs ordered largest-to-smallest; the greedy loop below
# depends on this ordering.  Defined BEFORE the function for clarity — the
# original defined it afterwards, which only worked because the global is
# resolved at call time.
TABLE = [['M', 1000], ['CM', 900], ['D', 500], ['CD', 400], ['C', 100],
         ['XC', 90], ['L', 50], ['XL', 40], ['X', 10], ['IX', 9],
         ['V', 5], ['IV', 4], ['I', 1]]


def int_to_roman(integer):
    """Greedily convert a non-negative integer to a Roman numeral string.

    Returns '' for 0 (and for negative input, which never enters the loop).
    """
    parts = []
    for letter, value in TABLE:
        # Emit the largest symbol that still fits, as many times as it fits.
        while value <= integer:
            integer -= value
            parts.append(letter)
    return ''.join(parts)


a = 342
print(int_to_roman(a))
# integer = 342
# 342 - 100 = 242 -> C |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import utils
# pylint: disable=arguments-differ
def initialize_weight(x):
    """Xavier-initialize a layer's weight and zero its bias when present."""
    nn.init.xavier_uniform_(x.weight)
    if x.bias is None:
        return
    nn.init.constant_(x.bias, 0)
class FeedForwardNetwork(nn.Module):
    """Position-wise feed-forward block: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, hidden_size, filter_size, dropout_rate):
        super(FeedForwardNetwork, self).__init__()
        self.layer1 = nn.Linear(hidden_size, filter_size)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout_rate)
        self.layer2 = nn.Linear(filter_size, hidden_size)
        initialize_weight(self.layer1)
        initialize_weight(self.layer2)

    def forward(self, x):
        # Expand to filter_size, apply non-linearity and dropout, project back.
        hidden = self.relu(self.layer1(x))
        return self.layer2(self.dropout(hidden))
class GLU(nn.Module):
    """Gated linear unit: W(x) gated element-wise by sigmoid(V(x)).

    NOTE(review): the output width is hard-coded to 512 — confirm this
    matches the model's hidden size wherever GLU is used.
    """

    def __init__(self, in_features, dropout_rate):
        super(GLU, self).__init__()
        self.sigm = nn.Sigmoid()
        self.W = nn.Linear(in_features, out_features=512, bias=True)
        self.V = nn.Linear(in_features, out_features=512, bias=True)
        initialize_weight(self.W)
        initialize_weight(self.V)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x):
        gate = self.sigm(self.V(x))
        return self.dropout(self.W(x) * gate)
class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention with optional caching of the
    encoder-side key/value projections (cache keys 'encdec_k'/'encdec_v')."""

    def __init__(self, hidden_size, dropout_rate, head_size=8):
        super(MultiHeadAttention, self).__init__()

        self.head_size = head_size

        # FIXME: is this correct? Why are we not counting in attention heads
        # Per-head width; assumes hidden_size is divisible by head_size.
        self.att_size = att_size = hidden_size // head_size
        # 1/sqrt(d_k) scaling applied to queries before the dot product.
        self.scale = att_size ** -0.5

        self.linear_q = nn.Linear(hidden_size, head_size * att_size, bias=False)
        self.linear_k = nn.Linear(hidden_size, head_size * att_size, bias=False)
        self.linear_v = nn.Linear(hidden_size, head_size * att_size, bias=False)
        initialize_weight(self.linear_q)
        initialize_weight(self.linear_k)
        initialize_weight(self.linear_v)

        self.att_dropout = nn.Dropout(dropout_rate)

        self.output_layer = nn.Linear(head_size * att_size, hidden_size,
                                      bias=False)
        initialize_weight(self.output_layer)

    def forward(self, q, k, v, mask, cache=None):
        """Attend q over k/v; positions where *mask* is true are suppressed
        (filled with -1e9 before the softmax).  With *cache*, the k/v
        projections are computed once and reused on later decode steps."""
        orig_q_size = q.size()

        d_k = self.att_size
        d_v = self.att_size
        batch_size = q.size(0)

        # head_i = Attention(Q(W^Q)_i, K(W^K)_i, V(W^V)_i)
        q = self.linear_q(q).view(batch_size, -1, self.head_size, d_k)
        if cache is not None and 'encdec_k' in cache:
            k, v = cache['encdec_k'], cache['encdec_v']
        else:
            k = self.linear_k(k).view(batch_size, -1, self.head_size, d_k)
            v = self.linear_v(v).view(batch_size, -1, self.head_size, d_v)

            if cache is not None:
                cache['encdec_k'], cache['encdec_v'] = k, v

        q = q.transpose(1, 2)                  # [b, h, q_len, d_k]
        v = v.transpose(1, 2)                  # [b, h, v_len, d_v]
        k = k.transpose(1, 2).transpose(2, 3)  # [b, h, d_k, k_len]

        # Scaled Dot-Product Attention.
        # Attention(Q, K, V) = softmax((QK^T)/sqrt(d_k))V
        q.mul_(self.scale)  # in-place is safe: q is a fresh projection
        x = torch.matmul(q, k)  # [b, h, q_len, k_len]
        x.masked_fill_(mask.unsqueeze(1), -1e9)
        x = torch.softmax(x, dim=3)
        x = self.att_dropout(x)
        x = x.matmul(v)  # [b, h, q_len, attn]

        x = x.transpose(1, 2).contiguous()  # [b, q_len, h, attn]
        x = x.view(batch_size, -1, self.head_size * d_v)

        x = self.output_layer(x)

        assert x.size() == orig_q_size
        return x
# See https://discuss.pytorch.org/t/using-optimised-depthwise-convolutions/11819/14
# Visual explanation https://towardsdatascience.com/a-basic-introduction-to-separable-convolutions-b99ec3102728
# this impl considers depthwise multiplier K=1
# FIXME: Please optimize this to use less memory
class SeparableConv2d(nn.Module):
    """Depthwise-separable 2-D convolution with depth multiplier K=1:
    a per-channel (grouped) convolution followed by a 1x1 pointwise mix."""

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):
        super(SeparableConv2d, self).__init__()
        # Depthwise step: one filter per input channel (groups=in_channels).
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels,
                               bias=bias)
        # Pointwise step: 1x1 convolution mixing channels.
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)
        initialize_weight(self.conv1)
        initialize_weight(self.pointwise)

    def forward(self, x):
        return self.pointwise(self.conv1(x))
class Swish(nn.Module):
    """Swish activation: x * sigmoid(beta * x)."""

    def __init__(self, beta=1.0):
        super(Swish, self).__init__()
        self.beta = torch.tensor(beta)

    def forward(self, x):
        # torch.sigmoid replaces the deprecated F.sigmoid (removed in
        # recent PyTorch releases); numerically identical.
        return x * torch.sigmoid(self.beta * x)
class EncoderLayer(nn.Module):
    """Evolved-Transformer encoder block: GLU branch, wide two-branch conv
    section, transformer self-attention, and an FFN — each with a residual.

    NOTE(review): the view() calls hard-code 512/2048 channels, so this
    layer assumes hidden_size == 512 — confirm at call sites.
    """

    def __init__(self, hidden_size, filter_size, dropout_rate):
        super(EncoderLayer, self).__init__()
        # dropout applied uniformly after each layer
        self.self_attention_norm = nn.LayerNorm(hidden_size, eps=1e-6)
        self.glu = GLU(hidden_size, dropout_rate)
        self.self_attention_dropout = nn.Dropout(dropout_rate)
        self.self_attention = MultiHeadAttention(hidden_size, dropout_rate)
        self.ffn_norm = nn.LayerNorm(hidden_size, eps=1e-6)
        self.ffn = FeedForwardNetwork(hidden_size, filter_size, dropout_rate)
        self.ffn_dropout = nn.Dropout(dropout_rate)
        self.conv1x1_2048 = nn.Linear(hidden_size, 2048)
        self.relu_a = nn.ReLU()
        self.relu_b = nn.ReLU()
        self.dropout_1 = nn.Dropout(dropout_rate)
        self.dropout_2 = nn.Dropout(dropout_rate)
        self.dropout_3 = nn.Dropout(dropout_rate)
        self.dropout_b = nn.Dropout(dropout_rate)
        self.conv3x1_256 = nn.Conv2d(in_channels=512, out_channels=256,
                                     kernel_size=(3, 1), stride=1, padding=(1, 0), bias=False)
        self.layer_norm = nn.LayerNorm(2048, eps=1e-6)
        self.sep_conv_9x1 = SeparableConv2d(in_channels=2048, out_channels=256,
                                            kernel_size=(9, 1), padding=(4, 0))
        initialize_weight(self.conv1x1_2048)
        initialize_weight(self.conv3x1_256)

    # 20 x 46 x 512
    # 24 x 36 x 512
    # batch x words? x emb
    def forward(self, x, mask):  # pylint: disable=arguments-differ
        batch_size = x.shape[0]
        sentence_len = x.shape[1]
        # --- GLU branch with residual ---
        y = self.self_attention_norm(x)  # emb
        y = self.glu(y)  # -> 512
        # BUG FIX: the original applied dropout to `x` here, which silently
        # discarded the GLU output; dropout belongs on the branch output.
        y = self.dropout_1(y)  # dropout, residual, norm
        x = x + y  # residual connection (shapes must match)
        # --- two-branch convolution section ---
        y = self.ffn_norm(x)
        # L branch: 1x1 "conv" (linear) widening to 2048 channels
        ya = self.conv1x1_2048(y)
        ya = self.relu_a(ya)
        # R branch: 3x1 conv down to 256 channels
        yb = self.conv3x1_256(y.view(1, 512, sentence_len, batch_size))
        yb = yb.view(batch_size, sentence_len, 256)
        yb = self.relu_b(yb)
        # Merge, note that channels of yb fit 8 times in ya
        yb = yb.repeat(1, 1, 8)
        y = ya + yb
        y = self.dropout_2(y)
        y = self.layer_norm(y)
        y = self.sep_conv_9x1(y.view(1, 2048, sentence_len, batch_size))  # -> 256 channels
        y = y.view(batch_size, sentence_len, 256)
        y = y.repeat(1, 1, 2)
        # Careful! dropout on something with 'duplicated memory'
        y = self.dropout_3(y)
        x = x + y
        # --- Transformer self-attention ---
        y = self.self_attention_norm(x)
        y = self.self_attention(y, y, y, mask)
        y = self.self_attention_dropout(y)
        x = x + y
        # --- feed-forward network ---
        y = self.ffn_norm(x)
        y = self.ffn(y)
        y = self.ffn_dropout(y)
        x = x + y
        return x
class DecoderLayer(nn.Module):
    """Evolved-Transformer decoder block: dual attention (16-head self +
    enc-dec), a two-branch separable-conv section, then the classic
    self-attention / enc-dec attention / FFN stack with a swish FFN inserted.

    NOTE(review): in the final section `y = self.ffn_norm(x)` is computed but
    layer1/layer2 are applied to the un-normalized `x`, and `x` is then
    overwritten before `x = x + y` adds back only the norm output.  This looks
    unintentional but is preserved as-is — confirm against the reference.
    """

    def __init__(self, hidden_size, filter_size, dropout_rate):
        super(DecoderLayer, self).__init__()
        # Dual-branch attention for the first section.
        self.self_attention_1 = MultiHeadAttention(hidden_size, dropout_rate, head_size=16)
        self.enc_dec_attention_1 = MultiHeadAttention(hidden_size, dropout_rate)
        self.self_attention_norm = nn.LayerNorm(hidden_size, eps=1e-6)
        self.self_attention = MultiHeadAttention(hidden_size, dropout_rate)
        self.self_attention_dropout = nn.Dropout(dropout_rate)
        self.enc_dec_attention_norm = nn.LayerNorm(hidden_size, eps=1e-6)
        self.enc_dec_attention = MultiHeadAttention(hidden_size, dropout_rate)
        self.enc_dec_attention_dropout = nn.Dropout(dropout_rate)
        self.ffn_norm = nn.LayerNorm(hidden_size, eps=1e-6)
        self.ffn = FeedForwardNetwork(hidden_size, filter_size, dropout_rate)
        self.ffn_dropout = nn.Dropout(dropout_rate)
        # Swish feed-forward pair used near the end of forward().
        self.layer1 = nn.Linear(hidden_size, filter_size)
        self.swish = Swish()
        self.dropout = nn.Dropout(dropout_rate)
        self.layer2 = nn.Linear(filter_size, hidden_size)
        initialize_weight(self.layer1)
        initialize_weight(self.layer2)
        # NOTE(review): conv1x1_2048 and conv3x1_256 are initialized below but
        # never used in forward() — dead parameters.
        self.conv1x1_2048 = nn.Linear(hidden_size, 2048)
        self.relu_a = nn.ReLU()
        self.relu_b = nn.ReLU()
        self.dropout_1 = nn.Dropout(dropout_rate)
        self.dropout_2 = nn.Dropout(dropout_rate)
        self.dropout_3 = nn.Dropout(dropout_rate)
        self.dropout_b = nn.Dropout(dropout_rate)
        self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=1e-6)
        self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=1e-6)
        self.layer_norm_3 = nn.LayerNorm(1024, eps=1e-6)
        self.conv3x1_256 = nn.Conv2d(in_channels=512, out_channels=256,
                                     kernel_size=(3, 1), stride=1, padding=(1, 0), bias=False)
        self.layer_norm = nn.LayerNorm(2048, eps=1e-6)
        self.sep_conv11x1_1024 = SeparableConv2d(in_channels=512, out_channels=1024,
                                                 kernel_size=(11, 1), padding=(5, 0))
        self.sep_conv_7x1_256 = SeparableConv2d(in_channels=512, out_channels=256,
                                                kernel_size=(7, 1), padding=(3, 0))
        self.sep_conv_7x1_512 = SeparableConv2d(in_channels=1024, out_channels=512,
                                                kernel_size=(7, 1), padding=(3, 0))
        initialize_weight(self.conv1x1_2048)
        initialize_weight(self.conv3x1_256)

    def forward(self, x, enc_output, self_mask, i_mask, cache):
        assert enc_output is not None
        batch_size = x.shape[0]
        sentence_len = x.shape[1]
        # --- dual attention branch ---
        y = self.layer_norm_1(x)
        # TODO: change from 8 -> 16 self attention heads
        ya = self.self_attention_1(y, y, y, self_mask)
        if enc_output is not None:
            yb = self.enc_dec_attention_1(y, enc_output, enc_output, i_mask, cache)
            y = ya + yb
        else:
            y = ya
        # -----
        x = x + y
        x = self.dropout_1(x)  # note: dropout applied to the summed residual
        y = self.layer_norm_2(x)
        # L branch: 11x1 separable conv widening to 1024 channels
        ya = self.sep_conv11x1_1024(y.view(1, 512, sentence_len, batch_size))
        ya = ya.view(batch_size, sentence_len, 1024)
        ya = self.relu_a(ya)
        # R branch: 7x1 separable conv down to 256 channels
        yb = self.sep_conv_7x1_256(y.view(1, 512, sentence_len, batch_size))
        yb = yb.view(batch_size, sentence_len, 256)
        # Merge, note that channels of yb fit 4 times in ya
        yb = yb.repeat(1, 1, 4)
        y = ya + yb
        y = self.dropout_2(y)
        y = self.layer_norm_3(y)
        y = self.sep_conv_7x1_512(y.view(1, 1024, sentence_len, batch_size))  # -> 512 channels
        y = y.view(batch_size, sentence_len, 512)
        y = self.dropout_3(y)
        x = x + y
        # --- original decoder start
        y = self.self_attention_norm(x)
        y = self.self_attention(y, y, y, self_mask)
        y = self.self_attention_dropout(y)
        x = x + y
        if enc_output is not None:
            y = self.enc_dec_attention_norm(x)
            y = self.enc_dec_attention(y, enc_output, enc_output, i_mask,
                                       cache)
            y = self.enc_dec_attention_dropout(y)
            x = x + y
        y = self.ffn_norm(x)
        # --- Inserted swish activation function (see class NOTE above)
        x = self.layer1(x)
        x = self.swish(x)
        x = self.dropout(x)
        x = self.layer2(x)
        # ---
        y = self.ffn_dropout(y)
        x = x + y
        return x
class Encoder(nn.Module):
    """Stack of EncoderLayers followed by a final LayerNorm."""

    def __init__(self, hidden_size, filter_size, dropout_rate, n_layers):
        super(Encoder, self).__init__()
        self.layers = nn.ModuleList(
            EncoderLayer(hidden_size, filter_size, dropout_rate)
            for _ in range(n_layers))
        self.last_norm = nn.LayerNorm(hidden_size, eps=1e-6)

    def forward(self, inputs, mask):
        out = inputs
        for layer in self.layers:
            out = layer(out, mask)
        return self.last_norm(out)
class Decoder(nn.Module):
    """Stack of DecoderLayers with per-layer cache slots and a final LayerNorm."""

    def __init__(self, hidden_size, filter_size, dropout_rate, n_layers):
        super(Decoder, self).__init__()
        self.layers = nn.ModuleList(
            DecoderLayer(hidden_size, filter_size, dropout_rate)
            for _ in range(n_layers))
        self.last_norm = nn.LayerNorm(hidden_size, eps=1e-6)

    def forward(self, targets, enc_output, i_mask, t_self_mask, cache):
        out = targets
        for idx, layer in enumerate(self.layers):
            # Each layer gets its own sub-dict of the decode cache (if any).
            layer_cache = cache.setdefault(idx, {}) if cache is not None else None
            out = layer(out, enc_output, t_self_mask, i_mask, layer_cache)
        return self.last_norm(out)
class EvolvedTransformer(nn.Module):
    """
    Evolved Transformer sequence-to-sequence model.

    Big/Deep models have dropout 0.3, all others with input emb 768 have 0.2
    Decoding beam size: 4, see So et al. 2019 for more details.
    """
    def __init__(self, i_vocab_size, t_vocab_size,
                 n_layers=6,
                 hidden_size=512,
                 filter_size=2048,
                 dropout_rate=0.2,
                 share_target_embedding=True,
                 has_inputs=True,
                 src_pad_idx=None,
                 trg_pad_idx=None):
        super(EvolvedTransformer, self).__init__()

        self.hidden_size = hidden_size
        # Embeddings are scaled by sqrt(hidden_size) before use.
        self.emb_scale = hidden_size ** 0.5
        self.has_inputs = has_inputs
        self.src_pad_idx = src_pad_idx
        self.trg_pad_idx = trg_pad_idx

        self.t_vocab_embedding = nn.Embedding(t_vocab_size, hidden_size)
        nn.init.normal_(self.t_vocab_embedding.weight, mean=0,
                        std=hidden_size**-0.5)
        self.t_emb_dropout = nn.Dropout(dropout_rate)
        self.decoder = Decoder(hidden_size, filter_size,
                               dropout_rate, n_layers)

        if has_inputs:
            # Source embedding is tied to the target embedding unless
            # share_target_embedding is False.
            if not share_target_embedding:
                self.i_vocab_embedding = nn.Embedding(i_vocab_size,
                                                      hidden_size)
                nn.init.normal_(self.i_vocab_embedding.weight, mean=0,
                                std=hidden_size**-0.5)
            else:
                self.i_vocab_embedding = self.t_vocab_embedding

            self.i_emb_dropout = nn.Dropout(dropout_rate)
            self.encoder = Encoder(hidden_size, filter_size,
                                   dropout_rate, n_layers)

        # For positional encoding (sinusoidal timescales).
        num_timescales = self.hidden_size // 2
        max_timescale = 10000.0
        min_timescale = 1.0
        log_timescale_increment = (
            math.log(float(max_timescale) / float(min_timescale)) /
            max(num_timescales - 1, 1))
        inv_timescales = min_timescale * torch.exp(
            torch.arange(num_timescales, dtype=torch.float32) *
            -log_timescale_increment)
        # Registered as a buffer so it moves with the module (.to/.cuda).
        self.register_buffer('inv_timescales', inv_timescales)

    def forward(self, inputs, targets):
        """Encode *inputs* (when has_inputs) and decode *targets*, returning
        logits over the target vocabulary."""
        enc_output, i_mask = None, None
        if self.has_inputs:
            i_mask = utils.create_pad_mask(inputs, self.src_pad_idx)
            enc_output = self.encode(inputs, i_mask)

        t_mask = utils.create_pad_mask(targets, self.trg_pad_idx)
        target_size = targets.size()[1]
        t_self_mask = utils.create_trg_self_mask(target_size,
                                                 device=targets.device)
        return self.decode(targets, enc_output, i_mask, t_self_mask, t_mask)

    def encode(self, inputs, i_mask):
        # Input embedding; padding positions are zeroed before scaling.
        input_embedded = self.i_vocab_embedding(inputs)
        input_embedded.masked_fill_(i_mask.squeeze(1).unsqueeze(-1), 0)
        input_embedded *= self.emb_scale
        input_embedded += self.get_position_encoding(inputs)
        input_embedded = self.i_emb_dropout(input_embedded)

        return self.encoder(input_embedded, i_mask)

    def decode(self, targets, enc_output, i_mask, t_self_mask, t_mask,
               cache=None):
        # target embedding
        target_embedded = self.t_vocab_embedding(targets)
        target_embedded.masked_fill_(t_mask.squeeze(1).unsqueeze(-1), 0)

        # Shifting: drop the last step and left-pad by one so position t only
        # sees tokens before t (teacher forcing).
        target_embedded = target_embedded[:, :-1]
        target_embedded = F.pad(target_embedded, (0, 0, 1, 0))

        target_embedded *= self.emb_scale
        target_embedded += self.get_position_encoding(targets)
        target_embedded = self.t_emb_dropout(target_embedded)

        # decoder
        decoder_output = self.decoder(target_embedded, enc_output, i_mask,
                                      t_self_mask, cache)
        # linear: project onto the (tied) embedding matrix to get logits.
        output = torch.matmul(decoder_output,
                              self.t_vocab_embedding.weight.transpose(0, 1))
        return output

    def get_position_encoding(self, x):
        """Sinusoidal position encoding, shape [1, seq_len, hidden_size]."""
        max_length = x.size()[1]
        position = torch.arange(max_length, dtype=torch.float32,
                                device=x.device)
        scaled_time = position.unsqueeze(1) * self.inv_timescales.unsqueeze(0)
        signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],
                           dim=1)
        # Pad one zero column when hidden_size is odd.
        signal = F.pad(signal, (0, 0, 0, self.hidden_size % 2))
        signal = signal.view(1, max_length, self.hidden_size)
        return signal
|
# Exercise: write (and test) the function som() with one parameter,
# getallenLijst.  Assume it is a list of integers; the return value must be
# the sum of the numbers in the list.
# Tip: see the list functions in Perkovic, p. 28.
def som(getallenLijst):
    """Return the sum of the integers in *getallenLijst*."""
    totaal = 0
    for getal in getallenLijst:
        totaal += getal
    return totaal


print(som([1, 2, 3]))
# Of course the builtin sum() already does this, so in practice you would
# never hand-roll that function:
def som(getallenLijst):
    """Return the sum of *getallenLijst* via the builtin sum()."""
    totaal = sum(getallenLijst)
    return totaal


print(som([1, 2, 3]))
|
#!/usr/bin/python3
import smtplib
from string import Template
from email.mime.base import MIMEBase
from email import encoders
import sys
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Function to read the contacts from a given contact file and return a
# list of names and email addresses
def get_contacts(filename):
    """Read contacts from *filename* and return (names, emails).

    Each line is expected to be 'NAME EMAIL'.  Blank or malformed lines are
    skipped (the original split each line twice and raised IndexError on a
    blank line).
    """
    names = []
    emails = []
    with open(filename, mode='r', encoding='utf-8') as contacts_file:
        for a_contact in contacts_file:
            parts = a_contact.split()
            if len(parts) < 2:
                # Skip blank/malformed lines instead of crashing.
                continue
            names.append(parts[0])
            emails.append(parts[1])
    return names, emails
# Credential placeholders — fill in before use.  Never commit real secrets;
# prefer environment variables or a config file outside version control.
MY_ADDRESS='<EMAIL ADDRESS>'
PASSWORD='<PASSWORD>'
def main():
    """Send one templated email, with a single attachment, to every contact."""
    names, emails = get_contacts('<TXT FILE WITH CONTACT LIST IN FORMAT NAME EMAIL>')  # read contacts

    print("\nLogging into SMTP server... ", end='', flush=True)
    # BUG FIX: the original used the literal placeholder `<SERVER PORT>`,
    # which is a syntax error.  587 is the conventional STARTTLS port —
    # replace host/port with your real server settings.
    s = smtplib.SMTP('<SERVER ADDRESS>', 587)
    try:
        # set up the SMTP server
        s.ehlo_or_helo_if_needed()
        s.starttls()
        s.ehlo()
        s.login(MY_ADDRESS, PASSWORD)
        print("Successful Login!")
    except smtplib.SMTPException:
        # The original fell through and tried to send without a session;
        # bail out instead.
        print("Something went wrong")
        s.quit()
        return

    # Build the message.
    print("Creating Message... ", end='', flush=True)
    msg = MIMEMultipart()
    message = '<MESSAGE TO WRITE>'
    msg['From'] = MY_ADDRESS
    msg['To'] = ",".join(emails)
    msg['Subject'] = "<SUBJECT>"
    msg.attach(MIMEText(message, 'plain'))
    print("Message Created and Added!")

    # Attach the report file; the with-block guarantees the handle is closed
    # (the original leaked the open file object).
    print("Creating Attachment... ", end='', flush=True)
    filename = "<FILENAME OF ATTACHMENT>"
    part = MIMEBase('application', 'octet-stream')
    with open("<ATTACHMENT TO ADD>", "rb") as attachment:
        part.set_payload(attachment.read())
    encoders.encode_base64(part)
    part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
    msg.attach(part)
    print('Attachment Created and Added!')

    # send the message via the server set up earlier.
    print('Sending Mail... ', end="", flush=True)
    s.send_message(msg)
    print('Mail Sent!')
    del msg

    # Terminate the SMTP session and close the connection
    s.quit()
if __name__ == '__main__':
    # Script entry point.
    main()
|
#!/usr/bin/env python3
from gzip import open
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
def prepare_array(filename, fraction):
    """Return the first *fraction* of the lines of a gzipped file as a
    numpy array.  Note: `open` here is gzip.open (imported at file top),
    so lines come back as bytes."""
    with open(filename) as handle:
        lines = handle.readlines()
    keep = int(len(lines) * fraction)
    return np.asarray(lines[:keep])
def spam_detection(random_state=0, fraction=1.0):
    """Train and evaluate a MultinomialNB ham-vs-spam classifier.

    Returns (accuracy, test_sample_size, misclassified_count).
    """
    # Load the two corpora (ham labelled 0, spam labelled 1).
    ham = prepare_array('src/ham.txt.gz', fraction)
    spam = prepare_array('src/spam.txt.gz', fraction)
    ham_label = np.zeros((len(ham), 1))
    spam_label = np.ones((len(spam), 1))
    label = np.concatenate((ham_label, spam_label))
    joined_list = np.concatenate((ham, spam))

    # Bag-of-words features, 75/25 train/test split.
    feature_matrix = CountVectorizer().fit_transform(joined_list)
    matrix_train, matrix_test, label_train, label_test = train_test_split(
        feature_matrix, label, train_size=0.75, random_state=random_state)

    model = MultinomialNB()
    model.fit(matrix_train, np.ravel(label_train))
    prediction = model.predict(matrix_test)

    correct = accuracy_score(label_test, prediction, normalize=False)
    test_sample_size = len(label_test)
    misclassified = test_sample_size - correct
    return correct / test_sample_size, test_sample_size, misclassified
def main():
    """Run the spam detector once and report its accuracy."""
    # BUG FIX: the original called spam_detection() twice, discarding the
    # first (expensive) result.  Also fixed the "miclassified" typo.
    accuracy, total, misclassified = spam_detection()
    print("Accuracy score:", accuracy)
    print(f"{misclassified} messages misclassified out of {total}")


if __name__ == "__main__":
    main()
|
# Problem 2
lst = ["a", "b", "10", "bab", "a"]
val = "a"


def find_all(lst, val):
    """Print and return the indices at which *val* occurs in *lst*.

    The original only printed the list; returning it as well makes the
    function usable by callers (backward compatible: output is unchanged).
    """
    matches = [i for i, item in enumerate(lst) if item == val]
    print(matches)
    return matches


(find_all(lst, val))
|
from django.urls import path, re_path
from board.views import *
from django.contrib import admin
# URL configuration for the board app (namespaced as 'board').
app_name = 'board'
urlpatterns = [
    # Example: /board/  — list view
    path('', BoardView.as_view(), name='board'),
    # Example: /board/7  — detail view
    path('<int:pk>', BoardViewDV.as_view(), name='details'),
    # Example: /board/add
    path('add/', BoardCreateView.as_view(), name="add"),
    # Example: /bookmark/change/
    # path('change/', BoardChange.as_view(), name="change"),
    # # Example: /bookmark/99/update/
    path('<int:pk>/update/', BoardUpdateView.as_view(), name="update"),
    # Example: /bookmark/99/delete/
    path('<int:pk>/delete/', BoardDeleteView.as_view(), name="delete"),
]
from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog post written by a Django auth user."""

    # BUG FIX: on_delete is a required ForeignKey argument since Django 2.0;
    # CASCADE deletes a user's posts when the user is deleted.
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    published_date = models.DateTimeField(blank=True, null=True)

    def publish(self):
        """Stamp the post with the current time and persist it."""
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        return self.title
# Model definition notes (reconstructed from mojibake comments, several of
# which had lost their leading '#' and would have broken the file):
# - Inheriting from models.Model tells Django that Post is a model to be
#   stored in the database.
# - Attributes: title, text, created_date, published_date, author; each
#   field declares what kind of data it holds.
# - models.CharField: length-limited text, used for short strings such as
#   a post title.
# - models.TextField: unbounded text, suitable for the post body.
# - models.DateTimeField: a date and time.
# - models.ForeignKey: a link to another model.
from CalvertScreen import Screen
from CalvertObjects import *
from CalvertGame import Game
import random
pygame.init()

### create screen
# Sprite groups drawn back-to-front: scenery first, then actors.
actors = pygame.sprite.Group()
scenery = pygame.sprite.Group()
groups = [scenery, actors]
screen_width = 700
screen_height = 500
screen = Screen(screen_width, screen_height, groups)
class Zombie(OctagonallyBasedCharacter):
    """Wandering zombie with move/idle/attack/death sprite animations."""
    ATTACK = 2
    DIE = 3
    moveAnimation = OctagonalAnimation("exampleImages/zombie", OctagonallyBasedCharacter.MOVE, 8, 36, 4, 8, [2, 3, 4, 5, 6, 7, 0, 1])
    moveAnimation.isRecurring = True
    idleAnimation = OctagonalAnimation("exampleImages/zombie", OctagonallyBasedCharacter.IDLE, 8, 36, 0, 4, [2, 3, 4, 5, 6, 7, 0, 1])
    idleAnimation.isRecurring = True
    attackAnimation = OctagonalAnimation("exampleImages/zombie", ATTACK, 8, 36, 12, 10, [2, 3, 4, 5, 6, 7, 0, 1])
    attackAnimation.isRecurring = False
    deathAnimation = OctagonalAnimation("exampleImages/zombie", DIE, 8, 36, 22, 2, [2, 3, 4, 5, 6, 7, 0, 1])
    # BUG FIX: deathAnimation was created but never added to the list, so
    # die() (index DIE == 3) raised IndexError on the next update.
    animations = [idleAnimation, moveAnimation, attackAnimation, deathAnimation]

    def __init__(self, x, y):
        super().__init__(Zombie.animations, x, y)

    def wander(self):
        """Half the time pick a random direction; otherwise stand idle."""
        if random.randint(1, 2) == 1:
            self.move(random.randint(0, 7))
        else:
            self.currentAnimation = OctagonallyBasedCharacter.IDLE
            self.xVelocity = 0
            self.yVelocity = 0

    def munchOnNearThing(self, munchee):
        self.currentAnimation = Zombie.ATTACK

    def die(self):
        self.currentAnimation = Zombie.DIE

    def update(self):
        super().update()
        if random.randint(1, 20) == 1:
            self.wander()
        if self.animations[self.currentAnimation].isFinished:
            # BUG FIX: the original cleared isFinished and reset to IDLE
            # first, so its later "remove dead zombie" check could never
            # fire.  Remove finished dead zombies before resetting.
            if self.currentAnimation == Zombie.DIE:
                actors.remove(self)
                return
            self.animations[self.currentAnimation].isFinished = False
            self.currentAnimation = Zombie.IDLE
            self.currentFrame = 0
class Cowboy(OctagonallyBasedCharacter):
    """Player character; firing kills zombies within 25px on either axis."""
    ATTACK = 2
    moveAnimation = OctagonalAnimation("exampleImages/cowboy", OctagonallyBasedCharacter.MOVE, 10, 14, 1, 10, [5, 4, 3, 2, 9, 8, 7, 6])
    moveAnimation.isRecurring = True
    idleAnimation = OctagonalAnimation("exampleImages/cowboy", OctagonallyBasedCharacter.IDLE, 10, 14, 0, 1, [5, 4, 3, 2, 9, 8, 7, 6])
    idleAnimation.isRecurring = True
    attackAnimation = OctagonalAnimation("exampleImages/cowboy", ATTACK, 10, 14, 10, 3, [5, 4, 3, 2, 9, 8, 7, 6])
    attackAnimation.isRecurring = False
    animations = [idleAnimation, moveAnimation, attackAnimation]

    def __init__(self):
        super().__init__(Cowboy.animations, 100, 100)
        self.width = 50
        self.height = 50

    def update(self):
        super().update()
        if self.animations[self.currentAnimation].isFinished:
            self.currentAnimation = Cowboy.IDLE

    def fire(self):
        self.currentAnimation = Cowboy.ATTACK
        self.currentFrame = 1
        self.xVelocity = 0
        self.yVelocity = 0
        # BUG FIX: this kill loop lived in __init__, where `actors` is still
        # empty and the global `o` is not yet bound, and it called the
        # nonexistent math.abs / math.ab.  Moved here (firing is when
        # zombies should die), using the builtin abs() and `self`.
        for zombie in actors:
            if isinstance(zombie, Zombie):
                if abs(zombie.xPos - self.xPos) < 25 or abs(zombie.yPos - self.yPos) < 25:
                    zombie.die()
class Rocket(AnimatedObject):
    """Lunar-lander style rocket: gravity pulls it down, the thruster pushes
    along its facing angle, and it explodes on a too-fast landing."""
    GRAVITY = 0.00001
    THRUST = -0.00002
    MAXIMUM_VELOCITY_FOR_SAFE_LANDING = 2
    # Animation Constants (indices into self.animations)
    NOT_FIRING = 0
    FIRING = 1
    EXPLODING = 2

    def createAnimations(self):
        # Append order must match the animation constants above.
        firingAnimation = Animation("exampleImages/rocketFiring", 1, Rocket.FIRING)
        not_firingAnimation = Animation("exampleImages/rocket", 1, Rocket.NOT_FIRING)
        exploding = SingleAnimationFromSpriteSheet("exampleImages/explosion", Rocket.EXPLODING, 5, 5, 25)
        self.animations.append(not_firingAnimation)
        self.animations.append(firingAnimation)
        self.animations.append(exploding)

    def __init__(self):
        self.animations = []
        self.createAnimations()
        super().__init__(self.animations, screen_width * .5, screen_height * .1)
        self.yVelocity = 0
        self.xVelocity = 0
        self.width = 400
        self.height = 400
        self.angle = 90  # presumes we are starting facing upward.
        # so some of this is dealing in radians and some in degrees :(
        self.xAcceleration = 0
        self.yAcceleration = 0
        self.isLanded = False
        self.isFiring = False

    def update(self):
        super().update()
        if not self.isLanded:
            # self.angle += 2
            self.xVelocity += self.xAcceleration
            self.yVelocity += self.yAcceleration
            self.xVelocity = round(self.xVelocity, 4)
            self.yVelocity = round(self.yVelocity, 4)
            self.xPos += self.xVelocity
            self.yPos += self.yVelocity
            self.applyEffectOfGravity()
            if self.currentAnimation == Rocket.FIRING:
                self.fireThruster()
            self.handleLanding()

    def applyEffectOfGravity(self):
        self.yAcceleration += Rocket.GRAVITY

    def fireThruster(self):
        if not self.isLanded and not self.currentAnimation == Rocket.EXPLODING:
            changeInYAcceleration = math.sin(self.angle * math.pi / 180) * Rocket.THRUST  # this may pose problems as angle may be measured in degrees and this may
            self.yAcceleration += changeInYAcceleration
            changeInXAcceleration = math.cos(self.angle * math.pi / 180) * Rocket.THRUST  # as above.
            self.xAcceleration -= changeInXAcceleration
            self.currentAnimation = Rocket.FIRING

    def turnOffThruster(self):
        if not self.isLanded and not self.currentAnimation == Rocket.EXPLODING:
            self.currentAnimation = Rocket.NOT_FIRING

    def handleLanding(self):
        # NOTE(review): compares signed yVelocity, not abs() — a fast upward
        # pass over the landing line would count as "safe"; confirm intent.
        if self.rect.y > .55 * screen_height:
            if self.yVelocity < Rocket.MAXIMUM_VELOCITY_FOR_SAFE_LANDING:
                self.isLanded = True
            else:
                self.currentAnimation = Rocket.EXPLODING
                print("boom")
        # BUG FIX: currentAnimation is an int index; the original read
        # `self.currentAnimation.isFinished`, which raises AttributeError.
        # Remove the rocket once the explosion animation has played out.
        if self.currentAnimation == Rocket.EXPLODING and self.animations[self.currentAnimation].isFinished:
            actors.remove(self)
            groupsToBeExited = []
            for group in screen.drawnObjectGroups:
                for item in group:
                    if item == self:
                        groupsToBeExited.append(group)
            for group in groupsToBeExited:
                group.remove(self)

    def AI_ThrustBot(self):
        # Simple autopilot: fire the thruster whenever falling too fast.
        if self.yVelocity > 1.5:
            self.fireThruster()
#r = Rocket()
# The player-controlled cowboy.
o = Cowboy()
actors.add(o)


def loop():
    """Per-frame callback handed to Game."""
    handleEvents()
def initializeGame():
    """Spawn the zombies and set up the background scenery."""
    global r
    #actors.add(r)
    # Scatter 20 zombies at random positions on screen.
    for i in range(20):
        actors.add(Zombie(random.randint(1, screen_width), random.randint(1, screen_height)))
    backgroundImage = pygame.image.load("exampleImages/moonSurface.jpg")
    background = DrawableObject(backgroundImage, screen_width / 2, screen_height / 2)
    background.width = screen_width
    background.height = screen_height
    scenery.add(background)
def handleEvents():
    """Resolve zombie attacks and poll the keyboard once per frame."""
    # this is a good example of a time when it would be better to write a
    # function than hard code each point.
    # Zombies bite any other actor within 50px.
    for actor in actors:
        for otherActor in actors:
            if not actor == otherActor and isinstance(actor, Zombie) and actor.distanceFrom(otherActor) < 50:
                actor.munchOnNearThing(otherActor)
    # Poll the keyboard once (the original re-polled for every key check).
    pressed = pygame.key.get_pressed()
    if pressed[pygame.K_SPACE] != 0:
        o.fire()
    # Table-driven key -> direction dispatch; several held keys all apply,
    # exactly as in the original chain of independent ifs.
    key_to_direction = (
        (pygame.K_a, OctagonallyBasedCharacter.WEST),
        (pygame.K_d, OctagonallyBasedCharacter.EAST),
        (pygame.K_q, OctagonallyBasedCharacter.NORTH_WEST),
        (pygame.K_w, OctagonallyBasedCharacter.NORTH),
        (pygame.K_e, OctagonallyBasedCharacter.NORTH_EAST),
        (pygame.K_c, OctagonallyBasedCharacter.SOUTH_EAST),
        (pygame.K_x, OctagonallyBasedCharacter.SOUTH),
        (pygame.K_z, OctagonallyBasedCharacter.SOUTH_WEST),
    )
    for key, direction in key_to_direction:
        if pressed[key] != 0:
            o.move(direction)
# Hand control over to the game loop.
game = Game(screen, loop, initializeGame)
game.run()
|
# Legacy standalone-script bootstrap (setup_environ was removed in Django
# 1.6): put this directory on sys.path and configure settings before any
# model imports below.
from django.core.management import setup_environ
import os
import sys
sys.path.append(os.path.dirname(__file__))
import settings
setup_environ(settings)
#==================================#
from mybioregions.models import *
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.auth.models import User, Group
from madrona.analysistools.grass import Grass
from madrona.analysistools.utils import linear_regression
import random
import threading
import time
# Weight fields every error case must provide; MyThread.run() defaults any
# missing one to 50.
fields = ['temp','precip','veg','lang','elev']
user = User.objects.get(username='demo')
fldr, created = Folder.objects.get_or_create(name="Errors", user=user)
# Known-problematic bioregion inputs to reproduce (note: neither case
# defines 'elev', exercising the defaulting logic).
errors = [
    {
        'name': 'tim-test-a',
        'x': -119.80364990234375, 'y': 51.49445343017578,
        'marine': 10, 'veg': 1, 'temp': 50, 'lang': 47, 'precip': 0, 'size': 'S'
    },
    {
        'name': 'tim-test-b',
        'x': -119.80364990234375, 'y': 51.49445343017578,
        'marine': 0, 'veg': 1, 'temp': 50, 'lang': 47, 'precip': 0, 'size': 'S'
    },
]
def delete():
    """Delete the demo user's "Errors" folder(s), cascading to contents."""
    Folder.objects.filter(user=user, name="Errors").delete()
class MyThread (threading.Thread):
    """Worker thread that builds one MyBioregion from an error-case dict.

    NOTE: this file is Python 2 (print statements, dict.has_key).
    """
    # Override Thread's __init__ method to accept the parameters needed:
    def __init__ ( self, e ):
        # e: dict of bioregion inputs (name, x, y, weights, size).
        self.e = e
        threading.Thread.__init__ ( self )
    def run (self):
        e = self.e
        print e
        # Default any weight field the case does not define.
        for f in fields:
            if not e.has_key(f):
                e[f] = 50
        # Starting point as a WGS84 (SRID 4326) point geometry.
        g = GEOSGeometry('SRID=4326;POINT(%s %s)' % (e['x'],e['y']))
        bio = MyBioregion(user=user,
                name=e['name'],
                input_temp_weight = e['temp'],
                input_precip_weight = e['precip'],
                input_biomass_weight = e['veg'],
                input_lang_weight = e['lang'],
                input_elev_weight = e['elev'],
                input_marine_weight = e['marine'],
                input_starting_point = g,
                input_bioregion_size= e['size']
                )
        # Saving triggers the (potentially slow) analysis run.
        bio.save()
        bio.add_to_collection(fldr)
        print "!!! DONE", bio
        del bio
def main():
    """Launch five waves of worker threads, one per error case, with a
    small stagger between thread starts."""
    for wave in range(5):
        for case in errors:
            MyThread(case).start()
            time.sleep(0.01)
if __name__ == '__main__':
    # Clear any previous "Errors" folder, then spawn the test threads.
    delete()
    main()
|
from sklearn.datasets import load_boston
from dnpy.layers import *
from dnpy.net import *
from dnpy.optimizers import *
from dnpy import metrics, losses
# For debugging: fix the RNG seed so runs are reproducible.
np.random.seed(1)
def main():
    """Train a tiny Dense/Tanh network on XOR-style targets with Hinge loss
    and evaluate it on the same four points."""
    # Get data: inputs carry a constant bias column; targets are in {-1, 1}
    # as required by the hinge loss.
    X = np.array([[0, 0, 1],
                  [0, 1, 1],
                  [1, 0, 1],
                  [1, 1, 1]])
    Y = np.array([[-1],
                  [1],
                  [1],
                  [-1]])
    x_train, y_train = X, Y
    x_test, y_test = X, Y

    # Params *********************************
    batch_size = len(x_train)  # full-batch training
    epochs = 60000

    # Define architecture: 3 -> 4 (tanh) -> 1, linear output
    l_in = Input(shape=x_train[0].shape)
    l = l_in
    l = Dense(l, 4)
    l = Tanh(l)
    l = Dense(l, 1)
    l_out = l

    # Build network
    mymodel = Net()
    mymodel.build(
        l_in=[l_in],
        l_out=[l_out],
        optimizer=SGD(lr=1.0, momentum=0.9, nesterov=False),
        losses=[losses.Hinge()],
        metrics=[[metrics.BinaryAccuracy(threshold=0.0), metrics.MAE()]],
        debug=False
    )

    # Print model
    mymodel.summary()

    # Train (train set doubles as test set for this toy problem)
    mymodel.fit([x_train], [y_train],
                x_test=[x_test], y_test=[y_test],
                batch_size=batch_size, epochs=epochs,
                evaluate_epoch=True,
                print_rate=100)

    # Evaluate
    m = mymodel.evaluate([x_test], [y_test], batch_size=batch_size)


if __name__ == "__main__":
    main()
|
import gensim
from gensim.corpora.wikicorpus import WikiCorpus
from gensim.models import Phrases, TfidfModel
from gensim.models.phrases import Phraser
from gensim.test.utils import datapath
from gensim.models.word2vec import Word2Vec, Text8Corpus
import json
import logging
class ModelWord2Vec:
    """Thin wrapper around a gensim Word2Vec model.

    *path* is either the corpus to train on or a saved model to load.
    (Docstrings translated from Italian.)
    """
    def __init__(self, path, existModel=False):
        """If a trained model already exists on disk, pass existModel=True to
        load it from *path*; otherwise train a fresh model on the corpus at *path*."""
        if existModel == False:
            self.model = self.createModel(path)
        else:
            self.model = Word2Vec.load(path)
    def createModel(self, pathCorpus, min_count=5, size=300, workers=8, window=5, iter=5, sg=1, negative=10):
        """Train a skip-gram Word2Vec model on a Text8-format corpus.

        NOTE(review): the `size`/`iter` keyword names match gensim < 4.0
        (renamed vector_size/epochs in gensim 4); confirm the pinned version.
        """
        sentences = Text8Corpus(datapath(pathCorpus))
        model = Word2Vec(sentences,
                min_count=min_count,  # Ignore words that appear less than this
                size=size,  # Dimensionality of word embeddings
                workers=workers,  # Number of processors
                window=window,  # Context window for words during training
                iter=iter,  # Number of epochs training over corpus
                sg=sg,  # skip gram true
                negative=negative)
        return model
    def predict(self, listOfWord, probability=0, topn=50):
        """Predict output words for the in-vocabulary subset of *listOfWord*,
        keeping only predictions with probability above *probability*."""
        listOfWord = self.checkList(listOfWord)
        if listOfWord == []:
            return []
        else:
            predict = self.model.predict_output_word(listOfWord, topn=topn)
            return list(filter(lambda x: x[1] > probability, predict))
    def train(self, pathCorpus, epochs=60, compute_loss=True):
        """Continue training the current model on a Text8-format corpus."""
        sentences = Text8Corpus(datapath(pathCorpus))
        self.model.train(sentences, epochs=epochs, total_examples=self.model.corpus_count, compute_loss=compute_loss)
    def trainMoreSentence(self, moreSentence, epochs=60, compute_loss=True):
        """Extend the vocabulary with *moreSentence*, then train on it."""
        self.model.build_vocab(moreSentence, update=True)
        self.model.train(moreSentence, epochs=epochs, total_examples=self.model.corpus_count, compute_loss=compute_loss)
    def checkList(self, listGoodW):
        """Return only the words already present in the model vocabulary.

        NOTE(review): rebuilds list(self.model.wv.vocab.keys()) once per word,
        i.e. O(len(listGoodW) * vocab); a direct `el in self.model.wv.vocab`
        membership test would be far cheaper.
        """
        newList = []
        for el in listGoodW:
            if el in list(self.model.wv.vocab.keys()):
                newList.append(el)
        return newList
"""funzioni utili"""
def cleanTextBadWord(text, key, badWord):
    """Clean the messages stored in a JSON file, dropping bad-word messages.

    Parameters
    ----------
    text : str
        Path to a JSON file whose *key* entry holds a list of messages.
    key : str
        Key under which the message list is stored.
    badWord : str
        Path to a JSON file with a 'bad' entry listing banned words.

    Returns
    -------
    list[str]
        Messages with whitespace normalized, punctuation stripped, and every
        message containing a bad word removed.

    (Original docstring was in Italian.)
    """
    from string import punctuation
    with open(text) as file:
        messages = json.load(file)
    with open(badWord) as file:
        bad_word = json.load(file)
    # BUG FIX: the original called the dict — clean_text(key) — which raises
    # TypeError; a key lookup was clearly intended.
    messages = messages.get(key)
    # Collapse runs of whitespace to single spaces.
    tmp = [" ".join(m.split()) for m in messages]
    # Strip punctuation characters.
    clean_text = [''.join(c for c in m if c not in punctuation) for m in tmp]
    bad_word = bad_word.get('bad')
    # BUG FIX: the original popped items from clean_text while iterating it,
    # which silently skips elements; build a filtered list instead.
    clean_text = [m for m in clean_text if not any(b in m for b in bad_word)]
    return clean_text
def createCorpusFromCleanText(clean_text, name='clean_text.txt'):
    """Write the cleaned messages to *name*, one message per line.

    BUG FIX: the original used f.writelines(m), which appends the messages
    back-to-back with no separator, fusing the last word of one message with
    the first word of the next in the resulting corpus file.
    """
    with open(name, 'w') as f:
        for m in clean_text:
            f.write(m + '\n')
|
"""interface functions for model_fitting of CNNs"""
from itertools import product
from collections import OrderedDict
from copy import deepcopy
import time
import json
import numpy as np
import h5py
from .configs import cnn_opt, cnn_arch, cnn_init
from .cnn import CNN
from .training_aux import train_one_case, count_params
# sets of opt configs to use.
# based on
# https://github.com/leelabcnbc/tang_jcompneuro_revision/blob/master/results_ipynb/single_neuron_exploration/cnn_initial_exploration.ipynb
def save_one_model(model: CNN, group: h5py.Group):
    """Dump every named parameter of *model* into the HDF5 *group*.

    Parameters not yet present are written as datasets; parameters already
    stored are instead checked for exact equality with the saved copy.
    """
    for x, y in model.named_parameters():
        print(x, y.size())
        data_y = y.data.cpu().numpy()
        if x not in group:
            group.create_dataset(x, data=data_y)
        else:
            # Re-saving: verify the stored copy matches bit-for-bit.
            data_y_ref = group[x][...]
            assert data_y.shape == data_y_ref.shape
            # print(abs(data_y - data_y_ref).max())
            assert np.array_equal(data_y, data_y_ref)
            # for some reason. I can't use array_equal, even if cudnn is disabled.
            # NO such thing. just my bug in the code.
            # assert abs(data_y - data_y_ref).max() < 1e-4
    group.file.flush()
def _opt_configs_to_explore_1layer(num_layer=1):
    """Set of opt configs to sweep (conv L2 strength x optimizer x loss).

    based on
    https://github.com/leelabcnbc/tang_jcompneuro_revision/blob/master/results_ipynb/single_neuron_exploration/cnn_initial_exploration.ipynb
    """
    def _l2_only(strength):
        # Pure L2 on weights; biases and L1 unregularized.
        return cnn_opt.generate_one_layer_opt_config(l1=0.0, l2=strength,
                                                     l1_bias=0.0, l2_bias=0.0)

    # One layer-config per conv layer, for each candidate L2 strength.
    conv_dict = OrderedDict()
    for tag, strength in (('1e-3L2', 0.001), ('1e-4L2', 0.0001)):
        conv_dict[tag] = [_l2_only(strength) for _ in range(num_layer)]

    fc_dict = OrderedDict([('1e-3L2', _l2_only(0.001))])

    opt_dict = OrderedDict()
    opt_dict['sgd'] = cnn_opt.generate_one_optimizer_config('sgd')
    opt_dict['adam002'] = cnn_opt.generate_one_optimizer_config('adam', lr=0.002)

    # Only MSE; poisson would need a final nonlinearity.
    loss_dict = OrderedDict([('mse', 'mse')])

    result_dict = OrderedDict()
    combos = product(conv_dict.items(), fc_dict.items(),
                     opt_dict.items(), loss_dict.items())
    for (conv_name, conv_val), (fc_name, fc_val), (opt_name, opt_val), (loss_name, loss_val) in combos:
        key = f'{conv_name}_{fc_name}_{opt_name}_{loss_name}'
        result_dict[key] = cnn_opt.generate_one_opt_config(
            deepcopy(conv_val), deepcopy(fc_val), loss_val, deepcopy(opt_val)
        )
    return result_dict
def gen_on_conv_config_k9(num_channel, pool_config, bn=False):
    # One 9x9 conv layer config with the given channel count, optional batch
    # norm, and the supplied pooling config.
    return cnn_arch.generate_one_conv_config(
        9, num_channel, bn=bn, pool=pool_config
    )
def _model_configs_to_explore_1layer_bn():
    """set of archs to use.
    based on
    # https://github.com/leelabcnbc/tang_jcompneuro_revision/blob/master/results_ipynb/single_neuron_exploration/cnn_initial_exploration.ipynb
    """
    # BN sweep is restricted to the 9-channel baseline.
    num_channel_list = (
        9,
    )
    # Plain fully-connected readout (no factored fc, no dropout arg).
    fc_config = cnn_arch.generate_one_fc_config(False, None)
    pool_config_max = cnn_arch.generate_one_pool_config(6, 2)
    # None as the pool name means "default (max) pooling" in the naming scheme.
    pool_dict = [(None, pool_config_max), ]
    channel_detail = 9
    result_dict = OrderedDict()
    for num_channel, (pool_name, pool_config), act_fn in product(num_channel_list,
                                                                 pool_dict, ('relu',)):
        if pool_name is None:
            name_this = f'b_bn.{num_channel}'
        else:
            assert isinstance(pool_name, str)
            name_this = f'b_bn.{num_channel}_{pool_name}'
        if act_fn is None:
            name_this = f'{name_this}_linear'
        elif act_fn != 'relu':
            name_this = f'{name_this}_{act_fn}'
        # b means baseline
        # Non-detail channel counts only get the plain baseline variant.
        if num_channel != channel_detail and name_this != f'b_bn.{num_channel}':
            # I won't check it.
            continue
        result_dict[name_this] = cnn_arch.generate_one_config(
            [gen_on_conv_config_k9(num_channel, deepcopy(pool_config), bn=True),
             ], deepcopy(fc_config), act_fn, True
        )
    return result_dict
def _model_configs_to_explore_1layer():
    """set of archs to use.
    based on
    # https://github.com/leelabcnbc/tang_jcompneuro_revision/blob/master/results_ipynb/single_neuron_exploration/cnn_initial_exploration.ipynb
    """
    # Channel counts to sweep; only channel_detail (9) gets the full
    # pooling/activation detail sweep — see the filter inside the loop.
    num_channel_list = (
        1, 2,
        3,
        4, 5,
        7, 8,
        10, 11,
        6,
        9,
        12,
        15,
        18,
    )
    fc_config = cnn_arch.generate_one_fc_config(False, None)
    pool_config_max = cnn_arch.generate_one_pool_config(6, 2)
    pool_config_avg = cnn_arch.generate_one_pool_config(6, 2, pool_type='avg')
    # None as the pool name means "default (max) pooling" in the naming scheme.
    pool_dict = [(None, pool_config_max),
                 ('avg', pool_config_avg)]
    channel_detail = 9
    result_dict = OrderedDict()
    for num_channel, (pool_name, pool_config), act_fn in product(num_channel_list,
                                                                 pool_dict, ('relu', None, 'halfsq',
                                                                             'sq', 'abs')):
        if pool_name is None:
            name_this = f'b.{num_channel}'
        else:
            assert isinstance(pool_name, str)
            name_this = f'b.{num_channel}_{pool_name}'
        if act_fn is None:
            name_this = f'{name_this}_linear'
        elif act_fn != 'relu':
            name_this = f'{name_this}_{act_fn}'
        # b means baseline
        if num_channel != channel_detail and name_this != f'b.{num_channel}':
            # I won't check it.
            continue
        result_dict[name_this] = cnn_arch.generate_one_config(
            [gen_on_conv_config_k9(num_channel, deepcopy(pool_config)),
             ], deepcopy(fc_config), act_fn, True
        )
    # finally, add MLP stuff
    for k in (4,  # so that we have roughly 144 units.
              20, 40, 60, 80, 100, 120,
              145,  # > 95% variance preserved.
              # check
              # https://github.com/leelabcnbc/tang_jcompneuro_revision/blob/master/results_ipynb/debug/cnn/cnn_wrapper.ipynb
              ):
        name_this = f'mlp.{k}'  # k is dim to keep.
        # Hidden width chosen so parameter count matches the 883-param baseline:
        # this is because baseline model has 883 parameters.
        # (k+1)*mlp + mlp + 1 = 883
        # (k + 2) * mlp = 882
        mlp_this = 882 // (k + 2)
        result_dict[name_this] = cnn_arch.generate_one_config(
            [], cnn_arch.generate_one_fc_config(False, None, mlp_this), 'relu', True
        )
    return result_dict
def _generate_all_2L_conv_config():
    """same as the one in `cnn_exploration`,
    except that naming is more convenient.
    """
    # either use dilation or not
    # too many parameters. for 9 and 12
    num_channel_list = (7,
                        # 9,
                        # 12
                        )
    # First-layer (kernel, dilation) pairs; comments give the effective
    # receptive field and the resulting spatial size.
    l1_kd_pairs = [
        (4, 2),  # 7x7 effectively, 14x14
        (5, 2),  # 9x9 effectively, 12x12
        (5, 1),  # 5x5, 16x16
        (7, 1),  # 7x7, 14x14
    ]
    # Second-layer (kernel, dilation, padding) triples.
    l2_kdp_pairs = [
        # (5, 1, 2),  # too many parameters.
        (3, 1, 1),
    ]
    conv_dict = OrderedDict()
    # then all using k6s2 setup.
    for num_channel in num_channel_list:
        for l1_kd, l2_kdp in product(l1_kd_pairs, l2_kdp_pairs):
            l1_k, l1_d = l1_kd
            l2_k, l2_d, l2_p = l2_kdp
            # Name encodes both layers' kernel/dilation/padding plus channels.
            name_this = f'2l_k{l1_k}d{l1_d}_k{l2_k}d{l2_d}p{l2_p}.{num_channel}'
            conv_dict[name_this] = [cnn_arch.generate_one_conv_config(
                l1_k, num_channel, dilation=l1_d,
            ), cnn_arch.generate_one_conv_config(
                l2_k, num_channel, dilation=l2_d, padding=l2_p,
                pool=cnn_arch.generate_one_pool_config(6, 2)
            ),
            ]
    return conv_dict
def _model_configs_to_explore_2layer():
    """Archs for the 2-layer sweep: every 2L conv config, plain fc readout, relu.

    based on
    # https://github.com/leelabcnbc/tang_jcompneuro_revision/blob/master/results_ipynb/single_neuron_exploration/cnn_initial_exploration_2L.ipynb
    # basically all of them.
    """
    fc_config = cnn_arch.generate_one_fc_config(False, None)
    return OrderedDict(
        (name, cnn_arch.generate_one_config(conv, deepcopy(fc_config), 'relu', True))
        for name, conv in _generate_all_2L_conv_config().items()
    )
def init_config_to_use_fn():
    # Weight-initialization config shared by every model (legacy scheme).
    return cnn_init.legacy_generator()
def get_trainer(model_subtype, cudnn_enabled=True, cudnn_benchmark=False,
                show_every=10000000, show_arch_config=False,
                max_epoch=20000, hack_arch_config_fn=None,
                catch_inf_error=True):
    """Build a closure that sweeps every opt config for *model_subtype* and
    keeps the result with the best validation correlation.

    *model_subtype* may carry an '@<scale>' suffix (an init scale hack for
    unstable inputs); its prefix selects the arch/opt tables: '2l_' (2-layer),
    'b_bn.' (batch-norm baseline), 'b.' (baseline) or 'mlp.'.
    """
    if '@' in model_subtype:
        model_subtype_real, scale_hack = model_subtype.split('@')
        scale_hack = float(scale_hack)
        assert scale_hack in {0.05, 0.005}
    else:
        model_subtype_real = model_subtype
        scale_hack = None
    # Pick the arch/opt tables matching the subtype prefix.
    if model_subtype_real.startswith('2l_'):
        opt_configs_to_explore_this = opt_configs_to_explore_2l
        arch_config = models_to_train_2l[model_subtype_real]
    elif model_subtype_real.startswith('b_bn.'):
        opt_configs_to_explore_this = opt_configs_to_explore
        arch_config = models_to_train_bn[model_subtype_real]
    else:
        assert model_subtype_real.startswith('b.') or model_subtype_real.startswith('mlp.')
        opt_configs_to_explore_this = opt_configs_to_explore
        arch_config = models_to_train[model_subtype_real]
    # Optional caller-supplied mutation of the arch config (applied to a copy).
    if hack_arch_config_fn is not None:
        arch_config = hack_arch_config_fn(deepcopy(arch_config))
        print('arch config hacked!')
    if show_arch_config:
        print(model_subtype_real, 'scale hack', scale_hack)
        print(json.dumps(arch_config, indent=2))

    def trainer(datasets):
        # datasets: 6-tuple of arrays — NOTE(review): inferred from the
        # len == 6 assert and the uses of datasets[0]/[1]/[3] below; confirm
        # the exact ordering against train_one_case.
        # best performance in my experiments.
        from torch.backends import cudnn
        cudnn.enabled = cudnn_enabled
        cudnn.benchmark = cudnn_benchmark
        # print(cudnn.enabled, cudnn.benchmark)
        assert cudnn.enabled == cudnn_enabled and cudnn.benchmark == cudnn_benchmark
        best_val = -np.inf
        best_config = None
        best_y_test_hat = None
        best_corr = None
        inf_counter = 0
        # check input size
        if 'mlp' not in model_subtype_real:
            assert datasets[0].ndim == 4 and datasets[0].shape[1:] == (1, 20, 20)
            input_size = 20
        else:
            assert datasets[0].ndim == 2
            input_size = (datasets[0].shape[1], 1)
        if show_arch_config:
            # show dataset detail
            assert len(datasets) == 6
            print([x.shape for x in datasets])
        for opt_config_name, opt_config in opt_configs_to_explore_this.items():
            # train this config
            # print('seed changed')
            # print('scale hacked')
            model = CNN(arch_config, init_config_to_use_fn(), mean_response=datasets[1].mean(axis=0),
                        # change seed if you get unlucky for unstable input...
                        # this is the case especially for MkE2_Shape.
                        # i think this was an issue before as well.
                        # except that pytorch 0.2.0 doesn't report such errors.
                        # check /inf_debug_script.py
                        # seed=42,
                        seed=0,
                        # last ditch
                        # for some avg_sq
                        # scale_hack=0.9,
                        # for other avg_sq
                        # as well as other models.
                        scale_hack=scale_hack,
                        # for MLP model, use PCAed data.
                        input_size=input_size,
                        # scale_hack = 0.0
                        )
            if show_arch_config:
                print(model)
                print('# of params', count_params(model))
            # print('change trainer seed')
            model.cuda()
            t1 = time.time()
            try:
                y_val_cc, y_test_hat, new_cc = train_one_case(model, datasets, opt_config,
                                                              seed=0, show_every=show_every,
                                                              return_val_perf=True,
                                                              max_epoch=max_epoch)
            except RuntimeError as e:
                # just zero.
                # Training diverged to inf: score this config as zero instead
                # of aborting the whole sweep.
                if catch_inf_error and e.args == ('value cannot be converted to type double without overflow: inf',):
                    y_val_cc = 0.0
                    new_cc = 0.0
                    y_test_hat = np.zeros_like(datasets[3], dtype=np.float32)
                    inf_counter += 1
                else:
                    # print('we will not handle it')
                    raise
            t2 = time.time()
            print(opt_config_name, y_val_cc, f'{t2-t1} sec')
            # Keep the best config by validation correlation.
            if y_val_cc > best_val:
                best_config = opt_config_name
                best_val = y_val_cc
                best_y_test_hat = y_test_hat
                best_corr = new_cc
        assert best_config is not None and best_y_test_hat is not None and best_corr is not None
        print('best config {} with val {:.6f} and test {:.6f}'.format(best_config, best_val, best_corr))
        return {
            'y_test_hat': best_y_test_hat,
            'corr': best_corr,
            'attrs': {
                'best_val': best_val,
                'best_config': best_config,
                # use this to check how many such tragedies happen.
                'inf_counter': inf_counter,
            },
        }
    return trainer
# Lookup tables used by get_trainer, built once at import time.
models_to_train = _model_configs_to_explore_1layer()
models_to_train_bn = _model_configs_to_explore_1layer_bn()
models_to_train_2l = _model_configs_to_explore_2layer()
# Convenience subsets: the detailed 9-channel baselines and the MLP variants.
models_to_train_detailed_keys = [x for x in models_to_train if x.startswith('b.9')]
models_to_train_mlp = [x for x in models_to_train if x.startswith('mlp.')]
opt_configs_to_explore = _opt_configs_to_explore_1layer()
opt_configs_to_explore_2l = _opt_configs_to_explore_1layer(2)
|
# -*- coding: utf-8 -*-
'''
otsu.fun - SSWA Utils
@version: 0.1
@author: PurePeace
@time: 2020-01-07
@describe: a treasure house!!!
'''
import time, datetime
# way to return utInfo, decorator
def messager(func):
    """Decorator: turn a (data, message, info, status) return value into the
    standard response dict via messageMaker, appending the status text."""
    def wrapper(*args, **kwargs):
        data, message, info, status = func(*args, **kwargs)
        # Fall back to the info text when no explicit message was given.
        if message == '':
            message = info
        tail = statusInfo.get(status, '็ถๆๆช็ฅ')
        return messageMaker(data, message, info + tail, status)
    return wrapper
# make messager
def messageMaker(data=None, message=None, info=None, status=None):
    """Assemble the standard response dict, stamping the current formatted time."""
    return {
        'message': message,
        'data': data,
        'status': status,
        'info': info,
        'time': getTime(1),
    }
# get now timeString or timeStamp
def getTime(needFormat=0, formatMS=True):
    """Return the current time.

    needFormat != 0 -> formatted string '%Y-%m-%d %H:%M:%S[.%f]'
    (microseconds included when formatMS is true); otherwise the float
    Unix timestamp.
    """
    if needFormat == 0:
        return time.time()
    pattern = '%Y-%m-%d %H:%M:%S'
    if formatMS:
        pattern += '.%f'
    return datetime.datetime.now().strftime(pattern)
# timeString to timeStamp
def toTimeStamp(timeString):
    """Convert a local time string ('%Y-%m-%d %H:%M:%S[.%f]') to a Unix timestamp.

    BUG FIX: the original appended the raw microsecond count after a decimal
    point, so e.g. 123 microseconds became '.123' — i.e. 123 *milliseconds*.
    Microseconds are now added as a properly scaled fraction of a second.
    """
    getMS = '.' in timeString
    parsed = datetime.datetime.strptime(
        timeString, f'%Y-%m-%d %H:%M:%S{r".%f" if getMS else ""}')
    # mktime interprets the struct_time as local time, matching the original.
    stamp = time.mktime(parsed.timetuple())
    if getMS:
        stamp += parsed.microsecond / 1e6
    return float(stamp)
# timeStamp to timeString
def toTimeString(timeStamp):
    """Convert a Unix timestamp to a UTC+8 time string.

    The fractional '.%f' part is included whenever the stamp is not an int.
    """
    withMS = type(timeStamp) is not int
    # Shift by 8 hours to render the UTC+8 wall-clock time.
    shifted = datetime.datetime.utcfromtimestamp(timeStamp + 8 * 3600)
    fmt = '%Y-%m-%d %H:%M:%S'
    if withMS:
        fmt += '.%f'
    return shifted.strftime(fmt)
# generate method docs str
def docsParameter(sub):
    """Decorator factory: set the decorated object's __doc__ to *sub*."""
    def _apply(target):
        target.__doc__ = sub
        return target
    return _apply
# make text include time
def logtext(text):
    """Print and return *text* prefixed with the current formatted time."""
    line = f'[{getTime(1)}]: {text}'
    print(line)
    return line
# make request record info
def makeRequestInfo(request):
    """Build a loggable record (address, client system, environ, url) of an
    incoming request."""
    headers = request.headers
    return {
        'remote_addr': request.remote_addr,
        'system': headers.get('system_info'),
        'request': {
            'environ': request.environ,
            'url': request.url,
        },
    }
# make authorize info
def makeAuthorizeInfo(request):
    """Extract the auth headers; status is 1 when both are present, -1 otherwise."""
    otsuToken = request.headers.get('X-Otsutoken')
    osuid = request.headers.get('osuid')
    status = -1 if (otsuToken is None or osuid is None) else 1
    payload = {'otsuToken': otsuToken, 'osuid': osuid,
               'path': request.path.strip('/')}
    return payload, status
# Status-code -> human-readable text (1 = success, -1 = failure).
# NOTE(review): the values appear mojibake-garbled (originally Chinese);
# left byte-identical since they are runtime strings.
statusInfo = {
    1: 'ๆๅ',
    -1: 'ๅคฑ่ดฅ'
}
# run? not.
# This module is a library; running it directly just prints a notice.
if __name__ == '__main__':
    print('wow, you find a treasure house!!! so it dosent work')
|
from flask import Flask, render_template, url_for, Response, request, redirect
from os import path
# Flask application object for this small two-page site.
app = Flask(__name__)
@app.route('/')
def homepage():
    """Serve the landing page."""
    return render_template('homepage.html')
@app.route('/sketch/')
def sketch():
    """Serve the sketch page."""
    return render_template('sketch.html')
if __name__ == '__main__':
app.run(debug=True) |
# Print the even numbers among 0..39 — i.e. the first 20 even numbers.
for number in range(0, 40, 2):
    print(number)
|
import sys
from fastNLP import Optimizer
import torch as tc
import torch.nn as nn
import torch.optim as optim
import numpy as np
from config import logger
class WarmAdam(optim.Optimizer):
    """Adam wrapped in the "Noam" warmup schedule:
    lr = d_model^-0.5 * min(step^-0.5, step * n_warmup_steps^-1.5).
    """

    def __init__(self, params, d_model, n_warmup_steps, init_steps=0, step_size=1):
        # NOTE(review): the base Optimizer.__init__ is deliberately not
        # called; this class only delegates to the inner Adam instance.
        self.init_lr = np.power(d_model, -0.5)
        self._optimizer = optim.Adam(params=params, lr=self.init_lr, betas=(0.9, 0.98))
        self.n_warmup_steps = n_warmup_steps
        self.now_step = init_steps
        self.step_size = step_size

    def step(self):
        """Advance the schedule, then apply one Adam update."""
        self._update_learning_rate()
        self._optimizer.step()

    def zero_grad(self):
        self._optimizer.zero_grad()

    def _get_lr_scale(self):
        # Linear warmup for n_warmup_steps, then inverse-sqrt decay.
        warmup = np.power(self.n_warmup_steps, -1.5) * self.now_step
        decay = np.power(self.now_step, -0.5)
        return np.min([decay, warmup])

    def _update_learning_rate(self):
        self.now_step += self.step_size
        new_lr = self.init_lr * self._get_lr_scale()
        for group in self._optimizer.param_groups:
            group['lr'] = new_lr
class MySGD(optim.Optimizer):
    """SGD (momentum 0.9, weight decay 1e-4) whose learning rate is divided
    by 10 when the step counter hits 32000 and 48000."""

    def __init__(self, params, lr):
        self._optimizer = optim.SGD(params=params, lr=lr, momentum=0.9, weight_decay=1e-4)
        self.now_step = 0
        self.now_lr = lr
        self.barriers = [32000, 48000]

    def step(self):
        self._update_learning_rate()
        self._optimizer.step()

    def zero_grad(self):
        self._optimizer.zero_grad()

    def _update_learning_rate(self):
        self.now_step += 1
        if self.now_step in self.barriers:
            self.now_lr *= 0.1
            logger.log("now lr changing.... new lr = %.4f" % (self.now_lr))
        # Re-apply the (possibly updated) rate on every step, as the
        # original did.
        for group in self._optimizer.param_groups:
            group['lr'] = self.now_lr
class StepAdam(optim.Optimizer):
    """Adam (weight decay 1e-4) whose learning rate is divided by 10 when
    the step counter hits 2000 and 4000."""

    def __init__(self, params, lr):
        self._optimizer = optim.Adam(params=params, lr=lr, weight_decay=1e-4)
        self.now_step = 0
        self.now_lr = lr
        self.barriers = [2000, 4000]

    def step(self):
        self._update_learning_rate()
        self._optimizer.step()

    def zero_grad(self):
        self._optimizer.zero_grad()

    def _update_learning_rate(self):
        self.now_step += 1
        if self.now_step in self.barriers:
            self.now_lr *= 0.1
            logger.log("now lr changing.... new lr = %.4f" % (self.now_lr))
        # Re-apply the (possibly updated) rate on every step, as the
        # original did.
        for group in self._optimizer.param_groups:
            group['lr'] = self.now_lr
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from gmuwork.shortcuts import quick_pfp1_file_reader
import matplotlib.pyplot as plt
from matplotlib import animation,rc
from IPython.display import HTML, Image
import numpy as np
from sklearn.decomposition import PCA
from gmuwork.shortcuts import moving_mean_smoothing
import time
from random import randint
def one_portion(data_row):
    """Return the length-100 slice of *data_row* at the location whose
    (windowed) rolling standard deviation is largest.

    BUG FIX: `from pandas import rolling_std` was removed from pandas years
    ago (pandas >= 0.23); replaced with the equivalent
    Series.rolling(window).std(), which produces the same values (NaN for
    the first window-1 positions, sample std with ddof=1 afterwards).
    """
    import pandas as pd
    # Rolling sample std over a 200-sample window.
    mov = pd.Series(data_row).rolling(200).std().values
    means = []
    # Mean rolling-std over overlapping 500-sample chunks, stride 100.
    for i in range(0, len(mov), 100):
        means.append(np.mean(mov[i:i+500]))
    index1 = np.argmax(means)
    return data_row[index1*100:(index1+1)*100]
def interesting_parts(data):
    """Apply one_portion to every row of *data* and stack the results."""
    return np.array([one_portion(row) for row in data])
if __name__=="__main__":
    # fig, ax = plt.subplots()
    # Load 20 traces from each of five measurement vectors (4 states + tamper).
    s1 = quick_pfp1_file_reader("C:/Users/Rajiv Sarvepalli/Projects/Data for GMU/AllData/dataSet3/Vector0000Path0000Iter00")[0:20]
    s2 = quick_pfp1_file_reader("C:/Users/Rajiv Sarvepalli/Projects/Data for GMU/AllData/dataSet3/Vector0001Path0001Iter00")[0:20]
    s3 = quick_pfp1_file_reader("C:/Users/Rajiv Sarvepalli/Projects/Data for GMU/AllData/dataSet3/Vector0002Path0002Iter00")[0:20]
    s4 = quick_pfp1_file_reader("C:/Users/Rajiv Sarvepalli/Projects/Data for GMU/AllData/dataSet3/Vector0003Path0003Iter00")[0:20]
    sT = quick_pfp1_file_reader("C:/Users/Rajiv Sarvepalli/Projects/Data for GMU/AllData/dataSet3/Vector0004Path0004Iter00")[0:20]
    # Smooth all traces together, then split back into the five groups.
    c = np.concatenate((s1,s2,s3,s4,sT),axis=0)
    c = moving_mean_smoothing(c,1000)
    # NOTE(review): this timing pair measures nothing (start is taken after
    # the smoothing and printed immediately); presumably leftover debug code.
    start_time = time.time()
    print(time.time()-start_time)
    s1,s2,s3,s4,sT = np.vsplit(c,5)
    # Keep 10 traces per group -> 50 animation frames in total.
    s1 =s1[0:10]
    s2 =s2[0:10]
    s3 =s3[0:10]
    s4 =s4[0:10]
    sT =sT[0:10]
    d = np.concatenate((s1,s2,s3,s4,sT),axis=0)
    times = [x for x in range(0,len(d[0]))]
    ln, = plt.plot([], [], animated=True)
    fig =plt.figure(1)
    def animate(i):
        # Frame i draws trace i; the title switches every 10 frames as the
        # animation moves from one state's traces to the next.
        ln.set_data(times,d[i])
        print(i)
        if i >-1 and i<10:
            fig.suptitle("State1_Vector000")
        elif i==10:
            fig.suptitle("State2_Vector110")
            plt.draw()
            plt.pause(.00001)
        elif i==20:
            fig.suptitle("State3_Vector220")
            plt.draw()
            plt.pause(.00001)
        elif i==30:
            fig.suptitle("State4_Vector330")
            plt.draw()
            plt.pause(.00001)
        elif i==40:
            fig.suptitle("StateTamper_Vector440")
            plt.draw()
            plt.pause(.00001)
        return ln,
    anim = animation.FuncAnimation(fig, animate,frames=50, interval=2000, blit=True)
    plt.xlim([0,len(times)])
    plt.ylim([-4,4])
    # Render the animation to disk (requires ffmpeg on PATH).
    anim.save('C:/Users/Rajiv Sarvepalli/Projects/Data for GMU/tests/smoothed_Data_animation.mp4', writer="ffmpeg")
print("Done") |
#!/usr/bin/env python
from itertools import product
from combinatorics import combinator
# All 6-digit face sets: combinations of the digits 0-9 taken 6 at a time.
C = combinator([0,1,2,3,4,5,6,7,8,9],6)
confs = [tuple(c) for c in C]
# Two-digit renderings of the squares 1..81 ("01", "04", ..., "81") and the
# corresponding digit pairs each cube pair must be able to display.
squares = ["%02d"%(n**2) for n in range(1,10)]
digit_pairs = [(int(c[0]),int(c[1])) for c in squares]
def check(c1, c2):
    """Return True if the two face sets can display every two-digit square.

    A 6 on a cube face can be flipped to show a 9 and vice versa, so either
    digit implies both are available.
    """
    ext1 = c1 + (6, 9) if (9 in c1 or 6 in c1) else c1
    ext2 = c2 + (6, 9) if (9 in c2 or 6 in c2) else c2
    return all((i in ext1 and j in ext2) or (j in ext1 and i in ext2)
               for i, j in digit_pairs)
def main():
    """Count the unordered pairs of face sets that pass check()."""
    solns = []
    candidates = 0
    # Only i < j: each unordered pair considered once.
    for i, j in product(range(len(confs)), repeat=2):
        if i >= j:
            continue
        candidates += 1
        if check(confs[i], confs[j]):
            pair = tuple(sorted([confs[i], confs[j]]))
            if pair not in solns:
                solns.append(pair)
    print("%d considered" % (candidates))
    print("%d solutions" % (len(solns)))
# Script entry point.
if __name__ == "__main__":
    main()
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
# class Solution:
# def isPalindrome(self, head: ListNode) -> bool:
# node = head
# isPalindrom = True
# # get all elements in array by traversing linked list
# temp = []
# while (node is not None):
# temp.append(node.val)
# node = node.next
#
# # pop from array and check again by traversing the array
# node = head
# i = len(temp) - 1
# while (node is not None and isPalindrom):
# if (node.val != temp[i]):
# isPalindrom = False
# i = i - 1
# node = node.next
#
# return isPalindrom
import math
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    # NOTE(review): ListNode is assumed to be the LeetCode-provided
    # singly-linked list node (val/next) shown in the comment above.
    def isPalindrome(self, head: ListNode) -> bool:
        """Return True if the linked list reads the same forwards and backwards.

        Strategy: count the nodes, build a reversed copy of the second half
        (ptr2/ptr3), then compare it value-by-value against the first half.
        """
        node = head
        isPalindrom = True
        mid = ListNode()
        ptr2 = ListNode()
        ptr3 = ListNode()
        # get all elements in array by traversing linked list
        # First pass: count the nodes.
        size = 0
        while (node is not None):
            node = node.next
            size = size + 1
        # start again
        # Second pass: reverse-copy the second half of the list.
        node = head
        countr = 0
        while (node is not None):
            # First node of the second half seeds the reversed copy.
            if (countr == math.ceil(size / 2)):
                ptr2.val = node.val
                mid = node
                ptr3.val = node.val
            # Later nodes are pushed onto the front of the reversed copy.
            if (countr > math.ceil(size / 2)):
                temp = ListNode()
                temp.val = node.val
                temp.next = ptr2
                ptr3.val = node.val
                ptr3.next = ptr2
                ptr2 = temp
            node = node.next
            countr = countr + 1
        # traverse again
        # Third pass: compare first floor(size/2) values with the reversed half.
        node = head
        countr = 0
        while (node is not None and isPalindrom and countr < math.floor(size / 2)):
            # NOTE(review): leftover debug prints.
            print (node.val)
            print (ptr3.val)
            isPalindrom = ptr3.val == node.val
            node = node.next
            ptr3 = ptr3.next
            countr = countr + 1
        return isPalindrom
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 2 17:59:06 2017
@author: oliver.cairns
"""
import hashlib
# Puzzle input.
input_str = "yzbqklnj"
# NOTE(review): immediately overwritten with the example input, and `num`
# appears unused below — presumably leftovers from manual testing.
input_str = "abcdef"
num = "609043"
def lowest_5_zero_num(input_str):
    """Smallest non-negative n such that md5(input_str + str(n)) starts with
    five zero hex digits."""
    candidate = 0
    while hex(input_str, str(candidate))[:5] != "00000":
        candidate += 1
    return candidate
def lowest_6_zero_num(input_str):
    """Smallest non-negative n such that md5(input_str + str(n)) starts with
    six zero hex digits."""
    candidate = 0
    while hex(input_str, str(candidate))[:6] != "000000":
        candidate += 1
    return candidate
def hex(input_str, num_string):
    """Return the hex md5 digest of input_str + num_string.

    NOTE(review): shadows the builtin ``hex``; renaming would break the
    callers above, so the name is kept.
    """
    return hashlib.md5((input_str + num_string).encode("utf-8")).hexdigest()
# Test 1
assert lowest_5_zero_num("abcdef") == 609043
# Test 2
assert lowest_5_zero_num("pqrstuv") == 1048970
# NOTE(review): the searches below hash millions of candidates (especially
# the six-zero one) and can take a long time at import.
print("4.a", lowest_5_zero_num("yzbqklnj"))
print("4.b", lowest_6_zero_num("yzbqklnj"))
|
# -*- coding: utf-8 -*-
def insertion_sort(data):
    """Sort *data* in place (ascending) by pairwise swaps and return it."""
    for right in range(1, len(data)):
        pos = right
        # Bubble the new element left until it is in order.
        while pos > 0 and data[pos] < data[pos - 1]:
            data[pos], data[pos - 1] = data[pos - 1], data[pos]
            pos -= 1
    return data
#######################################
# ์ฌ์ฉ!
import random
data_list = random.sample(range(100), 50)
print(insertion_sort(data_list)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.